Dataset schema (each record below carries these fields, in this order):

  patch  string, 17 to 31.2k chars (the diff hunk under review)
  y      int64, constant 1
  oldf   string, 0 to 2.21M chars (the pre-change source file, flattened to a single line)
  idx    int64, constant 1
  id     int64, 4.29k to 68.4k
  msg    string, 8 to 843 chars (the reviewer's comment)
  proj   string, 212 distinct values (project identifier)
  lang   string, 9 distinct values (language tag)

In the records that follow, every field is labeled; the patch diff and the oldf source blob appear verbatim after their labels.
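For working with these records programmatically, a minimal Go sketch follows. It assumes the dataset has been exported as JSON Lines under the hypothetical path records.jsonl and that the JSON keys match the column names above; both are assumptions, not part of the dataset's documented tooling.

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
)

// Record mirrors the schema above. The JSON tags are assumed to equal the
// column names; adjust them to match the actual export.
type Record struct {
	Patch string `json:"patch"` // diff hunk under review
	Y     int64  `json:"y"`
	OldF  string `json:"oldf"` // pre-change source file
	Idx   int64  `json:"idx"`
	ID    int64  `json:"id"`
	Msg   string `json:"msg"` // reviewer comment
	Proj  string `json:"proj"`
	Lang  string `json:"lang"`
}

func main() {
	f, err := os.Open("records.jsonl") // hypothetical export path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	// oldf can be up to 2.21M chars, so raise the scanner's line limit.
	sc.Buffer(make([]byte, 0, 64*1024), 16*1024*1024)
	for sc.Scan() {
		var r Record
		if err := json.Unmarshal(sc.Bytes(), &r); err != nil {
			fmt.Fprintln(os.Stderr, "skipping malformed line:", err)
			continue
		}
		fmt.Printf("[%s/%s] %s\n", r.Proj, r.Lang, r.Msg)
	}
	if err := sc.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}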
patch:
@@ -0,0 +1,10 @@
+class CreateTestClientRequests < ActiveRecord::Migration
+  def change
+    create_table :test_client_requests do |t|
+      t.decimal :amount
+      t.string :project_title
+
+      t.timestamps null: false
+    end
+  end
+end
y: 1
oldf: (empty; the patch creates a new file)
idx: 1
id: 16,008
msg: did you mean to leave this in here?
proj: 18F-C2
lang: rb
patch:
@@ -54,6 +54,7 @@ var (
 	labels          = flag.String("labels", "", "List of label KEY=VALUE pairs to add. Keys must start with a lowercase character and contain only hyphens (-), underscores (_), lowercase characters, and numbers. Values must contain only hyphens (-), underscores (_), lowercase characters, and numbers.")
 	storageLocation = flag.String("storage_location", "", "Location for the imported image which can be any GCS location. If the location parameter is not included, images are created in the multi-region associated with the source disk, image, snapshot or GCS bucket.")
 	uefiCompatible  = flag.Bool("uefi_compatible", false, "Enables UEFI booting, which is an alternative system boot method. Most public images use the GRUB bootloader as their primary boot method.")
+	sysprepWindows  = flag.Bool("sysprep_windows", false, "Whether to generalize image using Windows Sysprep.")
 )

 func importEntry() (*daisy.Workflow, error) {
y: 1
oldf:
// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // GCE VM image import tool package main import ( "flag" "os" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/logging/service" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_vm_image_import/importer" "github.com/GoogleCloudPlatform/compute-image-tools/daisy" ) var ( clientID = flag.String(importer.ClientIDFlagKey, "", "Identifies the client of the importer, e.g. `gcloud` or `pantheon`") imageName = flag.String(importer.ImageNameFlagKey, "", "Image name to be imported.") dataDisk = flag.Bool("data_disk", false, "Specifies that the disk has no bootable OS installed on it. Imports the disk without making it bootable or installing Google tools on it. ") osID = flag.String("os", "", "Specifies the OS of the image being imported. OS must be one of: centos-6, centos-7, debian-8, debian-9, opensuse-15, sles-12-byol, sles-15-byol, rhel-6, rhel-6-byol, rhel-7, rhel-7-byol, ubuntu-1404, ubuntu-1604, ubuntu-1804, windows-10-byol, windows-2008r2, windows-2008r2-byol, windows-2012, windows-2012-byol, windows-2012r2, windows-2012r2-byol, windows-2016, windows-2016-byol, windows-7-byol.") customTranWorkflow = flag.String("custom_translate_workflow", "", "Specifies the custom workflow used to do translation") sourceFile = flag.String("source_file", "", "Google Cloud Storage URI of the virtual disk file to import. For example: gs://my-bucket/my-image.vmdk") sourceImage = flag.String("source_image", "", "Compute Engine image from which to import") noGuestEnvironment = flag.Bool("no_guest_environment", false, "Google Guest Environment will not be installed on the image.") family = flag.String("family", "", "Family to set for the translated image") description = flag.String("description", "", "Description to set for the translated image") network = flag.String("network", "", "Name of the network in your project to use for the image import. The network must have access to Google Cloud Storage. If not specified, the network named default is used.") subnet = flag.String("subnet", "", "Name of the subnetwork in your project to use for the image import. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. Zone should be specified if this field is specified.") zone = flag.String("zone", "", "Zone of the image to import. The zone in which to do the work of importing the image. Overrides the default compute/zone property value for this command invocation.") timeout = flag.String("timeout", "", "Maximum time a build can last before it is failed as TIMEOUT. For example, specifying 2h will fail the process after 2 hours. 
See $ gcloud topic datetimes for information on duration formats.") project = flag.String("project", "", "project to run in, overrides what is set in workflow") scratchBucketGcsPath = flag.String("scratch_bucket_gcs_path", "", "GCS scratch bucket to use, overrides what is set in workflow") oauth = flag.String("oauth", "", "path to oauth json file, overrides what is set in workflow") ce = flag.String("compute_endpoint_override", "", "API endpoint to override default") gcsLogsDisabled = flag.Bool("disable_gcs_logging", false, "do not stream logs to GCS") cloudLogsDisabled = flag.Bool("disable_cloud_logging", false, "do not stream logs to Cloud Logging") stdoutLogsDisabled = flag.Bool("disable_stdout_logging", false, "do not display individual workflow logs on stdout") kmsKey = flag.String("kms_key", "", "ID of the key or fully qualified identifier for the key. This flag must be specified if any of the other arguments below are specified.") kmsKeyring = flag.String("kms_keyring", "", "The KMS keyring of the key.") kmsLocation = flag.String("kms_location", "", "The Cloud location for the key.") kmsProject = flag.String("kms_project", "", "The Cloud project for the key") noExternalIP = flag.Bool("no_external_ip", false, "VPC doesn't allow external IPs") labels = flag.String("labels", "", "List of label KEY=VALUE pairs to add. Keys must start with a lowercase character and contain only hyphens (-), underscores (_), lowercase characters, and numbers. Values must contain only hyphens (-), underscores (_), lowercase characters, and numbers.") storageLocation = flag.String("storage_location", "", "Location for the imported image which can be any GCS location. If the location parameter is not included, images are created in the multi-region associated with the source disk, image, snapshot or GCS bucket.") uefiCompatible = flag.Bool("uefi_compatible", false, "Enables UEFI booting, which is an alternative system boot method. 
Most public images use the GRUB bootloader as their primary boot method.") ) func importEntry() (*daisy.Workflow, error) { currentExecutablePath := string(os.Args[0]) return importer.Run(*clientID, *imageName, *dataDisk, *osID, *customTranWorkflow, *sourceFile, *sourceImage, *noGuestEnvironment, *family, *description, *network, *subnet, *zone, *timeout, project, *scratchBucketGcsPath, *oauth, *ce, *gcsLogsDisabled, *cloudLogsDisabled, *stdoutLogsDisabled, *kmsKey, *kmsKeyring, *kmsLocation, *kmsProject, *noExternalIP, *labels, currentExecutablePath, *storageLocation, *uefiCompatible) } func main() { flag.Parse() paramLog := service.InputParams{ ImageImportParams: &service.ImageImportParams{ CommonParams: &service.CommonParams{ ClientID: *clientID, Network: *network, Subnet: *subnet, Zone: *zone, Timeout: *timeout, Project: *project, ObfuscatedProject: service.Hash(*project), Labels: *labels, ScratchBucketGcsPath: *scratchBucketGcsPath, Oauth: *oauth, ComputeEndpointOverride: *ce, DisableGcsLogging: *gcsLogsDisabled, DisableCloudLogging: *cloudLogsDisabled, DisableStdoutLogging: *stdoutLogsDisabled, }, ImageName: *imageName, DataDisk: *dataDisk, OS: *osID, SourceFile: *sourceFile, SourceImage: *sourceImage, NoGuestEnvironment: *noGuestEnvironment, Family: *family, Description: *description, NoExternalIP: *noExternalIP, HasKmsKey: *kmsKey != "", HasKmsKeyring: *kmsKeyring != "", HasKmsLocation: *kmsLocation != "", HasKmsProject: *kmsProject != "", StorageLocation: *storageLocation, }, } if err := service.RunWithServerLogging(service.ImageImportAction, paramLog, project, importEntry); err != nil { os.Exit(1) } }
idx: 1
id: 10,339
msg: Mention that it only applies to Windows. This is kind of implied, but better to be explicit.
proj: GoogleCloudPlatform-compute-image-tools
lang: go
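One way to act on this comment is to fold the Windows-only scope directly into the flag's help text. A minimal sketch; the revised wording and the standalone wrapper are illustrative, not taken from the project:

package main

import (
	"flag"
	"fmt"
)

// Revised help text that states the Windows-only scope explicitly.
var sysprepWindows = flag.Bool("sysprep_windows", false,
	"Generalize the image using Sysprep. Only applies to Windows imports; "+
		"the flag is ignored for all other operating systems.")

func main() {
	flag.Parse()
	fmt.Println("sysprep_windows =", *sysprepWindows)
}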
patch:
@@ -28,6 +28,8 @@ var (
 	autoStake = true
 	index     = uint64(10)
 	senderKey = identityset.PrivateKey(27)
+	zero      = "0"
+	negtive   = "-10"
 )

 func TestCreateStake(t *testing.T) {
y: 1
oldf:
// Copyright (c) 2020 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package action import ( "encoding/hex" "math/big" "testing" "github.com/golang/protobuf/proto" "github.com/stretchr/testify/require" "github.com/iotexproject/iotex-core/test/identityset" ) var ( gaslimit = uint64(1000000) gasprice = big.NewInt(10) canAddress = "io1xpq62aw85uqzrccg9y5hnryv8ld2nkpycc3gza" payload = []byte("payload") amount = big.NewInt(10) nonce = uint64(0) duration = uint32(1000) autoStake = true index = uint64(10) senderKey = identityset.PrivateKey(27) ) func TestCreateStake(t *testing.T) { require := require.New(t) stake, err := NewCreateStake(nonce, canAddress, amount.Text(10), duration, autoStake, payload, gaslimit, gasprice) require.NoError(err) ser := stake.Serialize() require.Equal("0a29696f3178707136326177383575717a72636367397935686e727976386c64326e6b7079636333677a611202313018e80720012a077061796c6f6164", hex.EncodeToString(ser)) require.NoError(err) require.Equal(gaslimit, stake.GasLimit()) require.Equal(gasprice, stake.GasPrice()) require.Equal(nonce, stake.Nonce()) require.Equal(amount, stake.Amount()) require.Equal(payload, stake.Payload()) require.Equal(canAddress, stake.Candidate()) require.Equal(duration, stake.Duration()) require.True(stake.AutoStake()) gas, err := stake.IntrinsicGas() require.NoError(err) require.Equal(uint64(10700), gas) cost, err := stake.Cost() require.NoError(err) require.Equal("107010", cost.Text(10)) proto := stake.Proto() cs2 := &CreateStake{} require.NoError(cs2.LoadProto(proto)) require.Equal(amount, cs2.Amount()) require.Equal(payload, cs2.Payload()) require.Equal(canAddress, cs2.Candidate()) require.Equal(duration, cs2.Duration()) require.True(cs2.AutoStake()) } func TestCreateStakeSignVerify(t *testing.T) { require := require.New(t) require.Equal("cfa6ef757dee2e50351620dca002d32b9c090cfda55fb81f37f1d26b273743f1", senderKey.HexString()) stake, err := NewCreateStake(nonce, canAddress, amount.Text(10), duration, autoStake, payload, gaslimit, gasprice) require.NoError(err) bd := &EnvelopeBuilder{} elp := bd.SetGasLimit(gaslimit). SetGasPrice(gasprice). SetAction(stake).Build() h := elp.Hash() require.Equal("219483a7309db9f1c41ac3fa0aadecfbdbeb0448b0dfaee54daec4ec178aa9f1", hex.EncodeToString(h[:])) // sign selp, err := Sign(elp, senderKey) require.NoError(err) require.NotNil(selp) ser, err := proto.Marshal(selp.Proto()) require.NoError(err) require.Equal("0a4a080118c0843d22023130c2023d0a29696f3178707136326177383575717a72636367397935686e727976386c64326e6b7079636333677a611202313018e80720012a077061796c6f6164124104755ce6d8903f6b3793bddb4ea5d3589d637de2d209ae0ea930815c82db564ee8cc448886f639e8a0c7e94e99a5c1335b583c0bc76ef30dd6a1038ed9da8daf331a415db41c974bc1d8edd59fad54c4eac41250981640c44183c1c3ed9e45873bf15c02f3575de59233aefd7ec6eecfa7254bf4b67501e96bea8a4d54a18b4e0e4fec01", hex.EncodeToString(ser)) hash := selp.Hash() require.Equal("a324d56f5b50e86aab27c0c6d33f9699f36d3ed8e27967a56e644f582bbd5e2d", hex.EncodeToString(hash[:])) // verify signature require.NoError(Verify(selp)) }
idx: 1
id: 21,173
msg: again: it is bad practice to have global parameters with such common names for unit-test purposes.
proj: iotexproject-iotex-core
lang: go
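A sketch of what the reviewer is asking for: keep the special-case amounts local to the test that needs them, rather than adding package-level vars named zero and negtive. It assumes the test lives in the same action package and that NewCreateStake rejects non-positive amounts; the test and fixture names are made up for illustration:

func TestCreateStakeRejectsNonPositiveAmounts(t *testing.T) {
	// Locally scoped fixtures instead of package-level globals.
	const (
		zeroAmount     = "0"
		negativeAmount = "-10"
	)
	for _, amt := range []string{zeroAmount, negativeAmount} {
		// Reuses the package-level test fixtures already defined in the file.
		_, err := NewCreateStake(nonce, canAddress, amt, duration, autoStake, payload, gaslimit, gasprice)
		require.Error(t, err, "amount %s should be rejected", amt)
	}
}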
patch:
@@ -21,10 +21,7 @@ class Shopware6Connector
 
     private ?string $token;
 
-    /**
-     * @var \DateTimeInterface
-     */
-    private $expiresAt;
+    private \DateTimeInterface $expiresAt;
 
     public function __construct(Configurator $configurator)
     {
y: 1
oldf:
<?php /** * Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved. * See LICENSE.txt for license details. */ declare(strict_types=1); namespace Ergonode\ExporterShopware6\Infrastructure\Connector; use Ergonode\ExporterShopware6\Infrastructure\Connector\Action\PostAccessToken; use GuzzleHttp\Client; use GuzzleHttp\Exception\GuzzleException; use Psr\Http\Message\ResponseInterface; use Symfony\Component\HttpFoundation\Response; use Ergonode\ExporterShopware6\Domain\Entity\Shopware6Channel; class Shopware6Connector { private Configurator $configurator; private ?string $token; /** * @var \DateTimeInterface */ private $expiresAt; public function __construct(Configurator $configurator) { $this->configurator = $configurator; $this->token = null; $this->expiresAt = new \DateTimeImmutable(); } /** * @return object|string|null * * @throws /Exception */ public function execute(Shopware6Channel $channel, ActionInterface $action) { if ($this->token === null || $this->expiresAt <= (new \DateTime())) { $this->requestToken($channel); } return $this->request($channel, $action); } /** * @return array|object|string|null * * @throws GuzzleException */ private function request(Shopware6Channel $channel, ActionInterface $action) { try { $config = [ 'base_uri' => $channel->getHost(), ]; $this->configurator->configure($action, $this->token); $client = new Client($config); $response = $client->send($action->getRequest()); $contents = $this->resolveResponse($response); return $action->parseContent($contents); } catch (GuzzleException $exception) { //todo log throw $exception; } catch (\Exception $exception) { //todo log throw $exception; } } /** * @throws GuzzleException */ private function requestToken(Shopware6Channel $channel): void { $post = new PostAccessToken($channel); $data = $this->request($channel, $post); $this->token = $data['access_token']; $this->expiresAt = $this->calculateExpiryTime((int) $data['expires_in']); } private function calculateExpiryTime(int $expiresIn): \DateTimeInterface { $expiryTimestamp = (new \DateTime())->getTimestamp() + $expiresIn; return (new \DateTimeImmutable())->setTimestamp($expiryTimestamp); } private function resolveResponse(ResponseInterface $response): ?string { $statusCode = $response->getStatusCode(); $contents = $response->getBody()->getContents(); switch ($statusCode) { case Response::HTTP_OK: case Response::HTTP_CREATED: case Response::HTTP_ACCEPTED: return $contents; case Response::HTTP_NO_CONTENT: return null; } throw new \RuntimeException(sprintf('Unsupported response status "%s" ', $statusCode)); } }
idx: 1
id: 9,029
msg: Should we use `\DateTimeInterface`, or `DateTimeInterface` with a `use DateTimeInterface;` declaration?
proj: ergonode-backend
lang: php
patch:
@@ -7,11 +7,15 @@ import (
 	"errors"
 	"fmt"

+	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
 	"github.com/aws/copilot-cli/internal/pkg/deploy"
+	"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation/stack"
 )

+const taskStackPrefix = "task-"
+
 // DeployTask deploys a task stack and waits until the deployment is done.
 // If the task stack doesn't exist, then it creates the stack.
 // If the task stack already exists, it updates the stack.
y: 1
oldf:
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cloudformation import ( "errors" "fmt" "github.com/aws/copilot-cli/internal/pkg/aws/cloudformation" "github.com/aws/copilot-cli/internal/pkg/deploy" "github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation/stack" ) // DeployTask deploys a task stack and waits until the deployment is done. // If the task stack doesn't exist, then it creates the stack. // If the task stack already exists, it updates the stack. // If the task stack doesn't have any changes, it returns nil func (cf CloudFormation) DeployTask(input *deploy.CreateTaskResourcesInput, opts ...cloudformation.StackOption) error { conf := stack.NewTaskStackConfig(input) stack, err := toStack(conf) if err != nil { return err } for _, opt := range opts { opt(stack) } err = cf.cfnClient.CreateAndWait(stack) if err == nil { return nil } var errAlreadyExists *cloudformation.ErrStackAlreadyExists if !errors.As(err, &errAlreadyExists) { return fmt.Errorf("create stack: %w", err) } err = cf.cfnClient.UpdateAndWait(stack) if err == nil { return nil } var errChangeSetEmpty *cloudformation.ErrChangeSetEmpty if !errors.As(err, &errChangeSetEmpty) { return fmt.Errorf("update stack: %w", err) } return nil }
idx: 1
id: 16,017
msg: What do you think of moving this stack-related constant to the `stack` pkg?
proj: aws-copilot-cli
lang: go
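A sketch of the move the reviewer suggests: let the stack package own its naming convention and export it. The identifier, file name, and doc comment are assumptions, not the project's actual API:

// internal/pkg/deploy/cloudformation/stack/naming.go (hypothetical file)
package stack

// TaskStackPrefix is prepended to the names of CloudFormation stacks that
// back one-off tasks.
const TaskStackPrefix = "task-"

The cloudformation package would then reference stack.TaskStackPrefix instead of defining its own package-local taskStackPrefix.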
patch:
@@ -100,6 +100,9 @@ func TxnPool(s *transactions.SignedTxn, ctx Context, verificationPool execpool.B
 	if s.Txn.Src() == zeroAddress {
 		return errors.New("empty address")
 	}
+	if !proto.SupportRekeying && (s.AuthAddr != basics.Address{}) {
+		return errors.New("nonempty AuthAddr but rekeying not supported")
+	}

 	outCh := make(chan error, 1)
 	cx := asyncVerifyContext{s: s, outCh: outCh, ctx: &ctx}
y: 1
oldf:
// Copyright (C) 2019-2020 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package verify import ( "context" "encoding/binary" "errors" "fmt" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/execpool" "github.com/algorand/go-algorand/util/metrics" ) var logicGoodTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_ledger_logic_ok", Description: "Total transaction scripts executed and accepted"}) var logicRejTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_ledger_logic_rej", Description: "Total transaction scripts executed and rejected"}) var logicErrTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_ledger_logic_err", Description: "Total transaction scripts executed and errored"}) // Context encapsulates the context needed to perform stateless checks // on a signed transaction. type Context struct { Params Group []transactions.SignedTxn GroupIndex int } // Params is the set of parameters external to a transaction which // stateless checks are performed against. // // For efficient caching, these parameters should either be constant // or change slowly over time. // // Group data are omitted because they are committed to in the // transaction and its ID. type Params struct { CurrSpecAddrs transactions.SpecialAddresses CurrProto protocol.ConsensusVersion } // PrepareContexts prepares verification contexts for a transaction // group. func PrepareContexts(group []transactions.SignedTxn, contextHdr bookkeeping.BlockHeader) []Context { ctxs := make([]Context, len(group)) for i := range group { spec := transactions.SpecialAddresses{ FeeSink: contextHdr.FeeSink, RewardsPool: contextHdr.RewardsPool, } ctx := Context{ Params: Params{ CurrSpecAddrs: spec, CurrProto: contextHdr.CurrentProtocol, }, Group: group, GroupIndex: i, } ctxs[i] = ctx } return ctxs } // TxnPool verifies that a SignedTxn has a good signature and that the underlying // transaction is properly constructed. // Note that this does not check whether a payset is valid against the ledger: // a SignedTxn may be well-formed, but a payset might contain an overspend. // // This version of verify is performing the verification over the provided execution pool. 
func TxnPool(s *transactions.SignedTxn, ctx Context, verificationPool execpool.BacklogPool) error { proto, ok := config.Consensus[ctx.CurrProto] if !ok { return protocol.Error(ctx.CurrProto) } if err := s.Txn.WellFormed(ctx.CurrSpecAddrs, proto); err != nil { return err } zeroAddress := basics.Address{} if s.Txn.Src() == zeroAddress { return errors.New("empty address") } outCh := make(chan error, 1) cx := asyncVerifyContext{s: s, outCh: outCh, ctx: &ctx} verificationPool.EnqueueBacklog(context.Background(), stxnAsyncVerify, &cx, nil) if err, hasErr := <-outCh; hasErr { return err } return nil } // Txn verifies a SignedTxn as being signed and having no obviously inconsistent data. // Block-assembly time checks of LogicSig and accounting rules may still block the txn. func Txn(s *transactions.SignedTxn, ctx Context) error { proto, ok := config.Consensus[ctx.CurrProto] if !ok { return protocol.Error(ctx.CurrProto) } if err := s.Txn.WellFormed(ctx.CurrSpecAddrs, proto); err != nil { return err } zeroAddress := basics.Address{} if s.Txn.Src() == zeroAddress { return errors.New("empty address") } return stxnVerifyCore(s, &ctx) } type asyncVerifyContext struct { s *transactions.SignedTxn outCh chan error ctx *Context } func stxnAsyncVerify(arg interface{}) interface{} { cx := arg.(*asyncVerifyContext) err := stxnVerifyCore(cx.s, cx.ctx) if err != nil { cx.outCh <- err } else { close(cx.outCh) } return nil } func stxnVerifyCore(s *transactions.SignedTxn, ctx *Context) error { numSigs := 0 hasSig := false hasMsig := false hasLogicSig := false if s.Sig != (crypto.Signature{}) { numSigs++ hasSig = true } if !s.Msig.Blank() { numSigs++ hasMsig = true } if !s.Lsig.Blank() { numSigs++ hasLogicSig = true } if numSigs == 0 { return errors.New("signedtxn has no sig") } if numSigs > 1 { return errors.New("signedtxn should only have one of Sig or Msig or LogicSig") } if hasSig { if crypto.SignatureVerifier(s.Txn.Src()).Verify(s.Txn, s.Sig) { return nil } return errors.New("signature validation failed") } if hasMsig { if ok, _ := crypto.MultisigVerify(s.Txn, crypto.Digest(s.Txn.Src()), s.Msig); ok { return nil } return errors.New("multisig validation failed") } if hasLogicSig { return LogicSig(s, ctx) } return errors.New("has one mystery sig. WAT?") } // LogicSigSanityCheck checks that the signature is valid and that the program is basically well formed. // It does not evaluate the logic. 
func LogicSigSanityCheck(txn *transactions.SignedTxn, ctx *Context) error { lsig := txn.Lsig proto, ok := config.Consensus[ctx.CurrProto] if !ok { return protocol.Error(ctx.CurrProto) } if proto.LogicSigVersion == 0 { return errors.New("LogicSig not enabled") } if len(lsig.Logic) == 0 { return errors.New("LogicSig.Logic empty") } version, vlen := binary.Uvarint(lsig.Logic) if vlen <= 0 { return errors.New("LogicSig.Logic bad version") } if version > proto.LogicSigVersion { return errors.New("LogicSig.Logic version too new") } if uint64(lsig.Len()) > proto.LogicSigMaxSize { return errors.New("LogicSig.Logic too long") } ep := logic.EvalParams{ Txn: txn, Proto: &proto, TxnGroup: ctx.Group, GroupIndex: ctx.GroupIndex, } cost, err := logic.Check(lsig.Logic, ep) if err != nil { return err } if cost > int(proto.LogicSigMaxCost) { return fmt.Errorf("LogicSig.Logic too slow, %d > %d", cost, proto.LogicSigMaxCost) } hasMsig := false numSigs := 0 if lsig.Sig != (crypto.Signature{}) { numSigs++ } if !lsig.Msig.Blank() { hasMsig = true numSigs++ } if numSigs == 0 { // if the txn.Sender == hash(Logic) then this is a (potentially) valid operation on a contract-only account program := logic.Program(lsig.Logic) lhash := crypto.HashObj(&program) if crypto.Digest(txn.Txn.Sender) == lhash { return nil } return errors.New("LogicNot signed and not a Logic-only account") } if numSigs > 1 { return errors.New("LogicSig should only have one of Sig or Msig but has more than one") } if !hasMsig { program := logic.Program(lsig.Logic) if !crypto.SignatureVerifier(txn.Txn.Src()).Verify(&program, lsig.Sig) { return errors.New("logic signature validation failed") } } else { program := logic.Program(lsig.Logic) if ok, _ := crypto.MultisigVerify(&program, crypto.Digest(txn.Txn.Src()), lsig.Msig); !ok { return errors.New("logic multisig validation failed") } } return nil } // LogicSig checks that the signature is valid, executing the program. func LogicSig(txn *transactions.SignedTxn, ctx *Context) error { proto, ok := config.Consensus[ctx.CurrProto] if !ok { return protocol.Error(ctx.CurrProto) } err := LogicSigSanityCheck(txn, ctx) if err != nil { return err } ep := logic.EvalParams{ Txn: txn, Proto: &proto, TxnGroup: ctx.Group, GroupIndex: ctx.GroupIndex, } pass, err := logic.Eval(txn.Lsig.Logic, ep) if err != nil { logicErrTotal.Inc(nil) return fmt.Errorf("transaction %v: rejected by logic err=%v", txn.ID(), err) } if !pass { logicRejTotal.Inc(nil) return fmt.Errorf("transaction %v: rejected by logic", txn.ID()) } logicGoodTotal.Inc(nil) return nil }
idx: 1
id: 38,028
msg: Hm, I guess that you're doing this here since `WellFormed` is on a `transactions.Transaction` and not a `transactions.SignedTxn`, but quickly grepping through our code, it looks like we always have a `SignedTxn` around when calling `WellFormed` (except maybe some tests?)... this doesn't have to happen here, but maybe we should update that in a separate PR, since this really feels like a `WellFormed` check.
proj: algorand-go-algorand
lang: go
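A sketch of the reviewer's follow-up idea: lift the check onto a SignedTxn-level WellFormed so every caller gets it for free. This is hypothetical; in the real code WellFormed is defined on transactions.Transaction only, and the method below would have to live in the transactions package:

// WellFormed on SignedTxn wraps the underlying transaction's checks and
// adds signature-envelope checks such as the rekeying gate (hypothetical).
func (s SignedTxn) WellFormed(spec SpecialAddresses, proto config.ConsensusParams) error {
	if !proto.SupportRekeying && (s.AuthAddr != basics.Address{}) {
		return errors.New("nonempty AuthAddr but rekeying not supported")
	}
	return s.Txn.WellFormed(spec, proto)
}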
patch:
@@ -39,4 +39,6 @@ storiesOf( 'Global', module )
 				<ModulesList />
 			</WithTestRegistry>
 		);
+	}, {
+		padding: 0,
 	} );
y: 1
oldf:
/** * Modules List stories. * * Site Kit by Google, Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ import { storiesOf } from '@storybook/react'; /** * Internal dependencies */ import ModulesList from '../assets/js/components/ModulesList'; import { provideModuleRegistrations, provideModules, WithTestRegistry } from '../tests/js/utils'; storiesOf( 'Global', module ) .add( 'Modules List', () => { const setupRegistry = ( registry ) => { provideModules( registry ); provideModuleRegistrations( registry ); }; return ( <WithTestRegistry callback={ setupRegistry } > <ModulesList /> </WithTestRegistry> ); } );
idx: 1
id: 38,265
msg: This story also needs the default padding.
proj: google-site-kit-wp
lang: js
patch:
@@ -745,7 +745,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper {
       maybeThrowDeterministicException();
     }
     if (!LuceneTestCase.slowFileExists(in, name)) {
-      throw randomState.nextBoolean() ? new FileNotFoundException(name + " in dir=" + in) : new NoSuchFileException(name + " in dir=" + in);
+      throw new NoSuchFileException(name + " in dir=" + in);
     }
 
     // cannot open a file for input if it's still open for output.
y: 1
oldf:
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.store; import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.AccessDeniedException; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.IdentityHashMap; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Random; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Matcher; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.NoDeletionPolicy; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; import org.apache.lucene.util.ThrottledIndexOutput; /** * This is a Directory Wrapper that adds methods * intended to be used only by unit tests. * It also adds a number of features useful for testing: * <ul> * <li> Instances created by {@link LuceneTestCase#newDirectory()} are tracked * to ensure they are closed by the test. * <li> When a MockDirectoryWrapper is closed, it will throw an exception if * it has any open files against it (with a stacktrace indicating where * they were opened from). * <li> When a MockDirectoryWrapper is closed, it runs CheckIndex to test if * the index was corrupted. * <li> MockDirectoryWrapper simulates some "features" of Windows, such as * refusing to write/delete to open files. * </ul> */ public class MockDirectoryWrapper extends BaseDirectoryWrapper { long maxSize; // Max actual bytes used. 
This is set by MockRAMOutputStream: long maxUsedSize; double randomIOExceptionRate; double randomIOExceptionRateOnOpen; Random randomState; boolean assertNoDeleteOpenFile = false; boolean trackDiskUsage = false; boolean useSlowOpenClosers = LuceneTestCase.TEST_NIGHTLY; boolean allowRandomFileNotFoundException = true; boolean allowReadingFilesStillOpenForWrite = false; private Set<String> unSyncedFiles; private Set<String> createdFiles; private Set<String> openFilesForWrite = new HashSet<>(); ConcurrentMap<String,RuntimeException> openLocks = new ConcurrentHashMap<>(); volatile boolean crashed; private ThrottledIndexOutput throttledOutput; private Throttling throttling = LuceneTestCase.TEST_NIGHTLY ? Throttling.SOMETIMES : Throttling.NEVER; // for testing boolean alwaysCorrupt; final AtomicInteger inputCloneCount = new AtomicInteger(); // use this for tracking files for crash. // additionally: provides debugging information in case you leave one open private Map<Closeable,Exception> openFileHandles = Collections.synchronizedMap(new IdentityHashMap<Closeable,Exception>()); // NOTE: we cannot initialize the Map here due to the // order in which our constructor actually does this // member initialization vs when it calls super. It seems // like super is called, then our members are initialized: private Map<String,Integer> openFiles; // Only tracked if noDeleteOpenFile is true: if an attempt // is made to delete an open file, we enroll it here. private Set<String> openFilesDeleted; private synchronized void init() { if (openFiles == null) { openFiles = new HashMap<>(); openFilesDeleted = new HashSet<>(); } if (createdFiles == null) createdFiles = new HashSet<>(); if (unSyncedFiles == null) unSyncedFiles = new HashSet<>(); } public MockDirectoryWrapper(Random random, Directory delegate) { super(delegate); // must make a private random since our methods are // called from different threads; else test failures may // not be reproducible from the original seed this.randomState = new Random(random.nextInt()); this.throttledOutput = new ThrottledIndexOutput(ThrottledIndexOutput .mBitsToBytes(40 + randomState.nextInt(10)), 1 + randomState.nextInt(5), null); init(); } public int getInputCloneCount() { return inputCloneCount.get(); } boolean verboseClone; /** * If set to true, we print a fake exception * with filename and stacktrace on every indexinput clone() */ public void setVerboseClone(boolean v) { verboseClone = v; } public void setTrackDiskUsage(boolean v) { trackDiskUsage = v; } /** If set to true (the default), when we throw random * IOException on openInput or createOutput, we may * sometimes throw FileNotFoundException or * NoSuchFileException. */ public void setAllowRandomFileNotFoundException(boolean value) { allowRandomFileNotFoundException = value; } /** If set to true, you can open an inputstream on a file * that is still open for writes. */ public void setAllowReadingFilesStillOpenForWrite(boolean value) { allowReadingFilesStillOpenForWrite = value; } /** * Enum for controlling hard disk throttling. * Set via {@link MockDirectoryWrapper #setThrottling(Throttling)} * <p> * WARNING: can make tests very slow. */ public static enum Throttling { /** always emulate a slow hard disk. could be very slow! */ ALWAYS, /** sometimes (0.5% of the time) emulate a slow hard disk. 
*/ SOMETIMES, /** never throttle output */ NEVER } public void setThrottling(Throttling throttling) { this.throttling = throttling; } /** * Add a rare small sleep to catch race conditions in open/close * <p> * You can enable this if you need it. */ public void setUseSlowOpenClosers(boolean v) { useSlowOpenClosers = v; } @Override public synchronized void sync(Collection<String> names) throws IOException { maybeYield(); maybeThrowDeterministicException(); if (crashed) { throw new IOException("cannot sync after crash"); } // always pass thru fsync, directories rely on this. // 90% of time, we use DisableFsyncFS which omits the real calls. for (String name : names) { // randomly fail with IOE on any file maybeThrowIOException(name); in.sync(Collections.singleton(name)); unSyncedFiles.remove(name); } } @Override public synchronized void rename(String source, String dest) throws IOException { maybeYield(); maybeThrowDeterministicException(); if (crashed) { throw new IOException("cannot rename after crash"); } if (openFiles.containsKey(source) && assertNoDeleteOpenFile) { throw fillOpenTrace(new AssertionError("MockDirectoryWrapper: source file \"" + source + "\" is still open: cannot rename"), source, true); } if (openFiles.containsKey(dest) && assertNoDeleteOpenFile) { throw fillOpenTrace(new AssertionError("MockDirectoryWrapper: dest file \"" + dest + "\" is still open: cannot rename"), dest, true); } boolean success = false; try { in.rename(source, dest); success = true; } finally { if (success) { // we don't do this stuff with lucene's commit, but it's just for completeness if (unSyncedFiles.contains(source)) { unSyncedFiles.remove(source); unSyncedFiles.add(dest); } openFilesDeleted.remove(source); createdFiles.remove(source); createdFiles.add(dest); } } } @Override public synchronized void syncMetaData() throws IOException { maybeYield(); maybeThrowDeterministicException(); if (crashed) { throw new IOException("cannot sync metadata after crash"); } in.syncMetaData(); } public synchronized final long sizeInBytes() throws IOException { long size = 0; for (String file : in.listAll()) { // hack 2: see TODO in ExtrasFS (ideally it would always return 0 byte // size for extras it creates, even though the size of non-regular files is not defined) if (!file.startsWith("extra")) { size += in.fileLength(file); } } return size; } public synchronized void corruptUnknownFiles() throws IOException { if (LuceneTestCase.VERBOSE) { System.out.println("MDW: corrupt unknown files"); } Set<String> knownFiles = new HashSet<>(); for(String fileName : listAll()) { if (fileName.startsWith(IndexFileNames.SEGMENTS)) { if (LuceneTestCase.VERBOSE) { System.out.println("MDW: read " + fileName + " to gather files it references"); } SegmentInfos infos; try { infos = SegmentInfos.readCommit(this, fileName); } catch (IOException ioe) { if (LuceneTestCase.VERBOSE) { System.out.println("MDW: exception reading segment infos " + fileName + "; files: " + Arrays.toString(listAll())); } throw ioe; } knownFiles.addAll(infos.files(true)); } } Set<String> toCorrupt = new HashSet<>(); Matcher m = IndexFileNames.CODEC_FILE_PATTERN.matcher(""); for(String fileName : listAll()) { m.reset(fileName); if (knownFiles.contains(fileName) == false && fileName.endsWith("write.lock") == false && (m.matches() || fileName.startsWith(IndexFileNames.PENDING_SEGMENTS))) { toCorrupt.add(fileName); } } corruptFiles(toCorrupt); } public synchronized void corruptFiles(Collection<String> files) throws IOException { boolean disabled = 
TestUtil.disableVirusChecker(in); try { _corruptFiles(files); } finally { if (disabled) { TestUtil.enableVirusChecker(in); } } } private synchronized void _corruptFiles(Collection<String> files) throws IOException { // TODO: we should also mess with any recent file renames, file deletions, if // syncMetaData was not called!! // Must make a copy because we change the incoming unsyncedFiles // when we create temp files, delete, etc., below: final List<String> filesToCorrupt = new ArrayList<>(files); // sort the files otherwise we have reproducibility issues // across JVMs if the incoming collection is a hashSet etc. CollectionUtil.timSort(filesToCorrupt); for(String name : filesToCorrupt) { int damage = randomState.nextInt(6); if (alwaysCorrupt && damage == 3) { damage = 4; } String action = null; switch(damage) { case 0: action = "deleted"; deleteFile(name); break; case 1: action = "zeroed"; // Zero out file entirely long length; try { length = fileLength(name); } catch (IOException ioe) { throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); } // Delete original and write zeros back: deleteFile(name); byte[] zeroes = new byte[256]; long upto = 0; try (IndexOutput out = in.createOutput(name, LuceneTestCase.newIOContext(randomState))) { while(upto < length) { final int limit = (int) Math.min(length-upto, zeroes.length); out.writeBytes(zeroes, 0, limit); upto += limit; } } catch (IOException ioe) { throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); } break; case 2: { action = "partially truncated"; // Partially Truncate the file: // First, make temp file and copy only half this // file over: String tempFileName = null; try (IndexOutput tempOut = in.createTempOutput("name", "mdw_corrupt", LuceneTestCase.newIOContext(randomState)); IndexInput ii = in.openInput(name, LuceneTestCase.newIOContext(randomState))) { tempFileName = tempOut.getName(); tempOut.copyBytes(ii, ii.length()/2); } catch (IOException ioe) { throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); } // Delete original and copy bytes back: deleteFile(name); try (IndexOutput out = in.createOutput(name, LuceneTestCase.newIOContext(randomState)); IndexInput ii = in.openInput(tempFileName, LuceneTestCase.newIOContext(randomState))) { out.copyBytes(ii, ii.length()); } catch (IOException ioe) { throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); } deleteFile(tempFileName); } break; case 3: // The file survived intact: action = "didn't change"; break; case 4: // Corrupt one bit randomly in the file: { String tempFileName = null; try (IndexOutput tempOut = in.createTempOutput("name", "mdw_corrupt", LuceneTestCase.newIOContext(randomState)); IndexInput ii = in.openInput(name, LuceneTestCase.newIOContext(randomState))) { tempFileName = tempOut.getName(); if (ii.length() > 0) { // Copy first part unchanged: long byteToCorrupt = (long) (randomState.nextDouble() * ii.length()); if (byteToCorrupt > 0) { tempOut.copyBytes(ii, byteToCorrupt); } // Randomly flip one bit from this byte: byte b = ii.readByte(); int bitToFlip = randomState.nextInt(8); b = (byte) (b ^ (1 << bitToFlip)); tempOut.writeByte(b); action = "flip bit " + bitToFlip + " of byte " + byteToCorrupt + " out of " + ii.length() + " bytes"; // Copy last part unchanged: long bytesLeft = ii.length() - byteToCorrupt - 1; if (bytesLeft > 0) { tempOut.copyBytes(ii, bytesLeft); } } else { action = "didn't 
change"; } } catch (IOException ioe) { throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); } // Delete original and copy bytes back: deleteFile(name); try (IndexOutput out = in.createOutput(name, LuceneTestCase.newIOContext(randomState)); IndexInput ii = in.openInput(tempFileName, LuceneTestCase.newIOContext(randomState))) { out.copyBytes(ii, ii.length()); } catch (IOException ioe) { throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); } deleteFile(tempFileName); } break; case 5: action = "fully truncated"; // Totally truncate the file to zero bytes deleteFile(name); try (IndexOutput out = in.createOutput(name, LuceneTestCase.newIOContext(randomState))) { out.getFilePointer(); // just fake access to prevent compiler warning } catch (IOException ioe) { throw new RuntimeException("hit unexpected IOException while trying to corrupt file " + name, ioe); } break; default: throw new AssertionError(); } if (LuceneTestCase.VERBOSE) { System.out.println("MockDirectoryWrapper: " + action + " unsynced file: " + name); } } } /** Simulates a crash of OS or machine by overwriting * unsynced files. */ public synchronized void crash() throws IOException { openFiles = new HashMap<>(); openFilesForWrite = new HashSet<>(); openFilesDeleted = new HashSet<>(); // first force-close all files, so we can corrupt on windows etc. // clone the file map, as these guys want to remove themselves on close. Map<Closeable,Exception> m = new IdentityHashMap<>(openFileHandles); for (Closeable f : m.keySet()) { try { f.close(); } catch (Exception ignored) {} } corruptFiles(unSyncedFiles); crashed = true; unSyncedFiles = new HashSet<>(); } public synchronized void clearCrash() { crashed = false; openLocks.clear(); } public void setMaxSizeInBytes(long maxSize) { this.maxSize = maxSize; } public long getMaxSizeInBytes() { return this.maxSize; } /** * Returns the peek actual storage used (bytes) in this * directory. */ public long getMaxUsedSizeInBytes() { return this.maxUsedSize; } public void resetMaxUsedSizeInBytes() throws IOException { this.maxUsedSize = sizeInBytes(); } /** * Trip a test assert if there is an attempt * to delete an open file. */ public void setAssertNoDeleteOpenFile(boolean value) { this.assertNoDeleteOpenFile = value; } public boolean getAssertNoDeleteOpenFile() { return assertNoDeleteOpenFile; } /** * If 0.0, no exceptions will be thrown. Else this should * be a double 0.0 - 1.0. We will randomly throw an * IOException on the first write to an OutputStream based * on this probability. */ public void setRandomIOExceptionRate(double rate) { randomIOExceptionRate = rate; } public double getRandomIOExceptionRate() { return randomIOExceptionRate; } /** * If 0.0, no exceptions will be thrown during openInput * and createOutput. Else this should * be a double 0.0 - 1.0 and we will randomly throw an * IOException in openInput and createOutput with * this probability. */ public void setRandomIOExceptionRateOnOpen(double rate) { randomIOExceptionRateOnOpen = rate; } public double getRandomIOExceptionRateOnOpen() { return randomIOExceptionRateOnOpen; } void maybeThrowIOException(String message) throws IOException { if (randomState.nextDouble() < randomIOExceptionRate) { IOException ioe = new IOException("a random IOException" + (message == null ? 
"" : " (" + message + ")")); if (LuceneTestCase.VERBOSE) { System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception" + (message == null ? "" : " (" + message + ")")); ioe.printStackTrace(System.out); } throw ioe; } } void maybeThrowIOExceptionOnOpen(String name) throws IOException { if (randomState.nextDouble() < randomIOExceptionRateOnOpen) { if (LuceneTestCase.VERBOSE) { System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception during open file=" + name); new Throwable().printStackTrace(System.out); } if (allowRandomFileNotFoundException == false || randomState.nextBoolean()) { throw new IOException("a random IOException (" + name + ")"); } else { throw randomState.nextBoolean() ? new FileNotFoundException("a random IOException (" + name + ")") : new NoSuchFileException("a random IOException (" + name + ")"); } } } /** returns current open file handle count */ public synchronized long getFileHandleCount() { return openFileHandles.size(); } @Override public synchronized void deleteFile(String name) throws IOException { maybeYield(); maybeThrowDeterministicException(); if (crashed) { throw new IOException("cannot delete after crash"); } if (openFiles.containsKey(name)) { openFilesDeleted.add(name); if (assertNoDeleteOpenFile) { throw fillOpenTrace(new IOException("MockDirectoryWrapper: file \"" + name + "\" is still open: cannot delete"), name, true); } } else { openFilesDeleted.remove(name); } unSyncedFiles.remove(name); in.deleteFile(name); createdFiles.remove(name); } // sets the cause of the incoming ioe to be the stack // trace when the offending file name was opened private synchronized <T extends Throwable> T fillOpenTrace(T t, String name, boolean input) { for(Map.Entry<Closeable,Exception> ent : openFileHandles.entrySet()) { if (input && ent.getKey() instanceof MockIndexInputWrapper && ((MockIndexInputWrapper) ent.getKey()).name.equals(name)) { t.initCause(ent.getValue()); break; } else if (!input && ent.getKey() instanceof MockIndexOutputWrapper && ((MockIndexOutputWrapper) ent.getKey()).name.equals(name)) { t.initCause(ent.getValue()); break; } } return t; } private void maybeYield() { if (randomState.nextBoolean()) { Thread.yield(); } } public synchronized Set<String> getOpenDeletedFiles() { return new HashSet<>(openFilesDeleted); } private boolean failOnCreateOutput = true; public void setFailOnCreateOutput(boolean v) { failOnCreateOutput = v; } @Override public synchronized IndexOutput createOutput(String name, IOContext context) throws IOException { maybeThrowDeterministicException(); maybeThrowIOExceptionOnOpen(name); maybeYield(); if (failOnCreateOutput) { maybeThrowDeterministicException(); } if (crashed) { throw new IOException("cannot createOutput after crash"); } init(); if (createdFiles.contains(name)) { throw new FileAlreadyExistsException("File \"" + name + "\" was already written to."); } if (assertNoDeleteOpenFile && openFiles.containsKey(name)) { throw new AssertionError("MockDirectoryWrapper: file \"" + name + "\" is still open: cannot overwrite"); } unSyncedFiles.add(name); createdFiles.add(name); //System.out.println(Thread.currentThread().getName() + ": MDW: create " + name); IndexOutput delegateOutput = in.createOutput(name, LuceneTestCase.newIOContext(randomState, context)); final IndexOutput io = new MockIndexOutputWrapper(this, delegateOutput, name); addFileHandle(io, name, Handle.Output); openFilesForWrite.add(name); return maybeThrottle(name, io); } private 
IndexOutput maybeThrottle(String name, IndexOutput output) { // throttling REALLY slows down tests, so don't do it very often for SOMETIMES. if (throttling == Throttling.ALWAYS || (throttling == Throttling.SOMETIMES && randomState.nextInt(200) == 0)) { if (LuceneTestCase.VERBOSE) { System.out.println("MockDirectoryWrapper: throttling indexOutput (" + name + ")"); } return throttledOutput.newFromDelegate(output); } else { return output; } } @Override public synchronized IndexOutput createTempOutput(String prefix, String suffix, IOContext context) throws IOException { maybeThrowDeterministicException(); maybeThrowIOExceptionOnOpen("temp: prefix=" + prefix + " suffix=" + suffix); maybeYield(); if (failOnCreateOutput) { maybeThrowDeterministicException(); } if (crashed) { throw new IOException("cannot createTempOutput after crash"); } init(); IndexOutput delegateOutput = in.createTempOutput(prefix, suffix, LuceneTestCase.newIOContext(randomState, context)); String name = delegateOutput.getName(); if (name.toLowerCase(Locale.ROOT).endsWith(".tmp") == false) { throw new IllegalStateException("wrapped directory failed to use .tmp extension: got: " + name); } unSyncedFiles.add(name); createdFiles.add(name); final IndexOutput io = new MockIndexOutputWrapper(this, delegateOutput, name); addFileHandle(io, name, Handle.Output); openFilesForWrite.add(name); return maybeThrottle(name, io); } private static enum Handle { Input, Output, Slice } synchronized void addFileHandle(Closeable c, String name, Handle handle) { Integer v = openFiles.get(name); if (v != null) { v = Integer.valueOf(v.intValue()+1); openFiles.put(name, v); } else { openFiles.put(name, Integer.valueOf(1)); } openFileHandles.put(c, new RuntimeException("unclosed Index" + handle.name() + ": " + name)); } private boolean failOnOpenInput = true; public void setFailOnOpenInput(boolean v) { failOnOpenInput = v; } @Override public synchronized IndexInput openInput(String name, IOContext context) throws IOException { maybeThrowDeterministicException(); maybeThrowIOExceptionOnOpen(name); maybeYield(); if (failOnOpenInput) { maybeThrowDeterministicException(); } if (!LuceneTestCase.slowFileExists(in, name)) { throw randomState.nextBoolean() ? new FileNotFoundException(name + " in dir=" + in) : new NoSuchFileException(name + " in dir=" + in); } // cannot open a file for input if it's still open for output. 
if (!allowReadingFilesStillOpenForWrite && openFilesForWrite.contains(name)) { throw fillOpenTrace(new AccessDeniedException("MockDirectoryWrapper: file \"" + name + "\" is still open for writing"), name, false); } IndexInput delegateInput = in.openInput(name, LuceneTestCase.newIOContext(randomState, context)); final IndexInput ii; int randomInt = randomState.nextInt(500); if (useSlowOpenClosers && randomInt == 0) { if (LuceneTestCase.VERBOSE) { System.out.println("MockDirectoryWrapper: using SlowClosingMockIndexInputWrapper for file " + name); } ii = new SlowClosingMockIndexInputWrapper(this, name, delegateInput); } else if (useSlowOpenClosers && randomInt == 1) { if (LuceneTestCase.VERBOSE) { System.out.println("MockDirectoryWrapper: using SlowOpeningMockIndexInputWrapper for file " + name); } ii = new SlowOpeningMockIndexInputWrapper(this, name, delegateInput); } else { ii = new MockIndexInputWrapper(this, name, delegateInput, null); } addFileHandle(ii, name, Handle.Input); return ii; } // NOTE: This is off by default; see LUCENE-5574 private volatile boolean assertNoUnreferencedFilesOnClose; public void setAssertNoUnrefencedFilesOnClose(boolean v) { assertNoUnreferencedFilesOnClose = v; } @Override public synchronized void close() throws IOException { if (isOpen) { isOpen = false; } else { in.close(); // but call it again on our wrapped dir return; } boolean success = false; try { // files that we tried to delete, but couldn't because readers were open. // all that matters is that we tried! (they will eventually go away) // still open when we tried to delete maybeYield(); if (openFiles == null) { openFiles = new HashMap<>(); openFilesDeleted = new HashSet<>(); } if (openFiles.size() > 0) { // print the first one as it's very verbose otherwise Exception cause = null; Iterator<Exception> stacktraces = openFileHandles.values().iterator(); if (stacktraces.hasNext()) { cause = stacktraces.next(); } // RuntimeException instead of IOException because // super() does not throw IOException currently: throw new RuntimeException("MockDirectoryWrapper: cannot close: there are still " + openFiles.size() + " open files: " + openFiles, cause); } if (openLocks.size() > 0) { Exception cause = null; Iterator<RuntimeException> stacktraces = openLocks.values().iterator(); if (stacktraces.hasNext()) { cause = stacktraces.next(); } throw new RuntimeException("MockDirectoryWrapper: cannot close: there are still open locks: " + openLocks, cause); } randomIOExceptionRate = 0.0; randomIOExceptionRateOnOpen = 0.0; if ((getCheckIndexOnClose() || assertNoUnreferencedFilesOnClose) && DirectoryReader.indexExists(this)) { if (getCheckIndexOnClose()) { if (LuceneTestCase.VERBOSE) { System.out.println("\nNOTE: MockDirectoryWrapper: now crush"); } crash(); // corrupt any unsynced-files if (LuceneTestCase.VERBOSE) { System.out.println("\nNOTE: MockDirectoryWrapper: now run CheckIndex"); } TestUtil.checkIndex(this, getCrossCheckTermVectorsOnClose(), true, null); } // TODO: factor this out / share w/ TestIW.assertNoUnreferencedFiles if (assertNoUnreferencedFilesOnClose) { if (LuceneTestCase.VERBOSE) { System.out.println("MDW: now assert no unref'd files at close"); } // now look for unreferenced files: discount ones that we tried to delete but could not Set<String> allFiles = new HashSet<>(Arrays.asList(listAll())); String[] startFiles = allFiles.toArray(new String[0]); IndexWriterConfig iwc = new IndexWriterConfig(null); iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); // We must do this before opening writer 
otherwise writer will be angry if there are pending deletions: TestUtil.disableVirusChecker(in); new IndexWriter(in, iwc).rollback(); String[] endFiles = in.listAll(); Set<String> startSet = new TreeSet<>(Arrays.asList(startFiles)); Set<String> endSet = new TreeSet<>(Arrays.asList(endFiles)); startFiles = startSet.toArray(new String[0]); endFiles = endSet.toArray(new String[0]); if (!Arrays.equals(startFiles, endFiles)) { List<String> removed = new ArrayList<>(); for(String fileName : startFiles) { if (!endSet.contains(fileName)) { removed.add(fileName); } } List<String> added = new ArrayList<>(); for(String fileName : endFiles) { if (!startSet.contains(fileName)) { added.add(fileName); } } String extras; if (removed.size() != 0) { extras = "\n\nThese files were removed: " + removed; } else { extras = ""; } if (added.size() != 0) { extras += "\n\nThese files were added (waaaaaaaaaat!): " + added; } throw new RuntimeException("unreferenced files: before delete:\n " + Arrays.toString(startFiles) + "\n after delete:\n " + Arrays.toString(endFiles) + extras); } DirectoryReader ir1 = DirectoryReader.open(this); int numDocs1 = ir1.numDocs(); ir1.close(); new IndexWriter(this, new IndexWriterConfig(null)).close(); DirectoryReader ir2 = DirectoryReader.open(this); int numDocs2 = ir2.numDocs(); ir2.close(); assert numDocs1 == numDocs2 : "numDocs changed after opening/closing IW: before=" + numDocs1 + " after=" + numDocs2; } } success = true; } finally { if (success) { IOUtils.close(in); } else { IOUtils.closeWhileHandlingException(in); } } } synchronized void removeOpenFile(Closeable c, String name) { Integer v = openFiles.get(name); // Could be null when crash() was called if (v != null) { if (v.intValue() == 1) { openFiles.remove(name); } else { v = Integer.valueOf(v.intValue()-1); openFiles.put(name, v); } } openFileHandles.remove(c); } public synchronized void removeIndexOutput(IndexOutput out, String name) { openFilesForWrite.remove(name); removeOpenFile(out, name); } public synchronized void removeIndexInput(IndexInput in, String name) { removeOpenFile(in, name); } /** * Objects that represent fail-able conditions. Objects of a derived * class are created and registered with the mock directory. After * register, each object will be invoked once for each first write * of a file, giving the object a chance to throw an IOException. */ public static class Failure { /** * eval is called on the first write of every new file. */ public void eval(MockDirectoryWrapper dir) throws IOException { } /** * reset should set the state of the failure to its default * (freshly constructed) state. Reset is convenient for tests * that want to create one failure object and then reuse it in * multiple cases. This, combined with the fact that Failure * subclasses are often anonymous classes makes reset difficult to * do otherwise. * * A typical example of use is * Failure failure = new Failure() { ... }; * ... 
* mock.failOn(failure.reset()) */ public Failure reset() { return this; } protected boolean doFail; public void setDoFail() { doFail = true; } public void clearDoFail() { doFail = false; } } ArrayList<Failure> failures; /** * add a Failure object to the list of objects to be evaluated * at every potential failure point */ synchronized public void failOn(Failure fail) { if (failures == null) { failures = new ArrayList<>(); } failures.add(fail); } /** * Iterate through the failures list, giving each object a * chance to throw an IOE */ synchronized void maybeThrowDeterministicException() throws IOException { if (failures != null) { for(int i = 0; i < failures.size(); i++) { try { failures.get(i).eval(this); } catch (Throwable t) { if (LuceneTestCase.VERBOSE) { System.out.println("MockDirectoryWrapper: throw exc"); t.printStackTrace(System.out); } throw IOUtils.rethrowAlways(t); } } } } @Override public synchronized String[] listAll() throws IOException { maybeYield(); return in.listAll(); } @Override public synchronized long fileLength(String name) throws IOException { maybeYield(); return in.fileLength(name); } @Override public synchronized Lock obtainLock(String name) throws IOException { maybeYield(); return super.obtainLock(name); // TODO: consider mocking locks, but not all the time, can hide bugs } /** Use this when throwing fake {@code IOException}, * e.g. from {@link MockDirectoryWrapper.Failure}. */ public static class FakeIOException extends IOException { } @Override public String toString() { if (maxSize != 0) { return "MockDirectoryWrapper(" + in + ", current=" + maxUsedSize + ",max=" + maxSize + ")"; } else { return super.toString(); } } // don't override optional methods like copyFrom: we need the default impl for things like disk // full checks. we randomly exercise "raw" directories anyway. We ensure default impls are used: @Override public final ChecksumIndexInput openChecksumInput(String name, IOContext context) throws IOException { return super.openChecksumInput(name, context); } @Override public final void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { super.copyFrom(from, src, dest, context); } @Override protected final void ensureOpen() throws AlreadyClosedException { super.ensureOpen(); } }
1
38,198
Hmm, why did we remove the randomness about which (confusingly) different exception to throw here? That randomness was (is?) useful for testing that Lucene indeed catches `FNFE` and `NSFE` interchangeably. (A minimal sketch of the pattern follows this record.)
apache-lucene-solr
java
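For context on the comment above: a minimal sketch, with hypothetical class and method names (this is not the actual Lucene code), of the randomized exception choice the reviewer is describing. Randomly alternating between `FileNotFoundException` and `NoSuchFileException` per test seed means code under test that catches only one of the two will eventually fail, which forces callers to treat them interchangeably.

```java
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.NoSuchFileException;
import java.util.Random;

class RandomMissingFileThrower {

    /** Randomly throws one of the two "file is missing" exception types.
     *  Code that catches only FileNotFoundException (or only
     *  NoSuchFileException) will fail intermittently under random seeds,
     *  enforcing that both are handled interchangeably. */
    static void throwMissing(Random random, String name) throws IOException {
        if (random.nextBoolean()) {
            throw new FileNotFoundException(name + " does not exist");
        } else {
            throw new NoSuchFileException(name + " does not exist");
        }
    }
}
```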
@@ -1215,10 +1215,10 @@ ostree_repo_list_collection_refs (OstreeRepo *self, continue; } - if (match_collection_id != NULL && g_strcmp0 (match_collection_id, current_collection_id) != 0) + if (match_collection_id != NULL && g_strcmp0 (match_collection_id, remote_collection_id) != 0) continue; else - current_collection_id = remote_collection_id; + current_collection_id = remote_collection_id; } if (!glnx_opendirat (dfd_iter.fd, dent->d_name, TRUE, &subdir_fd, error))
1
/* -*- mode: C; c-file-style: "gnu"; indent-tabs-mode: nil; -*- * * Copyright (C) 2011,2013 Colin Walters <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ #include "config.h" #include "ostree-core-private.h" #include "ostree-repo-private.h" #include "otutil.h" #include "ot-fs-utils.h" /* This is polymorphic in @collection_id: if non-%NULL, @refs will be treated as of * type OstreeCollectionRef ↦ checksum. Otherwise, it will be treated as of type * refspec ↦ checksum. */ static gboolean add_ref_to_set (const char *remote, const char *collection_id, int base_fd, const char *path, GHashTable *refs, GCancellable *cancellable, GError **error) { g_return_val_if_fail (remote == NULL || collection_id == NULL, FALSE); gsize len; char *contents = glnx_file_get_contents_utf8_at (base_fd, path, &len, cancellable, error); if (!contents) return FALSE; g_strchomp (contents); if (collection_id == NULL) { g_autoptr(GString) refname = g_string_new (""); if (remote) { g_string_append (refname, remote); g_string_append_c (refname, ':'); } g_string_append (refname, path); g_hash_table_insert (refs, g_string_free (g_steal_pointer (&refname), FALSE), contents); } else { g_hash_table_insert (refs, ostree_collection_ref_new (collection_id, path), contents); } return TRUE; } static gboolean write_checksum_file_at (OstreeRepo *self, int dfd, const char *name, const char *sha256, GCancellable *cancellable, GError **error) { if (!ostree_validate_checksum_string (sha256, error)) return FALSE; if (ostree_validate_checksum_string (name, NULL)) return glnx_throw (error, "Rev name '%s' looks like a checksum", name); if (!*name) return glnx_throw (error, "Invalid empty ref name"); const char *lastslash = strrchr (name, '/'); if (lastslash) { char *parent = strdupa (name); parent[lastslash - name] = '\0'; if (!glnx_shutil_mkdir_p_at (dfd, parent, 0777, cancellable, error)) return FALSE; } { size_t l = strlen (sha256); char *bufnl = alloca (l + 2); g_autoptr(GError) temp_error = NULL; memcpy (bufnl, sha256, l); bufnl[l] = '\n'; bufnl[l+1] = '\0'; if (!_ostree_repo_file_replace_contents (self, dfd, name, (guint8*)bufnl, l + 1, cancellable, &temp_error)) { if (g_error_matches (temp_error, G_IO_ERROR, G_IO_ERROR_IS_DIRECTORY)) { g_autoptr(GHashTable) refs = NULL; GHashTableIter hashiter; gpointer hashkey, hashvalue; g_clear_error (&temp_error); /* FIXME: Conflict detection needs to be extended to collection–refs * using ostree_repo_list_collection_refs(). 
*/ if (!ostree_repo_list_refs (self, name, &refs, cancellable, error)) return FALSE; g_hash_table_iter_init (&hashiter, refs); while ((g_hash_table_iter_next (&hashiter, &hashkey, &hashvalue))) { if (strcmp (name, (char *)hashkey) != 0) return glnx_throw (error, "Conflict: %s exists under %s when attempting write", (char*)hashkey, name); } if (!glnx_shutil_rm_rf_at (dfd, name, cancellable, error)) return FALSE; if (!_ostree_repo_file_replace_contents (self, dfd, name, (guint8*)bufnl, l + 1, cancellable, error)) return FALSE; } else { g_propagate_error (error, g_steal_pointer (&temp_error)); return FALSE; } } } return TRUE; } static gboolean find_ref_in_remotes (OstreeRepo *self, const char *rev, int *out_fd, GError **error) { g_auto(GLnxDirFdIterator) dfd_iter = { 0, }; glnx_fd_close int ret_fd = -1; if (!glnx_dirfd_iterator_init_at (self->repo_dir_fd, "refs/remotes", TRUE, &dfd_iter, error)) return FALSE; while (TRUE) { struct dirent *dent = NULL; glnx_fd_close int remote_dfd = -1; if (!glnx_dirfd_iterator_next_dent_ensure_dtype (&dfd_iter, &dent, NULL, error)) return FALSE; if (dent == NULL) break; if (dent->d_type != DT_DIR) continue; if (!glnx_opendirat (dfd_iter.fd, dent->d_name, TRUE, &remote_dfd, error)) return FALSE; if (!ot_openat_ignore_enoent (remote_dfd, rev, &ret_fd, error)) return FALSE; if (ret_fd != -1) break; } *out_fd = ret_fd; ret_fd = -1; return TRUE; } static gboolean resolve_refspec (OstreeRepo *self, const char *remote, const char *ref, gboolean allow_noent, gboolean fallback_remote, char **out_rev, GError **error); static gboolean resolve_refspec_fallback (OstreeRepo *self, const char *remote, const char *ref, gboolean allow_noent, gboolean fallback_remote, char **out_rev, GCancellable *cancellable, GError **error) { g_autofree char *ret_rev = NULL; if (self->parent_repo) { if (!resolve_refspec (self->parent_repo, remote, ref, allow_noent, fallback_remote, &ret_rev, error)) return FALSE; } else if (!allow_noent) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND, "Refspec '%s%s%s' not found", remote ? remote : "", remote ? 
":" : "", ref); return FALSE; } ot_transfer_out_value (out_rev, &ret_rev); return TRUE; } static gboolean resolve_refspec (OstreeRepo *self, const char *remote, const char *ref, gboolean allow_noent, gboolean fallback_remote, char **out_rev, GError **error) { __attribute__((unused)) GCancellable *cancellable = NULL; g_autofree char *ret_rev = NULL; glnx_fd_close int target_fd = -1; g_return_val_if_fail (ref != NULL, FALSE); /* We intentionally don't allow a ref that looks like a checksum */ if (ostree_validate_checksum_string (ref, NULL)) { ret_rev = g_strdup (ref); } else if (remote != NULL) { const char *remote_ref = glnx_strjoina ("refs/remotes/", remote, "/", ref); if (!ot_openat_ignore_enoent (self->repo_dir_fd, remote_ref, &target_fd, error)) return FALSE; } else { const char *local_ref = glnx_strjoina ("refs/heads/", ref); if (!ot_openat_ignore_enoent (self->repo_dir_fd, local_ref, &target_fd, error)) return FALSE; if (target_fd == -1 && fallback_remote) { local_ref = glnx_strjoina ("refs/remotes/", ref); if (!ot_openat_ignore_enoent (self->repo_dir_fd, local_ref, &target_fd, error)) return FALSE; if (target_fd == -1) { if (!find_ref_in_remotes (self, ref, &target_fd, error)) return FALSE; } } } if (target_fd != -1) { ret_rev = glnx_fd_readall_utf8 (target_fd, NULL, NULL, error); if (!ret_rev) { g_prefix_error (error, "Couldn't open ref '%s': ", ref); return FALSE; } g_strchomp (ret_rev); if (!ostree_validate_checksum_string (ret_rev, error)) return FALSE; } else { if (!resolve_refspec_fallback (self, remote, ref, allow_noent, fallback_remote, &ret_rev, cancellable, error)) return FALSE; } ot_transfer_out_value (out_rev, &ret_rev); return TRUE; } /** * ostree_repo_resolve_partial_checksum: * @self: Repo * @refspec: A refspec * @full_checksum (out) (transfer full): A full checksum corresponding to the truncated ref given * @error: Error * * Look up the existing refspec checksums. 
If the given ref is a unique truncated beginning * of a valid checksum it will return that checksum in the parameter @full_checksum */ static gboolean ostree_repo_resolve_partial_checksum (OstreeRepo *self, const char *refspec, char **full_checksum, GError **error) { static const char hexchars[] = "0123456789abcdef"; g_autofree char *ret_rev = NULL; g_return_val_if_fail (error == NULL || *error == NULL, FALSE); /* If the input is longer than OSTREE_SHA256_STRING_LEN chars or contains non-hex chars, don't bother looking for it as an object */ const gsize off = strspn (refspec, hexchars); if (off > OSTREE_SHA256_STRING_LEN || refspec[off] != '\0') return TRUE; /* this looks through all objects and adds them to the ref_list if: a) they are a commit object AND b) the obj checksum starts with the partual checksum defined by "refspec" */ g_autoptr(GHashTable) ref_list = NULL; if (!ostree_repo_list_commit_objects_starting_with (self, refspec, &ref_list, NULL, error)) return FALSE; guint length = g_hash_table_size (ref_list); GHashTableIter hashiter; gpointer key, value; GVariant *first_commit = NULL; g_hash_table_iter_init (&hashiter, ref_list); if (g_hash_table_iter_next (&hashiter, &key, &value)) first_commit = (GVariant*) key; OstreeObjectType objtype; const char *checksum = NULL; if (first_commit) ostree_object_name_deserialize (first_commit, &checksum, &objtype); /* length more than one - multiple commits match partial refspec: is not unique */ if (length > 1) return glnx_throw (error, "Refspec %s not unique", refspec); /* length is 1 - a single matching commit gives us our revision */ else if (length == 1) ret_rev = g_strdup (checksum); /* Note: if length is 0, then code will return TRUE because there is no error, but it will return full_checksum = NULL to signal to continue parsing */ ot_transfer_out_value (full_checksum, &ret_rev); return TRUE; } static gboolean _ostree_repo_resolve_rev_internal (OstreeRepo *self, const char *refspec, gboolean allow_noent, gboolean fallback_remote, char **out_rev, GError **error) { g_autofree char *ret_rev = NULL; g_return_val_if_fail (refspec != NULL, FALSE); if (ostree_validate_checksum_string (refspec, NULL)) { ret_rev = g_strdup (refspec); } else if (!ostree_repo_resolve_partial_checksum (self, refspec, &ret_rev, error)) return FALSE; if (!ret_rev) { if (error != NULL && *error != NULL) return FALSE; if (g_str_has_suffix (refspec, "^")) { g_autofree char *parent_refspec = NULL; g_autofree char *parent_rev = NULL; g_autoptr(GVariant) commit = NULL; parent_refspec = g_strdup (refspec); parent_refspec[strlen(parent_refspec) - 1] = '\0'; if (!ostree_repo_resolve_rev (self, parent_refspec, allow_noent, &parent_rev, error)) return FALSE; if (!ostree_repo_load_variant (self, OSTREE_OBJECT_TYPE_COMMIT, parent_rev, &commit, error)) return FALSE; if (!(ret_rev = ostree_commit_get_parent (commit))) return glnx_throw (error, "Commit %s has no parent", parent_rev); } else { g_autofree char *remote = NULL; g_autofree char *ref = NULL; if (!ostree_parse_refspec (refspec, &remote, &ref, error)) return FALSE; if (!resolve_refspec (self, remote, ref, allow_noent, fallback_remote, &ret_rev, error)) return FALSE; } } ot_transfer_out_value (out_rev, &ret_rev); return TRUE; } /** * ostree_repo_resolve_rev: * @self: Repo * @refspec: A refspec * @allow_noent: Do not throw an error if refspec does not exist * @out_rev: (out) (transfer full): A checksum,or %NULL if @allow_noent is true and it does not exist * @error: Error * * Look up the given refspec, returning the checksum 
it references in * the parameter @out_rev. Will fall back on remote directory if cannot * find the given refspec in local. */ gboolean ostree_repo_resolve_rev (OstreeRepo *self, const char *refspec, gboolean allow_noent, char **out_rev, GError **error) { return _ostree_repo_resolve_rev_internal (self, refspec, allow_noent, TRUE, out_rev, error); } /** * ostree_repo_resolve_rev_ext: * @self: Repo * @refspec: A refspec * @allow_noent: Do not throw an error if refspec does not exist * @flags: Options controlling behavior * @out_rev: (out) (transfer full): A checksum,or %NULL if @allow_noent is true and it does not exist * @error: Error * * Look up the given refspec, returning the checksum it references in * the parameter @out_rev. Differently from ostree_repo_resolve_rev(), * this will not fall back to searching through remote repos if a * local ref is specified but not found. */ gboolean ostree_repo_resolve_rev_ext (OstreeRepo *self, const char *refspec, gboolean allow_noent, OstreeRepoResolveRevExtFlags flags, char **out_rev, GError **error) { return _ostree_repo_resolve_rev_internal (self, refspec, allow_noent, FALSE, out_rev, error); } static gboolean enumerate_refs_recurse (OstreeRepo *repo, const char *remote, OstreeRepoListRefsExtFlags flags, const char *collection_id, int base_dfd, GString *base_path, int child_dfd, const char *path, GHashTable *refs, GCancellable *cancellable, GError **error) { g_auto(GLnxDirFdIterator) dfd_iter = { 0, }; const gboolean aliases_only = (flags & OSTREE_REPO_LIST_REFS_EXT_ALIASES) > 0; if (!glnx_dirfd_iterator_init_at (child_dfd, path, FALSE, &dfd_iter, error)) return FALSE; while (TRUE) { guint len = base_path->len; struct dirent *dent = NULL; if (!glnx_dirfd_iterator_next_dent_ensure_dtype (&dfd_iter, &dent, cancellable, error)) return FALSE; if (dent == NULL) break; g_string_append (base_path, dent->d_name); if (dent->d_type == DT_DIR) { g_string_append_c (base_path, '/'); if (!enumerate_refs_recurse (repo, remote, flags, collection_id, base_dfd, base_path, dfd_iter.fd, dent->d_name, refs, cancellable, error)) return FALSE; } else { if (aliases_only && dent->d_type == DT_LNK) { g_autofree char *target = glnx_readlinkat_malloc (base_dfd, base_path->str, cancellable, error); const char *resolved_target = target; if (!target) return FALSE; while (g_str_has_prefix (resolved_target, "../")) resolved_target += 3; g_hash_table_insert (refs, g_strdup (base_path->str), g_strdup (resolved_target)); } else if ((!aliases_only && dent->d_type == DT_REG) || dent->d_type == DT_LNK) { if (!add_ref_to_set (remote, collection_id, base_dfd, base_path->str, refs, cancellable, error)) return FALSE; } } g_string_truncate (base_path, len); } return TRUE; } static gboolean _ostree_repo_list_refs_internal (OstreeRepo *self, gboolean cut_prefix, OstreeRepoListRefsExtFlags flags, const char *refspec_prefix, GHashTable **out_all_refs, GCancellable *cancellable, GError **error) { g_autoptr(GHashTable) ret_all_refs = NULL; g_autofree char *remote = NULL; g_autofree char *ref_prefix = NULL; ret_all_refs = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free); if (refspec_prefix) { struct stat stbuf; const char *prefix_path; const char *path; if (!ostree_parse_refspec (refspec_prefix, &remote, &ref_prefix, error)) return FALSE; if (!(flags & OSTREE_REPO_LIST_REFS_EXT_EXCLUDE_REMOTES) && remote) { prefix_path = glnx_strjoina ("refs/remotes/", remote, "/"); path = glnx_strjoina (prefix_path, ref_prefix); } else { prefix_path = "refs/heads/"; path = glnx_strjoina 
(prefix_path, ref_prefix); } if (fstatat (self->repo_dir_fd, path, &stbuf, 0) < 0) { if (errno != ENOENT) return glnx_throw_errno (error); } else { if (S_ISDIR (stbuf.st_mode)) { glnx_fd_close int base_fd = -1; g_autoptr(GString) base_path = g_string_new (""); if (!cut_prefix) g_string_printf (base_path, "%s/", ref_prefix); if (!glnx_opendirat (self->repo_dir_fd, cut_prefix ? path : prefix_path, TRUE, &base_fd, error)) return FALSE; if (!enumerate_refs_recurse (self, remote, flags, NULL, base_fd, base_path, base_fd, cut_prefix ? "." : ref_prefix, ret_all_refs, cancellable, error)) return FALSE; } else { glnx_fd_close int prefix_dfd = -1; if (!glnx_opendirat (self->repo_dir_fd, prefix_path, TRUE, &prefix_dfd, error)) return FALSE; if (!add_ref_to_set (remote, NULL, prefix_dfd, ref_prefix, ret_all_refs, cancellable, error)) return FALSE; } } } else { g_auto(GLnxDirFdIterator) dfd_iter = { 0, }; g_autoptr(GString) base_path = g_string_new (""); glnx_fd_close int refs_heads_dfd = -1; if (!glnx_opendirat (self->repo_dir_fd, "refs/heads", TRUE, &refs_heads_dfd, error)) return FALSE; if (!enumerate_refs_recurse (self, NULL, flags, NULL, refs_heads_dfd, base_path, refs_heads_dfd, ".", ret_all_refs, cancellable, error)) return FALSE; if (!(flags & OSTREE_REPO_LIST_REFS_EXT_EXCLUDE_REMOTES)) { g_string_truncate (base_path, 0); if (!glnx_dirfd_iterator_init_at (self->repo_dir_fd, "refs/remotes", TRUE, &dfd_iter, error)) return FALSE; while (TRUE) { struct dirent *dent; glnx_fd_close int remote_dfd = -1; if (!glnx_dirfd_iterator_next_dent_ensure_dtype (&dfd_iter, &dent, cancellable, error)) return FALSE; if (!dent) break; if (dent->d_type != DT_DIR) continue; if (!glnx_opendirat (dfd_iter.fd, dent->d_name, TRUE, &remote_dfd, error)) return FALSE; if (!enumerate_refs_recurse (self, dent->d_name, flags, NULL, remote_dfd, base_path, remote_dfd, ".", ret_all_refs, cancellable, error)) return FALSE; } } } ot_transfer_out_value (out_all_refs, &ret_all_refs); return TRUE; } /** * ostree_repo_list_refs: * @self: Repo * @refspec_prefix: (allow-none): Only list refs which match this prefix * @out_all_refs: (out) (element-type utf8 utf8): Mapping from ref to checksum * @cancellable: Cancellable * @error: Error * * If @refspec_prefix is %NULL, list all local and remote refspecs, * with their current values in @out_all_refs. Otherwise, only list * refspecs which have @refspec_prefix as a prefix. */ gboolean ostree_repo_list_refs (OstreeRepo *self, const char *refspec_prefix, GHashTable **out_all_refs, GCancellable *cancellable, GError **error) { return _ostree_repo_list_refs_internal (self, TRUE, OSTREE_REPO_LIST_REFS_EXT_NONE, refspec_prefix, out_all_refs, cancellable, error); } /** * ostree_repo_list_refs_ext: * @self: Repo * @refspec_prefix: (allow-none): Only list refs which match this prefix * @out_all_refs: (out) (element-type utf8 utf8): Mapping from ref to checksum * @flags: Options controlling listing behavior * @cancellable: Cancellable * @error: Error * * If @refspec_prefix is %NULL, list all local and remote refspecs, * with their current values in @out_all_refs. Otherwise, only list * refspecs which have @refspec_prefix as a prefix. Differently from * ostree_repo_list_refs(), the prefix will not be removed from the ref * name. 
*/ gboolean ostree_repo_list_refs_ext (OstreeRepo *self, const char *refspec_prefix, GHashTable **out_all_refs, OstreeRepoListRefsExtFlags flags, GCancellable *cancellable, GError **error) { return _ostree_repo_list_refs_internal (self, FALSE, flags, refspec_prefix, out_all_refs, cancellable, error); } /** * ostree_repo_remote_list_refs: * @self: Repo * @remote_name: Name of the remote. * @out_all_refs: (out) (element-type utf8 utf8): Mapping from ref to checksum * @cancellable: Cancellable * @error: Error * */ gboolean ostree_repo_remote_list_refs (OstreeRepo *self, const char *remote_name, GHashTable **out_all_refs, GCancellable *cancellable, GError **error) { g_autoptr(GBytes) summary_bytes = NULL; g_autoptr(GHashTable) ret_all_refs = NULL; if (!ostree_repo_remote_fetch_summary (self, remote_name, &summary_bytes, NULL, cancellable, error)) return FALSE; if (summary_bytes == NULL) { return glnx_throw (error, "Remote refs not available; server has no summary file"); } else { g_autoptr(GVariant) summary = NULL; g_autoptr(GVariant) ref_map = NULL; GVariantIter iter; GVariant *child; ret_all_refs = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free); summary = g_variant_new_from_bytes (OSTREE_SUMMARY_GVARIANT_FORMAT, summary_bytes, FALSE); ref_map = g_variant_get_child_value (summary, 0); g_variant_iter_init (&iter, ref_map); while ((child = g_variant_iter_next_value (&iter)) != NULL) { const char *ref_name = NULL; g_autoptr(GVariant) csum_v = NULL; char tmp_checksum[OSTREE_SHA256_STRING_LEN+1]; g_variant_get_child (child, 0, "&s", &ref_name); if (ref_name != NULL) { g_variant_get_child (child, 1, "(t@aya{sv})", NULL, &csum_v, NULL); const guchar *csum_bytes = ostree_checksum_bytes_peek_validate (csum_v, error); if (csum_bytes == NULL) return FALSE; ostree_checksum_inplace_from_bytes (csum_bytes, tmp_checksum); g_hash_table_insert (ret_all_refs, g_strdup (ref_name), g_strdup (tmp_checksum)); } g_variant_unref (child); } } ot_transfer_out_value (out_all_refs, &ret_all_refs); return TRUE; } #ifdef OSTREE_ENABLE_EXPERIMENTAL_API static gboolean remote_list_collection_refs_process_refs (OstreeRepo *self, const gchar *remote_name, const gchar *summary_collection_id, GVariant *summary_refs, GHashTable *ret_all_refs, GError **error) { gsize j, n; for (j = 0, n = g_variant_n_children (summary_refs); j < n; j++) { const guchar *csum_bytes; g_autoptr(GVariant) ref_v = NULL, csum_v = NULL; gchar tmp_checksum[OSTREE_SHA256_STRING_LEN + 1]; const gchar *ref_name; /* Check the ref name. */ ref_v = g_variant_get_child_value (summary_refs, j); g_variant_get_child (ref_v, 0, "&s", &ref_name); if (!ostree_validate_rev (ref_name, error)) return FALSE; /* Check the commit checksum. */ g_variant_get_child (ref_v, 1, "(t@ay@a{sv})", NULL, &csum_v, NULL); csum_bytes = ostree_checksum_bytes_peek_validate (csum_v, error); if (csum_bytes == NULL) return FALSE; ostree_checksum_inplace_from_bytes (csum_bytes, tmp_checksum); g_hash_table_insert (ret_all_refs, ostree_collection_ref_new (summary_collection_id, ref_name), g_strdup (tmp_checksum)); } return TRUE; } /** * ostree_repo_remote_list_collection_refs: * @self: Repo * @remote_name: Name of the remote. * @out_all_refs: (out) (element-type OstreeCollectionRef utf8): Mapping from collection–ref to checksum * @cancellable: Cancellable * @error: Error * * List refs advertised by @remote_name, including refs which are part of * collections. 
If the repository at @remote_name has a collection ID set, its * refs will be returned with that collection ID; otherwise, they will be returned * with a %NULL collection ID in each #OstreeCollectionRef key in @out_all_refs. * Any refs for other collections stored in the repository will also be returned. * No filtering is performed. * * Since: 2017.10 */ gboolean ostree_repo_remote_list_collection_refs (OstreeRepo *self, const char *remote_name, GHashTable **out_all_refs, GCancellable *cancellable, GError **error) { g_autoptr(GBytes) summary_bytes = NULL; g_autoptr(GHashTable) ret_all_refs = NULL; /* (element-type OstreeCollectionRef utf8) */ g_autoptr(GVariant) summary_v = NULL; g_autoptr(GVariant) additional_metadata_v = NULL; g_autoptr(GVariant) summary_refs = NULL; const char *summary_collection_id; g_autoptr(GVariantIter) summary_collection_map = NULL; if (!ostree_repo_remote_fetch_summary (self, remote_name, &summary_bytes, NULL, cancellable, error)) return FALSE; if (summary_bytes == NULL) return glnx_throw (error, "Remote refs not available; server has no summary file"); ret_all_refs = g_hash_table_new_full (ostree_collection_ref_hash, ostree_collection_ref_equal, (GDestroyNotify) ostree_collection_ref_free, g_free); summary_v = g_variant_new_from_bytes (OSTREE_SUMMARY_GVARIANT_FORMAT, summary_bytes, FALSE); additional_metadata_v = g_variant_get_child_value (summary_v, 1); /* List the refs in the main map. */ if (!g_variant_lookup (additional_metadata_v, OSTREE_SUMMARY_COLLECTION_ID, "&s", &summary_collection_id)) summary_collection_id = NULL; summary_refs = g_variant_get_child_value (summary_v, 0); if (!remote_list_collection_refs_process_refs (self, remote_name, summary_collection_id, summary_refs, ret_all_refs, error)) return FALSE; /* List the refs in the collection map. 
*/ if (!g_variant_lookup (additional_metadata_v, OSTREE_SUMMARY_COLLECTION_MAP, "a{sa(s(taya{sv}))}", &summary_collection_map)) summary_collection_map = NULL; while (summary_collection_map != NULL && g_variant_iter_loop (summary_collection_map, "{s@a(s(taya{sv}))}", &summary_collection_id, &summary_refs)) { if (!remote_list_collection_refs_process_refs (self, remote_name, summary_collection_id, summary_refs, ret_all_refs, error)) return FALSE; } ot_transfer_out_value (out_all_refs, &ret_all_refs); return TRUE; } #endif /* OSTREE_ENABLE_EXPERIMENTAL_API */ static char * relative_symlink_to (const char *relpath, const char *target) { g_assert (*relpath); g_assert (*target && *target != '/'); g_autoptr(GString) buf = g_string_new (""); while (TRUE) { const char *slash = strchr (relpath, '/'); if (!slash) break; relpath = slash + 1; g_string_append (buf, "../"); } g_string_append (buf, target); return g_string_free (g_steal_pointer (&buf), FALSE); } /* May specify @rev or @alias */ gboolean _ostree_repo_write_ref (OstreeRepo *self, const char *remote, const OstreeCollectionRef *ref, const char *rev, const char *alias, GCancellable *cancellable, GError **error) { glnx_fd_close int dfd = -1; g_return_val_if_fail (remote == NULL || ref->collection_id == NULL, FALSE); g_return_val_if_fail (!(rev != NULL && alias != NULL), FALSE); if (remote != NULL && !ostree_validate_remote_name (remote, error)) return FALSE; if (ref->collection_id != NULL && !ostree_validate_collection_id (ref->collection_id, error)) return FALSE; if (!ostree_validate_rev (ref->ref_name, error)) return FALSE; if (remote == NULL && (ref->collection_id == NULL || g_strcmp0 (ref->collection_id, ostree_repo_get_collection_id (self)) == 0)) { if (!glnx_opendirat (self->repo_dir_fd, "refs/heads", TRUE, &dfd, error)) { g_prefix_error (error, "Opening %s: ", "refs/heads"); return FALSE; } } else if (remote == NULL && ref->collection_id != NULL) { glnx_fd_close int refs_mirrors_dfd = -1; /* refs/mirrors might not exist in older repositories, so create it. 
*/ if (!glnx_shutil_mkdir_p_at_open (self->repo_dir_fd, "refs/mirrors", 0777, &refs_mirrors_dfd, cancellable, error)) { g_prefix_error (error, "Opening %s: ", "refs/mirrors"); return FALSE; } if (rev != NULL) { /* Ensure we have a dir for the collection */ if (!glnx_shutil_mkdir_p_at (refs_mirrors_dfd, ref->collection_id, 0777, cancellable, error)) return FALSE; } dfd = glnx_opendirat_with_errno (refs_mirrors_dfd, ref->collection_id, TRUE); if (dfd < 0 && (errno != ENOENT || rev != NULL)) return glnx_throw_errno_prefix (error, "Opening mirrors/ dir %s", ref->collection_id); } else { glnx_fd_close int refs_remotes_dfd = -1; if (!glnx_opendirat (self->repo_dir_fd, "refs/remotes", TRUE, &refs_remotes_dfd, error)) { g_prefix_error (error, "Opening %s: ", "refs/remotes"); return FALSE; } if (rev != NULL) { /* Ensure we have a dir for the remote */ if (!glnx_shutil_mkdir_p_at (refs_remotes_dfd, remote, 0777, cancellable, error)) return FALSE; } dfd = glnx_opendirat_with_errno (refs_remotes_dfd, remote, TRUE); if (dfd < 0 && (errno != ENOENT || rev != NULL)) return glnx_throw_errno_prefix (error, "Opening remotes/ dir %s", remote); } if (rev == NULL && alias == NULL) { if (dfd >= 0) { if (unlinkat (dfd, ref->ref_name, 0) != 0) { if (errno != ENOENT) return glnx_throw_errno (error); } } } else if (rev != NULL) { if (!write_checksum_file_at (self, dfd, ref->ref_name, rev, cancellable, error)) return FALSE; } else if (alias != NULL) { const char *lastslash = strrchr (ref->ref_name, '/'); if (lastslash) { char *parent = strdupa (ref->ref_name); parent[lastslash - ref->ref_name] = '\0'; if (!glnx_shutil_mkdir_p_at (dfd, parent, 0755, cancellable, error)) return FALSE; } g_autofree char *reltarget = relative_symlink_to (ref->ref_name, alias); g_autofree char *tmplink = NULL; if (!_ostree_make_temporary_symlink_at (self->tmp_dir_fd, reltarget, &tmplink, cancellable, error)) return FALSE; if (!glnx_renameat (self->tmp_dir_fd, tmplink, dfd, ref->ref_name, error)) return FALSE; } if (!_ostree_repo_update_mtime (self, error)) return FALSE; return TRUE; } gboolean _ostree_repo_update_refs (OstreeRepo *self, GHashTable *refs, /* (element-type utf8 utf8) */ GCancellable *cancellable, GError **error) { GHashTableIter hash_iter; gpointer key, value; g_hash_table_iter_init (&hash_iter, refs); while (g_hash_table_iter_next (&hash_iter, &key, &value)) { const char *refspec = key; const char *rev = value; g_autofree char *remote = NULL; g_autofree char *ref_name = NULL; if (!ostree_parse_refspec (refspec, &remote, &ref_name, error)) return FALSE; const OstreeCollectionRef ref = { NULL, ref_name }; if (!_ostree_repo_write_ref (self, remote, &ref, rev, NULL, cancellable, error)) return FALSE; } return TRUE; } gboolean _ostree_repo_update_collection_refs (OstreeRepo *self, GHashTable *refs, /* (element-type OstreeCollectionRef utf8) */ GCancellable *cancellable, GError **error) { GHashTableIter hash_iter; gpointer key, value; g_hash_table_iter_init (&hash_iter, refs); while (g_hash_table_iter_next (&hash_iter, &key, &value)) { const OstreeCollectionRef *ref = key; const char *rev = value; if (!_ostree_repo_write_ref (self, NULL, ref, rev, NULL, cancellable, error)) return FALSE; } return TRUE; } /** * ostree_repo_list_collection_refs: * @self: Repo * @match_collection_id: (nullable): If non-%NULL, only list refs from this collection * @out_all_refs: (out) (element-type OstreeCollectionRef utf8): Mapping from collection–ref to checksum * @flags: Options controlling listing behavior * @cancellable: Cancellable * @error: 
Error * * List all local, mirrored, and remote refs, mapping them to the commit * checksums they currently point to in @out_all_refs. If @match_collection_id * is specified, the results will be limited to those with an equal collection * ID. * * #OstreeCollectionRefs are guaranteed to be returned with their collection ID * set to a non-%NULL value; so no refs from `refs/heads` will be listed if no * collection ID is configured for the repository * (ostree_repo_get_collection_id()). * * If you want to exclude refs from `refs/remotes`, use * %OSTREE_REPO_LIST_REFS_EXT_EXCLUDE_REMOTES in @flags. * * Returns: %TRUE on success, %FALSE otherwise * Since: 2017.8 */ gboolean ostree_repo_list_collection_refs (OstreeRepo *self, const char *match_collection_id, GHashTable **out_all_refs, OstreeRepoListRefsExtFlags flags, GCancellable *cancellable, GError **error) { g_return_val_if_fail (OSTREE_IS_REPO (self), FALSE); g_return_val_if_fail (cancellable == NULL || G_IS_CANCELLABLE (cancellable), FALSE); g_return_val_if_fail (error == NULL || *error == NULL, FALSE); if (match_collection_id != NULL && !ostree_validate_collection_id (match_collection_id, error)) return FALSE; const gchar *refs_dirs[] = { "refs/mirrors", "refs/remotes", NULL }; if (flags & OSTREE_REPO_LIST_REFS_EXT_EXCLUDE_REMOTES) refs_dirs[1] = NULL; g_autoptr(GHashTable) ret_all_refs = NULL; ret_all_refs = g_hash_table_new_full (ostree_collection_ref_hash, ostree_collection_ref_equal, (GDestroyNotify) ostree_collection_ref_free, g_free); g_auto(GLnxDirFdIterator) dfd_iter = { 0, }; g_autoptr(GString) base_path = g_string_new (""); const gchar *main_collection_id = ostree_repo_get_collection_id (self); if (main_collection_id != NULL && (match_collection_id == NULL || g_strcmp0 (match_collection_id, main_collection_id) == 0)) { glnx_fd_close int refs_heads_dfd = -1; if (!glnx_opendirat (self->repo_dir_fd, "refs/heads", TRUE, &refs_heads_dfd, error)) return FALSE; if (!enumerate_refs_recurse (self, NULL, flags, main_collection_id, refs_heads_dfd, base_path, refs_heads_dfd, ".", ret_all_refs, cancellable, error)) return FALSE; } g_string_truncate (base_path, 0); for (const char **iter = refs_dirs; iter && *iter; iter++) { const char *refs_dir = *iter; gboolean refs_dir_exists = FALSE; if (!ot_dfd_iter_init_allow_noent (self->repo_dir_fd, refs_dir, &dfd_iter, &refs_dir_exists, error)) return FALSE; while (refs_dir_exists) { struct dirent *dent; glnx_fd_close int subdir_fd = -1; const gchar *current_collection_id; g_autofree gchar *remote_collection_id = NULL; if (!glnx_dirfd_iterator_next_dent_ensure_dtype (&dfd_iter, &dent, cancellable, error)) return FALSE; if (!dent) break; if (dent->d_type != DT_DIR) continue; if (g_strcmp0 (refs_dir, "refs/mirrors") == 0) { if (match_collection_id != NULL && g_strcmp0 (match_collection_id, dent->d_name) != 0) continue; else current_collection_id = dent->d_name; } else /* refs_dir = "refs/remotes" */ { g_autoptr(GError) local_error = NULL; if (!ostree_repo_get_remote_option (self, dent->d_name, "collection-id", NULL, &remote_collection_id, &local_error) || !ostree_validate_collection_id (remote_collection_id, &local_error)) { g_debug ("Ignoring remote ‘%s’ due to no valid collection ID being configured for it: %s", dent->d_name, local_error->message); g_clear_error (&local_error); continue; } if (match_collection_id != NULL && g_strcmp0 (match_collection_id, current_collection_id) != 0) continue; else current_collection_id = remote_collection_id; } if (!glnx_opendirat (dfd_iter.fd, dent->d_name, TRUE, 
&subdir_fd, error)) return FALSE; if (!enumerate_refs_recurse (self, NULL, flags, current_collection_id, subdir_fd, base_path, subdir_fd, ".", ret_all_refs, cancellable, error)) return FALSE; } } ot_transfer_out_value (out_all_refs, &ret_all_refs); return TRUE; }
1
11,944
Hm, so before this change, `current_collection_id` looks like it was actually an uninitialized pointer in that case. It seems weird to me that the tests pass both ways. I haven't looked closely at the tests - is there something wrong there that caused this to spuriously pass before? (A minimal sketch of the hazard follows this record.)
ostreedev-ostree
c
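To make the point above concrete, a minimal self-contained sketch (hypothetical names, not the ostree code; it uses plain `strcmp` with non-NULL inputs where the original uses the NULL-safe `g_strcmp0`) of the fixed shape of that branch: compare against `remote_collection_id`, which has been assigned, and only afterwards copy it into `current_collection_id`, so the pointer is never read uninitialized.

```c
#include <stdio.h>
#include <string.h>

/* Fixed shape of the remotes branch: the pre-fix code compared
 * current_collection_id, which was still uninitialized on this path --
 * an undefined read that can "pass" or fail depending on whatever
 * happens to be on the stack, which is why tests passed both ways. */
static const char *pick_collection(const char *match_collection_id,
                                   const char *remote_collection_id)
{
    const char *current_collection_id = NULL;

    if (match_collection_id != NULL &&
        strcmp(match_collection_id, remote_collection_id) != 0)
        return NULL; /* no match: skip this remote */

    current_collection_id = remote_collection_id;
    return current_collection_id;
}

int main(void)
{
    const char *id = pick_collection("org.example.Coll", "org.example.Coll");
    printf("%s\n", id != NULL ? id : "(skipped)");
    return 0;
}
```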
@@ -46,9 +46,13 @@ * If you will separate the definitions for the parsers, you must revise the * code related to the symbol table. */ typedef enum { - K_IGNORE = -2, - K_UNDEFINED, - K_CONSTANT, + /* parser private items */ + K_IGNORE = -16, + K_DEFINE, + + K_UNDEFINED = KEYWORD_NONE, + /* the following items are also used as indices for VerilogKinds[] and SystemVerilogKinds[] */ + K_CONSTANT = 0, K_EVENT, K_FUNCTION, K_MODULE,
1
/* * Copyright (c) 2003, Darren Hiebert * * This source code is released for free distribution under the terms of the * GNU General Public License version 2 or (at your option) any later version. * * This module contains functions for generating tags for the Verilog HDL * (Hardware Description Language). * * Language definition documents: * http://www.eg.bucknell.edu/~cs320/verilog/verilog-manual.html * http://www.sutherland-hdl.com/on-line_ref_guide/vlog_ref_top.html * http://www.verilog.com/VerilogBNF.html * http://eesun.free.fr/DOC/VERILOG/verilog_manual1.html */ /* * INCLUDE FILES */ #include "general.h" /* must always come first */ #include <string.h> #include "debug.h" #include "entry.h" #include "keyword.h" #include "options.h" #include "parse.h" #include "read.h" #include "routines.h" #include "xtag.h" /* * MACROS */ #define NUMBER_LANGUAGES 2 /* Indicates number of defined indexes */ #define IDX_SYSTEMVERILOG 0 #define IDX_VERILOG 1 /* * DATA DECLARATIONS */ /* A callback function searching a symbol from the cork symbol table assumes * this kind definitions are shared in Verilog and SystemVerilog parsers. * If you will separate the definitions for the parsers, you must revise the * code related to the symbol table. */ typedef enum { K_IGNORE = -2, K_UNDEFINED, K_CONSTANT, K_EVENT, K_FUNCTION, K_MODULE, K_NET, K_PORT, K_REGISTER, K_TASK, K_BLOCK, K_ASSERTION, K_CLASS, K_COVERGROUP, K_ENUM, K_INTERFACE, K_MODPORT, K_PACKAGE, K_PROGRAM, K_PROTOTYPE, K_PROPERTY, K_STRUCT, K_TYPEDEF } verilogKind; typedef struct { const char *keyword; verilogKind kind; short isValid [NUMBER_LANGUAGES]; } keywordAssoc; typedef struct sTokenInfo { verilogKind kind; vString* name; /* the name of the token */ unsigned long lineNumber; /* line number where token was found */ MIOPos filePosition; /* file position where token was found */ struct sTokenInfo* scope; /* context of keyword */ int nestLevel; /* Current nest level */ verilogKind lastKind; /* Kind of last found tag */ vString* blockName; /* Current block name */ vString* inheritance; /* Class inheritance */ bool prototype; /* Is only a prototype */ bool classScope; /* Context is local to the current sub-context */ } tokenInfo; /* * DATA DEFINITIONS */ static int Ungetc; static int Lang_verilog; static int Lang_systemverilog; static kindDefinition VerilogKinds [] = { { true, 'c', "constant", "constants (define, parameter, specparam)" }, { true, 'e', "event", "events" }, { true, 'f', "function", "functions" }, { true, 'm', "module", "modules" }, { true, 'n', "net", "net data types" }, { true, 'p', "port", "ports" }, { true, 'r', "register", "register data types" }, { true, 't', "task", "tasks" }, { true, 'b', "block", "blocks" } }; static kindDefinition SystemVerilogKinds [] = { { true, 'c', "constant", "constants (define, parameter, specparam, enum values)" }, { true, 'e', "event", "events" }, { true, 'f', "function", "functions" }, { true, 'm', "module", "modules" }, { true, 'n', "net", "net data types" }, { true, 'p', "port", "ports" }, { true, 'r', "register", "register data types" }, { true, 't', "task", "tasks" }, { true, 'b', "block", "blocks" }, { true, 'A', "assert", "assertions" }, { true, 'C', "class", "classes" }, { true, 'V', "covergroup","covergroups" }, { true, 'E', "enum", "enumerators" }, { true, 'I', "interface", "interfaces" }, { true, 'M', "modport", "modports" }, { true, 'K', "package", "packages" }, { true, 'P', "program", "programs" }, { false,'Q', "prototype", "prototypes" }, { true, 'R', "property", "properties" }, { true, 
'S', "struct", "structs and unions" }, { true, 'T', "typedef", "type declarations" } }; static const keywordAssoc KeywordTable [] = { /* SystemVerilog */ /* | Verilog */ /* keyword keyword ID | | */ { "`define", K_CONSTANT, { 1, 1 } }, { "event", K_EVENT, { 1, 1 } }, { "function", K_FUNCTION, { 1, 1 } }, { "inout", K_PORT, { 1, 1 } }, { "input", K_PORT, { 1, 1 } }, { "integer", K_REGISTER, { 1, 1 } }, { "module", K_MODULE, { 1, 1 } }, { "output", K_PORT, { 1, 1 } }, { "parameter", K_CONSTANT, { 1, 1 } }, { "localparam",K_CONSTANT, { 1, 1 } }, { "genvar", K_REGISTER, { 1, 1 } }, { "real", K_REGISTER, { 1, 1 } }, { "realtime", K_REGISTER, { 1, 1 } }, { "reg", K_REGISTER, { 1, 1 } }, { "specparam", K_CONSTANT, { 1, 1 } }, { "supply0", K_NET, { 1, 1 } }, { "supply1", K_NET, { 1, 1 } }, { "task", K_TASK, { 1, 1 } }, { "time", K_REGISTER, { 1, 1 } }, { "tri0", K_NET, { 1, 1 } }, { "tri1", K_NET, { 1, 1 } }, { "triand", K_NET, { 1, 1 } }, { "tri", K_NET, { 1, 1 } }, { "trior", K_NET, { 1, 1 } }, { "trireg", K_NET, { 1, 1 } }, { "uwire", K_NET, { 1, 1 } }, { "wand", K_NET, { 1, 1 } }, { "wire", K_NET, { 1, 1 } }, { "wor", K_NET, { 1, 1 } }, { "begin", K_BLOCK, { 1, 1 } }, { "end", K_BLOCK, { 1, 1 } }, { "signed", K_IGNORE, { 1, 1 } }, { "automatic", K_IGNORE, { 1, 0 } }, { "assert", K_ASSERTION, { 1, 0 } }, { "assume", K_ASSERTION, { 1, 0 } }, { "bit", K_REGISTER, { 1, 0 } }, { "byte", K_REGISTER, { 1, 0 } }, { "chandle", K_REGISTER, { 1, 0 } }, { "class", K_CLASS, { 1, 0 } }, { "const", K_IGNORE, { 1, 0 } }, { "cover", K_ASSERTION, { 1, 0 } }, { "covergroup",K_COVERGROUP,{ 1, 0 } }, { "enum", K_ENUM, { 1, 0 } }, { "extern", K_IGNORE, { 1, 0 } }, { "int", K_REGISTER, { 1, 0 } }, { "interface", K_INTERFACE, { 1, 0 } }, { "local", K_IGNORE, { 1, 0 } }, { "logic", K_REGISTER, { 1, 0 } }, { "longint", K_REGISTER, { 1, 0 } }, { "modport", K_MODPORT, { 1, 0 } }, { "package", K_PACKAGE, { 1, 0 } }, { "program", K_PROGRAM, { 1, 0 } }, { "property", K_PROPERTY, { 1, 0 } }, { "pure", K_IGNORE, { 1, 0 } }, { "rand", K_IGNORE, { 1, 0 } }, { "randc", K_IGNORE, { 1, 0 } }, { "ref", K_PORT, { 1, 0 } }, { "sequence", K_PROPERTY, { 1, 0 } }, { "shortint", K_REGISTER, { 1, 0 } }, { "shortreal", K_REGISTER, { 1, 0 } }, { "static", K_IGNORE, { 1, 0 } }, { "string", K_REGISTER, { 1, 0 } }, { "struct", K_STRUCT, { 1, 0 } }, { "type", K_IGNORE, { 1, 0 } }, { "typedef", K_TYPEDEF, { 1, 0 } }, { "union", K_STRUCT, { 1, 0 } }, { "unsigned", K_IGNORE, { 1, 0 } }, { "virtual", K_IGNORE, { 1, 0 } }, { "var", K_REGISTER, { 1, 0 } }, { "void", K_IGNORE, { 1, 0 } } }; static tokenInfo *currentContext = NULL; static tokenInfo *tagContents = NULL; /* * FUNCTION DEFINITIONS */ static short isContainer (tokenInfo const* token) { switch (token->kind) { case K_MODULE: case K_TASK: case K_FUNCTION: case K_BLOCK: case K_CLASS: case K_COVERGROUP: case K_INTERFACE: case K_PACKAGE: case K_PROGRAM: case K_PROPERTY: case K_TYPEDEF: case K_ENUM: return true; default: return false; } } static short isTempContext (tokenInfo const* token) { switch (token->kind) { case K_TYPEDEF: case K_ENUM: return true; default: return false; } } static short isVariable (tokenInfo const* token) { switch (token->kind) { case K_CONSTANT: case K_EVENT: case K_NET: case K_PORT: case K_REGISTER: return true; default: return false; } } static short hasSimplePortList (tokenInfo const* token) { switch (token->kind) { case K_TASK: case K_FUNCTION: case K_CLASS: case K_INTERFACE: case K_PROGRAM: case K_PROPERTY: return true; default: return false; } } static short 
isPrototype (tokenInfo const* token) { if (strcmp (vStringValue (token->name), "extern") == 0 || strcmp (vStringValue (token->name), "pure") == 0 ) { return true; } else { return false; } } static tokenInfo *newToken (void) { tokenInfo *const token = xMalloc (1, tokenInfo); token->kind = K_UNDEFINED; token->name = vStringNew (); token->lineNumber = getInputLineNumber (); token->filePosition = getInputFilePosition (); token->scope = NULL; token->nestLevel = 0; token->lastKind = K_UNDEFINED; token->blockName = vStringNew (); token->inheritance = vStringNew (); token->prototype = false; token->classScope = false; return token; } static void deleteToken (tokenInfo * const token) { if (token != NULL) { vStringDelete (token->name); vStringDelete (token->blockName); vStringDelete (token->inheritance); eFree (token); } } static tokenInfo *pushToken (tokenInfo * const token, tokenInfo * const tokenPush) { tokenPush->scope = token; return tokenPush; } static tokenInfo *popToken (tokenInfo * const token) { tokenInfo *localToken; if (token != NULL) { localToken = token->scope; deleteToken (token); return localToken; } return NULL; } static void pruneTokens (tokenInfo * token) { while ((token = popToken (token))); } static const char *getNameForKind (const verilogKind kind) { if (isInputLanguage (Lang_systemverilog)) return (SystemVerilogKinds[kind]).name; else /* isInputLanguage (Lang_verilog) */ return (VerilogKinds[kind]).name; } static char kindEnabled (const verilogKind kind) { if (isInputLanguage (Lang_systemverilog)) return SystemVerilogKinds[kind].enabled; else /* isInputLanguage (Lang_verilog) */ return VerilogKinds[kind].enabled; } static void buildKeywordHash (const langType language, unsigned int idx) { size_t i; const size_t count = ARRAY_SIZE (KeywordTable); for (i = 0 ; i < count ; ++i) { const keywordAssoc *p = &KeywordTable [i]; if (p->isValid [idx]) addKeyword (p->keyword, language, (int) p->kind); } } static void initializeVerilog (const langType language) { Lang_verilog = language; buildKeywordHash (language, IDX_VERILOG); } static void initializeSystemVerilog (const langType language) { Lang_systemverilog = language; buildKeywordHash (language, IDX_SYSTEMVERILOG); } static void vUngetc (int c) { Assert (Ungetc == '\0'); Ungetc = c; } /* Mostly copied from cppSkipOverCComment() in cpreprocessor.c. * * cppSkipOverCComment() uses the internal ungetc buffer of * CPreProcessor. On the other hand, the Verilog parser uses * getcFromInputFile() directly. getcFromInputFile() uses just * another internal ungetc buffer. Using them mixed way will * cause a trouble. 
*/ static int verilogSkipOverCComment (void) { int c = getcFromInputFile(); while (c != EOF) { if (c != '*') c = getcFromInputFile (); else { const int next = getcFromInputFile (); if (next != '/') c = next; else { c = SPACE; /* replace comment with space */ break; } } } return c; } static int vGetc (void) { int c; if (Ungetc == '\0') c = getcFromInputFile (); else { c = Ungetc; Ungetc = '\0'; } if (c == '/') { int c2 = getcFromInputFile (); if (c2 == EOF) return EOF; else if (c2 == '/') /* strip comment until end-of-line */ { do c = getcFromInputFile (); while (c != '\n' && c != EOF); } else if (c2 == '*') /* strip block comment */ { c = verilogSkipOverCComment(); } else { ungetcToInputFile (c2); } } else if (c == '"') /* strip string contents */ { int c2; do c2 = getcFromInputFile (); while (c2 != '"' && c2 != EOF); c = '@'; } return c; } static bool isIdentifierCharacter (const int c) { return (bool)(isalnum (c) || c == '_' || c == '`'); } static int skipWhite (int c) { while (isspace (c)) c = vGetc (); return c; } static int skipPastMatch (const char *const pair) { const int begin = pair [0], end = pair [1]; int matchLevel = 1; int c; do { c = vGetc (); if (c == begin) ++matchLevel; else if (c == end) --matchLevel; } while (c != EOF && matchLevel > 0); return vGetc (); } static void skipToSemiColon (void) { int c; do { c = vGetc (); } while (c != EOF && c != ';'); } static bool readIdentifier (tokenInfo *const token, int c) { vStringClear (token->name); if (isIdentifierCharacter (c)) { while (isIdentifierCharacter (c)) { vStringPut (token->name, c); c = vGetc (); } vUngetc (c); token->lineNumber = getInputLineNumber (); token->filePosition = getInputFilePosition (); } return (bool)(vStringLength (token->name) > 0); } static int skipMacro (int c) { tokenInfo *token = newToken ();; if (c == '`') { /* Skip keyword */ if (isIdentifierCharacter (c = vGetc ())) { readIdentifier (token, c); c = vGetc (); /* Skip next keyword if macro is `ifdef or `ifndef or `elsif*/ if (strcmp (vStringValue (token->name), "ifdef") == 0 || strcmp (vStringValue (token->name), "ifndef") == 0 || strcmp (vStringValue (token->name), "elsif") == 0) { verbose ("%c\n", c); c = skipWhite (c); readIdentifier (token, c); c = vGetc (); verbose ("Skipping conditional macro %s\n", vStringValue (token->name)); } /* Skip macro functions */ else { c = skipWhite (c); if (c == '(') { c = skipPastMatch ("()"); } } } } deleteToken (token); return c; } static verilogKind getKindForToken (tokenInfo *const token) { return (verilogKind) lookupKeyword (vStringValue (token->name), getInputLanguage () ); } static void updateKind (tokenInfo *const token) { token->kind = getKindForToken (token); } static void createContext (tokenInfo *const scope) { if (scope) { vString *contextName = vStringNew (); /* Determine full context name */ if (currentContext->kind != K_UNDEFINED) { vStringCopy (contextName, currentContext->name); vStringPut (contextName, '.'); } vStringCat (contextName, scope->name); /* Create context */ currentContext = pushToken (currentContext, scope); vStringCopy (currentContext->name, contextName); vStringDelete (contextName); verbose ("Created new context %s (kind %d)\n", vStringValue (currentContext->name), currentContext->kind); } } static void dropEndContext (tokenInfo *const token) { verbose ("current context %s; context kind %0d; nest level %0d\n", vStringValue (currentContext->name), currentContext->kind, currentContext->nestLevel); vString *endTokenName = vStringNewInit("end"); if ((currentContext->kind == 
K_COVERGROUP && strcmp (vStringValue (token->name), "endgroup") == 0) || (currentContext->kind == K_BLOCK && currentContext->nestLevel == 0 && strcmp (vStringValue (token->name), vStringValue (endTokenName)) == 0) ) { verbose ("Dropping context %s\n", vStringValue (currentContext->name)); currentContext = popToken (currentContext); } else { vStringCatS (endTokenName, getNameForKind (currentContext->kind)); if (strcmp (vStringValue (token->name), vStringValue (endTokenName)) == 0) { verbose ("Dropping context %s\n", vStringValue (currentContext->name)); currentContext = popToken (currentContext); if (currentContext->classScope) { verbose ("Dropping local context %s\n", vStringValue (currentContext->name)); currentContext = popToken (currentContext); } } } vStringDelete(endTokenName); } static void createTag (tokenInfo *const token) { tagEntryInfo tag; verilogKind kind; /* Determine if kind is prototype */ if (currentContext->prototype) { kind = K_PROTOTYPE; } else { kind = token->kind; } /* Do nothing it tag name is empty or tag kind is disabled */ if (vStringLength (token->name) == 0 || ! kindEnabled (kind)) { verbose ("Unexpected empty token or kind disabled\n"); return; } /* Create tag */ initTagEntry (&tag, vStringValue (token->name), kind); tag.lineNumber = token->lineNumber; tag.filePosition = token->filePosition; verbose ("Adding tag %s (kind %d)", vStringValue (token->name), kind); if (currentContext->kind != K_UNDEFINED) { verbose (" to context %s\n", vStringValue (currentContext->name)); currentContext->lastKind = kind; tag.extensionFields.scopeKindIndex = currentContext->kind; tag.extensionFields.scopeName = vStringValue (currentContext->name); } verbose ("\n"); if (vStringLength (token->inheritance) > 0) { tag.extensionFields.inheritance = vStringValue (token->inheritance); verbose ("Class %s extends %s\n", vStringValue (token->name), tag.extensionFields.inheritance); } int corkIndex = makeTagEntry (&tag); if (isInputLanguage (Lang_systemverilog) && corkIndex != CORK_NIL && kind == K_TYPEDEF) registerEntry (corkIndex); if (isXtagEnabled(XTAG_QUALIFIED_TAGS) && currentContext->kind != K_UNDEFINED) { vString *const scopedName = vStringNew (); vStringCopy (scopedName, currentContext->name); vStringPut (scopedName, '.'); vStringCat (scopedName, token->name); tag.name = vStringValue (scopedName); markTagExtraBit (&tag, XTAG_QUALIFIED_TAGS); makeTagEntry (&tag); vStringDelete (scopedName); } /* Push token as context if it is a container */ if (isContainer (token)) { tokenInfo *newScope = newToken (); vStringCopy (newScope->name, token->name); newScope->kind = kind; createContext (newScope); /* Include found contents in context */ if (tagContents != NULL) { tokenInfo* content = tagContents; verbose ("Including tagContents\n"); do { createTag (content); content = content->scope; } while (content); } /* Drop temporary contexts */ if (isTempContext (currentContext)) { verbose ("Dropping context %s\n", vStringValue (currentContext->name)); currentContext = popToken (currentContext); } } /* Clear no longer required inheritance information */ vStringClear (token->inheritance); } static bool findBlockName (tokenInfo *const token) { int c; c = skipWhite (vGetc ()); if (c == ':') { c = skipWhite (vGetc ()); readIdentifier (token, c); return (bool) (vStringLength (token->name) > 0); } else vUngetc (c); return false; } static void processBlock (tokenInfo *const token) { bool blockStart = false; bool blockEnd = false; if (strcmp (vStringValue (token->name), "begin") == 0) { 
currentContext->nestLevel++; blockStart = true; } else if (strcmp (vStringValue (token->name), "end") == 0) { currentContext->nestLevel--; blockEnd = true; } if (findBlockName (token)) { verbose ("Found block: %s\n", vStringValue (token->name)); if (blockStart) { createTag (token); verbose ("Current context %s\n", vStringValue (currentContext->name)); } if (blockEnd && currentContext->kind == K_BLOCK && currentContext->nestLevel <= 1) { verbose ("Dropping context %s\n", vStringValue (currentContext->name)); currentContext = popToken (currentContext); } } } static void processPortList (int c) { if ((c = skipWhite (c)) == '(') { tokenInfo *token = newToken (); /* Get next non-whitespace character after ( */ c = skipWhite (vGetc ()); while (c != ';' && c != EOF) { if (c == '[') { c = skipPastMatch ("[]"); } else if (c == '(') { c = skipPastMatch ("()"); } else if (c == '{') { c = skipPastMatch ("{}"); } else if (c == '`') { c = skipMacro (c); } else if (c == '=') { /* Search for next port or end of port declaration */ while (c != ',' && c != ')' && c != EOF) { c = skipWhite (vGetc ()); } } else if (isIdentifierCharacter (c)) { readIdentifier (token, c); updateKind (token); if (token->kind == K_UNDEFINED) { /* Only add port name if it is the last keyword. * First keyword can be a dynamic type, like a class name */ c = skipWhite (vGetc ()); if (! isIdentifierCharacter (c) || c == '`') { verbose ("Found port: %s\n", vStringValue (token->name)); token->kind = K_PORT; createTag (token); } } else { c = skipWhite (vGetc ()); } } else { c = skipWhite (vGetc ()); } } if (! isIdentifierCharacter (c)) vUngetc (c); deleteToken (token); } else if (c != EOF) { vUngetc (c); } } static void processFunction (tokenInfo *const token) { int c; tokenInfo *classType; /* Search for function name * Last identifier found before a '(' or a ';' is the function name */ c = skipWhite (vGetc ()); do { readIdentifier (token, c); c = skipWhite (vGetc ()); /* Identify class type prefixes and create respective context*/ if (isInputLanguage (Lang_systemverilog) && c == ':') { c = vGetc (); if (c == ':') { verbose ("Found function declaration with class type %s\n", vStringValue (token->name)); classType = newToken (); vStringCopy (classType->name, token->name); classType->kind = K_CLASS; createContext (classType); currentContext->classScope = true; } else { vUngetc (c); } } } while (c != '(' && c != ';' && c != EOF); if ( vStringLength (token->name) > 0 ) { verbose ("Found function: %s\n", vStringValue (token->name)); /* Create tag */ createTag (token); /* Get port list from function */ processPortList (c); } } static void tagNameList (tokenInfo* token, int c); static void processEnum (tokenInfo *const token) { int c; /* Read enum type */ c = skipWhite (vGetc ()); if (isIdentifierCharacter (c)) { tokenInfo* typeQueue = NULL; tokenInfo* type; do { type = newToken (); readIdentifier (type, c); updateKind (type); typeQueue = pushToken (typeQueue, type); verbose ("Enum type %s\n", vStringValue (type->name)); c = skipWhite (vGetc ()); } while (isIdentifierCharacter (c)); /* Undefined kind means that we've reached the end of the * declaration without having any contents defined, which * indicates that this is in fact a forward declaration */ if (type->kind == K_UNDEFINED && (typeQueue->scope == NULL || typeQueue->scope->kind != K_UNDEFINED)) { verbose ("Prototype enum found \"%s\"\n", vStringValue (type->name)); type->kind = K_PROTOTYPE; createTag (type); pruneTokens (typeQueue); return; } /* Cleanup type queue */ pruneTokens 
(typeQueue); } /* Skip bus width definition */ if (c == '[') { c = skipWhite (skipPastMatch ("[]")); } /* Search enum elements */ if (c == '{') { c = skipWhite (vGetc ()); while (isIdentifierCharacter (c)) { tokenInfo *content = newToken (); readIdentifier (content, c); content->kind = K_CONSTANT; tagContents = pushToken (tagContents, content); verbose ("Pushed enum element \"%s\"\n", vStringValue (content->name)); c = skipWhite (vGetc ()); /* Skip element ranges */ /* TODO Implement element ranges */ if (c == '[') { c = skipWhite (skipPastMatch ("[]")); } /* Skip value assignments */ if (c == '=') { while (c != '}' && c != ',' && c != EOF) { c = skipWhite (vGetc ()); /* Skip enum value concatenations */ if (c == '{') { c = skipWhite (skipPastMatch ("{}")); } } } /* Skip comma */ if (c == ',') { c = skipWhite (vGetc ()); } /* End of enum elements list */ if (c == '}') { c = skipWhite (vGetc ()); break; } } } /* Following identifiers are tag names */ verbose ("Find enum tags. Token %s kind %d\n", vStringValue (token->name), token->kind); tagNameList (token, c); } static void processStruct (tokenInfo *const token) { int c; c = skipWhite (vGetc ()); /* Skip packed, signed, and unsigned */ while (isIdentifierCharacter (c)) { readIdentifier (token, c); c = skipWhite (vGetc ()); } /* Skip struct contents */ if (c == '{') { c = skipWhite (skipPastMatch ("{}")); } else { verbose ("Prototype struct found \"%s\"\n", vStringValue (token->name)); token->kind = K_PROTOTYPE; createTag (token); return; } /* Skip packed_dimension */ while (c == '[') { c = skipWhite (skipPastMatch ("[]")); } /* Following identifiers are tag names */ verbose ("Find struct|union tags. Token %s kind %d\n", vStringValue (token->name), token->kind); tagNameList (token, c); } static void processTypedef (tokenInfo *const token) { int c; /* Get typedef type */ c = skipWhite (vGetc ()); if (isIdentifierCharacter (c)) { readIdentifier (token, c); updateKind (token); switch (token->kind) { case K_INTERFACE: /* Expecting `typedef interface class` */ c = skipWhite (vGetc ()); readIdentifier (token, c); updateKind (token); case K_CLASS: /* A typedef class is just a prototype */ currentContext->prototype = true; break; case K_ENUM: /* Call enum processing function */ token->kind = K_TYPEDEF; processEnum (token); return; case K_STRUCT: /* Call enum processing function */ token->kind = K_TYPEDEF; processStruct (token); return; default : break; } c = skipWhite (vGetc ()); } /* Skip signed or unsiged */ if (isIdentifierCharacter (c)) { readIdentifier (token, c); c = skipWhite (vGetc ()); } /* Skip bus width definition */ while (c == '[') { c = skipWhite (skipPastMatch ("[]")); } /* Skip remaining identifiers */ while (isIdentifierCharacter (c)) { readIdentifier (token, c); c = skipWhite (vGetc ()); } /* Skip typedef contents */ if (c == '{') { c = skipWhite (skipPastMatch ("{}")); } /* Skip past class parameter override */ if (c == '#') { c = skipWhite (vGetc ()); if (c == '(') { c = skipWhite (skipPastMatch ("()")); } } /* Read typedef name */ if (isIdentifierCharacter (c)) { readIdentifier (token, c); } else { vUngetc (c); /* Empty typedefs are forward declarations and are considered * prototypes */ if (token->kind == K_UNDEFINED) { currentContext->prototype = true; } } /* Use last identifier to create tag, but always with kind typedef */ token->kind = K_TYPEDEF; createTag (token); } static void processClass (tokenInfo *const token) { /*Note: At the moment, only identifies typedef name and not its contents */ int c; tokenInfo *extra; 
tokenInfo *parameters = NULL; /* Get identifiers */ c = skipWhite (vGetc ()); if (isIdentifierCharacter (c)) { readIdentifier (token, c); c = skipWhite (vGetc ()); } /* Find class parameters list */ if (c == '#') { c = skipWhite (vGetc ()); if (c == '(') { parameters = newToken (); do { c = skipWhite (vGetc ()); readIdentifier (parameters, c); updateKind (parameters); verbose ("Found class parameter %s\n", vStringValue (parameters->name)); if (parameters->kind == K_UNDEFINED) { parameters->kind = K_CONSTANT; parameters = pushToken (parameters, newToken ()); c = vGetc(); while (c != ',' && c != ')' && c != EOF) { c = vGetc(); } } } while (c != ')' && c != EOF); c = skipWhite (vGetc ()); parameters = popToken (parameters); } } /* Search for inheritance information */ if (isIdentifierCharacter (c)) { extra = newToken (); readIdentifier (extra, c); c = skipWhite (vGetc ()); if (strcmp (vStringValue (extra->name), "extends") == 0) { readIdentifier (extra, c); vStringCopy (token->inheritance, extra->name); verbose ("Inheritance %s\n", vStringValue (token->inheritance)); } deleteToken (extra); } /* Use last identifier to create tag */ createTag (token); /* Add parameter list */ while (parameters) { createTag (parameters); parameters = popToken (parameters); } } static bool doesNameForKindExist (int corkIndex, tagEntryInfo *entry, void *data) { verilogKind *kind = data; if (entry->kindIndex == *kind) return false; return true; } static bool isAlreadyTaggedAs (tokenInfo *token, verilogKind kind) { if (!isInputLanguage (Lang_systemverilog)) return false; vString *name = token->name; if (vStringIsEmpty (name)) return false; return (foreachEntriesInScope (CORK_NIL, vStringValue (name), doesNameForKindExist, &kind) == false); } static void tagNameList (tokenInfo* token, int c) { verilogKind localKind; bool repeat; /* Many keywords can have bit width. * reg [3:0] net_name; * inout [(`DBUSWIDTH-1):0] databus; */ if (c == '(') c = skipPastMatch ("()"); c = skipWhite (c); if (c == '[') c = skipPastMatch ("[]"); c = skipWhite (c); if (c == '#') { c = vGetc (); if (c == '(') c = skipPastMatch ("()"); } c = skipWhite (c); do { repeat = false; while (c == '`' && c != EOF) { c = skipMacro (c); } if (isIdentifierCharacter (c)) { readIdentifier (token, c); localKind = getKindForToken (token); if (localKind != K_UNDEFINED || isAlreadyTaggedAs (token, K_TYPEDEF)) { /* Update kind unless it's a port, a constant (parameter) or an ignored keyword */ if (token->kind != K_PORT && token->kind != K_CONSTANT && localKind != K_IGNORE) { token->kind = localKind; } repeat = true; } else /* Create tag in case name is not a known kind ... 
*/ { createTag (token); } } else break; c = skipWhite (vGetc ()); if (c == '[') c = skipPastMatch ("[]"); c = skipWhite (c); if (c == '=') { c = skipWhite (vGetc ()); if (c == '\'') { c = skipWhite (vGetc ()); if (c != '{') vUngetc (c); } if (c == '{') skipPastMatch ("{}"); else { /* Skip until end of current name, kind or parameter list definition */ do { c = vGetc (); if (c == '(') c = skipPastMatch ("()"); } while (c != EOF && c != ',' && c != ';' && c != ')'); } } if (c == ',') { c = skipWhite (vGetc ()); repeat = true; } } while (repeat); vUngetc (c); } static void findTag (tokenInfo *const token) { verbose ("Checking token %s of kind %d\n", vStringValue (token->name), token->kind); if (currentContext->kind != K_UNDEFINED) { /* Drop context, but only if an end token is found */ dropEndContext (token); } if (token->kind == K_CONSTANT && vStringItem (token->name, 0) == '`') { /* Bug #961001: Verilog compiler directives are line-based. */ int c = skipWhite (vGetc ()); readIdentifier (token, c); createTag (token); /* Skip the rest of the line. */ do { c = vGetc(); } while (c != EOF && c != '\n'); vUngetc (c); } else if (token->kind == K_BLOCK) { /* Process begin..end blocks */ processBlock (token); } else if (token->kind == K_FUNCTION || token->kind == K_TASK) { /* Functions are treated differently because they may also include the * type of the return value. * Tasks are treated in the same way, although not having a return * value.*/ processFunction (token); } else if (token->kind == K_ASSERTION) { if (vStringLength (currentContext->blockName) > 0) { vStringCopy (token->name, currentContext->blockName); createTag (token); skipToSemiColon (); } } else if (token->kind == K_TYPEDEF) { processTypedef (token); } else if (token->kind == K_ENUM) { processEnum (token); } else if (token->kind == K_STRUCT) { processStruct (token); } else if (token->kind == K_CLASS) { processClass (token); } else if (token->kind == K_IGNORE && isPrototype (token)) { currentContext->prototype = true; } else if (isVariable (token)) { int c = skipWhite (vGetc ()); tagNameList (token, c); } else if (token->kind != K_UNDEFINED && token->kind != K_IGNORE) { int c = skipWhite (vGetc ()); if (isIdentifierCharacter (c)) { readIdentifier (token, c); while (getKindForToken (token) == K_IGNORE) { c = skipWhite (vGetc ()); readIdentifier (token, c); } createTag (token); /* Get port list if required */ c = skipWhite (vGetc ()); if (c == '(' && hasSimplePortList (token)) { processPortList (c); } else { vUngetc (c); } } } } static void findVerilogTags (void) { tokenInfo *const token = newToken (); int c = '\0'; currentContext = newToken (); while (c != EOF) { c = vGetc (); c = skipWhite (c); switch (c) { /* Store current block name whenever a : is found * This is used later by any tag type that requires this information * */ case ':': vStringCopy (currentContext->blockName, token->name); break; /* Skip interface modport port declarations */ case '(': if (currentContext && currentContext->lastKind == K_MODPORT) { skipPastMatch ("()"); } break; case ';': /* Drop context on prototypes because they don't have an * end statement */ if (currentContext->scope && currentContext->scope->prototype) { verbose ("Dropping context %s\n", vStringValue (currentContext->name)); currentContext = popToken (currentContext); currentContext->prototype = false; } /* Prototypes end at the end of statement */ if (currentContext->prototype) { currentContext->prototype = false; } /* Cleanup tag contents list at end of declaration */ while (tagContents) { 
tagContents = popToken (tagContents); } default : if (isIdentifierCharacter (c)) { readIdentifier (token, c); updateKind (token); findTag (token); } } } deleteToken (token); pruneTokens (currentContext); currentContext = NULL; } extern parserDefinition* VerilogParser (void) { static const char *const extensions [] = { "v", NULL }; parserDefinition* def = parserNew ("Verilog"); def->kindTable = VerilogKinds; def->kindCount = ARRAY_SIZE (VerilogKinds); def->extensions = extensions; def->parser = findVerilogTags; def->initialize = initializeVerilog; return def; } extern parserDefinition* SystemVerilogParser (void) { static const char *const extensions [] = { "sv", "svh", "svi", NULL }; parserDefinition* def = parserNew ("SystemVerilog"); def->kindTable = SystemVerilogKinds; def->kindCount = ARRAY_SIZE (SystemVerilogKinds); def->extensions = extensions; def->parser = findVerilogTags; def->initialize = initializeSystemVerilog; def->useCork = CORK_QUEUE | CORK_SYMTAB; return def; }
1
19,419
Is it the best way to tag "foo" of "`define foo ..." with the constant kind? Introducing a "d/definition" kind and tagging "foo" with that kind is an alternative. Using the unified kind "constant" in many areas reduces the information that client tools get from a tags file. In my experience, it is better to tag different things with different kinds. If ctags hides the differences and just reports various language objects as "constant", then in some cases a client tool must parse the raw .sv source file itself to distinguish whether a given tag is "`define"'ed or not. IMHO, ctags should not be too smart. Instead, ctags should be stupid; it should report what it sees in source input files as-is to client tools that will do something smart.
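For illustration, a minimal sketch of how the two alternatives could look in a tags file for a hypothetical `define WIDTH 8 in design.sv (the macro, file name, and the "d" kind letter are assumptions, not taken from the patch). With the unified kind, the macro is tagged "c" like any other constant:

    WIDTH	design.sv	/^`define WIDTH 8$/;"	c

With a dedicated definition kind, the same entry could instead carry "d", so a client tool can tell `define'd names apart without re-parsing the source:

    WIDTH	design.sv	/^`define WIDTH 8$/;"	d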
universal-ctags-ctags
c
@@ -213,7 +213,6 @@ const CASE_TRANSLATION = { tlscafile: 'tlsCAFile', tlscertificatekeyfile: 'tlsCertificateKeyFile', tlscertificatekeyfilepassword: 'tlsCertificateKeyFilePassword', - wtimeout: 'wTimeoutMS', j: 'journal' };
1
'use strict'; const URL = require('url'); const qs = require('querystring'); const dns = require('dns'); const MongoParseError = require('./error').MongoParseError; const ReadPreference = require('./topologies/read_preference'); /** * The following regular expression validates a connection string and breaks the * provide string into the following capture groups: [protocol, username, password, hosts] */ const HOSTS_RX = /(mongodb(?:\+srv|)):\/\/(?: (?:[^:]*) (?: : ([^@]*) )? @ )?([^/?]*)(?:\/|)(.*)/; /** * Determines whether a provided address matches the provided parent domain in order * to avoid certain attack vectors. * * @param {String} srvAddress The address to check against a domain * @param {String} parentDomain The domain to check the provided address against * @return {Boolean} Whether the provided address matches the parent domain */ function matchesParentDomain(srvAddress, parentDomain) { const regex = /^.*?\./; const srv = `.${srvAddress.replace(regex, '')}`; const parent = `.${parentDomain.replace(regex, '')}`; return srv.endsWith(parent); } /** * Lookup a `mongodb+srv` connection string, combine the parts and reparse it as a normal * connection string. * * @param {string} uri The connection string to parse * @param {object} options Optional user provided connection string options * @param {function} callback */ function parseSrvConnectionString(uri, options, callback) { const result = URL.parse(uri, true); if (result.hostname.split('.').length < 3) { return callback(new MongoParseError('URI does not have hostname, domain name and tld')); } result.domainLength = result.hostname.split('.').length; if (result.pathname && result.pathname.match(',')) { return callback(new MongoParseError('Invalid URI, cannot contain multiple hostnames')); } if (result.port) { return callback(new MongoParseError(`Ports not accepted with '${PROTOCOL_MONGODB_SRV}' URIs`)); } // Resolve the SRV record and use the result as the list of hosts to connect to. const lookupAddress = result.host; dns.resolveSrv(`_mongodb._tcp.${lookupAddress}`, (err, addresses) => { if (err) return callback(err); if (addresses.length === 0) { return callback(new MongoParseError('No addresses found at host')); } for (let i = 0; i < addresses.length; i++) { if (!matchesParentDomain(addresses[i].name, result.hostname, result.domainLength)) { return callback( new MongoParseError('Server record does not share hostname with parent URI') ); } } // Convert the original URL to a non-SRV URL. result.protocol = 'mongodb'; result.host = addresses.map(address => `${address.name}:${address.port}`).join(','); // Default to SSL true if it's not specified. if ( !('ssl' in options) && (!result.search || !('ssl' in result.query) || result.query.ssl === null) ) { result.query.ssl = true; } // Resolve TXT record and add options from there if they exist. dns.resolveTxt(lookupAddress, (err, record) => { if (err) { if (err.code !== 'ENODATA') { return callback(err); } record = null; } if (record) { if (record.length > 1) { return callback(new MongoParseError('Multiple text records not allowed')); } record = qs.parse(record[0].join('')); if (Object.keys(record).some(key => key !== 'authSource' && key !== 'replicaSet')) { return callback( new MongoParseError('Text record must only set `authSource` or `replicaSet`') ); } Object.assign(result.query, record); } // Set completed options back into the URL object. 
result.search = qs.stringify(result.query); const finalString = URL.format(result); parseConnectionString(finalString, options, (err, ret) => { if (err) { callback(err); return; } callback(null, Object.assign({}, ret, { srvHost: lookupAddress })); }); }); }); } /** * Parses a query string item according to the connection string spec * * @param {string} key The key for the parsed value * @param {Array|String} value The value to parse * @return {Array|Object|String} The parsed value */ function parseQueryStringItemValue(key, value) { if (Array.isArray(value)) { // deduplicate and simplify arrays value = value.filter((v, idx) => value.indexOf(v) === idx); if (value.length === 1) value = value[0]; } else if (value.indexOf(':') > 0) { value = value.split(',').reduce((result, pair) => { const parts = pair.split(':'); result[parts[0]] = parseQueryStringItemValue(key, parts[1]); return result; }, {}); } else if (value.indexOf(',') > 0) { value = value.split(',').map(v => { return parseQueryStringItemValue(key, v); }); } else if (value.toLowerCase() === 'true' || value.toLowerCase() === 'false') { value = value.toLowerCase() === 'true'; } else if (!Number.isNaN(value) && !STRING_OPTIONS.has(key)) { const numericValue = parseFloat(value); if (!Number.isNaN(numericValue)) { value = parseFloat(value); } } return value; } // Options that are known boolean types const BOOLEAN_OPTIONS = new Set([ 'slaveok', 'slave_ok', 'sslvalidate', 'fsync', 'safe', 'retrywrites', 'j' ]); // Known string options, only used to bypass Number coercion in `parseQueryStringItemValue` const STRING_OPTIONS = new Set(['authsource', 'replicaset']); // Supported text representations of auth mechanisms // NOTE: this list exists in native already, if it is merged here we should deduplicate const AUTH_MECHANISMS = new Set([ 'GSSAPI', 'MONGODB-X509', 'MONGODB-CR', 'DEFAULT', 'SCRAM-SHA-1', 'SCRAM-SHA-256', 'PLAIN' ]); // Lookup table used to translate normalized (lower-cased) forms of connection string // options to their expected camelCase version const CASE_TRANSLATION = { replicaset: 'replicaSet', connecttimeoutms: 'connectTimeoutMS', sockettimeoutms: 'socketTimeoutMS', maxpoolsize: 'maxPoolSize', minpoolsize: 'minPoolSize', maxidletimems: 'maxIdleTimeMS', waitqueuemultiple: 'waitQueueMultiple', waitqueuetimeoutms: 'waitQueueTimeoutMS', wtimeoutms: 'wtimeoutMS', readconcern: 'readConcern', readconcernlevel: 'readConcernLevel', readpreference: 'readPreference', maxstalenessseconds: 'maxStalenessSeconds', readpreferencetags: 'readPreferenceTags', authsource: 'authSource', authmechanism: 'authMechanism', authmechanismproperties: 'authMechanismProperties', gssapiservicename: 'gssapiServiceName', localthresholdms: 'localThresholdMS', serverselectiontimeoutms: 'serverSelectionTimeoutMS', serverselectiontryonce: 'serverSelectionTryOnce', heartbeatfrequencyms: 'heartbeatFrequencyMS', retrywrites: 'retryWrites', uuidrepresentation: 'uuidRepresentation', zlibcompressionlevel: 'zlibCompressionLevel', tlsallowinvalidcertificates: 'tlsAllowInvalidCertificates', tlsallowinvalidhostnames: 'tlsAllowInvalidHostnames', tlsinsecure: 'tlsInsecure', tlscafile: 'tlsCAFile', tlscertificatekeyfile: 'tlsCertificateKeyFile', tlscertificatekeyfilepassword: 'tlsCertificateKeyFilePassword', wtimeout: 'wTimeoutMS', j: 'journal' }; /** * Sets the value for `key`, allowing for any required translation * * @param {object} obj The object to set the key on * @param {string} key The key to set the value for * @param {*} value The value to set * @param {object} 
options The options used for option parsing */ function applyConnectionStringOption(obj, key, value, options) { // simple key translation if (key === 'journal') { key = 'j'; } else if (key === 'wtimeoutms') { key = 'wtimeout'; } // more complicated translation if (BOOLEAN_OPTIONS.has(key)) { value = value === 'true' || value === true; } else if (key === 'appname') { value = decodeURIComponent(value); } else if (key === 'readconcernlevel') { obj['readConcernLevel'] = value; key = 'readconcern'; value = { level: value }; } // simple validation if (key === 'compressors') { value = Array.isArray(value) ? value : [value]; if (!value.every(c => c === 'snappy' || c === 'zlib')) { throw new MongoParseError( 'Value for `compressors` must be at least one of: `snappy`, `zlib`' ); } } if (key === 'authmechanism' && !AUTH_MECHANISMS.has(value)) { throw new MongoParseError( 'Value for `authMechanism` must be one of: `DEFAULT`, `GSSAPI`, `PLAIN`, `MONGODB-X509`, `SCRAM-SHA-1`, `SCRAM-SHA-256`' ); } if (key === 'readpreference' && !ReadPreference.isValid(value)) { throw new MongoParseError( 'Value for `readPreference` must be one of: `primary`, `primaryPreferred`, `secondary`, `secondaryPreferred`, `nearest`' ); } if (key === 'zlibcompressionlevel' && (value < -1 || value > 9)) { throw new MongoParseError('zlibCompressionLevel must be an integer between -1 and 9'); } // special cases if (key === 'compressors' || key === 'zlibcompressionlevel') { obj.compression = obj.compression || {}; obj = obj.compression; } if (key === 'authmechanismproperties') { if (typeof value.SERVICE_NAME === 'string') obj.gssapiServiceName = value.SERVICE_NAME; if (typeof value.SERVICE_REALM === 'string') obj.gssapiServiceRealm = value.SERVICE_REALM; if (typeof value.CANONICALIZE_HOST_NAME !== 'undefined') { obj.gssapiCanonicalizeHostName = value.CANONICALIZE_HOST_NAME; } } if (key === 'readpreferencetags' && Array.isArray(value)) { value = splitArrayOfMultipleReadPreferenceTags(value); } // set the actual value if (options.caseTranslate && CASE_TRANSLATION[key]) { obj[CASE_TRANSLATION[key]] = value; return; } obj[key] = value; } const USERNAME_REQUIRED_MECHANISMS = new Set([ 'GSSAPI', 'MONGODB-CR', 'PLAIN', 'SCRAM-SHA-1', 'SCRAM-SHA-256' ]); function splitArrayOfMultipleReadPreferenceTags(value) { const parsedTags = []; for (let i = 0; i < value.length; i++) { parsedTags[i] = {}; value[i].split(',').forEach(individualTag => { const splitTag = individualTag.split(':'); parsedTags[i][splitTag[0]] = splitTag[1]; }); } return parsedTags; } /** * Modifies the parsed connection string object taking into account expectations we * have for authentication-related options. 
* * @param {object} parsed The parsed connection string result * @return The parsed connection string result possibly modified for auth expectations */ function applyAuthExpectations(parsed) { if (parsed.options == null) { return; } const options = parsed.options; const authSource = options.authsource || options.authSource; if (authSource != null) { parsed.auth = Object.assign({}, parsed.auth, { db: authSource }); } const authMechanism = options.authmechanism || options.authMechanism; if (authMechanism != null) { if ( USERNAME_REQUIRED_MECHANISMS.has(authMechanism) && (!parsed.auth || parsed.auth.username == null) ) { throw new MongoParseError(`Username required for mechanism \`${authMechanism}\``); } if (authMechanism === 'GSSAPI') { if (authSource != null && authSource !== '$external') { throw new MongoParseError( `Invalid source \`${authSource}\` for mechanism \`${authMechanism}\` specified.` ); } parsed.auth = Object.assign({}, parsed.auth, { db: '$external' }); } if (authMechanism === 'MONGODB-X509') { if (parsed.auth && parsed.auth.password != null) { throw new MongoParseError(`Password not allowed for mechanism \`${authMechanism}\``); } if (authSource != null && authSource !== '$external') { throw new MongoParseError( `Invalid source \`${authSource}\` for mechanism \`${authMechanism}\` specified.` ); } parsed.auth = Object.assign({}, parsed.auth, { db: '$external' }); } if (authMechanism === 'PLAIN') { if (parsed.auth && parsed.auth.db == null) { parsed.auth = Object.assign({}, parsed.auth, { db: '$external' }); } } } // default to `admin` if nothing else was resolved if (parsed.auth && parsed.auth.db == null) { parsed.auth = Object.assign({}, parsed.auth, { db: 'admin' }); } return parsed; } /** * Parses a query string according the connection string spec. * * @param {String} query The query string to parse * @param {object} [options] The options used for options parsing * @return {Object|Error} The parsed query string as an object, or an error if one was encountered */ function parseQueryString(query, options) { const result = {}; let parsedQueryString = qs.parse(query); checkTLSOptions(parsedQueryString); for (const key in parsedQueryString) { const value = parsedQueryString[key]; if (value === '' || value == null) { throw new MongoParseError('Incomplete key value pair for option'); } const normalizedKey = key.toLowerCase(); const parsedValue = parseQueryStringItemValue(normalizedKey, value); applyConnectionStringOption(result, normalizedKey, parsedValue, options); } // special cases for known deprecated options if (result.wtimeout && result.wtimeoutms) { delete result.wtimeout; console.warn('Unsupported option `wtimeout` specified'); } return Object.keys(result).length ? result : null; } /** * Checks a query string for invalid tls options according to the URI options spec. * * @param {string} queryString The query string to check * @throws {MongoParseError} */ function checkTLSOptions(queryString) { const queryStringKeys = Object.keys(queryString); if ( queryStringKeys.indexOf('tlsInsecure') !== -1 && (queryStringKeys.indexOf('tlsAllowInvalidCertificates') !== -1 || queryStringKeys.indexOf('tlsAllowInvalidHostnames') !== -1) ) { throw new MongoParseError( 'The `tlsInsecure` option cannot be used with `tlsAllowInvalidCertificates` or `tlsAllowInvalidHostnames`.' 
); } const tlsValue = assertTlsOptionsAreEqual('tls', queryString, queryStringKeys); const sslValue = assertTlsOptionsAreEqual('ssl', queryString, queryStringKeys); if (tlsValue != null && sslValue != null) { if (tlsValue !== sslValue) { throw new MongoParseError('All values of `tls` and `ssl` must be the same.'); } } } /** * Checks a query string to ensure all tls/ssl options are the same. * * @param {string} key The key (tls or ssl) to check * @param {string} queryString The query string to check * @throws {MongoParseError} * @return The value of the tls/ssl option */ function assertTlsOptionsAreEqual(optionName, queryString, queryStringKeys) { const queryStringHasTLSOption = queryStringKeys.indexOf(optionName) !== -1; let optionValue; if (Array.isArray(queryString[optionName])) { optionValue = queryString[optionName][0]; } else { optionValue = queryString[optionName]; } if (queryStringHasTLSOption) { if (Array.isArray(queryString[optionName])) { const firstValue = queryString[optionName][0]; queryString[optionName].forEach(tlsValue => { if (tlsValue !== firstValue) { throw new MongoParseError('All values of ${optionName} must be the same.'); } }); } } return optionValue; } const PROTOCOL_MONGODB = 'mongodb'; const PROTOCOL_MONGODB_SRV = 'mongodb+srv'; const SUPPORTED_PROTOCOLS = [PROTOCOL_MONGODB, PROTOCOL_MONGODB_SRV]; /** * Parses a MongoDB connection string * * @param {*} uri the MongoDB connection string to parse * @param {object} [options] Optional settings. * @param {boolean} [options.caseTranslate] Whether the parser should translate options back into camelCase after normalization * @param {parseCallback} callback */ function parseConnectionString(uri, options, callback) { if (typeof options === 'function') (callback = options), (options = {}); options = Object.assign({}, { caseTranslate: true }, options); // Check for bad uris before we parse try { URL.parse(uri); } catch (e) { return callback(new MongoParseError('URI malformed, cannot be parsed')); } const cap = uri.match(HOSTS_RX); if (!cap) { return callback(new MongoParseError('Invalid connection string')); } const protocol = cap[1]; if (SUPPORTED_PROTOCOLS.indexOf(protocol) === -1) { return callback(new MongoParseError('Invalid protocol provided')); } if (protocol === PROTOCOL_MONGODB_SRV) { return parseSrvConnectionString(uri, options, callback); } const dbAndQuery = cap[4].split('?'); const db = dbAndQuery.length > 0 ? dbAndQuery[0] : null; const query = dbAndQuery.length > 1 ? dbAndQuery[1] : null; let parsedOptions; try { parsedOptions = parseQueryString(query, options); } catch (parseError) { return callback(parseError); } parsedOptions = Object.assign({}, parsedOptions, options); const auth = { username: null, password: null, db: db && db !== '' ? 
qs.unescape(db) : null }; if (parsedOptions.auth) { // maintain support for legacy options passed into `MongoClient` if (parsedOptions.auth.username) auth.username = parsedOptions.auth.username; if (parsedOptions.auth.user) auth.username = parsedOptions.auth.user; if (parsedOptions.auth.password) auth.password = parsedOptions.auth.password; } else { if (parsedOptions.username) auth.username = parsedOptions.username; if (parsedOptions.user) auth.username = parsedOptions.user; if (parsedOptions.password) auth.password = parsedOptions.password; } if (cap[4].split('?')[0].indexOf('@') !== -1) { return callback(new MongoParseError('Unescaped slash in userinfo section')); } const authorityParts = cap[3].split('@'); if (authorityParts.length > 2) { return callback(new MongoParseError('Unescaped at-sign in authority section')); } if (authorityParts.length > 1) { const authParts = authorityParts.shift().split(':'); if (authParts.length > 2) { return callback(new MongoParseError('Unescaped colon in authority section')); } if (!auth.username) auth.username = qs.unescape(authParts[0]); if (!auth.password) auth.password = authParts[1] ? qs.unescape(authParts[1]) : null; } let hostParsingError = null; const hosts = authorityParts .shift() .split(',') .map(host => { let parsedHost = URL.parse(`mongodb://${host}`); if (parsedHost.path === '/:') { hostParsingError = new MongoParseError('Double colon in host identifier'); return null; } // heuristically determine if we're working with a domain socket if (host.match(/\.sock/)) { parsedHost.hostname = qs.unescape(host); parsedHost.port = null; } if (Number.isNaN(parsedHost.port)) { hostParsingError = new MongoParseError('Invalid port (non-numeric string)'); return; } const result = { host: parsedHost.hostname, port: parsedHost.port ? parseInt(parsedHost.port) : 27017 }; if (result.port === 0) { hostParsingError = new MongoParseError('Invalid port (zero) with hostname'); return; } if (result.port > 65535) { hostParsingError = new MongoParseError('Invalid port (larger than 65535) with hostname'); return; } if (result.port < 0) { hostParsingError = new MongoParseError('Invalid port (negative number)'); return; } return result; }) .filter(host => !!host); if (hostParsingError) { return callback(hostParsingError); } if (hosts.length === 0 || hosts[0].host === '' || hosts[0].host === null) { return callback(new MongoParseError('No hostname or hostnames provided in connection string')); } const result = { hosts: hosts, auth: auth.db || auth.username ? auth : null, options: Object.keys(parsedOptions).length ? parsedOptions : null }; if (result.auth && result.auth.db) { result.defaultDatabase = result.auth.db; } else { result.defaultDatabase = 'test'; } try { applyAuthExpectations(result); } catch (authError) { return callback(authError); } callback(null, result); } module.exports = parseConnectionString;
1
16,461
Is this a breaking change? If I'm not mistaken, this is translating a user-provided `wtimeout` in the connection string into `wTimeoutMS`.
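A sketch of the key flow in question, traced from parseQueryString and applyConnectionStringOption in the file above (the URI option and value are hypothetical):

    // How 'wtimeoutMS=100' flows through the parser, based on the code above:
    //   'wtimeoutMS'  --toLowerCase in parseQueryString-->         'wtimeoutms'
    //   'wtimeoutms'  --applyConnectionStringOption key rewrite--> 'wtimeout'
    //   'wtimeout'    --CASE_TRANSLATION (entry removed here)-->   'wTimeoutMS'
    // With the mapping removed, the emitted option key stays 'wtimeout',
    // so callers that read options.wTimeoutMS would no longer see the value.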
mongodb-node-mongodb-native
js
@@ -0,0 +1,7 @@ +import json + + +def lambda_handler(event, context): + # Just print the event was passed to lambda + print('{}'.format(json.dumps(event))) + return 0
1
1
10,657
Can we rename this file to `lambda_echo.py`, to be more descriptive and in line with the other files in this directory? (all files in this folder are lambda functions, after all)
localstack-localstack
py
@@ -33,9 +33,10 @@ type Seccomp struct { type Action int const ( - Kill Action = iota - 4 + Kill Action = iota - 5 Errno Trap + Trace Allow )
1
package configs import ( "bytes" "encoding/json" "os/exec" ) type Rlimit struct { Type int `json:"type"` Hard uint64 `json:"hard"` Soft uint64 `json:"soft"` } // IDMap represents UID/GID Mappings for User Namespaces. type IDMap struct { ContainerID int `json:"container_id"` HostID int `json:"host_id"` Size int `json:"size"` } // Seccomp represents syscall restrictions // By default, only the native architecture of the kernel is allowed to be used // for syscalls. Additional architectures can be added by specifying them in // Architectures. type Seccomp struct { DefaultAction Action `json:"default_action"` Architectures []string `json:"architectures"` Syscalls []*Syscall `json:"syscalls"` } // An action to be taken upon rule match in Seccomp type Action int const ( Kill Action = iota - 4 Errno Trap Allow ) // A comparison operator to be used when matching syscall arguments in Seccomp type Operator int const ( EqualTo Operator = iota NotEqualTo GreaterThan GreaterThanOrEqualTo LessThan LessThanOrEqualTo MaskEqualTo ) // A rule to match a specific syscall argument in Seccomp type Arg struct { Index uint `json:"index"` Value uint64 `json:"value"` ValueTwo uint64 `json:"value_two"` Op Operator `json:"op"` } // An rule to match a syscall in Seccomp type Syscall struct { Name string `json:"name"` Action Action `json:"action"` Args []*Arg `json:"args"` } // TODO Windows. Many of these fields should be factored out into those parts // which are common across platforms, and those which are platform specific. // Config defines configuration options for executing a process inside a contained environment. type Config struct { // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs // This is a common option when the container is running in ramdisk NoPivotRoot bool `json:"no_pivot_root"` // ParentDeathSignal specifies the signal that is sent to the container's process in the case // that the parent process dies. ParentDeathSignal int `json:"parent_death_signal"` // PivotDir allows a custom directory inside the container's root filesystem to be used as pivot, when NoPivotRoot is not set. // When a custom PivotDir not set, a temporary dir inside the root filesystem will be used. The pivot dir needs to be writeable. // This is required when using read only root filesystems. In these cases, a read/writeable path can be (bind) mounted somewhere inside the root filesystem to act as pivot. PivotDir string `json:"pivot_dir"` // Path to a directory containing the container's root filesystem. Rootfs string `json:"rootfs"` // Readonlyfs will remount the container's rootfs as readonly where only externally mounted // bind mounts are writtable. Readonlyfs bool `json:"readonlyfs"` // Specifies the mount propagation flags to be applied to /. RootPropagation int `json:"rootPropagation"` // Mounts specify additional source and destination paths that will be mounted inside the container's // rootfs and mount namespace if specified Mounts []*Mount `json:"mounts"` // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well! 
Devices []*Device `json:"devices"` MountLabel string `json:"mount_label"` // Hostname optionally sets the container's hostname if provided Hostname string `json:"hostname"` // Namespaces specifies the container's namespaces that it should setup when cloning the init process // If a namespace is not provided that namespace is shared from the container's parent process Namespaces Namespaces `json:"namespaces"` // Capabilities specify the capabilities to keep when executing the process inside the container // All capbilities not specified will be dropped from the processes capability mask Capabilities []string `json:"capabilities"` // Networks specifies the container's network setup to be created Networks []*Network `json:"networks"` // Routes can be specified to create entries in the route table as the container is started Routes []*Route `json:"routes"` // Cgroups specifies specific cgroup settings for the various subsystems that the container is // placed into to limit the resources the container has available Cgroups *Cgroup `json:"cgroups"` // AppArmorProfile specifies the profile to apply to the process running in the container and is // change at the time the process is execed AppArmorProfile string `json:"apparmor_profile"` // ProcessLabel specifies the label to apply to the process running in the container. It is // commonly used by selinux ProcessLabel string `json:"process_label"` // Rlimits specifies the resource limits, such as max open files, to set in the container // If Rlimits are not set, the container will inherit rlimits from the parent process Rlimits []Rlimit `json:"rlimits"` // OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores // for a process. Valid values are between the range [-1000, '1000'], where processes with // higher scores are preferred for being killed. // More information about kernel oom score calculation here: https://lwn.net/Articles/317814/ OomScoreAdj int `json:"oom_score_adj"` // AdditionalGroups specifies the gids that should be added to supplementary groups // in addition to those that the user belongs to. AdditionalGroups []string `json:"additional_groups"` // UidMappings is an array of User ID mappings for User Namespaces UidMappings []IDMap `json:"uid_mappings"` // GidMappings is an array of Group ID mappings for User Namespaces GidMappings []IDMap `json:"gid_mappings"` // MaskPaths specifies paths within the container's rootfs to mask over with a bind // mount pointing to /dev/null as to prevent reads of the file. MaskPaths []string `json:"mask_paths"` // ReadonlyPaths specifies paths within the container's rootfs to remount as read-only // so that these files prevent any writes. ReadonlyPaths []string `json:"readonly_paths"` // Sysctl is a map of properties and their values. It is the equivalent of using // sysctl -w my.property.name value in Linux. Sysctl map[string]string `json:"sysctl"` // Seccomp allows actions to be taken whenever a syscall is made within the container. // A number of rules are given, each having an action to be taken if a syscall matches it. // A default action to be taken if no rules match is also given. Seccomp *Seccomp `json:"seccomp"` // Hooks are a collection of actions to perform at various container lifecycle events. // Hooks are not able to be marshaled to json but they are also not needed to. Hooks *Hooks `json:"-"` // Version is the version of opencontainer specification that is supported. 
Version string `json:"version"` } type Hooks struct { // Prestart commands are executed after the container namespaces are created, // but before the user supplied command is executed from init. Prestart []Hook // Poststop commands are executed after the container init process exits. Poststop []Hook } // HookState is the payload provided to a hook on execution. type HookState struct { Version string `json:"version"` ID string `json:"id"` Pid int `json:"pid"` Root string `json:"root"` } type Hook interface { // Run executes the hook with the provided state. Run(HookState) error } // NewFunctionHooks will call the provided function when the hook is run. func NewFunctionHook(f func(HookState) error) FuncHook { return FuncHook{ run: f, } } type FuncHook struct { run func(HookState) error } func (f FuncHook) Run(s HookState) error { return f.run(s) } type Command struct { Path string `json:"path"` Args []string `json:"args"` Env []string `json:"env"` Dir string `json:"dir"` } // NewCommandHooks will execute the provided command when the hook is run. func NewCommandHook(cmd Command) CommandHook { return CommandHook{ Command: cmd, } } type CommandHook struct { Command } func (c Command) Run(s HookState) error { b, err := json.Marshal(s) if err != nil { return err } cmd := exec.Cmd{ Path: c.Path, Args: c.Args, Env: c.Env, Stdin: bytes.NewReader(b), } return cmd.Run() }
1
8,588
Why not do it like this?

    const (
        Kill Action = iota
        Errno
        Trap
        Allow
        Trace
    )
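For reference, a sketch of the numeric values each variant produces (derived from the patch and the suggestion; not part of the original comment):

    // Patch as written (iota - 5, Trace inserted before Allow):
    //   Kill = -5, Errno = -4, Trap = -3, Trace = -2, Allow = -1
    // Suggestion above (plain iota, Trace appended last):
    //   Kill = 0, Errno = 1, Trap = 2, Allow = 3, Trace = 4
    // The patch keeps every action negative (Allow stays at -1), while the
    // suggestion renumbers all of them as non-negative values.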
opencontainers-runc
go
@@ -265,6 +265,7 @@ namespace NLog.Internal.FileAppenders /// <param name="expireTime">The time which prior the appenders considered expired</param> public void CloseAppenders(DateTime expireTime) { + lock(this) for (int i = 0; i < this.appenders.Length; ++i) { if (this.appenders[i] == null)
1
// // Copyright (c) 2004-2016 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // namespace NLog.Internal.FileAppenders { using System; using System.IO; /// <summary> /// Maintains a collection of file appenders usually associated with file targets. /// </summary> internal sealed class FileAppenderCache { private BaseFileAppender[] appenders; #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ private string archiveFilePatternToWatch = null; private bool logFileWasArchived = false; private readonly MultiFileWatcher externalFileArchivingWatcher = new MultiFileWatcher(NotifyFilters.FileName); #endif /// <summary> /// An "empty" instance of the <see cref="FileAppenderCache"/> class with zero size and empty list of appenders. /// </summary> public static readonly FileAppenderCache Empty = new FileAppenderCache(); /// <summary> /// Initializes a new "empty" instance of the <see cref="FileAppenderCache"/> class with zero size and empty /// list of appenders. /// </summary> private FileAppenderCache() : this(0, null, null) { } /// <summary> /// Initializes a new instance of the <see cref="FileAppenderCache"/> class. /// </summary> /// <remarks> /// The size of the list should be positive. No validations are performed during initialisation as it is an /// intenal class. 
/// </remarks> /// <param name="size">Total number of appenders allowed in list.</param> /// <param name="appenderFactory">Factory used to create each appender.</param> /// <param name="createFileParams">Parameters used for creating a file.</param> public FileAppenderCache(int size, IFileAppenderFactory appenderFactory, ICreateFileParameters createFileParams) { Size = size; Factory = appenderFactory; CreateFileParameters = createFileParams; appenders = new BaseFileAppender[Size]; #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ externalFileArchivingWatcher.OnChange += ExternalFileArchivingWatcher_OnChange; #endif } #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ private void ExternalFileArchivingWatcher_OnChange(object sender, FileSystemEventArgs e) { if ((e.ChangeType & WatcherChangeTypes.Created) == WatcherChangeTypes.Created) logFileWasArchived = true; } /// <summary> /// The archive file path pattern that is used to detect when archiving occurs. /// </summary> public string ArchiveFilePatternToWatch { get { return archiveFilePatternToWatch; } set { if (archiveFilePatternToWatch != value) { archiveFilePatternToWatch = value; logFileWasArchived = false; externalFileArchivingWatcher.StopWatching(); } } } /// <summary> /// Invalidates appenders for all files that were archived. /// </summary> public void InvalidateAppendersForInvalidFiles() { if (logFileWasArchived) { CloseAppenders(); logFileWasArchived = false; } } #endif /// <summary> /// Gets the parameters which will be used for creating a file. /// </summary> public ICreateFileParameters CreateFileParameters { get; private set; } /// <summary> /// Gets the file appender factory used by all the appenders in this list. /// </summary> public IFileAppenderFactory Factory { get; private set; } /// <summary> /// Gets the number of appenders which the list can hold. /// </summary> public int Size { get; private set; } /// <summary> /// It allocates the first slot in the list when the file name does not already in the list and clean up any /// unused slots. /// </summary> /// <param name="fileName">File name associated with a single appender.</param> /// <returns>The allocated appender.</returns> /// <exception cref="NullReferenceException"> /// Thrown when <see cref="M:AllocateAppender"/> is called on an <c>Empty</c><see cref="FileAppenderCache"/> instance. /// </exception> public BaseFileAppender AllocateAppender(string fileName) { // // BaseFileAppender.Write is the most expensive operation here // so the in-memory data structure doesn't have to be // very sophisticated. It's a table-based LRU, where we move // the used element to become the first one. // The number of items is usually very limited so the // performance should be equivalent to the one of the hashtable. // BaseFileAppender appenderToWrite = null; int freeSpot = appenders.Length - 1; for (int i = 0; i < appenders.Length; ++i) { // Use empty slot in recent appender list, if there is one. 
if (appenders[i] == null) { freeSpot = i; break; } if (appenders[i].FileName == fileName) { // found it, move it to the first place on the list // (MRU) // file open has a chance of failure // if it fails in the constructor, we won't modify any data structures BaseFileAppender app = appenders[i]; for (int j = i; j > 0; --j) { appenders[j] = appenders[j - 1]; } appenders[0] = app; appenderToWrite = app; break; } } if (appenderToWrite == null) { BaseFileAppender newAppender = Factory.Open(fileName, CreateFileParameters); if (appenders[freeSpot] != null) { CloseAppender(appenders[freeSpot]); appenders[freeSpot] = null; } for (int j = freeSpot; j > 0; --j) { appenders[j] = appenders[j - 1]; } appenders[0] = newAppender; appenderToWrite = newAppender; #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ if (!string.IsNullOrEmpty(archiveFilePatternToWatch)) { var archiveFilePatternToWatchPath = GetFullPathForPattern(archiveFilePatternToWatch); string directoryPath = Path.GetDirectoryName(archiveFilePatternToWatchPath); if (!Directory.Exists(directoryPath)) Directory.CreateDirectory(directoryPath); externalFileArchivingWatcher.Watch(archiveFilePatternToWatchPath); } #endif } return appenderToWrite; } /// <summary> /// Get fullpath for a relative file pattern, e.g *.log /// <see cref="Path.GetFullPath"/> crashes on patterns: ArgumentException: Illegal characters in path. /// </summary> /// <param name="pattern"></param> /// <returns></returns> private static string GetFullPathForPattern(string pattern) { string filePattern = Path.GetFileName(pattern); string dir = pattern.Substring(0, pattern.Length - filePattern.Length); // Get absolute path (root+relative) if (string.IsNullOrEmpty(dir)) { dir = "."; } return Path.Combine(Path.GetFullPath(dir), filePattern); } /// <summary> /// Close all the allocated appenders. /// </summary> public void CloseAppenders() { if (appenders != null) { for (int i = 0; i < appenders.Length; ++i) { if (appenders[i] == null) { break; } CloseAppender(appenders[i]); appenders[i] = null; } } } /// <summary> /// Close the allocated appenders initialised before the supplied time. /// </summary> /// <param name="expireTime">The time which prior the appenders considered expired</param> public void CloseAppenders(DateTime expireTime) { for (int i = 0; i < this.appenders.Length; ++i) { if (this.appenders[i] == null) { break; } if (this.appenders[i].OpenTime < expireTime) { for (int j = i; j < this.appenders.Length; ++j) { if (this.appenders[j] == null) { break; } CloseAppender(this.appenders[j]); this.appenders[j] = null; } break; } } } /// <summary> /// Fluch all the allocated appenders. /// </summary> public void FlushAppenders() { foreach (BaseFileAppender appender in appenders) { if (appender == null) { break; } appender.Flush(); } } /// <summary> /// Gets the file info for a particular appender. /// </summary> /// <param name="fileName">The file name associated with a particular appender.</param> /// <returns>The file characteristics, if the file information was retrieved successfully, otherwise null.</returns> public FileCharacteristics GetFileCharacteristics(string fileName) { foreach (BaseFileAppender appender in appenders) { if (appender == null) break; if (appender.FileName == fileName) return appender.GetFileCharacteristics(); } return null; } /// <summary> /// Closes the specified appender and removes it from the list. 
/// </summary> /// <param name="fileName">File name of the appender to be closed.</param> public void InvalidateAppender(string fileName) { for (int i = 0; i < appenders.Length; ++i) { if (appenders[i] == null) { break; } if (appenders[i].FileName == fileName) { CloseAppender(appenders[i]); for (int j = i; j < appenders.Length - 1; ++j) { appenders[j] = appenders[j + 1]; } appenders[appenders.Length - 1] = null; break; } } } private void CloseAppender(BaseFileAppender appender) { appender.Close(); #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ externalFileArchivingWatcher.StopWatching(); #endif } } }
1
13,087
lock(this) is bad practice, since you lock on the entire instance. Consider external code doing:

    var appenderCache = xxxx;
    lock (appenderCache)
    {
        Thread.Sleep(TimeSpan.FromHours(5));
    }

Meanwhile, inside the AppenderCache instance, every lock(this) is blocked for those 5 hours. It's always better to give the class a private lock object.
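A minimal sketch of the private-lock-object pattern the comment recommends (the field name is illustrative, not from the patch):

    // Private lock object: no external caller can acquire it,
    // so only this class controls contention on its own state.
    private readonly object lockObj = new object();

    public void CloseAppenders(DateTime expireTime)
    {
        lock (lockObj)
        {
            // ... iterate and close expired appenders ...
        }
    }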
NLog-NLog
.cs
@@ -135,7 +135,10 @@ func WithClientInfo(serviceName string, procedures []transport.Procedure, transp &ClientInfo{ clientDispatcher.ClientConfig(serviceName), grpcClientConn, - grpcheader.NewContextWrapper().WithCaller(serviceName + "-client").WithService(serviceName), + grpcheader.NewContextWrapper(). + WithCaller(serviceName + "-client"). + WithService(serviceName). + WithEncoding("proto"), }, ) }
1
// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package testutils import ( "fmt" "net" "strconv" "go.uber.org/yarpc" "go.uber.org/yarpc/api/transport" "go.uber.org/yarpc/transport/http" "go.uber.org/yarpc/transport/tchannel" "go.uber.org/yarpc/transport/x/grpc" "go.uber.org/yarpc/transport/x/grpc/grpcheader" "go.uber.org/multierr" ggrpc "google.golang.org/grpc" ) const ( // TransportTypeHTTP represents using HTTP. TransportTypeHTTP TransportType = iota // TransportTypeTChannel represents using TChannel. TransportTypeTChannel // TransportTypeGRPC represents using GRPC. TransportTypeGRPC ) var ( // AllTransportTypes are all TransportTypes, AllTransportTypes = []TransportType{ TransportTypeHTTP, TransportTypeTChannel, TransportTypeGRPC, } ) // TransportType is a transport type. type TransportType int // String returns a string representation of t. func (t TransportType) String() string { switch t { case TransportTypeHTTP: return "http" case TransportTypeTChannel: return "tchannel" case TransportTypeGRPC: return "grpc" default: return strconv.Itoa(int(t)) } } // ParseTransportType parses a transport type from a string. func ParseTransportType(s string) (TransportType, error) { switch s { case "http": return TransportTypeHTTP, nil case "tchannel": return TransportTypeTChannel, nil case "grpc": return TransportTypeGRPC, nil default: return 0, fmt.Errorf("invalid TransportType: %s", s) } } // ClientInfo holds the client info for testing. type ClientInfo struct { ClientConfig transport.ClientConfig GRPCClientConn *ggrpc.ClientConn ContextWrapper *grpcheader.ContextWrapper } // WithClientInfo wraps a function by setting up a client and server dispatcher and giving // the function the client configuration to use in tests for the given TransportType. // // The server dispatcher will be brought up using all TransportTypes and with the serviceName. // The client dispatcher will be brought up using the given TransportType for Unary, HTTP for // Oneway, and the serviceName with a "-client" suffix. 
func WithClientInfo(serviceName string, procedures []transport.Procedure, transportType TransportType, f func(*ClientInfo) error) (err error) { dispatcherConfig, err := NewDispatcherConfig(serviceName) if err != nil { return err } serverDispatcher, err := NewServerDispatcher(procedures, dispatcherConfig) if err != nil { return err } clientDispatcher, err := NewClientDispatcher(transportType, dispatcherConfig) if err != nil { return err } if err := serverDispatcher.Start(); err != nil { return err } defer func() { err = multierr.Append(err, serverDispatcher.Stop()) }() if err := clientDispatcher.Start(); err != nil { return err } defer func() { err = multierr.Append(err, clientDispatcher.Stop()) }() grpcPort, err := dispatcherConfig.GetPort(TransportTypeGRPC) if err != nil { return err } grpcClientConn, err := ggrpc.Dial(fmt.Sprintf("127.0.0.1:%d", grpcPort), ggrpc.WithInsecure()) if err != nil { return err } return f( &ClientInfo{ clientDispatcher.ClientConfig(serviceName), grpcClientConn, grpcheader.NewContextWrapper().WithCaller(serviceName + "-client").WithService(serviceName), }, ) } // NewClientDispatcher returns a new client Dispatcher. // // HTTP always will be configured as an outbound for Oneway unless using TransportTypeGRPC. func NewClientDispatcher(transportType TransportType, config *DispatcherConfig) (*yarpc.Dispatcher, error) { port, err := config.GetPort(transportType) if err != nil { return nil, err } httpPort, err := config.GetPort(TransportTypeHTTP) if err != nil { return nil, err } var onewayOutbound transport.OnewayOutbound var unaryOutbound transport.UnaryOutbound switch transportType { case TransportTypeTChannel: tchannelTransport, err := tchannel.NewChannelTransport(tchannel.ServiceName(config.GetServiceName())) if err != nil { return nil, err } onewayOutbound = http.NewTransport().NewSingleOutbound(fmt.Sprintf("http://127.0.0.1:%d", httpPort)) unaryOutbound = tchannelTransport.NewSingleOutbound(fmt.Sprintf("127.0.0.1:%d", port)) case TransportTypeHTTP: httpOutbound := http.NewTransport().NewSingleOutbound(fmt.Sprintf("http://127.0.0.1:%d", port)) onewayOutbound = httpOutbound unaryOutbound = httpOutbound case TransportTypeGRPC: onewayOutbound = http.NewTransport().NewSingleOutbound(fmt.Sprintf("http://127.0.0.1:%d", httpPort)) unaryOutbound = grpc.NewTransport().NewSingleOutbound(fmt.Sprintf("127.0.0.1:%d", port)) default: return nil, fmt.Errorf("invalid TransportType: %v", transportType) } return yarpc.NewDispatcher( yarpc.Config{ Name: fmt.Sprintf("%s-client", config.GetServiceName()), Outbounds: yarpc.Outbounds{ config.GetServiceName(): { Oneway: onewayOutbound, Unary: unaryOutbound, }, }, }, ), nil } // NewServerDispatcher returns a new server Dispatcher. 
func NewServerDispatcher(procedures []transport.Procedure, config *DispatcherConfig) (*yarpc.Dispatcher, error) { tchannelPort, err := config.GetPort(TransportTypeTChannel) if err != nil { return nil, err } httpPort, err := config.GetPort(TransportTypeHTTP) if err != nil { return nil, err } grpcPort, err := config.GetPort(TransportTypeGRPC) if err != nil { return nil, err } tchannelTransport, err := tchannel.NewChannelTransport( tchannel.ServiceName(config.GetServiceName()), tchannel.ListenAddr(fmt.Sprintf("127.0.0.1:%d", tchannelPort)), ) if err != nil { return nil, err } grpcListener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", grpcPort)) if err != nil { return nil, err } dispatcher := yarpc.NewDispatcher( yarpc.Config{ Name: config.GetServiceName(), Inbounds: yarpc.Inbounds{ tchannelTransport.NewInbound(), http.NewTransport().NewInbound(fmt.Sprintf("127.0.0.1:%d", httpPort)), grpc.NewTransport().NewInbound(grpcListener), }, }, ) dispatcher.Register(procedures) return dispatcher, nil } // DispatcherConfig is the configuration for a Dispatcher. type DispatcherConfig struct { serviceName string transportTypeToPort map[TransportType]uint16 } // NewDispatcherConfig returns a new DispatcherConfig with assigned ports. func NewDispatcherConfig(serviceName string) (*DispatcherConfig, error) { transportTypeToPort, err := getTransportTypeToPort() if err != nil { return nil, err } return &DispatcherConfig{ serviceName, transportTypeToPort, }, nil } // GetServiceName gets the service name. func (d *DispatcherConfig) GetServiceName() string { return d.serviceName } // GetPort gets the port for the TransportType. func (d *DispatcherConfig) GetPort(transportType TransportType) (uint16, error) { port, ok := d.transportTypeToPort[transportType] if !ok { return 0, fmt.Errorf("no port for TransportType %v", transportType) } return port, nil } func getTransportTypeToPort() (map[TransportType]uint16, error) { m := make(map[TransportType]uint16, len(AllTransportTypes)) for _, transportType := range AllTransportTypes { port, err := getFreePort() if err != nil { return nil, err } m[transportType] = port } return m, nil } func getFreePort() (uint16, error) { address, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0") if err != nil { return 0, err } listener, err := net.ListenTCP("tcp", address) if err != nil { return 0, err } port := uint16(listener.Addr().(*net.TCPAddr).Port) if err := listener.Close(); err != nil { return 0, err } return port, nil }
1
13,714
Should we use the constant above?
yarpc-yarpc-go
go
@@ -6979,8 +6979,8 @@ os_seek(file_t f, int64 offset, int origin) case OS_SEEK_END: { uint64 file_size = 0; - bool res = os_get_file_size_by_handle(f, &file_size); - ASSERT(res && "bad file handle?"); /* shouldn't fail */ + bool result = os_get_file_size_by_handle(f, &file_size); + ASSERT(result && "bad file handle?"); /* shouldn't fail */ abs_offset += file_size; } break;
1
/* ********************************************************** * Copyright (c) 2010-2017 Google, Inc. All rights reserved. * Copyright (c) 2000-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2000-2001 Hewlett-Packard Company */ /* * os.c - win32 specific routines */ #include "../globals.h" #include "../fragment.h" #include "../fcache.h" #include "ntdll.h" #include "os_private.h" #include "../nudge.h" #include "../moduledb.h" #include "../hotpatch.h" #ifdef DEBUG # include "../vmareas.h" #endif #include "../dispatch.h" #include "instrument.h" /* is_in_client_lib() */ #include <windows.h> #include <stddef.h> /* for offsetof */ #include "events.h" /* event log messages */ #include "aslr.h" #include "../synch.h" #include "../perscache.h" #include "../native_exec.h" #ifdef NOT_DYNAMORIO_CORE_PROPER # undef ASSERT # undef ASSERT_NOT_IMPLEMENTED # undef ASSERT_NOT_TESTED # undef ASSERT_CURIOSITY_ONCE # define ASSERT(x) /* nothing */ # define ASSERT_NOT_IMPLEMENTED(x) /* nothing */ # define ASSERT_NOT_TESTED(x) /* nothing */ # define ASSERT_CURIOSITY_ONCE(x) /* nothing */ # undef LOG # define LOG(x, ...) /* nothing */ #else /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */ #ifdef DEBUG DECLARE_CXTSWPROT_VAR(static mutex_t snapshot_lock, INIT_LOCK_FREE(snapshot_lock)); #endif DECLARE_CXTSWPROT_VAR(static mutex_t dump_core_lock, INIT_LOCK_FREE(dump_core_lock)); DECLARE_CXTSWPROT_VAR(static mutex_t debugbox_lock, INIT_LOCK_FREE(debugbox_lock)); /* PR 601533: cleanup_and_terminate must release the initstack_mutex * prior to its final syscall, yet the wow64 syscall writes to the * stack b/c it makes a call, so we have a race that can lead to a * hang or worse. we do not expect the syscall to return, so we can * use a global single-entry stack (the wow64 layer swaps to a * different stack: presumably for alignment and other reasons). 
* We also use this for non-wow64, except on win8 wow64 where we need * a per-thread stack and we use the TEB. * We do leave room to store the 2 args to NtTerminateProcess for win8 wow64 * in case we can't get the target thread's TEB. */ #define WOW64_SYSCALL_SETUP_SIZE 3*XSP_SZ /* 2 args + retaddr of call to win8 wrapper */ /* 1 for call + 1 extra + setup */ #define WOW64_SYSCALL_STACK_SIZE 2*XSP_SZ + (WOW64_SYSCALL_SETUP_SIZE) DECLARE_NEVERPROT_VAR(static byte wow64_syscall_stack_array[WOW64_SYSCALL_STACK_SIZE], {0}); /* We point it several stack slots in for win8 setup */ const byte *wow64_syscall_stack = &wow64_syscall_stack_array[WOW64_SYSCALL_STACK_SIZE - WOW64_SYSCALL_SETUP_SIZE]; /* globals */ bool intercept_asynch = false; bool intercept_callbacks = false; /* we store here to enable TEB.ClientId.ProcessHandle as a spill slot */ process_id_t win32_pid = 0; /* we store here to enable TEB.ProcessEnvironmentBlock as a spill slot */ void *peb_ptr; static int os_version; static uint os_service_pack_major; static uint os_service_pack_minor; static const char *os_name; app_pc vsyscall_page_start = NULL; /* pc kernel will claim app is at while in syscall */ app_pc vsyscall_after_syscall = NULL; /* pc of the end of the syscall instr itself */ app_pc vsyscall_syscall_end_pc = NULL; /* atomic variable to prevent multiple threads from trying to detach at the same time */ DECLARE_CXTSWPROT_VAR(static volatile int dynamo_detaching_flag, LOCK_FREE_STATE); /* For Win10 this is what the call* in the syscall wrapper targets */ app_pc wow64_syscall_call_tgt; #ifdef PROFILE_RDTSC uint kilo_hertz; /* cpu clock speed */ #endif #define HEAP_INITIAL_SIZE 1024*1024 /* pc values delimiting dynamo dll image */ app_pc dynamo_dll_start = NULL; app_pc dynamo_dll_end = NULL; /* open-ended */ /* needed for randomizing DR library location */ static app_pc dynamo_dll_preferred_base = NULL; /* thread-local storage slots */ enum {TLS_UNINITIALIZED = (ushort) 0U}; static ushort tls_local_state_offs = TLS_UNINITIALIZED; /* we keep this cached for easy asm access */ static ushort tls_dcontext_offs = TLS_UNINITIALIZED; /* used for early inject */ app_pc parent_early_inject_address = NULL; /* dynamo.c fills in */ /* note that this is the early inject location we'll use for child processes * dr_early_injected_location is the location (if any) that the current * process was injected at */ static int early_inject_location = INJECT_LOCATION_Invalid; static app_pc early_inject_address = NULL; static app_pc ldrpLoadDll_address_not_NT = NULL; static app_pc ldrpLoadDll_address_NT = NULL; static app_pc ldrpLoadImportModule_address = NULL; dcontext_t *early_inject_load_helper_dcontext = NULL; static char cwd[MAXIMUM_PATH]; /* forwards */ static void get_system_basic_info(void); static bool is_child_in_thin_client(HANDLE process_handle); static const char* get_process_SID_string(void); static const PSID get_Everyone_SID(void); static const PSID get_process_owner_SID(void); static size_t get_allocation_size_ex(HANDLE process, byte *pc, byte **base_pc); static void os_take_over_init(void); static void os_take_over_exit(void); bool os_user_directory_supports_ownership(void); /* Safely gets the target of the call (assumed to be direct) to the nth stack * frame (i.e. the entry point to that function), returns NULL on failure. * NOTE - Be aware this routine may be called by DllMain before dr is * initialized (before even syscalls_init, though note that safe_read should * be fine as will just use the nt wrapper). 
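 * Illustrative frame layout this walk assumes (plain ebp-chained
 * frames; this mirrors the reads in the body below, added for clarity):
 *   [ebp]                 saved caller ebp (next_frame[0])
 *   [ebp + sizeof(reg_t)] return address   (next_frame[1]), expected
 *                         to sit just after a 5-byte "call rel32"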
*/ static app_pc get_nth_stack_frames_call_target(int num_frames, reg_t *ebp) { reg_t *cur_ebp = ebp; reg_t next_frame[2]; int i; /* walk up the stack */ for (i = 0; i < num_frames; i++) { if (!safe_read(cur_ebp, sizeof(next_frame), next_frame)) break; cur_ebp = (reg_t *)next_frame[0]; } if (i == num_frames) { /* success walking frames, return address should be the after * call address of the call that targeted this frame */ /* FIXME - would be nice to get this with decode_cti, but dr might * not even be initialized yet and this is safer */ byte buf[5]; /* sizeof call rel32 */ if (safe_read((byte *)(next_frame[1] - sizeof(buf)), sizeof(buf), &buf) && buf[0] == CALL_REL32_OPCODE) { app_pc return_point = (app_pc)next_frame[1]; return (return_point + *(int *)&buf[1]); } } return NULL; } /* Should be called from NtMapViewOfSection interception with *base * pointing to the just mapped region. */ void check_for_ldrpLoadImportModule(byte *base, uint *ebp) { MEMORY_BASIC_INFORMATION mbi; if (query_virtual_memory(base, &mbi, sizeof(mbi)) == sizeof(mbi) && mbi.Type == MEM_IMAGE && is_readable_pe_base(base)) { /* loaded a module, check name */ const char *name; bool match = false; name = get_dll_short_name(base); /* we only need pe name */ if (name != NULL) { LOG(GLOBAL, LOG_TOP, 1, "early_inject hit mapview of image %s\n", name); string_option_read_lock(); /* we control both the pe_name and the option value so use * strcmp (vs. strcasecmp), just to keep things tight */ match = strcmp(DYNAMO_OPTION(early_inject_helper_name), name) == 0; string_option_read_unlock(); } if (match) { /* Found it. We expect the stack to look like this * (in NtMapViewOfSection) * ntdll!LdrpMapDll * ntdll!LdrpLoadImportModule (what we want) * After that we don't really care (it is one of the * Ldrp*ImportDescriptor* routines). So we walk the * stack back and get the desired address. * FIXME - would be nice if we had some way to double * check this address, could try to decode and check against * the versions we've seen. * Note that NtMapViewOfSection in all its various platform forms * (i.e. int, vsyscall, KiFastSystemCall etc.) doesn't set up a * new frame (nor do its callees) so it will always be depth 2 */ #define STACK_DEPTH_LdrpLoadImportModule 2 ldrpLoadImportModule_address = get_nth_stack_frames_call_target(STACK_DEPTH_LdrpLoadImportModule, (reg_t *)ebp); LOG(GLOBAL, LOG_TOP, 1, "early_inject found address "PFX" for LdrpLoadImportModule\n", ldrpLoadImportModule_address); } } } /**************************************************************************** ** DllMain Routines ** **/ #ifdef INTERNAL /* we have interp not inline calls to this routine */ void DllMainThreadAttach() { if (INTERNAL_OPTION(noasynch) && dynamo_initialized && !dynamo_exited) { /* we normally intercept thread creation in callback.c, but with * noasynch, we do it here (which is later, but better than nothing) */ LOG(GLOBAL, LOG_TOP|LOG_THREADS, 1, "DllMain: initializing new thread "TIDFMT"\n", get_thread_id()); dynamo_thread_init(NULL, NULL _IF_CLIENT_INTERFACE(false)); } } #endif /* Hand-made DO_ONCE since DllMain is executed prior to DR init */ DECLARE_FREQPROT_VAR(static bool do_once_DllMain, false); /* DLL entry point * N.B.: dynamo interprets this routine! 
*/ /* get_nth_stack_frames_call_target() assumes that DllMain has a frame pointer * so we cannot optimize it (i#566) */ START_DO_NOT_OPTIMIZE bool WINAPI /* get link warning 4216 if export via APIENTRY */ DllMain(HANDLE hModule, DWORD reason_for_call, LPVOID Reserved) { switch (reason_for_call) { case DLL_PROCESS_ATTACH: /* case 8097: with -no_hide, DllMain will be called a second time * after all the statically-bound dlls are loaded (the loader * blindly calls all the init routines regardless of whether a dll * was explicitly loaded and already had its init routine called). * We make that 2nd time a nop via a custom DO_ONCE (since default * DO_ONCE will try to unprotect .data, but we're pre-init). */ if (!do_once_DllMain) { byte *cur_ebp; do_once_DllMain = true; ASSERT(!dynamo_initialized); ASSERT(ldrpLoadDll_address_NT == NULL); ASSERT(ldrpLoadDll_address_not_NT == NULL); /* Carefully walk stack to find address of LdrpLoadDll. */ /* Remember dr isn't initialized yet, no need to worry about * protect from app etc., but also can't check options. */ GET_FRAME_PTR(cur_ebp); /* For non early_inject (late follow children, preinject) expect * stack to look like (for win2k and higher) * here (our DllMain) * ntdll!LdrpCallInitRoutine * ntdll!LdrpRunInitializeRoutines * ntdll!LdrpLoadDll * ntdll!LdrLoadDll * For NT it is the same except it doesn't have ntdll!LdrpCallInitRoutine. * * That's as far as we care, after that is likely to be shimeng.dll * or kernel32 (possibly someone else?) depending on how we were * injected. For -early_inject, ntdll!LdrGetProcedureAddress is * usually the root of the call to our DllMain (likely something * to do with load vs. init order at process startup? FIXME * understand better, is there a flag we can send to have this * called on load?), but in that case we use the address passed to * us by the parent. */ #define STACK_DEPTH_LdrpLoadDll_NT 3 #define STACK_DEPTH_LdrpLoadDll 4 /* Since dr isn't initialized yet we can't call get_os_version() * so just grab both possible LdrpLoadDll addresses (NT and non NT) * and we'll sort it out later in early_inject_init(). */ ldrpLoadDll_address_NT = get_nth_stack_frames_call_target(STACK_DEPTH_LdrpLoadDll_NT, (reg_t *)cur_ebp); ldrpLoadDll_address_not_NT = get_nth_stack_frames_call_target(STACK_DEPTH_LdrpLoadDll, (reg_t *)cur_ebp); /* FIXME - would be nice to have extra verification here, * but after this frame there are too many possibilities (many * of which are unexported) so it is hard to find something we * can check. 
*/ } else ASSERT(dynamo_initialized); break; #ifdef INTERNAL case DLL_THREAD_ATTACH: DllMainThreadAttach(); break; #endif /* we don't care about DLL_PROCESS_DETACH or DLL_THREAD_DETACH */ } return true; } END_DO_NOT_OPTIMIZE #ifdef WINDOWS_PC_SAMPLE static profile_t *global_profile = NULL; static profile_t *dynamo_dll_profile = NULL; static profile_t *ntdll_profile = NULL; file_t profile_file = INVALID_FILE; DECLARE_CXTSWPROT_VAR(mutex_t profile_dump_lock, INIT_LOCK_FREE(profile_dump_lock)); static void get_dll_bounds(wchar_t *name, app_pc *start, app_pc *end) { module_handle_t dllh; size_t len; PBYTE pb; MEMORY_BASIC_INFORMATION mbi; dllh = get_module_handle(name); ASSERT(dllh != NULL); pb = (PBYTE) dllh; /* FIXME: we should just call get_allocation_size() */ len = query_virtual_memory(pb, &mbi, sizeof(mbi)); ASSERT(len == sizeof(mbi)); ASSERT(mbi.State != MEM_FREE); *start = (app_pc) mbi.AllocationBase; do { if (mbi.State == MEM_FREE || (app_pc) mbi.AllocationBase != *start) break; if (POINTER_OVERFLOW_ON_ADD(pb, mbi.RegionSize)) break; pb += mbi.RegionSize; } while (query_virtual_memory(pb, &mbi, sizeof(mbi)) == sizeof(mbi)); *end = (app_pc) pb; } static void init_global_profiles() { app_pc start, end; /* create the profile file */ /* if logging is on create in log directory, else use base directory */ DOLOG(1, LOG_ALL, { char buf[MAX_PATH]; uint size = BUFFER_SIZE_ELEMENTS(buf); if (get_log_dir(PROCESS_DIR, buf, &size)) { NULL_TERMINATE_BUFFER(buf); strncat(buf, "\\profile", BUFFER_SIZE_ELEMENTS(buf) - strlen(buf)); NULL_TERMINATE_BUFFER(buf); profile_file = os_open(buf, OS_OPEN_REQUIRE_NEW|OS_OPEN_WRITE); LOG(GLOBAL, LOG_PROFILE, 1, "Profile file is \"%s\"\n", buf); } }); if (profile_file == INVALID_FILE) { get_unique_logfile(".profile", NULL, 0, false, &profile_file); } DOLOG(1, LOG_PROFILE, { if (profile_file == INVALID_FILE) LOG(GLOBAL, LOG_PROFILE, 1, "Failed to create profile file\n"); }); ASSERT(profile_file != INVALID_FILE); /* Case 7533: put basic run info in profile file. */ print_version_and_app_info(profile_file); /* set the interval, don't assert success, on my desktop anything less than * 1221 gets set to 1221 on laptop was different minimum value, at least * appears that os sets it close as possible to the requested (starting * value was 39021 for me) */ LOG(GLOBAL, LOG_PROFILE, 1, "Profile interval was %d, setting to %d,", nt_query_profile_interval(), dynamo_options.prof_pcs_freq); nt_set_profile_interval(dynamo_options.prof_pcs_freq); LOG(GLOBAL, LOG_PROFILE, 1, " is now %d (units of 100 nanoseconds)\n", nt_query_profile_interval()); print_file(profile_file, "Interval %d\n\n", nt_query_profile_interval()); /* create profiles */ /* Default shift of 30 gives 4 buckets for the global profile, * allows us to separate kernel and user space (even in the case * of 3GB user space). Note if invalid range given we default to * 30, so we always have a global profile to use as denominator * later. 
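 * Worked example (assuming a 32-bit address space): a shift of 30
 * maps 2^32 bytes into 2^32 >> 30 = 4 counters, which is enough to
 * tell kernel-space hits from user-space hits even with a 3GB user
 * split.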
*/ global_profile = create_profile(UNIVERSAL_REGION_BASE, UNIVERSAL_REGION_END, DYNAMO_OPTION(prof_pcs_global), NULL); if (dynamo_options.prof_pcs_DR >= 2 && dynamo_options.prof_pcs_DR <= 32) { get_dll_bounds(L_DYNAMORIO_LIBRARY_NAME, &start, &end); dynamo_dll_profile = create_profile(start, end, dynamo_options.prof_pcs_DR, NULL); } if (dynamo_options.prof_pcs_ntdll >= 2 && dynamo_options.prof_pcs_ntdll <= 32) { get_dll_bounds(L"ntdll.dll", &start, &end); ntdll_profile = create_profile(start, end, dynamo_options.prof_pcs_ntdll, NULL); } /* start profiles */ start_profile(global_profile); if (dynamo_dll_profile) start_profile(dynamo_dll_profile); if (ntdll_profile) start_profile(ntdll_profile); } static void dump_dll_profile(profile_t *profile, uint global_sum, char *dll_name) { uint dll_sum; uint top=0, bottom=0; dll_sum = sum_profile(profile); if (global_sum > 0) divide_uint64_print(dll_sum, global_sum, true, 2, &top, &bottom); print_file(profile_file, "\nDumping %s profile\n%d hits out of %d, %u.%.2u%%\n", dll_name, dll_sum, global_sum, top, bottom); LOG(GLOBAL, LOG_PROFILE, 1, "%s profile had %d hits out of %d total, %u.%.2u%%\n", dll_name, dll_sum, global_sum, top, bottom); dump_profile(profile_file, profile); free_profile(profile); } static void exit_global_profiles() { int global_sum; if (dynamo_dll_profile) stop_profile(dynamo_dll_profile); if (ntdll_profile) stop_profile(ntdll_profile); stop_profile(global_profile); global_sum = sum_profile(global_profile); /* we expect to be the last thread at this point. FIXME: we can remove the mutex_lock/unlock then */ mutex_lock(&profile_dump_lock); if (dynamo_dll_profile) dump_dll_profile(dynamo_dll_profile, global_sum, "dynamorio.dll"); if (ntdll_profile) dump_dll_profile(ntdll_profile, global_sum, "ntdll.dll"); print_file(profile_file, "\nDumping global profile\n%d hits\n", global_sum); dump_profile(profile_file, global_profile); mutex_unlock(&profile_dump_lock); LOG(GLOBAL, LOG_PROFILE, 1, "\nDumping global profile\n%d hits\n", global_sum); DOLOG(1, LOG_PROFILE, dump_profile(GLOBAL, global_profile);); free_profile(global_profile); DELETE_LOCK(profile_dump_lock); } #endif /** ** ****************************************************************************/ static uint get_context_xstate_flag(void) { /* i#437: AVX is supported on Windows 7 SP1 and Windows Server 2008 R2 SP1 * win7sp1+ both should be 0x40. */ if (YMM_ENABLED()) { /* YMM_ENABLED indicates both OS and processor support (i#1278) * but we expect OS support only on Win7 SP1+. * XXX: what about the WINDOWS Server 2008 R2? */ ASSERT_CURIOSITY(os_version >= WINDOWS_VERSION_8 || (os_version == WINDOWS_VERSION_7 && os_service_pack_major >= 1)); return (IF_X64_ELSE(CONTEXT_AMD64, CONTEXT_i386) | 0x40L); } return IF_X64_ELSE((CONTEXT_AMD64 | 0x20L), (CONTEXT_i386 | 0x40L)); } /* FIXME: Right now error reporting will work here, but once we have our * error reporting syscalls going through wrappers and requiring this * init routine, we'll have to have a fallback here that dynamically * determines the syscalls and finishes init, and then reports the error. * We may never be able to report errors for the non-NT OS family. * N.B.: this is too early for LOGs so don't do any -- any errors reported * will not die, they will simply skip LOG. * N.B.: this is prior to eventlog_init(), but then we've been reporting * usage errors prior to that for a long time now anyway. 
*/ bool windows_version_init(int num_GetContextThread, int num_AllocateVirtualMemory) { PEB *peb = get_own_peb(); /* choose appropriate syscall array (the syscall numbers change from * one version of windows to the next! * they may even change at different patch levels) */ syscalls = NULL; DOCHECK(1, { check_syscall_array_sizes(); }); /* In at least 2K, XP, XP64, Vista, and Win7, the service pack is * stored in peb->OSCSDVersion, major in the top byte: */ os_service_pack_major = (peb->OSCSDVersion & 0xff00) >> 8; os_service_pack_minor = (peb->OSCSDVersion & 0xff); if (peb->OSPlatformId == VER_PLATFORM_WIN32_NT) { /* WinNT or descendents */ /* N.B.: when adding new versions here, update the i#1598 unknown version * handling code below to use the most recent enum and arrays. */ if (peb->OSMajorVersion == 10 && peb->OSMinorVersion == 0) { /* Win10 does not provide a version number so we use the presence * of newly added syscalls to distinguish major updates. */ if (get_proc_address(get_ntdll_base(), "NtCallEnclave") != NULL) { if (module_is_64bit(get_ntdll_base())) { syscalls = (int *) windows_10_1709_x64_syscalls; os_name = "Microsoft Windows 10-1709 x64"; } else if (is_wow64_process(NT_CURRENT_PROCESS)) { syscalls = (int *) windows_10_1709_wow64_syscalls; os_name = "Microsoft Windows 10-1709 x64"; } else { syscalls = (int *) windows_10_1709_x86_syscalls; os_name = "Microsoft Windows 10-1709"; } os_version = WINDOWS_VERSION_10_1709; } else if (get_proc_address(get_ntdll_base(), "NtLoadHotPatch") != NULL) { if (module_is_64bit(get_ntdll_base())) { syscalls = (int *) windows_10_1703_x64_syscalls; os_name = "Microsoft Windows 10-1703 x64"; } else if (is_wow64_process(NT_CURRENT_PROCESS)) { syscalls = (int *) windows_10_1703_wow64_syscalls; os_name = "Microsoft Windows 10-1703 x64"; } else { syscalls = (int *) windows_10_1703_x86_syscalls; os_name = "Microsoft Windows 10-1703"; } os_version = WINDOWS_VERSION_10_1703; } else if (get_proc_address(get_ntdll_base(), "NtCreateRegistryTransaction") != NULL) { if (module_is_64bit(get_ntdll_base())) { syscalls = (int *) windows_10_1607_x64_syscalls; os_name = "Microsoft Windows 10-1607 x64"; } else if (is_wow64_process(NT_CURRENT_PROCESS)) { syscalls = (int *) windows_10_1607_wow64_syscalls; os_name = "Microsoft Windows 10-1607 x64"; } else { syscalls = (int *) windows_10_1607_x86_syscalls; os_name = "Microsoft Windows 10-1607"; } os_version = WINDOWS_VERSION_10_1607; } else if (get_proc_address(get_ntdll_base(), "NtCreateEnclave") != NULL) { if (module_is_64bit(get_ntdll_base())) { syscalls = (int *) windows_10_1511_x64_syscalls; os_name = "Microsoft Windows 10-1511 x64"; } else if (is_wow64_process(NT_CURRENT_PROCESS)) { syscalls = (int *) windows_10_1511_wow64_syscalls; os_name = "Microsoft Windows 10-1511 x64"; } else { syscalls = (int *) windows_10_1511_x86_syscalls; os_name = "Microsoft Windows 10-1511"; } os_version = WINDOWS_VERSION_10_1511; } else { if (module_is_64bit(get_ntdll_base())) { syscalls = (int *) windows_10_x64_syscalls; os_name = "Microsoft Windows 10 x64"; } else if (is_wow64_process(NT_CURRENT_PROCESS)) { syscalls = (int *) windows_10_wow64_syscalls; os_name = "Microsoft Windows 10 x64"; } else { syscalls = (int *) windows_10_x86_syscalls; os_name = "Microsoft Windows 10"; } os_version = WINDOWS_VERSION_10; } /* i#1825: future Windows updates will leave the PEB version at * 10.0.sp0, so we have to use syscall #'s to distinguish. 
* We check 2 different numbers currently toward the end of the * list in order to handle hooks on one of them and to handle * more weird reorderings. */ if ((num_GetContextThread != -1 && num_GetContextThread != syscalls[SYS_GetContextThread]) || (num_AllocateVirtualMemory != -1 && num_AllocateVirtualMemory != syscalls[SYS_AllocateVirtualMemory])) { syscalls = NULL; } } else if (peb->OSMajorVersion == 6 && peb->OSMinorVersion == 3) { if (module_is_64bit(get_ntdll_base())) { syscalls = (int *) windows_81_x64_syscalls; os_name = "Microsoft Windows 8.1 x64"; } else if (is_wow64_process(NT_CURRENT_PROCESS)) { syscalls = (int *) windows_81_wow64_syscalls; os_name = "Microsoft Windows 8.1 x64"; } else { syscalls = (int *) windows_81_x86_syscalls; os_name = "Microsoft Windows 8.1"; } os_version = WINDOWS_VERSION_8_1; } else if (peb->OSMajorVersion == 6 && peb->OSMinorVersion == 2) { if (module_is_64bit(get_ntdll_base())) { syscalls = (int *) windows_8_x64_syscalls; os_name = "Microsoft Windows 8 x64"; } else if (is_wow64_process(NT_CURRENT_PROCESS)) { syscalls = (int *) windows_8_wow64_syscalls; os_name = "Microsoft Windows 8 x64"; } else { syscalls = (int *) windows_8_x86_syscalls; os_name = "Microsoft Windows 8"; } os_version = WINDOWS_VERSION_8; } else if (peb->OSMajorVersion == 6 && peb->OSMinorVersion == 1) { module_handle_t ntdllh = get_ntdll_base(); /* i#437: ymm/avx is supported after Win-7 SP1 */ if (os_service_pack_major >= 1) { /* Sanity check on our SP ver retrieval */ ASSERT(get_proc_address(ntdllh, "RtlCopyContext") != NULL); if (module_is_64bit(get_ntdll_base()) || is_wow64_process(NT_CURRENT_PROCESS)) { syscalls = (int *) windows_7_x64_syscalls; os_name = "Microsoft Windows 7 x64 SP1"; } else { syscalls = (int *) windows_7_syscalls; os_name = "Microsoft Windows 7 SP1"; } } else { ASSERT(get_proc_address(ntdllh, "RtlCopyContext") == NULL); if (module_is_64bit(get_ntdll_base()) || is_wow64_process(NT_CURRENT_PROCESS)) { syscalls = (int *) windows_7_x64_syscalls; os_name = "Microsoft Windows 7 x64 SP0"; } else { syscalls = (int *) windows_7_syscalls; os_name = "Microsoft Windows 7 SP0"; } } os_version = WINDOWS_VERSION_7; } else if (peb->OSMajorVersion == 6 && peb->OSMinorVersion == 0) { module_handle_t ntdllh = get_ntdll_base(); if (os_service_pack_major >= 1) { /* Vista system call number differ between service packs, we use * the existence of NtReplacePartitionUnit as a sanity check * for sp1 - see PR 246402. They also differ for * 32-bit vs 64-bit/wow64. 
*/ ASSERT(get_proc_address(ntdllh, "NtReplacePartitionUnit") != NULL); if (module_is_64bit(get_ntdll_base()) || is_wow64_process(NT_CURRENT_PROCESS)) { syscalls = (int *) windows_vista_sp1_x64_syscalls; os_name = "Microsoft Windows Vista x64 SP1"; } else { syscalls = (int *) windows_vista_sp1_syscalls; os_name = "Microsoft Windows Vista SP1"; } } else { ASSERT(get_proc_address(ntdllh, "NtReplacePartitionUnit") == NULL); if (module_is_64bit(get_ntdll_base()) || is_wow64_process(NT_CURRENT_PROCESS)) { syscalls = (int *) windows_vista_sp0_x64_syscalls; os_name = "Microsoft Windows Vista x64 SP0"; } else { syscalls = (int *) windows_vista_sp0_syscalls; os_name = "Microsoft Windows Vista SP0"; } } os_version = WINDOWS_VERSION_VISTA; } else if (peb->OSMajorVersion == 5 && peb->OSMinorVersion == 2) { /* Version 5.2 can mean 32- or 64-bit 2003, or 64-bit XP */ /* Assumption: get_ntdll_base makes no system calls */ if (module_is_64bit(get_ntdll_base()) || is_wow64_process(NT_CURRENT_PROCESS)) { /* We expect x64 2003 and x64 XP to have the same system call * numbers but that has not been verified. System call numbers * remain the same even under WOW64 (ignoring the extra WOW * system calls, anyway). We do not split the version for WOW * as most users do not care to distinguish; those that do must * use a separate is_wow64_process() check. */ syscalls = (int *) windows_XP_x64_syscalls; /* We don't yet have a need to split the version enum */ os_version = WINDOWS_VERSION_2003; os_name = "Microsoft Windows x64 XP/2003"; } else { syscalls = (int *) windows_2003_syscalls; os_version = WINDOWS_VERSION_2003; os_name = "Microsoft Windows 2003"; } } else if (peb->OSMajorVersion == 5 && peb->OSMinorVersion == 1) { syscalls = (int *) windows_XP_syscalls; os_version = WINDOWS_VERSION_XP; os_name = "Microsoft Windows XP"; } else if (peb->OSMajorVersion == 5 && peb->OSMinorVersion == 0) { syscalls = (int *) windows_2000_syscalls; os_version = WINDOWS_VERSION_2000; os_name = "Microsoft Windows 2000"; } else if (peb->OSMajorVersion == 4) { module_handle_t ntdllh = get_ntdll_base(); os_version = WINDOWS_VERSION_NT; /* NT4 syscalls differ among service packs. * Rather than reading the registry to find the service pack we * directly check which system calls are there. We don't just * check the number of the last syscall in our list b/c we want to * avoid issues w/ hookers. 
* We rely on these observations: * SP3: + Nt{Read,Write}FileScatter * SP4: - NtW32Call */ if (get_proc_address(ntdllh, "NtW32Call") != NULL) { /* < SP4 */ /* we don't know whether SP1 and SP2 fall in line w/ SP0 or w/ SP3, * or possibly are different from both, but we don't support them */ if (get_proc_address(ntdllh, "NtReadFileScatter") != NULL) { /* > SP0 */ syscalls = (int *) windows_NT_sp3_syscalls; os_name = "Microsoft Windows NT SP3"; } else { /* < SP3 */ syscalls = (int *) windows_NT_sp0_syscalls; os_name = "Microsoft Windows NT SP0"; } } else { syscalls = (int *) windows_NT_sp4_syscalls; os_name = "Microsoft Windows NT SP4, 5, 6, or 6a"; } } if (syscalls == NULL) { if (peb->OSMajorVersion == 10 && peb->OSMinorVersion == 0) { SYSLOG_INTERNAL_WARNING ("WARNING: Running on unsupported Windows 10+ version"); os_name = "Unknown Windows 10+ version"; } else { SYSLOG_INTERNAL_ERROR("Unknown Windows NT-family version: %d.%d", peb->OSMajorVersion, peb->OSMinorVersion); os_name = "Unknown Windows NT-family version"; } if (dynamo_options.max_supported_os_version < peb->OSMajorVersion * 10 + peb->OSMinorVersion) { if (standalone_library) return false; /* let app handle it */ FATAL_USAGE_ERROR(BAD_OS_VERSION, 4, get_application_name(), get_application_pid(), PRODUCT_NAME, os_name); } /* i#1598: try to make progress. Who knows, everything might just work. * First, we copy the latest numbers (mostly for SYSCALL_NOT_PRESENT). * Then in syscalls_init() we try to update with real numbers from * the wrappers (best-effort, modulo hooks). */ syscalls = windows_unknown_syscalls; if (module_is_64bit(get_ntdll_base())) { memcpy(syscalls, windows_10_1709_x64_syscalls, SYS_MAX*sizeof(syscalls[0])); } else if (is_wow64_process(NT_CURRENT_PROCESS)) { memcpy(syscalls, windows_10_1709_wow64_syscalls, SYS_MAX*sizeof(syscalls[0])); } else { memcpy(syscalls, windows_10_1709_x86_syscalls, SYS_MAX*sizeof(syscalls[0])); } os_version = WINDOWS_VERSION_10_1709; /* just use latest */ } } else if (peb->OSPlatformId == VER_PLATFORM_WIN32_WINDOWS) { /* Win95 or Win98 */ uint ver_high = (peb->OSBuildNumber >> 8) & 0xff; uint ver_low = peb->OSBuildNumber & 0xff; if (standalone_library) return false; /* let app handle it */ if (ver_low >= 90 || ver_high >= 5) os_name = "Windows ME"; else if (ver_low >= 10 && ver_low < 90) os_name = "Windows 98"; else if (ver_low < 5) os_name = "Windows 3.1 / WfWg"; else if (ver_low < 10) os_name = "Windows 98"; else os_name = "this unknown version of Windows"; FATAL_USAGE_ERROR(BAD_OS_VERSION, 4, get_application_name(), get_application_pid(), PRODUCT_NAME, os_name); } else { if (standalone_library) return false; /* let app handle it */ os_name = "Win32s"; /* Win32S on Windows 3.1 */ FATAL_USAGE_ERROR(BAD_OS_VERSION, 4, get_application_name(), get_application_pid(), PRODUCT_NAME, os_name); } return true; } /* Note that assigning a process to a Job is done only after it has * been created - with ZwAssignProcessToJobObject(), and we may start * before or after that has been done. 
*/ static void print_mem_quota() { QUOTA_LIMITS qlimits; NTSTATUS res = get_process_mem_quota(NT_CURRENT_PROCESS, &qlimits); if (!NT_SUCCESS(res)) { ASSERT(false && "print_mem_quota"); return; } LOG(GLOBAL, LOG_TOP, 1, "Process Memory Limits:\n"); LOG(GLOBAL, LOG_TOP, 1, "\t Paged pool limit: %6d KB\n", qlimits.PagedPoolLimit/1024); LOG(GLOBAL, LOG_TOP, 1, "\t Non Paged pool limit: %6d KB\n", qlimits.NonPagedPoolLimit/1024); LOG(GLOBAL, LOG_TOP, 1, "\t Minimum working set size: %6d KB\n", qlimits.MinimumWorkingSetSize/1024); LOG(GLOBAL, LOG_TOP, 1, "\t Maximum working set size: %6d KB\n", qlimits.MaximumWorkingSetSize/1024); /* 4GB for unlimited */ LOG(GLOBAL, LOG_TOP, 1, "\t Pagefile limit: %6d KB\n", qlimits.PagefileLimit/1024); /* TimeLimit not supported on Win2k, but WSRM (Windows System * Resource Manager) can definitely set, so expected to be * supported on Win2003. Time in 100ns units. */ LOG(GLOBAL, LOG_TOP, 1, "\t TimeLimit: 0x%.8x%.8x\n", qlimits.TimeLimit.HighPart, qlimits.TimeLimit.LowPart); } /* os-specific initializations */ void os_init(void) { PEB *peb = get_own_peb(); uint alignment = 0; uint offs; int res; DEBUG_DECLARE(bool ok;) if (dynamo_options.max_supported_os_version < peb->OSMajorVersion * 10 + peb->OSMinorVersion) { SYSLOG(SYSLOG_WARNING, UNSUPPORTED_OS_VERSION, 3, get_application_name(), get_application_pid(), os_name); } /* make sure we create the message box title string before we are * multi-threaded and it is no longer safe to do so on demand, this also * takes care of initializing the static buffer get_application_name * and get_application_pid */ debugbox_setup_title(); win32_pid = get_process_id(); LOG(GLOBAL, LOG_TOP, 1, "Process id: %d\n", win32_pid); peb_ptr = (void *) get_own_peb(); LOG(GLOBAL, LOG_TOP, 1, "PEB: "PFX"\n", peb_ptr); ASSERT((PEB *)peb_ptr == get_own_teb()->ProcessEnvironmentBlock); #ifndef X64 /* We no longer rely on peb64 being adjacent to peb for i#816 but * let's print it nonetheless */ DOLOG(1, LOG_TOP, { if (is_wow64_process(NT_CURRENT_PROCESS)) { uint64 peb64 = (ptr_uint_t) get_own_x64_peb(); LOG(GLOBAL, LOG_TOP, 1, "x64 PEB: "UINT64_FORMAT_STRING"\n", peb64); } }); #endif /* match enums in os_exports.h with TEB definition from ntdll.h */ ASSERT(EXCEPTION_LIST_TIB_OFFSET == offsetof(TEB, ExceptionList)); ASSERT(TOP_STACK_TIB_OFFSET == offsetof(TEB, StackBase)); ASSERT(BASE_STACK_TIB_OFFSET == offsetof(TEB, StackLimit)); ASSERT(FIBER_DATA_TIB_OFFSET == offsetof(TEB, FiberData)); ASSERT(SELF_TIB_OFFSET == offsetof(TEB, Self)); ASSERT(TID_TIB_OFFSET == offsetof(TEB, ClientId) + offsetof(CLIENT_ID, UniqueThread)); ASSERT(PID_TIB_OFFSET == offsetof(TEB, ClientId) + offsetof(CLIENT_ID, UniqueProcess)); ASSERT(ERRNO_TIB_OFFSET == offsetof(TEB, LastErrorValue)); ASSERT(WOW64_TIB_OFFSET == offsetof(TEB, WOW32Reserved)); ASSERT(PEB_TIB_OFFSET == offsetof(TEB, ProcessEnvironmentBlock)); /* windows_version_init should have already been called */ ASSERT(syscalls != NULL); LOG(GLOBAL, LOG_TOP, 1, "Running on %s == %d SP%d.%d\n", os_name, os_version, os_service_pack_major, os_service_pack_minor); /* i#437, i#1278: get the context_xstate after proc_init() sets proc_avx_enabled() */ context_xstate = get_context_xstate_flag(); ntdll_init(); callback_init(); syscall_interception_init(); eventlog_init(); /* os dependent and currently Windows specific */ if (os_version >= WINDOWS_VERSION_XP) { /* FIXME: bootstrapping problem where we see 0x7ffe0300 before we see * the 1st sysenter...solution for now is to hardcode initial values so * we pass the 1st 
PROGRAM_SHEPHERDING code origins test, then re-set these once * we see the 1st syscall. */ /* on XP service pack 2 the syscall enter and exit stubs are Ki * routines in ntdll.dll FIXME : as a hack for now will leave * page_start as 0 (as it would be for 2000, since region is * executable no need for the code origins exception) and * after syscall to the appropriate value, this means we will still * execute the return natively (as in xp/03) for simplicity even * though we could intercept it much more easily than before since * the ki routines are aligned (less concern about enough space for the * interception stub, nicely exported for us etc.) */ /* initializing so get_module_handle should be safe, FIXME */ module_handle_t ntdllh = get_ntdll_base(); app_pc return_point = (app_pc) get_proc_address(ntdllh, "KiFastSystemCallRet"); if (return_point != NULL) { app_pc syscall_pc = (app_pc) get_proc_address(ntdllh, "KiFastSystemCall"); vsyscall_after_syscall = (app_pc) return_point; /* we'll re-set this once we see the 1st syscall, but we set an * initial value to what it should be for go-native scenarios * where we may not see the 1st syscall (DrMem i#1235). */ if (syscall_pc != NULL) vsyscall_syscall_end_pc = syscall_pc + SYSENTER_LENGTH; else vsyscall_syscall_end_pc = NULL; /* wait until 1st one */ } else { /* FIXME : if INT syscalls are being used then this opens up a * security hole for the following page */ vsyscall_page_start = VSYSCALL_PAGE_START_BOOTSTRAP_VALUE; vsyscall_after_syscall = VSYSCALL_AFTER_SYSCALL_BOOTSTRAP_VALUE; vsyscall_syscall_end_pc = vsyscall_after_syscall; } } /* TLS alignment: use either the preferred size on this processor, or the hardcoded option value */ if (DYNAMO_OPTION(tls_align) == 0) { IF_X64(ASSERT_TRUNCATE(alignment, uint, proc_get_cache_line_size())); alignment = (uint) proc_get_cache_line_size(); } else { alignment = DYNAMO_OPTION(tls_align); } /* case 3701 about performance gains, * and case 6670 about TLS conflict in SQL2005 */ /* FIXME: could control which entry should be cache aligned, but * we should be able to restructure the state to ensure first * entry is indeed important. Should make sure we choose same * position in both release and debug, see local_state_t.stats. */ /* allocate thread-private storage */ res = tls_calloc(false/*no synch required*/, &offs, TLS_NUM_SLOTS, alignment); DODEBUG({ /* FIXME: elevate failure here to a release-build syslog? 
*/ if (!res) { SYSLOG_INTERNAL_ERROR("Cannot allocate %d tls slots at %d alignment", TLS_NUM_SLOTS, alignment); } }); /* retry with no alignment on failure */ if (!res) { alignment = 0; ASSERT_NOT_TESTED(); /* allocate thread-private storage with no alignment */ res = tls_calloc(false/*no synch required*/, &offs, TLS_NUM_SLOTS, alignment); /* report even in release build that we really can't grab in TLS64 */ if (!res) { ASSERT_NOT_TESTED(); SYSLOG_INTERNAL_ERROR("Cannot allocate %d tls slots at %d alignment", TLS_NUM_SLOTS, alignment); report_dynamorio_problem(NULL, DUMPCORE_INTERNAL_EXCEPTION, NULL, NULL, "Unrecoverable error on TLS allocation", NULL, NULL, NULL); } } ASSERT(res); ASSERT(offs != TLS_UNINITIALIZED); ASSERT_TRUNCATE(tls_local_state_offs, ushort, offs); tls_local_state_offs = (ushort) offs; LOG(GLOBAL, LOG_TOP, 1, "%d TLS slots are @ %s:0x%x\n", TLS_NUM_SLOTS, IF_X64_ELSE("gs", "fs"), tls_local_state_offs); ASSERT_CURIOSITY(proc_is_cache_aligned(get_local_state()) || DYNAMO_OPTION(tls_align != 0)); if (IF_UNIT_TEST_ELSE(true, !standalone_library)) { tls_dcontext_offs = os_tls_offset(TLS_DCONTEXT_SLOT); ASSERT(tls_dcontext_offs != TLS_UNINITIALIZED); } DOLOG(1, LOG_VMAREAS, { print_modules(GLOBAL, DUMP_NOT_XML); }); DOLOG(2, LOG_TOP, { print_mem_quota(); }); #ifdef WINDOWS_PC_SAMPLE if (dynamo_options.profile_pcs) init_global_profiles(); #endif #ifdef PROFILE_RDTSC if (dynamo_options.profile_times) { ASSERT_NOT_TESTED(); kilo_hertz = get_timer_frequency(); LOG(GLOBAL, LOG_TOP|LOG_STATS, 1, "CPU MHz is %d\n", kilo_hertz/1000); } #endif if (!dr_early_injected && !dr_earliest_injected) inject_init(); get_dynamorio_library_path(); /* just to preserve side effects. If not done yet in eventlog, * path needs to be preserved before hiding from module list. */ aslr_init(); /* ensure static cache buffers are primed, both for .data protection purposes and * because it may not be safe to get this information later */ get_own_qualified_name(); get_own_unqualified_name(); get_own_short_qualified_name(); get_own_short_unqualified_name(); get_application_name(); get_application_short_name(); get_application_short_unqualified_name(); get_process_primary_SID(); get_process_SID_string(); get_process_owner_SID(); get_Everyone_SID(); /* avoid later .data-unprotection calls */ get_dynamorio_dll_preferred_base(); get_image_entry(); get_application_base(); get_application_end(); get_system_basic_info(); if (!standalone_library) os_user_directory_supports_ownership(); is_wow64_process(NT_CURRENT_PROCESS); is_in_ntdll(get_ntdll_base()); os_take_over_init(); /* i#298: cache cur dir at init time, when safer to read it. * We just don't support later changes to cur dir. */ DEBUG_DECLARE(ok =) os_get_current_dir(cwd, BUFFER_SIZE_ELEMENTS(cwd)); } void native_exec_os_init(void) { /* Nothing yet. 
*/ } static void print_mem_stats() { VM_COUNTERS mem; bool ok = get_process_mem_stats(NT_CURRENT_PROCESS, &mem); ASSERT(ok); LOG(GLOBAL, LOG_TOP, 1, "Process Memory Statistics:\n"); LOG(GLOBAL, LOG_TOP, 1, "\tPeak virtual size: %6d KB\n", mem.PeakVirtualSize/1024); LOG(GLOBAL, LOG_TOP, 1, "\tPeak working set size: %6d KB\n", mem.PeakWorkingSetSize/1024); LOG(GLOBAL, LOG_TOP, 1, "\tPeak paged pool usage: %6d KB\n", mem.QuotaPeakPagedPoolUsage/1024); LOG(GLOBAL, LOG_TOP, 1, "\tPeak non-paged pool usage: %6d KB\n", mem.QuotaPeakNonPagedPoolUsage/1024); LOG(GLOBAL, LOG_TOP, 1, "\tPeak pagefile usage: %6d KB\n", mem.PeakPagefileUsage/1024); } /* os-specific atexit cleanup * note that this is called even on the fast exit release path so don't add * unnecessary cleanup without ifdef DEBUG, but be careful with ifdef DEBUG's * also as Detach wants to leave nothing from us behind * Called by dynamo_shared_exit() and the fast path in dynamo_process_exit(). */ void os_fast_exit(void) { /* make sure we never see an .exe that does all its work in * DllMain()'s -- unlikely, even .NET apps have an image entry * just to jump to mscoree * * The curiosity is relaxed for thin_client and hotp_only; if nothing else * in the core has run into this, then reached_image_entry doesn't have * to be set for thin_client & hotp_only. TODO: put in the image entry * hook or not? * * The curiosity is also relaxed if we enter DR using the API */ ASSERT_CURIOSITY(reached_image_entry_yet() || RUNNING_WITHOUT_CODE_CACHE() IF_APP_EXPORTS( || dr_api_entry) /* Clients can go native. XXX: add var for whether client did? */ IF_CLIENT_INTERFACE(|| CLIENTS_EXIST())); DOLOG(1, LOG_TOP, { print_mem_quota(); }); DOLOG(1, LOG_TOP, { print_mem_stats(); }); #ifdef WINDOWS_PC_SAMPLE if (dynamo_options.profile_pcs) { exit_global_profiles(); /* check to see if we are using the fast exit path * if so dump profiles that were skipped */ # ifndef DEBUG if (dynamo_detaching_flag == LOCK_FREE_STATE) { /* fast exit path, get remaining ungathered profile data */ if (dynamo_options.prof_pcs_gencode >= 2 && dynamo_options.prof_pcs_gencode <= 32) { thread_record_t **threads; int num, i; /* get surviving threads */ arch_profile_exit(); mutex_lock(&thread_initexit_lock); get_list_of_threads(&threads, &num); for (i = 0; i < num; i++) { arch_thread_profile_exit(threads[i]->dcontext); } global_heap_free(threads, num*sizeof(thread_record_t*) HEAPACCT(ACCT_THREAD_MGT)); mutex_unlock(&thread_initexit_lock); } if (dynamo_options.prof_pcs_fcache >= 2 && dynamo_options.prof_pcs_fcache <= 32) { /* note that fcache_exit() is called before os_fast_exit(), * we are here on fast exit path in which case fcache_exit() * is not called */ fcache_profile_exit(); } if (dynamo_options.prof_pcs_stubs >= 2 && dynamo_options.prof_pcs_stubs <= 32) { special_heap_profile_exit(); } } # endif print_file(profile_file, "\nFinished dumping all profile info\n"); close_file(profile_file); } #endif eventlog_fast_exit(); #ifdef DEBUG module_info_exit(); DELETE_LOCK(snapshot_lock); #endif /* case 10338: we don't free TLS on the fast path, in case there * are other active threads: we don't want to synchall on exit so * we let other threads run and try not to crash them until * the process is terminated. */ DELETE_LOCK(dump_core_lock); DELETE_LOCK(debugbox_lock); callback_exit(); ntdll_exit(); } /* os-specific atexit cleanup since Detach wants to leave nothing from * us behind. In addition any debug cleanup should only be DODEBUG. * Called by dynamo_shared_exit(). 
* Note it is expected to be called _after_ os_fast_exit(). */ void os_slow_exit(void) { /* free and zero thread-private storage (case 10338: slow path only) */ DEBUG_DECLARE(int res = ) tls_cfree(true/*need to synch*/, (uint) tls_local_state_offs, TLS_NUM_SLOTS); ASSERT(res); syscall_interception_exit(); aslr_exit(); eventlog_slow_exit(); os_take_over_exit(); tls_dcontext_offs = TLS_UNINITIALIZED; } /* Win8 WOW64 does not point edx at the param base so we must * put the args on the actual stack. We could have multiple threads * writing to these same slots so we use the TEB which should be dead * (unless the syscall fails and the app continues: which we do not handle). * Xref i#565. */ /* Pass INVALID_HANDLE_VALUE for process exit */ byte * os_terminate_wow64_stack(HANDLE thread_handle) { #ifdef X64 return (byte *) wow64_syscall_stack; #else if (syscall_uses_edx_param_base()) return (byte *) wow64_syscall_stack; else { TEB *teb; if (thread_handle == INVALID_HANDLE_VALUE) teb = get_own_teb(); else teb = get_teb(thread_handle); if (teb == NULL) /* app may have passed bogus handle */ return (byte *) wow64_syscall_stack; /* We use our scratch slots in the TEB. We need room for syscall * call*'s retaddr below and 2 args for os_terminate_wow64_write_args() * above, so we take our own xbx slot, which has xax below and xcx+xdx * above. We do not have the extra safety slot that wow64_syscall_stack * has, but that's not necessary, and if the wow64 wrapper wrote to it * it would just be writing to an app slot that's likely unused (b/c DR * takes TLS slots from the end). * * XXX: it would be cleaner to not write to this until we're done * cleaning up private libraries, which examine the TEB. * Then we could use any part of the TEB. * * XXX: we rely here on os_slow_exit()'s tls_cfree() not zeroing out * our TLS slots during cleanup (i#1156). */ return (byte *)teb + os_tls_offset(TLS_XBX_SLOT); } #endif } /* Only takes action when edx is not the param base */ void os_terminate_wow64_write_args(bool exit_process, HANDLE proc_or_thread_handle, int exit_status) { #ifndef X64 if (!syscall_uses_edx_param_base()) { byte *xsp = os_terminate_wow64_stack(exit_process ? INVALID_HANDLE_VALUE : proc_or_thread_handle); ASSERT(ALIGNED(xsp, sizeof(reg_t))); /* => atomic writes */ /* skip a slot (natively it's the retaddr from the call to the wrapper) */ *(((reg_t*)xsp)+1) = (reg_t) proc_or_thread_handle; *(((reg_t*)xsp)+2) = (reg_t) exit_status; } #endif } /* FIXME: what are good values here? */ #define KILL_PROC_EXIT_STATUS -1 #define KILL_THREAD_EXIT_STATUS -1 /* custom_code only honored if exit_process == true */ static byte * os_terminate_static_arguments(bool exit_process, bool custom_code, int exit_code) { byte *arguments; /* arguments for NtTerminate{Process,Thread} */ typedef struct _terminate_args_t { union { const byte *debug_code; byte pad_bytes[SYSCALL_PARAM_MAX_OFFSET]; } padding; struct { IN HANDLE ProcessOrThreadHandle; IN NTSTATUS ExitStatus; } args; } terminate_args_t; /* It is not safe to use app stack and hope application will work. * We need to stick the arguments for NtTerminate* in a place that * doesn't exacerbate the problem - esp may have been in attacker's * hands - so we place args in const static (read only) dr memory. */ /* To facilitate detecting syscall failure for SYSENTER, we set a * retaddr at edx (two extra slots there) as esp will be set to edx * by the kernel at the return from the sysenter. The kernel then sends * control to a native ret which targets the debug infinite loop. 
* (DEBUG only). */ static const terminate_args_t term_thread_args = { IF_DEBUG_ELSE_0((byte *)debug_infinite_loop), /* 0 -> NULL for release */ {NT_CURRENT_THREAD, KILL_THREAD_EXIT_STATUS} }; static const terminate_args_t term_proc_args = { IF_DEBUG_ELSE_0((byte *)debug_infinite_loop), /* 0 -> NULL for release */ {NT_CURRENT_PROCESS, KILL_PROC_EXIT_STATUS} }; /* special sygate forms (non-const) */ static terminate_args_t sygate_term_thread_args = { 0, /* will be set to sysenter_ret_address */ {NT_CURRENT_THREAD, KILL_THREAD_EXIT_STATUS} }; static terminate_args_t sygate_term_proc_args = { 0, /* will be set to sysenter_ret_address */ {NT_CURRENT_PROCESS, KILL_PROC_EXIT_STATUS} }; /* for variable exit code */ static terminate_args_t custom_term_proc_args = { IF_DEBUG_ELSE_0((byte *)debug_infinite_loop), /* 0 -> NULL for release */ {NT_CURRENT_PROCESS, KILL_PROC_EXIT_STATUS} }; /* for LOG statement just pick proc vs. thread here, will adjust for * offset below */ if (exit_process) { if (custom_code) { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); ATOMIC_4BYTE_WRITE((byte *)&custom_term_proc_args.args.ExitStatus, exit_code, false); SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); arguments = (byte *)&custom_term_proc_args; } else if (DYNAMO_OPTION(sygate_sysenter) && get_syscall_method() == SYSCALL_METHOD_SYSENTER) { byte *tgt = (byte *)&sygate_term_proc_args; /* Note we overwrite every time we use this, but it is ATOMIC and * always with the same value so it is ok */ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); ATOMIC_ADDR_WRITE(tgt, sysenter_ret_address, false); DODEBUG({ ATOMIC_ADDR_WRITE(tgt+sizeof(sysenter_ret_address), (byte *)debug_infinite_loop, false);}); SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); arguments = (byte *)&sygate_term_proc_args; } else arguments = (byte *)&term_proc_args; } else { if (DYNAMO_OPTION(sygate_sysenter) && get_syscall_method() == SYSCALL_METHOD_SYSENTER) { byte *tgt = (byte *)&sygate_term_thread_args; SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); ATOMIC_ADDR_WRITE(tgt, sysenter_ret_address, false); DODEBUG({ tgt += sizeof(sysenter_ret_address); ATOMIC_ADDR_WRITE(tgt, (byte *)debug_infinite_loop, false);}); SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); arguments = (byte *)&sygate_term_thread_args; } else arguments = (byte *)&term_thread_args; } LOG(THREAD_GET, LOG_SYSCALLS, 2, "Placing terminate arguments tombstone at "PFX" offset=0x%x\n", arguments, SYSCALL_PARAM_OFFSET()); os_terminate_wow64_write_args (exit_process, ((terminate_args_t*)arguments)->args.ProcessOrThreadHandle, ((terminate_args_t*)arguments)->args.ExitStatus); arguments += offsetof(terminate_args_t, args) - SYSCALL_PARAM_OFFSET(); return arguments; } /* dcontext is not needed for TERMINATE_PROCESS, so can pass NULL in */ void os_terminate_common(dcontext_t *dcontext, terminate_flags_t terminate_type, bool custom_code, int exit_code) { HANDLE currentThreadOrProcess = NT_CURRENT_PROCESS; bool exit_process = true; ASSERT(TEST(TERMINATE_PROCESS, terminate_type) != /* xor */ TEST(TERMINATE_THREAD, terminate_type)); /* We could be holding the bb_building_lock at this point -- if we cleanup, * we will get a rank order violation with all_threads_synch_lock. if we * unlock the bb lock, we risk an error about the non-owning thread * releasing the lock. * Our solution is for the caller to release it when possible -- * on an attack we know if we hold it or not. 
But for other callers * they do not know who holds it...for now we do nothing, none of them * terminate just a thread, so the process is going down anyway, and it's * just a debug build assertion :) */ /* clean up may be dangerous - just terminate */ if (terminate_type == TERMINATE_PROCESS) { /* skip synchronizing dynamic options, it is risky and caller has almost * certainly already done so for a syslog */ if (TESTANY(DETACH_ON_TERMINATE|DETACH_ON_TERMINATE_NO_CLEAN, DYNAMO_OPTION(internal_detach_mask))) { /* FIXME : if we run into stack problems we could reset the stack * here though caller has likely already gone as deep as detach * will since almost everyone SYSLOG's before calling this */ detach_helper(TEST(DETACH_ON_TERMINATE_NO_CLEAN, DYNAMO_OPTION(internal_detach_mask)) ? DETACH_BAD_STATE_NO_CLEANUP : DETACH_BAD_STATE); /* skip option synch, make this as safe as possible */ SYSLOG_INTERNAL_NO_OPTION_SYNCH (SYSLOG_WARNING, "detach on terminate failed or already started by another thread, " "killing thread "TIDFMT"\n", get_thread_id()); /* if we get here, either we recursed or someone is already trying * to detach, just kill this thread so progress is made; we don't * have anything better to do with it */ /* skip cleanup, our state is likely messed up and we'd just like * to get out alive, also avoids recursion problems, see caveat at * remove_thread below */ terminate_type = TERMINATE_THREAD; } else { config_exit(); /* delete .1config file */ nt_terminate_process(currentThreadOrProcess, KILL_PROC_EXIT_STATUS); ASSERT_NOT_REACHED(); } } /* CHECK: Can a process disallow PROCESS_TERMINATE or THREAD_TERMINATE access even to itself? */ if (TEST(TERMINATE_THREAD, terminate_type)) { exit_process = (!IS_CLIENT_THREAD(dcontext) && is_last_app_thread() && !dynamo_exited); if (!exit_process) { currentThreadOrProcess = NT_CURRENT_THREAD; } } STATS_INC(num_threads_killed); if (TEST(TERMINATE_CLEANUP, terminate_type)) { byte *arguments = os_terminate_static_arguments(exit_process, custom_code, exit_code); /* Make sure debug loop pointer is in expected place since this makes * assumptions about offsets. We don't use the debug loop pointer for * int2e/syscall/wow64 system calls (since they return to the invocation * and can be handled there). For SYSENTER the SYSCALL_PARAM_OFFSET should * match up with arguments such that arguments is pointing to debugme */ ASSERT(does_syscall_ret_to_callsite() || *(byte **)arguments == (byte *)&debug_infinite_loop || (DYNAMO_OPTION(sygate_sysenter) && *(((byte **)arguments)+1) == (byte *)&debug_infinite_loop)); STATS_INC(num_threads_killed_cleanly); /* we enter from several different places, so rewind until top-level kstat */ KSTOP_REWIND_UNTIL(thread_measured); /* now we issue a syscall by number */ /* we can't use issue_system_call_for_app because it relies on * dstack that we should release */ /* FIXME: what happens now if we get some callbacks that are still on * their way? Shouldn't happen since Terminate* are believed to be * non-alertable. */ /* FIXME: we only want the last part of cleanup_and_terminate */ ASSERT(dcontext != NULL); cleanup_and_terminate (dcontext, syscalls[exit_process ? SYS_TerminateProcess : SYS_TerminateThread], (ptr_uint_t) IF_X64_ELSE((exit_process ? NT_CURRENT_PROCESS : NT_CURRENT_THREAD), arguments), (ptr_uint_t) IF_X64_ELSE((exit_process ? (custom_code ? 
exit_code : KILL_PROC_EXIT_STATUS) : KILL_THREAD_EXIT_STATUS), arguments /* no 2nd arg, just a filler */), exit_process, 0, 0); } else { /* may have decided to terminate process */ if (exit_process) { config_exit(); /* delete .1config file */ nt_terminate_process(currentThreadOrProcess, KILL_PROC_EXIT_STATUS); ASSERT_NOT_REACHED(); } else { /* FIXME: this is now very dangerous - we even leave our own state */ /* we should at least remove this thread from the all threads list * to avoid synchronization issues, though we are running the risk of * an infinite loop with a failure in this function and detach on * failure */ if (all_threads != NULL) remove_thread(NULL, get_thread_id()); nt_terminate_thread(currentThreadOrProcess, KILL_THREAD_EXIT_STATUS); ASSERT_NOT_REACHED(); } /* CHECK: who is supposed to clean up the thread's stack? ZwFreeVirtualMemory can be called by another thread waiting on the thread object, hopefully someone will do it */ } ASSERT_NOT_REACHED(); } void os_terminate_with_code(dcontext_t *dcontext, terminate_flags_t terminate_type, int exit_code) { os_terminate_common(dcontext, terminate_type, true, exit_code); } void os_terminate(dcontext_t *dcontext, terminate_flags_t terminate_type) { os_terminate_common(dcontext, terminate_type, false, 0); } void os_tls_init() { /* everything was done in os_init, even TEB TLS slots are initialized to 0 for us */ } void os_tls_exit(local_state_t *local_state, bool other_thread) { /* not needed for windows, everything is done in os_slow_exit including zeroing * the freed TEB tls slots */ } #ifdef CLIENT_INTERFACE /* Allocates num_slots tls slots aligned with alignment align */ bool os_tls_calloc(OUT uint *offset, uint num_slots, uint alignment) { bool need_synch = !dynamo_initialized; if (num_slots == 0) return false; return (bool) tls_calloc(need_synch, offset, num_slots, alignment); } bool os_tls_cfree(uint offset, uint num_slots) { return (bool) tls_cfree(true, offset, num_slots); } #endif void os_thread_init(dcontext_t *dcontext) { NTSTATUS res; DEBUG_DECLARE(bool ok;) os_thread_data_t *ostd = (os_thread_data_t *) heap_alloc(dcontext, sizeof(os_thread_data_t) HEAPACCT(ACCT_OTHER)); dcontext->os_field = (void *) ostd; /* init ostd fields here */ ostd->stack_base = NULL; ostd->stack_top = NULL; ostd->teb_stack_no_longer_valid = false; DEBUG_DECLARE(ok = )get_stack_bounds(dcontext, NULL, NULL); ASSERT(ok); /* case 8721: save the win32 start address and print it in the ldmp */ res = query_win32_start_addr(NT_CURRENT_THREAD, &dcontext->win32_start_addr); if (!NT_SUCCESS(res)) { ASSERT(false && "failed to obtain win32 start address"); dcontext->win32_start_addr = (app_pc)0; } else { LOG(THREAD, LOG_THREADS, 2, "win32 start addr is "PFX"\n", dcontext->win32_start_addr); } aslr_thread_init(dcontext); } void os_thread_exit(dcontext_t *dcontext, bool other_thread) { os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field; aslr_thread_exit(dcontext); #ifdef DEBUG /* for non-debug we do fast exit path and don't free local heap */ /* clean up ostd fields here */ heap_free(dcontext, ostd, sizeof(os_thread_data_t) HEAPACCT(ACCT_OTHER)); #endif } void os_thread_stack_exit(dcontext_t *dcontext) { os_thread_data_t *ostd = (os_thread_data_t *) dcontext->os_field; ASSERT_OWN_MUTEX(true, &thread_initexit_lock); /* see case 3768: a thread's stack is not de-allocated by this process, * so we remove its stack from our executable region here * ref also case 5518 where it is sometimes freed in process, we watch for * that and set stack_base to NULL * 
note: thin_client doesn't have executable or aslr areas, so this is moot. */ if (DYNAMO_OPTION(thin_client)) return; if (IS_CLIENT_THREAD(dcontext)) { /* dstack is the only stack */ return; } if (ostd->stack_base != NULL) { LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1, "os_thread_stack_exit : removing "PFX" - "PFX"\n", ostd->stack_base, ostd->stack_top); ASSERT(ostd->stack_top != NULL); DOCHECK(1, { /* ASSERT that os region matches region stored in ostd */ byte *alloc_base; size_t size = get_allocation_size(ostd->stack_base, &alloc_base); /* Xref case 5877, this assert can fire if the exiting thread has already * exited (resulting in freed stack) before we clean it up. This could be due * to using THREAD_SYNCH_TERMINATED_AND_CLEANED with a synch_with* routine * (no current uses) or a race with detach resuming a translated thread * before cleaning it up. The detach race is harmless so we allow it. */ ASSERT(doing_detach || ((size == (size_t) ALIGN_FORWARD (ostd->stack_top - (ptr_int_t)ostd->stack_base, PAGE_SIZE) || /* PR 252008: for WOW64 nudges we allocate an extra page. */ (size == PAGE_SIZE + (size_t)(ostd->stack_top - ostd->stack_base) && is_wow64_process(NT_CURRENT_PROCESS) && dcontext->nudge_target != NULL)) && ostd->stack_base == alloc_base)); }); /* believe <= win2k frees the stack in process, would like to check * that but we run into problems with stacks that are never freed * (TerminateThread, threads killed by TerminateProcess 0, last thread * calling TerminateProcess, etc.) FIXME figure out a way to add an * assert_curiosity */ /* make sure we use our dcontext (dcontext could belong to another thread * from other_thread_exit) since flushing will end up using this dcontext * for synchronization purposes */ /* do not flush if at process exit since already cleaned up fragment * info (for PR 536058) */ if (!dynamo_exited) { app_memory_deallocation(get_thread_private_dcontext(), ostd->stack_base, ostd->stack_top - ostd->stack_base, true /* own thread_initexit_lock */, false /* not image */); } if (TEST(ASLR_HEAP_FILL, DYNAMO_OPTION(aslr))) { size_t stack_reserved_size = ostd->stack_top - ostd->stack_base; /* verified above with get_allocation_size() this is not * only the committed portion */ aslr_pre_process_free_virtual_memory(dcontext, ostd->stack_base, stack_reserved_size); } } else { LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1, "os_thread_stack_exit : Thread's os stack has already been freed\n"); /* believe >= XP frees the stack out of process */ ASSERT(ostd->stack_top == NULL); ASSERT_CURIOSITY(get_os_version() <= WINDOWS_VERSION_2000); } } void os_thread_under_dynamo(dcontext_t *dcontext) { /* add cur thread to callback list */ ASSERT_MESSAGE(CHKLVL_ASSERTS+1/*expensive*/, "can only act on executing thread", dcontext == get_thread_private_dcontext()); set_asynch_interception(get_thread_id(), true); } void os_thread_not_under_dynamo(dcontext_t *dcontext) { /* remove cur thread from callback list */ ASSERT_MESSAGE(CHKLVL_ASSERTS+1/*expensive*/, "can only act on executing thread", dcontext == get_thread_private_dcontext()); set_asynch_interception(get_thread_id(), false); } void os_process_under_dynamorio_initiate(dcontext_t *dcontext) { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); init_apc_go_native = false; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } void os_process_under_dynamorio_complete(dcontext_t *dcontext) { /* Nothing. 
*/ } void os_process_not_under_dynamorio(dcontext_t *dcontext) { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); init_apc_go_native = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } /*************************************************************************** * THREAD TAKEOVER */ /* Data passed to a thread for its own initialization */ typedef struct _takeover_data_t { app_pc continuation_pc; bool in_progress; thread_id_t tid; #ifndef X64 /* For WOW64 we sometimes need to modify the x64 state: */ HANDLE thread_handle; CONTEXT_64 *cxt64; byte *cxt64_alloc; /* We assume these will never be 0 and use that as a sentinel */ ptr_uint_t memval_stack; ptr_uint_t memval_r14; #endif } takeover_data_t; /* List of threads */ typedef struct _thread_list_t { HANDLE handle; thread_id_t tid; /* may not be known, in which case INVALID_THREAD_ID */ void *user_data; /* set to NULL initially */ } thread_list_t; /* Stored in thread_list_t.user_data */ enum { TAKEOVER_NEW = 0, /* must match initial NULL */ TAKEOVER_TRIED, TAKEOVER_SUCCESS, }; /* Our set of a thread's context is not always visible until the thread is * scheduled. Thus to avoid memory leaks we need global storage that lasts * across calls to os_take_over_all_unknown_threads(). * We also use the table to ensure we (eventually) free any takeover_data_t for * a thread that never gets scheduled. * A final use is for cases where our set context doesn't seem to take * effect except for eip. * We do not hold the table lock while accessing table payloads because * we rely on an invariant that only the owning thread can free its own * data, or another thread during synchall. */ static generic_table_t *takeover_table; #define INIT_HTABLE_SIZE_TAKEOVER 6 /* should remain small */ #define INVALID_PAYLOAD ((void *)(ptr_int_t)-2) /* NULL and -1 are used by table */ static void takeover_table_entry_free(dcontext_t *dcontext, void *e) { takeover_data_t *data = (takeover_data_t *) e; if (e == INVALID_PAYLOAD) return; #ifndef X64 if (data->thread_handle != NULL) close_handle(data->thread_handle); if (data->cxt64_alloc != NULL) { global_heap_free(data->cxt64_alloc, MAX_CONTEXT_64_SIZE HEAPACCT(ACCT_THREAD_MGT)); } #endif global_heap_free(data, sizeof(*data) HEAPACCT(ACCT_THREAD_MGT)); } static void os_take_over_init(void) { takeover_table = generic_hash_create(GLOBAL_DCONTEXT, INIT_HTABLE_SIZE_TAKEOVER, 80 /* load factor: not perf-critical */, HASHTABLE_SHARED | HASHTABLE_PERSISTENT, takeover_table_entry_free _IF_DEBUG("takeover table")); } /* Only called on slow exit */ static void os_take_over_exit(void) { generic_hash_destroy(GLOBAL_DCONTEXT, takeover_table); } /* We need to distinguish a thread intercepted via APC hook but that is in ntdll * code (e.g., waiting for a lock) so we mark threads during init prior to being * added to the main thread table */ void os_take_over_mark_thread(thread_id_t tid) { TABLE_RWLOCK(takeover_table, write, lock); if (generic_hash_lookup(GLOBAL_DCONTEXT, takeover_table, tid) == NULL) generic_hash_add(GLOBAL_DCONTEXT, takeover_table, tid, INVALID_PAYLOAD); TABLE_RWLOCK(takeover_table, write, unlock); } void os_take_over_unmark_thread(thread_id_t tid) { TABLE_RWLOCK(takeover_table, write, lock); if (generic_hash_lookup(GLOBAL_DCONTEXT, takeover_table, tid) == INVALID_PAYLOAD) generic_hash_remove(GLOBAL_DCONTEXT, takeover_table, tid); TABLE_RWLOCK(takeover_table, write, unlock); } /* Returns an array of num_threads_out thread_list_t entries allocated on the * global protected heap with HEAPACCT(ACCT_THREAD_MGT). 
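 * Illustrative caller sketch (hypothetical; it demonstrates the cleanup
 * contract stated in the next line, using DR's own close_handle() and
 * global_heap_free() helpers):
 *   uint i, num;
 *   thread_list_t *list = os_list_threads(&num);
 *   if (list != NULL) {
 *       for (i = 0; i < num; i++)
 *           close_handle(list[i].handle);
 *       global_heap_free(list, num*sizeof(*list) HEAPACCT(ACCT_THREAD_MGT));
 *   }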
* Each HANDLE should be closed prior to freeing the array. */ static thread_list_t * os_list_threads(uint *num_threads_out) { HANDLE hthread; thread_list_t *threads = NULL; NTSTATUS res = nt_thread_iterator_next (NT_CURRENT_PROCESS, NULL, &hthread, THREAD_ALL_ACCESS); ASSERT(num_threads_out != NULL); if (NT_SUCCESS(res)) { uint num_threads = 0; uint num_alloc = 16; threads = global_heap_alloc(num_alloc*sizeof(*threads) HEAPACCT(ACCT_THREAD_MGT)); do { if (num_threads == num_alloc) { uint new_alloc = num_alloc * 2; threads = global_heap_realloc(threads, num_alloc, new_alloc, sizeof(*threads) HEAPACCT(ACCT_THREAD_MGT)); num_alloc = new_alloc; } LOG(GLOBAL, LOG_THREADS, 1, "%s: thread "TIDFMT" handle="PFX"\n", __FUNCTION__, num_threads, hthread); threads[num_threads].handle = hthread; threads[num_threads].tid = INVALID_THREAD_ID; threads[num_threads].user_data = NULL; num_threads++; res = nt_thread_iterator_next (NT_CURRENT_PROCESS, hthread, &hthread, THREAD_ALL_ACCESS); } while (NT_SUCCESS(res)); *num_threads_out = num_threads; threads = global_heap_realloc(threads, num_alloc, num_threads, sizeof(*threads) HEAPACCT(ACCT_THREAD_MGT)); } else { SYSTEM_PROCESSES *sp; uint sysinfo_size; byte *sysinfo; sysinfo = get_system_processes(&sysinfo_size); sp = (SYSTEM_PROCESSES *) sysinfo; while (sysinfo != NULL) { if (is_pid_me((process_id_t)sp->ProcessId)) { uint i; threads = global_heap_alloc(sp->ThreadCount*sizeof(*threads) HEAPACCT(ACCT_THREAD_MGT)); for (i = 0; i < sp->ThreadCount; i++) { thread_id_t tid = (thread_id_t) sp->Threads[i].ClientId.UniqueThread; /* sanity checks (xref i#1220) */ ASSERT(get_process_id() == (process_id_t) sp->Threads[i].ClientId.UniqueProcess); LOG(GLOBAL, LOG_THREADS, 1, "%s: thread "TIDFMT" UniqueThread="PFX"\n", __FUNCTION__, i, tid); threads[i].handle = thread_handle_from_id(tid); ASSERT(threads[i].handle != INVALID_HANDLE_VALUE); threads[i].tid = tid; threads[i].user_data = NULL; } *num_threads_out = sp->ThreadCount; break; } if (sp->NextEntryDelta == 0) break; sp = (SYSTEM_PROCESSES *) (((byte *)sp) + sp->NextEntryDelta); } global_heap_free(sysinfo, sysinfo_size HEAPACCT(ACCT_OTHER)); } return threads; } /* Removes the entry for the executing thread from the table and frees data */ static void thread_attach_remove_from_table(takeover_data_t *data) { TABLE_RWLOCK(takeover_table, write, lock); /* this will free data */ generic_hash_remove(GLOBAL_DCONTEXT, takeover_table, data->tid); TABLE_RWLOCK(takeover_table, write, unlock); } /* Restores memory and the x64 context */ static void thread_attach_restore_full_state(takeover_data_t *data) { #ifndef X64 if (data->cxt64 != NULL) { if (data->memval_stack != 0) { if (!safe_write((void *)(ptr_uint_t)data->cxt64->Rsp, sizeof(data->memval_stack), &data->memval_stack)) { LOG(GLOBAL, LOG_THREADS, 1, "%s: failed to restore *Rsp "PFX"\n", __FUNCTION__, data->cxt64->Rsp); } } if (data->memval_r14 != 0) { if (!safe_write((void *)(ptr_uint_t)data->cxt64->R14, sizeof(data->memval_r14), &data->memval_r14)) { LOG(GLOBAL, LOG_THREADS, 1, "%s: failed to restore *R14 "PFX"\n", __FUNCTION__, data->cxt64->R14); } } if (!thread_set_context_64(data->thread_handle, data->cxt64)) { LOG(GLOBAL, LOG_THREADS, 1, "%s: failed to set x64 context\n", __FUNCTION__); } } #endif } void thread_attach_translate(dcontext_t *dcontext, priv_mcontext_t *mc INOUT, bool restore_memory) { takeover_data_t *data; TABLE_RWLOCK(takeover_table, read, lock); data = (takeover_data_t *) generic_hash_lookup(GLOBAL_DCONTEXT, takeover_table, 
(ptr_uint_t)dcontext->owning_thread); TABLE_RWLOCK(takeover_table, read, unlock); if (data != NULL && data != INVALID_PAYLOAD) { mc->pc = data->continuation_pc; if (restore_memory) thread_attach_restore_full_state(data); } else ASSERT_NOT_REACHED(); /* translating a non-native thread! */ } void thread_attach_context_revert(CONTEXT *cxt INOUT) { takeover_data_t *data; TABLE_RWLOCK(takeover_table, read, lock); data = (takeover_data_t *) generic_hash_lookup(GLOBAL_DCONTEXT, takeover_table, (ptr_uint_t)get_thread_id()); TABLE_RWLOCK(takeover_table, read, unlock); if (data != NULL && data != INVALID_PAYLOAD) { cxt->CXT_XIP = (ptr_uint_t) data->continuation_pc; thread_attach_restore_full_state(data); thread_attach_remove_from_table(data); } else ASSERT_NOT_REACHED(); /* translating a non-native thread! */ } void thread_attach_exit(dcontext_t *dcontext, priv_mcontext_t *mc) { ASSERT(mc->pc == (app_pc) thread_attach_takeover); TABLE_RWLOCK(takeover_table, write, lock); generic_hash_remove(GLOBAL_DCONTEXT, takeover_table, (ptr_uint_t) dcontext->owning_thread); TABLE_RWLOCK(takeover_table, write, unlock); } #ifndef X64 /* i#1141: problems with NtGetContextThread and NtSetContextThread on WOW64 * * For wow64, when a thread is in the wow64 layer, 32-bit NtGetContextThread * goes and reads from the CONTEXT32 (aka WOW64_CONTEXT) stored in userspace * (TEB64->TlsSlots[1]) by the wow64 layer and returns that. Similarly, * NtSetContextThread goes and writes that stored CONTEXT32. * * If a target suspended thread is in the process of saving (on entry to wow64 * layer 64-bit mode), NtGetContextThread will return an incorrect context; * and if the thread is in the process of restoring (on exit back to 32-bit * mode), NtSetContextThread will have some of its values overwritten once the * thread resumes. * * My solution is to get the x64 CONTEXT64, pattern-match the entry and exit, * and set the appropriate registers or memory. Unfortunately this is fragile * with respect to the exact code sequences in * wow64cpu!CpupReturnFromSimulatedCode and wow64cpu!CpuSimulate changing in * the future. * * As part of this I also changed the takeover to not store the context at * suspend time and instead only change Eip then, capturing the context when * the thread resumes. This requires an assume-nothing routine, which * requires initstack: but these takeover points shouldn't be perf-critical. * This really simplifies the wow64 entry/exit corner cases. */ static bool wow64_cases_pre_win10(takeover_data_t *data, CONTEXT_64 *cxt64, HANDLE hthread, thread_id_t tid, app_pc takeover) { /* The WOW64_CONTEXT.Eip won't be correct in two spots: right before it's * saved, and right after it's restored. * It's saved here: * wow64cpu!CpupReturnFromSimulatedCode: * 00000000`78b83c2c 67448b0424 mov r8d,dword ptr [esp] * 00000000`78b83c31 458985bc000000 mov dword ptr [r13+0BCh],r8d * 00000000`78b83c38 83c404 add esp,4 * 00000000`78b83c3b 4189a5c8000000 mov dword ptr [r13+0C8h],esp * And restored here: * wow64cpu!CpuSimulate+0x161: * 00000000`74ff2711 458b8dbc000000 mov r9d,dword ptr [r13+0BCh] * 00000000`74ff2718 45890e mov dword ptr [r14],r9d * 00000000`74ff271b 41ff2e jmp fword ptr [r14] * We have to change either [esp], r8d, r9d, or [r14].
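 * In sketch form (the concrete byte patterns are matched below): Rip at enter instr 1 => retaddr still at [esp], so rewrite [esp]; Rip at enter instr 2 => retaddr in r8d, so rewrite r8d via set-context; Rip at the 2nd-to-last exit instr => retaddr in r9d, so rewrite r9d; Rip at the last exit instr => retaddr already copied to [r14], so rewrite [r14].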
*/ /* We include the subsequent instr for 12 to avoid matching elsewhere in wow64 code */ static const byte WOW64_ENTER_INST12[] = {0x67, 0x44, 0x8b, 0x04, 0x24, 0x45, 0x89, 0x85, 0xbc, 0x00, 0x00, 0x00}; static const byte WOW64_ENTER_INST2[] = {0x45, 0x89, 0x85, 0xbc, 0x00, 0x00, 0x00}; static const byte WOW64_EXIT_INST12[] = {0x45, 0x89, 0x0e, 0x41, 0xff, 0x2e}; static const byte WOW64_EXIT_INST2[] = {0x41, 0xff, 0x2e}; bool changed_x64_cxt = false; /* If in high ntdll64, just exit (memcmp calls will crash on low bits of Rip) */ if (cxt64->Rip >= 0x100000000ULL) return false; /* Corner case #1: 1st instr on entry where retaddr is in [esp] */ if (memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_ENTER_INST12, sizeof(WOW64_ENTER_INST12)) == 0) { if (safe_read((void *)(ptr_uint_t)cxt64->Rsp, sizeof(data->memval_stack), &data->memval_stack) && safe_write((void *)(ptr_uint_t)cxt64->Rsp, sizeof(takeover), &takeover)) { changed_x64_cxt = true; LOG(GLOBAL, LOG_THREADS, 2, "\ttid %d @ wow64 enter1 => changed [esp]\n", tid); } else { data->memval_stack = 0; LOG(GLOBAL, LOG_THREADS, 1, "\ttid %d @ wow64 enter1, but FAILED to change [esp]\n", tid); ASSERT_NOT_REACHED(); } } /* Corner case #2: 2nd instr in entry where retaddr is in r8d */ else if (memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_ENTER_INST2, sizeof(WOW64_ENTER_INST2)) == 0) { uint64 orig_r8 = cxt64->R8; cxt64->R8 = (DWORD64)(ptr_uint_t) takeover; if (thread_set_context_64(hthread, cxt64)) { changed_x64_cxt = true; LOG(GLOBAL, LOG_THREADS, 2, "\ttid %d @ wow64 enter2 => changed r8d\n", tid); } else { LOG(GLOBAL, LOG_THREADS, 1, "\ttid %d @ wow64 enter2, but FAILED to change r8d\n", tid); ASSERT_NOT_REACHED(); } /* Restore so we can use cxt64 to revert if necessary */ cxt64->R8 = orig_r8; } /* Corner case #3: 2nd-to-last instr in exit where retaddr is in r9d */ else if (memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_EXIT_INST12, sizeof(WOW64_EXIT_INST12)) == 0) { uint64 orig_r9 = cxt64->R9; cxt64->R9 = (DWORD64)(ptr_uint_t) takeover; if (thread_set_context_64(hthread, cxt64)) { changed_x64_cxt = true; LOG(GLOBAL, LOG_THREADS, 2, "\ttid %d @ wow64 exit1 => changed r9d\n", tid); } else { LOG(GLOBAL, LOG_THREADS, 1, "\ttid %d @ wow64 exit1, but FAILED to change r9d\n", tid); ASSERT_NOT_REACHED(); } /* Restore so we can use cxt64 to revert if necessary */ cxt64->R9 = orig_r9; } /* Corner case #4: last instr in exit where we already copied retaddr to [r14] */ else if (memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_EXIT_INST2, sizeof(WOW64_EXIT_INST2)) == 0) { if (safe_read((void *)(ptr_uint_t)cxt64->R14, sizeof(data->memval_r14), &data->memval_r14) && safe_write((void *)(ptr_uint_t)cxt64->R14, sizeof(takeover), &takeover)) { changed_x64_cxt = true; LOG(GLOBAL, LOG_THREADS, 2, "\ttid %d @ wow64 exit2 => changed [r14]\n", tid); } else { data->memval_r14 = 0; LOG(GLOBAL, LOG_THREADS, 1, "\ttid %d @ wow64 exit2, but FAILED to change *r14\n", tid); ASSERT_NOT_REACHED(); } } return changed_x64_cxt; } static bool wow64_cases_win10(takeover_data_t *data, CONTEXT_64 *cxt64, HANDLE hthread, thread_id_t tid, app_pc takeover) { /* Eip is saved here (only +3C is due to 0x80 missing: no FloatSave): * wow64cpu!CpupReturnFromSimulatedCode: * 00000000`59da18e6 4987e6 xchg rsp,r14 * 00000000`59da18e9 458b06 mov r8d,dword ptr [r14] * 00000000`59da18ec 4983c604 add r14,4 * 00000000`59da18f0 4589453c mov dword ptr [r13+3Ch],r8d * * And restored in 2 places: * wow64cpu!RunSimulatedCode+0x5f: (from earlier, r14==rsp) * 00000000`59da183f 458b4d3c mov r9d,dword ptr [r13+3Ch] * 
00000000`59da1843 44890c24 mov dword ptr [rsp],r9d * 00000000`59da1847 418b6548 mov esp,dword ptr [r13+48h] * 00000000`59da184b 41ff2e jmp fword ptr [r14] * wow64cpu!RunSimulatedCode+0xfc: * 00000000`59da18dc 458b453c mov r8d,dword ptr [r13+3Ch] * 00000000`59da18e0 4c890424 mov qword ptr [rsp],r8 * 00000000`59da18e4 48cf iretq * We have to change either [esp], r8d, r9d, or [r14]. */ /* We include the subsequent instr for a tighter match */ static const byte WOW64_ENTER_INST12[] = {0x49,0x87,0xe6, 0x45,0x8b,0x06}; static const byte WOW64_ENTER_INST23[] = {0x45,0x8b,0x06, 0x49,0x83,0xc6,0x04}; static const byte WOW64_ENTER_INST34[] = {0x49,0x83,0xc6,0x04, 0x45,0x89,0x45,0x3c}; static const byte WOW64_ENTER_INST4[] = {0x45,0x89,0x45,0x3c}; static const byte WOW64_EXIT1_INST12[] = {0x44,0x89,0x0c,0x24, 0x41,0x8b,0x65,0x48}; static const byte WOW64_EXIT1_INST23[] = {0x41,0x8b,0x65,0x48, 0x41,0xff,0x2e}; static const byte WOW64_EXIT1_INST3[] = {0x41,0xff,0x2e}; static const byte WOW64_EXIT2_INST12[] = {0x4c,0x89,0x04,0x24, 0x48,0xcf}; static const byte WOW64_EXIT2_INST2[] = {0x48,0xcf}; bool changed_x64_cxt = false; /* If in high ntdll64, just exit (memcmp calls will crash on low bits of Rip) */ if (cxt64->Rip >= 0x100000000ULL) return false; /* Corner case #1: 1st instr on entry where retaddr is in [esp] */ if (memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_ENTER_INST12, sizeof(WOW64_ENTER_INST12)) == 0) { if (safe_read((void *)(ptr_uint_t)cxt64->Rsp, sizeof(data->memval_stack), &data->memval_stack) && safe_write((void *)(ptr_uint_t)cxt64->Rsp, sizeof(takeover), &takeover)) { changed_x64_cxt = true; LOG(GLOBAL, LOG_THREADS, 2, "\ttid %d @ wow64 enter1 => changed [esp]\n", tid); } else { data->memval_stack = 0; LOG(GLOBAL, LOG_THREADS, 1, "\ttid %d @ wow64 enter1, but FAILED to change [esp]\n", tid); ASSERT_NOT_REACHED(); } } /* Corner case #2: 2nd instr in entry where retaddr is in [r14] */ else if (memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_ENTER_INST23, sizeof(WOW64_ENTER_INST23)) == 0) { if (safe_read((void *)(ptr_uint_t)cxt64->R14, sizeof(data->memval_stack), &data->memval_stack) && safe_write((void *)(ptr_uint_t)cxt64->R14, sizeof(takeover), &takeover)) { changed_x64_cxt = true; LOG(GLOBAL, LOG_THREADS, 2, "\ttid %d @ wow64 enter1 => changed [r14]\n", tid); } else { data->memval_stack = 0; LOG(GLOBAL, LOG_THREADS, 1, "\ttid %d @ wow64 enter1, but FAILED to change [r14]\n", tid); ASSERT_NOT_REACHED(); } } /* Corner case #3: 3rd or 4th instr in entry where retaddr is in r8d */ else if (memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_ENTER_INST34, sizeof(WOW64_ENTER_INST34)) == 0 || memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_ENTER_INST4, sizeof(WOW64_ENTER_INST4)) == 0) { uint64 orig_r8 = cxt64->R8; cxt64->R8 = (DWORD64)(ptr_uint_t) takeover; if (thread_set_context_64(hthread, cxt64)) { changed_x64_cxt = true; LOG(GLOBAL, LOG_THREADS, 2, "\ttid %d @ wow64 enter2 => changed r8d\n", tid); } else { LOG(GLOBAL, LOG_THREADS, 1, "\ttid %d @ wow64 enter2, but FAILED to change r8d\n", tid); ASSERT_NOT_REACHED(); } /* Restore so we can use cxt64 to revert if necessary */ cxt64->R8 = orig_r8; } /* Corner case #4: 3rd-to-last instr in 1st exit where retaddr is in r9d */ else if (memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_EXIT1_INST12, sizeof(WOW64_EXIT1_INST12)) == 0) { uint64 orig_r9 = cxt64->R9; cxt64->R9 = (DWORD64)(ptr_uint_t) takeover; if (thread_set_context_64(hthread, cxt64)) { changed_x64_cxt = true; LOG(GLOBAL, LOG_THREADS, 2, "\ttid %d @ wow64 exit1 => changed r9d\n", tid); } else { 
LOG(GLOBAL, LOG_THREADS, 1, "\ttid %d @ wow64 exit1, but FAILED to change r9d\n", tid); ASSERT_NOT_REACHED(); } /* Restore so we can use cxt64 to revert if necessary */ cxt64->R9 = orig_r9; } /* Corner case #5: last 2 instrs in 1st exit where already copied retaddr to [r14] */ else if (memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_EXIT1_INST23, sizeof(WOW64_EXIT1_INST23)) == 0 || memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_EXIT1_INST3, sizeof(WOW64_EXIT1_INST3)) == 0) { if (safe_read((void *)(ptr_uint_t)cxt64->R14, sizeof(data->memval_r14), &data->memval_r14) && safe_write((void *)(ptr_uint_t)cxt64->R14, sizeof(takeover), &takeover)) { changed_x64_cxt = true; LOG(GLOBAL, LOG_THREADS, 2, "\ttid %d @ wow64 exit2 => changed [r14]\n", tid); } else { data->memval_r14 = 0; LOG(GLOBAL, LOG_THREADS, 1, "\ttid %d @ wow64 exit2, but FAILED to change *r14\n", tid); ASSERT_NOT_REACHED(); } } /* Corner case #6: 2nd-to-last instr in 2nd exit where retaddr is in r8d */ else if (memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_EXIT2_INST12, sizeof(WOW64_EXIT2_INST12)) == 0) { uint64 orig_r8 = cxt64->R8; cxt64->R8 = (DWORD64)(ptr_uint_t) takeover; if (thread_set_context_64(hthread, cxt64)) { changed_x64_cxt = true; LOG(GLOBAL, LOG_THREADS, 2, "\ttid %d @ wow64 exit1 => changed r8d\n", tid); } else { LOG(GLOBAL, LOG_THREADS, 1, "\ttid %d @ wow64 exit1, but FAILED to change r8d\n", tid); ASSERT_NOT_REACHED(); } /* Restore so we can use cxt64 to revert if necessary */ cxt64->R8 = orig_r8; } /* Corner case #7: last instr in 2nd exit where already copied retaddr to [esp] */ else if (memcmp((byte *)(ptr_uint_t)cxt64->Rip, WOW64_EXIT2_INST2, sizeof(WOW64_EXIT2_INST2)) == 0) { if (safe_read((void *)(ptr_uint_t)cxt64->Rsp, sizeof(data->memval_stack), &data->memval_stack) && safe_write((void *)(ptr_uint_t)cxt64->Rsp, sizeof(takeover), &takeover)) { changed_x64_cxt = true; LOG(GLOBAL, LOG_THREADS, 2, "\ttid %d @ wow64 exit2 => changed [rsp]\n", tid); } else { data->memval_stack = 0; LOG(GLOBAL, LOG_THREADS, 1, "\ttid %d @ wow64 exit2, but FAILED to change *rsp\n", tid); ASSERT_NOT_REACHED(); } } return changed_x64_cxt; } static void os_take_over_wow64_extra(takeover_data_t *data, HANDLE hthread, thread_id_t tid, CONTEXT *cxt32) { CONTEXT_64 *cxt64; bool changed_x64_cxt = false; app_pc takeover = thread_attach_takeover; byte * buf; # ifdef DEBUG /* Match the wow64 syscall call*: * 7d8513eb 64ff15c0000000 call dword ptr fs:[000000c0] */ static const byte WOW64_SYSCALL_CALL[] = {0x64, 0xff, 0x15, 0xc0, 0x00, 0x00, 0x00}; # endif if (!is_wow64_process(NT_CURRENT_PROCESS)) return; /* WOW64 context setting is fragile: we need the raw x64 context as well. * We can't easily use nt_initialize_context so we manually set the flags. */ buf = (byte *) global_heap_alloc(MAX_CONTEXT_64_SIZE HEAPACCT(ACCT_THREAD_MGT)); cxt64 = (CONTEXT_64 *) ALIGN_FORWARD(buf, 0x10); cxt64->ContextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER; if (!thread_get_context_64(hthread, cxt64)) { LOG(GLOBAL, LOG_THREADS, 1, "\tfailed to get x64 cxt for thread "TIDFMT"\n", tid); ASSERT_NOT_REACHED(); global_heap_free(buf, MAX_CONTEXT_64_SIZE HEAPACCT(ACCT_THREAD_MGT)); return; } LOG(GLOBAL, LOG_THREADS, 2, "x64 context for thread "TIDFMT": xip is "HEX64_FORMAT_STRING ", xsp="HEX64_FORMAT_STRING, tid, cxt64->Rip, cxt64->Rsp); if (cxt64->SegCs == CS32_SELECTOR || /* XXX i#1637: on xp64 I have seen the x64 NtGetContextThread return * success but fill cxt64 with zeroes. We hope this only happens when * truly in the kernel.
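 * We conservatively treat a zeroed Rip like being in x86 mode and skip the x64 fixups below.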
*/ cxt64->Rip == 0) { /* In x86 mode, so not inside the wow64 layer. Context setting should * work fine. */ global_heap_free(buf, MAX_CONTEXT_64_SIZE HEAPACCT(ACCT_THREAD_MGT)); return; } /* Could be in ntdll or user32 or anywhere a syscall is made, so we don't * assert is_in_ntdll, but we do check that it's the wow64 syscall call*: */ # ifdef DEBUG if (get_os_version() >= WINDOWS_VERSION_10) { ASSERT_CURIOSITY(*(app_pc*)(data->continuation_pc - CTI_IND1_LENGTH - sizeof(app_pc)) == wow64_syscall_call_tgt); } else { ASSERT_CURIOSITY(memcmp(data->continuation_pc - sizeof(WOW64_SYSCALL_CALL), WOW64_SYSCALL_CALL, sizeof(WOW64_SYSCALL_CALL)) == 0); } # endif if (get_os_version() >= WINDOWS_VERSION_10) changed_x64_cxt = wow64_cases_win10(data, cxt64, hthread, tid, takeover); else changed_x64_cxt = wow64_cases_pre_win10(data, cxt64, hthread, tid, takeover); if (changed_x64_cxt) { /* We'll need the handle in case we have to revert/restore the x64 context. * We shouldn't have to undo any of these changes on a successful * takeover. */ duplicate_handle(NT_CURRENT_PROCESS, hthread, NT_CURRENT_PROCESS, &data->thread_handle, 0, 0, DUPLICATE_SAME_ACCESS|DUPLICATE_SAME_ATTRIBUTES); data->cxt64 = cxt64; data->cxt64_alloc = buf; } else { global_heap_free(buf, MAX_CONTEXT_64_SIZE HEAPACCT(ACCT_THREAD_MGT)); } } #endif /* On success, returns true and leaves thread suspended. */ static bool os_take_over_thread(dcontext_t *dcontext, HANDLE hthread, thread_id_t tid, bool suspended) { bool success = true; char buf[MAX_CONTEXT_SIZE]; CONTEXT *cxt = nt_initialize_context(buf, CONTEXT_DR_STATE); ASSERT(tid == thread_id_from_handle(hthread)); if ((suspended || nt_thread_suspend(hthread, NULL)) && NT_SUCCESS(nt_get_context(hthread, cxt))) { /* Rather than try to emulate clone handling by putting this * on the stack and thus risking transparency violations, we * just allocate it on our heap and put it into a hashtable. * * Originally I tried storing the context here, pointing at it in a * register, and swapping to dstack now, for a simpler takeover routine: * but the state changes between here and the takeover routine, * resulting in corrupted registers. Thus, we have the takeover routine * assume nothing and capture the context once the thread gets there. * Then our only problem is the eip setting not sticking, meaning we * don't take over at all. */ NTSTATUS res; takeover_data_t *data; void *already_taken_over; /* i#1443: avoid self-interp on threads that are waiting at our hook * for DR to initialize. We have to check two things: first, whether * the context is in DR; second, whether flagged (to cover the thread * being in ntdll or vsyscall). */ if (is_in_dynamo_dll((app_pc)cxt->CXT_XIP) || new_thread_is_waiting_for_dr_init(tid, (app_pc)cxt->CXT_XIP)) { LOG(GLOBAL, LOG_THREADS, 1, "\tthread "TIDFMT" is already waiting\n", tid); return true; /* it's waiting for us to take it over */ } /* Avoid double-takeover. * N.B.: is_dynamo_address() on xip and xsp is not sufficient as * a newly set context may not show up until the thread is scheduled. * We still want to check them to catch threads created after * our APC hook was in place. */ TABLE_RWLOCK(takeover_table, read, lock); already_taken_over = generic_hash_lookup(GLOBAL_DCONTEXT, takeover_table, (ptr_uint_t)tid); TABLE_RWLOCK(takeover_table, read, unlock); if (already_taken_over != NULL || is_dynamo_address((byte *)cxt->CXT_XIP)) { /* Thread was never scheduled on last takeover, or has not * yet added itself to main thread table.
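 * Both cases are handled below: if the earlier takeover visibly reverted we retry it; otherwise we leave the thread alone and report success.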
*/ LOG(GLOBAL, LOG_THREADS, 1, "\tthread "TIDFMT" partially taken over already; pc="PFX"\n", tid, cxt->CXT_XIP); if (already_taken_over != NULL && already_taken_over != INVALID_PAYLOAD && !is_dynamo_address((byte *)cxt->CXT_XIP) && /* Rule out thread initializing but currently in ntdll */ !((takeover_data_t *)already_taken_over)->in_progress && cxt->CXT_XIP != (ptr_uint_t) thread_attach_takeover) { /* XXX: I see cases where my setcontext succeeds, immediate getcontext * confirms, and then later the thread's context is back to native * and we never take it over! So we detect here and try again. * See also comment above. */ data = (takeover_data_t *) already_taken_over; LOG(GLOBAL, LOG_THREADS, 1, "\tthread "TIDFMT" reverted!", tid); /* Now that i#1141 is fixed this shouldn't happen: we'd like to * know if it does. */ ASSERT_CURIOSITY(false && "thread takeover context reverted!"); } else return true; } else { thread_record_t *tr = thread_lookup(tid); data = (takeover_data_t *) global_heap_alloc(sizeof(*data) HEAPACCT(ACCT_THREAD_MGT)); } LOG(GLOBAL, LOG_THREADS, 1, "thread "TIDFMT" context:\n", tid); memset(data, 0, sizeof(*data)); data->tid = tid; data->continuation_pc = (app_pc) cxt->CXT_XIP; cxt->CXT_XIP = (ptr_uint_t) thread_attach_takeover; #ifndef X64 os_take_over_wow64_extra(data, hthread, tid, cxt); #endif /* See comments above: not safe to change any other regs here */ ASSERT(TESTALL(CONTEXT_DR_STATE, cxt->ContextFlags)); res = nt_set_context(hthread, cxt); if (!NT_SUCCESS(res)) { LOG(GLOBAL, LOG_THREADS, 1, "\tfailed to set context for thread "TIDFMT" with error %d\n", tid, res); success = false; global_heap_free(data, sizeof(*data) HEAPACCT(ACCT_THREAD_MGT)); if (!nt_thread_resume(hthread, NULL)) { LOG(GLOBAL, LOG_THREADS, 1, "\tfailed to resume thread "TIDFMT"\n", tid); ASSERT_NOT_REACHED(); } } else { if (already_taken_over == NULL) { TABLE_RWLOCK(takeover_table, write, lock); generic_hash_add(GLOBAL_DCONTEXT, takeover_table, tid, data); TABLE_RWLOCK(takeover_table, write, unlock); } LOG(GLOBAL, LOG_THREADS, 1, "\tset context for thread "TIDFMT"; old xip="PFX", xsp="PFX", data=" PFX"\n", tid, data->continuation_pc, cxt->CXT_XSP, data); /* leave thread suspended */ } } else { LOG(GLOBAL, LOG_THREADS, 1, "\tfailed to suspend/query thread "TIDFMT"\n", tid); success = false; } return success; } bool os_thread_take_over_suspended_native(dcontext_t *dcontext) { thread_record_t *tr = dcontext->thread_record; if (!is_thread_currently_native(tr)) return false; /* If the app voluntarily stopped, wait for it to ask to start again */ if (dcontext->currently_stopped) return false; /* In case of failure (xref all the issues with setting the context), we * use this to signal syscall_while_native() to take this thread * over if it makes it to one of our syscall hooks. * The thread will still be considered is_thread_currently_native(). */ tr->retakeover = true; return os_take_over_thread(dcontext, tr->handle, tr->id, true/*suspended*/); } /* Called for os-specific takeover of a secondary thread from the one * that called dr_app_setup(). */ void os_thread_take_over_secondary(dcontext_t *dcontext) { /* Nothing yet. */ } bool os_thread_re_take_over(void) { /* Nothing to do. 
*/ return false; } bool os_take_over_all_unknown_threads(dcontext_t *dcontext) { uint i, iters; const uint MAX_ITERS = 16; uint num_threads = 0; thread_list_t *threads = NULL; thread_id_t my_id = get_thread_id(); bool took_over_all = true, found_new_threads = true; /* ensure user_data starts out how we think it does */ ASSERT(TAKEOVER_NEW == (ptr_uint_t) NULL); mutex_lock(&thread_initexit_lock); /* Need to iterate until no new threads, w/ an escape valve of max iters. * This ends up looking similar to synch_with_all_threads(), though it has * some key differences, making it non-trivial to share code. * We need to do at least 2 iters no matter what, but dr_app_start or * external attach should be considered heavyweight events in any case. */ for (iters = 0; found_new_threads && iters < MAX_ITERS; iters++) { uint num_new_threads, j; thread_list_t *new_threads = os_list_threads(&num_new_threads); LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: iteration %d\n", iters); if (new_threads == NULL) { took_over_all = false; break; } found_new_threads = false; for (i = 0; i < num_new_threads; i++) { if (new_threads[i].tid == INVALID_THREAD_ID) new_threads[i].tid = thread_id_from_handle(new_threads[i].handle); } if (threads != NULL) { /* Copy user_data over. Yeah, nested loop: but hashtable seems overkill. */ for (i = 0; i < num_threads; i++) { for (j = 0; j < num_new_threads; j++) { if (new_threads[j].tid == threads[i].tid) new_threads[j].user_data = threads[i].user_data; } if ((ptr_uint_t)threads[i].user_data == TAKEOVER_SUCCESS) close_handle(threads[i].handle); } global_heap_free(threads, num_threads*sizeof(*threads) HEAPACCT(ACCT_THREAD_MGT)); } threads = new_threads; num_threads = num_new_threads; for (i = 0; i < num_threads; i++) { thread_record_t *tr; if ((ptr_uint_t)threads[i].user_data == TAKEOVER_NEW) { found_new_threads = true; threads[i].user_data = (void *)(ptr_uint_t) TAKEOVER_TRIED; tr = thread_lookup(threads[i].tid); if ((tr == NULL || /* Re-takeover known threads that are currently native as well. * XXX i#95: we need a synchall-style loop for known threads as * they can be in DR for syscall hook handling. */ (is_thread_currently_native(tr) IF_CLIENT_INTERFACE(&& !IS_CLIENT_THREAD(tr->dcontext)))) && threads[i].tid != my_id) { LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: taking over thread "TIDFMT"\n", threads[i].tid); if (os_take_over_thread(dcontext, threads[i].handle, threads[i].tid, false/*!suspended*/)) { threads[i].user_data = (void *)(ptr_uint_t) TAKEOVER_SUCCESS; } else { took_over_all = false; /* We want to know when this happens. We might expect * it with injected logon/logoff threads: let's see. */ ASSERT_CURIOSITY(false && "failed to take over a thread!"); } } } if ((ptr_uint_t)threads[i].user_data != TAKEOVER_SUCCESS) close_handle(threads[i].handle); } } /* Potential risk of a thread from an earlier list somehow not showing up on * the final list: but shouldn't happen unless the thread is destroyed in * which case it's ok to never resume it. 
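 * (Such a thread would simply be left suspended; since it must already be exiting, that is benign.)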
*/ for (i = 0; i < num_threads; i++) { if ((ptr_uint_t)threads[i].user_data == TAKEOVER_SUCCESS) { if (!nt_thread_resume(threads[i].handle, NULL)) { LOG(GLOBAL, LOG_THREADS, 1, "\tfailed to resume thread "TIDFMT"\n", threads[i].tid); took_over_all = false; ASSERT_NOT_REACHED(); } close_handle(threads[i].handle); } } global_heap_free(threads, num_threads*sizeof(*threads) HEAPACCT(ACCT_THREAD_MGT)); if (iters == MAX_ITERS) { LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: hit max iters %d\n", iters); took_over_all = false; } mutex_unlock(&thread_initexit_lock); return !took_over_all; } /* Previously-unknown thread is redirected here to initialize itself. */ void thread_attach_setup(priv_mcontext_t *mc) { dcontext_t *dcontext; takeover_data_t *data; ENTERING_DR(); TABLE_RWLOCK(takeover_table, write, lock); data = (takeover_data_t *) generic_hash_lookup(GLOBAL_DCONTEXT, takeover_table, (ptr_uint_t)get_thread_id()); TABLE_RWLOCK(takeover_table, write, unlock); if (data == NULL || data == INVALID_PAYLOAD) { ASSERT_NOT_REACHED(); /* in release better to let thread run native than to crash */ EXITING_DR(); return; } /* Preclude double takeover if we become suspended while in ntdll */ data->in_progress = true; /* We come here for native_exec threads and dr_app_stop threads, which are * already initialized. */ if (!is_thread_initialized()) { int rc = dynamo_thread_init(NULL, mc _IF_CLIENT_INTERFACE(false)); ASSERT(rc == SUCCESS); } dcontext = get_thread_private_dcontext(); ASSERT(dcontext != NULL); dynamo_thread_under_dynamo(dcontext); /* clear retakeover field, if we came from os_thread_take_over_suspended_native() */ dcontext->thread_record->retakeover = false; /* A native_exec_syscalls hook on NtCallbackReturn could have left the * at_syscall flag set, so make sure to clear it. */ set_at_syscall(dcontext, false); LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: thread "TIDFMT", start pc "PFX"\n", get_thread_id(), data->continuation_pc); ASSERT(os_using_app_state(dcontext)); dcontext->next_tag = data->continuation_pc; *get_mcontext(dcontext) = *mc; thread_attach_remove_from_table(data); data = NULL; transfer_to_dispatch(dcontext, get_mcontext(dcontext), false/*!full_DR_state*/); ASSERT_NOT_REACHED(); } /*************************************************************************** * CLIENT THREADS */ #ifdef CLIENT_SIDELINE /* PR 222812: tied to sideline usage */ /* i#41/PR 222812: client threads * * thread must have dcontext since many API routines require one and we * don't expose GLOBAL_DCONTEXT (xref PR 243008, PR 216936, PR 536058) * * reversed the old design of not using dstack (partly b/c want dcontext) * and avoiding needing a temp stack by just creating dstack up front, * like is done on linux. dstack should be big enough for client threads * (xref PR 202669) * * reversed the old design of explicit dr_terminate_client_thread(): now * the thread is auto-terminated and stack cleaned up on return from run * function */ /* FIXME PR 210591: transparency issues: * 1) All dlls will be notified of thread creation by DLL_THREAD_ATTACH * => this is now solved by not running the Ldr code: intercept_new_thread() * just comes straight here * 2) The thread will show up in the list of threads accessed by * NtQuerySystemInformation's SystemProcessesAndThreadsInformation structure.
* 3) check_sole_thread() * 4) Vista+'s NtGetNextThread and NtGetNextProcess * (which I am assuming expose the iterator interface of * PsGetNextProcessThread, should check) */ void client_thread_target(void *param) { /* Thread was initialized in intercept_new_thread() */ dcontext_t *dcontext = get_thread_private_dcontext(); /* We stored the func and args at base of dstack and param points at them */ void **arg_buf = (void **) param; void (*func)(void *param) = (void (*)(void*)) convert_data_to_function(arg_buf[0]); void *arg = arg_buf[1]; byte *dstack = dcontext->dstack; ASSERT(IS_CLIENT_THREAD(dcontext)); LOG(THREAD, LOG_ALL, 1, "\n***** CLIENT THREAD %d *****\n\n", get_thread_id()); LOG(THREAD, LOG_ALL, 1, "func="PFX", arg="PFX"\n", func, arg); (*func)(arg); LOG(THREAD, LOG_ALL, 1, "\n***** CLIENT THREAD %d EXITING *****\n\n", get_thread_id()); os_terminate(dcontext, TERMINATE_THREAD|TERMINATE_CLEANUP); } DR_API bool dr_create_client_thread(void (*func)(void *param), void *arg) { dcontext_t *dcontext = get_thread_private_dcontext(); byte *dstack = stack_alloc(DYNAMORIO_STACK_SIZE, NULL); HANDLE hthread; bool res; thread_id_t tid; void *arg_buf[2]; LOG(THREAD, LOG_ASYNCH, 1, "dr_create_client_thread: dstack for new thread is "PFX"\n", dstack); pre_second_thread(); /* We store the func and args at base of dstack for client_thread_target */ arg_buf[0] = (void *) func; arg_buf[1] = arg; /* FIXME PR 225714: does this work on Vista? */ hthread = our_create_thread_have_stack(NT_CURRENT_PROCESS, IF_X64_ELSE(true, false), (void *)client_thread_target, NULL, arg_buf, BUFFER_SIZE_BYTES(arg_buf), dstack, DYNAMORIO_STACK_SIZE, false, &tid); CLIENT_ASSERT(hthread != INVALID_HANDLE_VALUE, "error creating thread"); if (hthread == INVALID_HANDLE_VALUE) { stack_free(dstack, DYNAMORIO_STACK_SIZE); return false; } /* FIXME: what about all of our check_sole_thread() checks? */ res = close_handle(hthread); CLIENT_ASSERT(res, "error closing thread handle"); return res; } #endif /* CLIENT_SIDELINE */ /* PR 222812: tied to sideline usage */ int get_os_version() { return os_version; } void get_os_version_ex(int *version OUT, uint *service_pack_major OUT, uint *service_pack_minor OUT) { if (version != NULL) *version = os_version; if (service_pack_major != NULL) *service_pack_major = os_service_pack_major; if (service_pack_minor != NULL) *service_pack_minor = os_service_pack_minor; } bool is_in_dynamo_dll(app_pc pc) { ASSERT(dynamo_dll_start != NULL && dynamo_dll_end != NULL); return (pc >= dynamo_dll_start && pc < dynamo_dll_end); } static char * mem_state_string(uint state) { switch (state) { case 0: return "none"; case MEM_COMMIT: return "COMMIT"; case MEM_FREE: return "FREE"; case MEM_RESERVE: return "RESERVE"; } return "(error)"; } static char * mem_type_string(uint type) { switch (type) { case 0: return "none"; case MEM_IMAGE: return "IMAGE"; case MEM_MAPPED: return "MAPPED"; case MEM_PRIVATE: return "PRIVATE"; } return "(error)"; } char * prot_string(uint prot) { uint ignore_extras = prot & ~PAGE_PROTECTION_QUALIFIERS; switch (ignore_extras) { case PAGE_NOACCESS: return "----"; case PAGE_READONLY: return "r---"; case PAGE_READWRITE: return "rw--"; case PAGE_WRITECOPY: return "rw-c"; case PAGE_EXECUTE: return "--x-"; case PAGE_EXECUTE_READ: return "r-x-"; case PAGE_EXECUTE_READWRITE: return "rwx-"; case PAGE_EXECUTE_WRITECOPY: return "rwxc"; } return "(error)"; } static bool prot_is_readable(uint prot) { prot &= ~PAGE_PROTECTION_QUALIFIERS; /* FIXME: consider just E to be unreadable?
* do not do exclusions, sometimes prot == 0 or something */ switch (prot) { case PAGE_READONLY: case PAGE_READWRITE: case PAGE_WRITECOPY: case PAGE_EXECUTE: case PAGE_EXECUTE_READ: case PAGE_EXECUTE_READWRITE: case PAGE_EXECUTE_WRITECOPY: return true; } return false; } bool prot_is_writable(uint prot) { prot &= ~PAGE_PROTECTION_QUALIFIERS; return (prot == PAGE_READWRITE || prot == PAGE_WRITECOPY || prot == PAGE_EXECUTE_READWRITE || prot == PAGE_EXECUTE_WRITECOPY); } bool prot_is_executable(uint prot) { prot &= ~PAGE_PROTECTION_QUALIFIERS; return (prot == PAGE_EXECUTE || prot == PAGE_EXECUTE_READ || prot == PAGE_EXECUTE_READWRITE || prot == PAGE_EXECUTE_WRITECOPY); } /* true when page hasn't been written to */ bool prot_is_copyonwrite(uint prot) { prot &= ~PAGE_PROTECTION_QUALIFIERS; /* although really providing an enumeration, the known PAGE_ * values use separate bit flags. We use TESTANY in case new * PAGE_PROTECTION_QUALIFIERS show up. */ return TESTANY(PAGE_WRITECOPY|PAGE_EXECUTE_WRITECOPY, prot); } /* true when page is a guard page and hasn't been touched */ bool prot_is_guard(uint prot) { return TEST(PAGE_GUARD, prot); } /* translate platform independent protection bits to native flags */ int memprot_to_osprot(uint prot) { uint os_prot = 0; if (TEST(MEMPROT_EXEC, prot)) { if (!TEST(MEMPROT_READ, prot)) { ASSERT(!TEST(MEMPROT_WRITE, prot)); os_prot = PAGE_EXECUTE; } else if (TEST(MEMPROT_WRITE, prot)) os_prot = PAGE_EXECUTE_READWRITE; else os_prot = PAGE_EXECUTE_READ; } else if (TEST(MEMPROT_READ, prot)) { if (TEST(MEMPROT_WRITE, prot)) os_prot = PAGE_READWRITE; else os_prot = PAGE_READONLY; } else os_prot = PAGE_NOACCESS; if (TEST(MEMPROT_GUARD, prot)) os_prot |= PAGE_GUARD; return os_prot; } /* translate native flags to platform independent protection bits */ int osprot_to_memprot(uint prot) { uint mem_prot = 0; if (prot_is_readable(prot)) mem_prot |= MEMPROT_READ; if (prot_is_writable(prot)) mem_prot |= MEMPROT_WRITE; if (prot_is_executable(prot)) mem_prot |= MEMPROT_EXEC; if (prot_is_guard(prot)) mem_prot |= MEMPROT_GUARD; return mem_prot; } int osprot_add_writecopy(uint prot) { int pr = prot & ~PAGE_PROTECTION_QUALIFIERS; switch (pr) { case PAGE_READWRITE: return (prot & (~pr)) | PAGE_WRITECOPY; case PAGE_EXECUTE_READWRITE: return (prot & (~pr)) | PAGE_EXECUTE_WRITECOPY; default: ASSERT_NOT_REACHED(); } return prot; } /* does not change prot if it doesn't already have read access */ static uint osprot_add_write(uint prot) { int pr = prot & ~PAGE_PROTECTION_QUALIFIERS; switch (pr) { case PAGE_READONLY: return (prot & (~pr)) | PAGE_READWRITE; case PAGE_EXECUTE_READ: return (prot & (~pr)) | PAGE_EXECUTE_READWRITE; } return prot; } /* returns osprot flags preserving all native protection flags except * for RWX, which are replaced according to memprot */ uint osprot_replace_memprot(uint old_osprot, uint memprot) { uint old_qualifiers = old_osprot & PAGE_PROTECTION_QUALIFIERS; uint new_osprot = memprot_to_osprot(memprot); /* preserve any old WRITECOPY 'flag' if page hasn't been touched */ if (prot_is_copyonwrite(old_osprot) && prot_is_writable(new_osprot)) new_osprot = osprot_add_writecopy(new_osprot); new_osprot |= old_qualifiers; return new_osprot; } void dump_mbi(file_t file, MEMORY_BASIC_INFORMATION *mbi, bool dump_xml) { print_file(file, dump_xml ? 
"\t\tBaseAddress= \""PFX"\"\n" "\t\tAllocationBase= \""PFX"\"\n" "\t\tAllocationProtect= \"0x%08x %s\"\n" "\t\tRegionSize= \"0x%08x\"\n" "\t\tState= \"0x%08x %s\"\n" "\t\tProtect= \"0x%08x %s\"\n" "\t\tType= \"0x%08x %s\"\n" : "BaseAddress: "PFX"\n" "AllocationBase: "PFX"\n" "AllocationProtect: 0x%08x %s\n" "RegionSize: 0x%08x\n" "State: 0x%08x %s\n" "Protect: 0x%08x %s\n" "Type: 0x%08x %s\n", mbi->BaseAddress, mbi->AllocationBase, mbi->AllocationProtect, prot_string(mbi->AllocationProtect), mbi->RegionSize, mbi->State, mem_state_string(mbi->State), mbi->Protect, prot_string(mbi->Protect), mbi->Type, mem_type_string(mbi->Type)); } void dump_mbi_addr(file_t file, app_pc target, bool dump_xml) { MEMORY_BASIC_INFORMATION mbi; size_t len; len = query_virtual_memory(target, &mbi, sizeof(mbi)); if (len == sizeof(mbi)) dump_mbi(file, &mbi, dump_xml); else { if (dump_xml) { print_file(file, "<-- Unable to dump mbi for addr "PFX"\n -->", target); } else { print_file(file, "Unable to dump mbi for addr "PFX"\n", target); } } } /* FIXME: * We need to be able to distinguish our own pid from that of a child * process. We observe that after CreateProcess a child has pid of 0 (as * determined by process_id_from_handle, calling NtQueryInformationProcess). * For our current injection methods pid is always set when we take over, * but for future early-injection methods what if the pid is still 0 when * we start executing in the process' context? */ bool is_pid_me(process_id_t pid) { return (pid == get_process_id()); } bool is_phandle_me(HANDLE phandle) { /* make the common case of NT_CURRENT_PROCESS faster */ if (phandle == NT_CURRENT_PROCESS) { return true; } else { /* we know of no way to detect whether two handles point to the same object, * so we go to pid */ process_id_t pid = process_id_from_handle(phandle); return is_pid_me(pid); } } /* used only in get_dynamorio_library_path() but file level namespace * so it is easily available to windbg scripts */ static char dynamorio_library_path[MAXIMUM_PATH]; /* get full path to our own library, (cached), used for forking and message file name */ char* get_dynamorio_library_path() { /* This operation could be dangerous, so it is still better that we do it * once at startup when there is a single thread only */ if (!dynamorio_library_path[0]) { /* not cached */ /* get_module_name can take any pc in the dll, * so we simply take the address of this function * instead of using get_module_handle to find the base */ app_pc pb = (app_pc)&get_dynamorio_library_path; /* here's where we set the library path */ ASSERT(!dr_earliest_injected); /* should be already set for earliest */ get_module_name(pb, dynamorio_library_path, MAXIMUM_PATH); } return dynamorio_library_path; } /* based on a process handle to a process that is not yet running, * verify whether we should be taking control over it */ /* if target process should be injected into returns true, and * inject_settings is set if non-NULL */ bool should_inject_into_process(dcontext_t *dcontext, HANDLE process_handle, int *rununder_mask, /* OPTIONAL OUT */ inject_setting_mask_t *inject_settings /* OPTIONAL OUT */) { bool inject = false; synchronize_dynamic_options(); if (DYNAMO_OPTION(follow_children) || DYNAMO_OPTION(follow_explicit_children) || DYNAMO_OPTION(follow_systemwide)) { inject_setting_mask_t should_inject = systemwide_should_inject(process_handle, rununder_mask); if (DYNAMO_OPTION(follow_systemwide) && TEST(INJECT_TRUE, should_inject)) { LOG(THREAD, LOG_SYSCALLS|LOG_THREADS, 1, "\tconfigured child should be 
injected\n"); inject = true; } if (!inject && DYNAMO_OPTION(follow_explicit_children) && TESTALL(INJECT_EXPLICIT|INJECT_TRUE, should_inject)) { LOG(THREAD, LOG_SYSCALLS|LOG_THREADS, 1, "\texplicit child should be injected\n"); inject = true; } if (!inject && DYNAMO_OPTION(follow_children)) { inject = true; /* -follow_children defaults to inject */ /* check if child should be excluded from running under dr */ if (TEST(INJECT_EXCLUDED, should_inject)) { LOG(THREAD, LOG_SYSCALLS|LOG_THREADS, 1, "\tchild is excluded, not injecting\n"); inject = false; } /* check if we should leave injection to preinjector */ if (TEST(INJECT_TRUE, should_inject) && systemwide_inject_enabled() && !TEST(INJECT_EXPLICIT, should_inject)) { ASSERT(!DYNAMO_OPTION(follow_systemwide)); LOG(THREAD, LOG_SYSCALLS|LOG_THREADS, 1, "\tletting preinjector inject into child\n"); inject = false; } DODEBUG({ if (inject) { LOG(THREAD, LOG_SYSCALLS|LOG_THREADS, 1, "\tnon-excluded, non-preinjected child should be injected\n"); } }); } if (inject) { ASSERT(!TEST(INJECT_EXCLUDED, should_inject)); if (inject_settings != NULL) *inject_settings = should_inject; } } DODEBUG({ if (inject) { LOG(THREAD, LOG_SYSCALLS|LOG_THREADS, 1, "\tinjecting into child process\n"); } else { LOG(THREAD, LOG_SYSCALLS|LOG_THREADS, 1, "\tletting child execute natively " "(may still get injected by systemwide injector!)\n"); } }); return inject; } /* cxt may be NULL if -inject_at_create_process */ static int inject_into_process(dcontext_t *dcontext, HANDLE process_handle, CONTEXT *cxt, inject_setting_mask_t should_inject) { /* Here in fact we don't want to have the default argument override mechanism take place. If an app specific AUTOINJECT value is provided, then we should of course use it. However, if no specific one is given we should not use the global default when follow_children. For follow_explicit_children it is actually OK to use the global default value, it will be the GUI's responsibility to set both the parent and child if it is desired to have them use the same library. */ char library_path_buf[MAXIMUM_PATH]; char *library = library_path_buf; bool res; int err = get_process_parameter(process_handle, PARAM_STR(DYNAMORIO_VAR_AUTOINJECT), library_path_buf, sizeof(library_path_buf)); /* If there is no app-specific subkey, then we should check in * what mode are we injecting. * If we are in fact in follow_children - meaning all children are followed, * and there is no app specific option then we should use the parent library, * unless the child is in fact explicit in which case we just use the global library. */ switch (err) { case GET_PARAMETER_SUCCESS: break; case GET_PARAMETER_NOAPPSPECIFIC: /* We got the global key's library, use parent's library instead if the only * reason we're injecting is -follow_children (i.e. reading RUNUNDER gave us * !INJECT_TRUE). */ if (!TEST(INJECT_TRUE, should_inject)) { ASSERT(DYNAMO_OPTION(follow_children)); library = get_dynamorio_library_path(); } break; case GET_PARAMETER_BUF_TOO_SMALL: case GET_PARAMETER_FAILURE: library = get_dynamorio_library_path(); break; default: ASSERT_NOT_REACHED(); } LOG(THREAD, LOG_SYSCALLS|LOG_THREADS, 1, "\tinjecting %s into child process\n", library); if (DYNAMO_OPTION(aslr_dr) && /* case 8749 - can't aslr dr for thin_clients */ process_handle != NULL && !is_child_in_thin_client(process_handle)) { aslr_force_dynamorio_rebase(process_handle); } /* Can't early inject 32-bit DR into a wow64 process as there is no * ntdll32.dll at early inject point, so thread injection only. 
PR 215423. * This is only true for xp64/2003. It happens to work on vista+ because * it turns out ntdll32 is mapped in by the kernel. (xref i#381) */ if (DYNAMO_OPTION(early_inject) && (get_os_version() >= WINDOWS_VERSION_VISTA || !is_wow64_process(process_handle))) { ASSERT(early_inject_address != NULL || !INJECT_LOCATION_IS_LDR(early_inject_location)); /* FIXME if early_inject_address == NULL then early_inject_init failed * to find the correct address to use. Don't expect that to happen, * but if it does could fall back to late injection (though we can't * be sure that would work, i.e. early thread process for ex.) or * do a SYSLOG error. */ res = inject_into_new_process(process_handle, library, DYNAMO_OPTION(early_inject_map), early_inject_location, early_inject_address); } else { ASSERT(cxt != NULL); res = inject_into_thread(process_handle, cxt, NULL, library); } if (!res) { SYSLOG_INTERNAL_ERROR("ERROR: injection from pid=%d of %s into child " "process %d failed", get_process_id(), library, process_id_from_handle(process_handle)); /* FIXME i#49: this can happen for a 64-bit child of a 32-bit parent */ ASSERT_CURIOSITY(false && "injection into child failed: 32 to 64?"); return false; /* for compilation correctness and release builds */ } return true; } bool is_first_thread_in_new_process(HANDLE process_handle, CONTEXT *cxt) { /* ASSUMPTION: based on what I've seen, on win2k a new process has * pid 0 until its first thread is created. This is not true on XP * so we also check if the argument value is the PEB address * (which it should be if it is the first thread in the process, * according to inside win2k). This is a slight risk of double * or late injection if someone creates a remote thread that * happens to have an argument that equals the address of PEB. * Better would be able to tell from Eip if it is pointing at the * kernel32 thread start thunk or the kernel32 process start thunk, * or to check if the number of threads in the process equals 0, * but no easy way to do either here. FIXME */ process_id_t pid = process_id_from_handle(process_handle); if (pid == 0) return true; if (!is_pid_me(pid)) { ptr_uint_t peb = (ptr_uint_t) get_peb(process_handle); if (cxt->THREAD_START_ARG == peb) return true; else if (is_wow64_process(process_handle) && get_os_version() >= WINDOWS_VERSION_VISTA) { /* i#816: for wow64 process PEB query will be x64 while thread addr * will be the x86 PEB. On Vista and Win7 the x86 PEB seems to * always be one page below but we don't want to rely on that, and * it doesn't hold on Win8. Instead we ensure the start addr is * a one-page alloc whose first 3 fields match the x64 PEB: * boolean flags, Mutant, and ImageBaseAddress. 
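 * As an illustration (field layout assumed from the public PEB definitions, not re-verified here): both PEB variants begin with the four boolean flag bytes, then Mutant, then ImageBaseAddress, so comparing the first three pointer-sized slots of each (int64 reads from the x64 PEB, int32 reads from the candidate x86 page) gives a cheap fingerprint.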
*/ int64 peb64[3]; int peb32[3]; byte *base = NULL; size_t sz = get_allocation_size_ex (process_handle, (byte *)cxt->THREAD_START_ARG, &base); LOG(THREAD_GET, LOG_SYSCALLS|LOG_THREADS, 2, "%s: pid="PIFX" vs me="PIFX", arg="PFX" vs peb="PFX"\n", __FUNCTION__, pid, get_process_id(), cxt->THREAD_START_ARG, peb); if (sz != PAGE_SIZE || base != (byte *)cxt->THREAD_START_ARG) return false; if (!nt_read_virtual_memory(process_handle, (const void *) peb, peb64, sizeof(peb64), &sz) || sz != sizeof(peb64) || !nt_read_virtual_memory(process_handle, (const void *) cxt->THREAD_START_ARG, peb32, sizeof(peb32), &sz) || sz != sizeof(peb32)) return false; LOG(THREAD_GET, LOG_SYSCALLS|LOG_THREADS, 2, "%s: peb64 "PIFX","PIFX","PIFX" vs peb32 "PIFX","PIFX","PIFX"\n", __FUNCTION__, peb64[0], peb64[1], peb64[2], peb32[0], peb32[1], peb32[2]); if (peb64[0] == peb32[0] && peb64[1] == peb32[1] && peb64[2] == peb32[2]) return true; } } return false; } /* Depending on registry and options maybe inject into child process with * handle process_handle. Called by SYS_CreateThread in pre_system_call (in * which case cxt is non-NULL) and by CreateProcess[Ex] in post_system_call (in * which case cxt is NULL). */ bool maybe_inject_into_process(dcontext_t *dcontext, HANDLE process_handle, CONTEXT *cxt) { /* if inject_at_create_process becomes dynamic, need to move this check below * the synchronize dynamic options */ /* FIXME - can't read process parameters, at process create time is NULL * value in peb field except in Vista. Could pass it in. */ /* Can't early inject 32-bit DR into a wow64 process as there is no * ntdll32.dll at early inject point, so thread injection only. PR 215423. * This is only true for xp64/2003. It happens to work on vista+ because * it turns out ntdll32 is mapped in by the kernel. (xref i#381) */ bool injected = false; if ((cxt == NULL && (DYNAMO_OPTION(inject_at_create_process) || (get_os_version() >= WINDOWS_VERSION_VISTA && DYNAMO_OPTION(vista_inject_at_create_process))) && (!is_wow64_process(process_handle) || get_os_version() >= WINDOWS_VERSION_VISTA)) || (cxt != NULL && is_first_thread_in_new_process(process_handle, cxt))) { int rununder_mask; inject_setting_mask_t should_inject; /* Creating a new process & at potential inject point */ DEBUG_DECLARE(process_id_t pid = process_id_from_handle(process_handle);) DOLOG(3, LOG_SYSCALLS|LOG_THREADS, { SYSLOG_INTERNAL_INFO("found a fork: pid %d", pid); }); LOG(THREAD, LOG_SYSCALLS|LOG_THREADS, 1, "found a fork: pid %d\n", pid); if (should_inject_into_process(dcontext, process_handle, &rununder_mask, &should_inject)) { if (cxt == NULL && !DYNAMO_OPTION(early_inject)) { /* On Vista+ a legacy NtCreateProcess* syscall is being used, and * without -early_inject and without a context we're forced to * wait and assume NtCreateThread will be called later. * FIXME i#1898: on win10 for heap crash handling we hit this, and * we are currently missing the child. */ SYSLOG_INTERNAL_WARNING("legacy process creation detected: may miss " "child"); } else { injected = true; /* attempted, at least */ ASSERT(cxt != NULL || DYNAMO_OPTION(early_inject)); /* FIXME : if not -early_inject, we are going to read and write * to cxt, which may be unsafe */ if (inject_into_process(dcontext, process_handle, cxt, should_inject)) { check_for_run_once(process_handle, rununder_mask); } } } } return injected; } /* For case 8749: can't aslr dr for thin_client because cygwin apps will die. 
*/ static bool is_child_in_thin_client(HANDLE process_handle) { bool res; const options_t *opts; /* Shouldn't be using this for the current process. */ ASSERT(process_handle != NT_CURRENT_PROCESS && process_handle != NT_CURRENT_THREAD && process_handle != NULL); opts = get_process_options(process_handle); ASSERT_OWN_READWRITE_LOCK(true, &options_lock); ASSERT(opts != NULL); /* In this case the option is used only for preventing aslr_dr, so be safe * if you can't read it and say yes which will prevent aslr dr. Note: this * isn't the secure option, which is to say no, so that we aslr dr. * Interesting tradeoff; choosing safety as this scenario is rare in which * case first goal is to do no harm. */ if (opts == NULL) { res = true; } else { res = opts->thin_client; } write_unlock(&options_lock); return res; } app_pc get_dynamorio_dll_start() { if (dynamo_dll_start == NULL) dynamo_dll_start = get_allocation_base((app_pc) get_dynamorio_dll_start); return dynamo_dll_start; } app_pc get_dynamorio_dll_preferred_base(void) { if (dynamo_dll_preferred_base == NULL) { dynamo_dll_preferred_base = get_module_preferred_base(get_dynamorio_dll_start()); ASSERT(dynamo_dll_preferred_base != NULL); } return dynamo_dll_preferred_base; } static app_pc highest_user_address = (app_pc)(ptr_uint_t) IF_X64_ELSE(0x000007fffffeffffLL, 0x7ffeffff); /* 0x7ffeffff on 2GB:2GB default */ /* or 0xbffeffff with /3GB in boot.ini, */ /* /userva switch may also change the actual value seen */ static void get_system_basic_info(void) { SYSTEM_BASIC_INFORMATION sbasic_info; NTSTATUS result = query_system_info(SystemBasicInformation, sizeof(SYSTEM_BASIC_INFORMATION), &sbasic_info); ASSERT(NT_SUCCESS(result)); highest_user_address = (app_pc)sbasic_info.HighestUserAddress; /* typically we have 2GB:2GB split between user and kernel virtual memory * lkd> dd nt!MmUserProbeAddress l1 * 8055ee34 7fff0000 * lkd> dd nt!MmHighestUserAddress l1 * 8055ee3c 7ffeffff */ LOG(GLOBAL, LOG_VMAREAS, 1, "get_system_basic_info: " "HighestUserAddress "PFX"\n", highest_user_address); /* for testing purposes we can pretend all other addresses are inaccessible */ if (INTERNAL_OPTION(stress_fake_userva) != 0) { if (highest_user_address > (app_pc)INTERNAL_OPTION(stress_fake_userva)) { highest_user_address = (app_pc)INTERNAL_OPTION(stress_fake_userva); SYSLOG_INTERNAL_WARNING("using stress_fake_userva "PFX"\n", highest_user_address); } else { ASSERT_CURIOSITY(false && "useless stress_fake_userva"); } } ASSERT(OS_ALLOC_GRANULARITY == sbasic_info.AllocationGranularity); } bool is_user_address(app_pc pc) { /* we don't worry about LowestUserAddress which is the first 64KB * page which should normally be invalid. * * FIXME: case 10899 although users can in fact allocate in the * NULL allocation region (by using base=1), as typically done in * a local NULL pointer attack. Natively the address is still * visible for execution, and the OS should handle base=NULL on * our queries, but we should check if we will. Of course, this * is likely an attack so it is OK for us to fail it.
* * we only check upper bound and treat all smaller addresses as user addresses */ return pc <= highest_user_address; } void merge_writecopy_pages(app_pc start, app_pc end) { MEMORY_BASIC_INFORMATION mbi; PBYTE pb = start; uint prot; size_t len = query_virtual_memory(pb, &mbi, sizeof(mbi)); ASSERT(len == sizeof(mbi)); LOG(GLOBAL, LOG_VMAREAS, 2, "merge_writecopy_pages "PFX"-"PFX"\n", start, end); do { if ((app_pc)mbi.BaseAddress >= end) break; ASSERT(mbi.State == MEM_COMMIT); ASSERT(prot_is_writable(mbi.Protect)); prot = mbi.Protect & ~PAGE_PROTECTION_QUALIFIERS; if (prot == PAGE_WRITECOPY) { /* HACK (xref case 8069): make a process-local copy to try and merge * entire section into single region, for more efficient protection! * Yes all the writable regions are already contiguous, but they * have different flags, and so are different regions, and * NtProtectVirtualMemory refuses to do more than one region at a time. * However, regions seem to be merged when they have the * same flags, so we just remove the C flag. * Calling NtProtectVirtualMemory w/ PAGE_READWRITE to try * and remove the copy-on-write bits does not work, so we * write to every page! * FIXME: test on other versions of windows! * it's not documented so it may not be everywhere! * works on Win2K Professional * N.B.: since make_writable doesn't preserve copy-on-write, * it's a good thing we do this hack. * FIXME: how many of these pages would never have been made private? * (case 8069 covers that inquiry) */ volatile app_pc pc = mbi.BaseAddress; app_pc stop = ((app_pc)mbi.BaseAddress) + mbi.RegionSize; ASSERT(stop <= end); LOG(GLOBAL, LOG_VMAREAS, 2, "writing to "SZFMT" pages to get local copy of" " copy-on-write section @"PFX"\n", mbi.RegionSize/PAGE_SIZE, pc); while (pc < stop) { *pc = *pc; pc += PAGE_SIZE; } } if (POINTER_OVERFLOW_ON_ADD(pb, mbi.RegionSize)) break; pb += mbi.RegionSize; } while (query_virtual_memory(pb, &mbi, sizeof(mbi)) == sizeof(mbi)); LOG(GLOBAL, LOG_VMAREAS, 2, "checking that "PFX"-"PFX" merger worked\n", start, end); len = query_virtual_memory(start, &mbi, sizeof(mbi)); ASSERT(len == sizeof(mbi)); ASSERT(prot_is_writable(mbi.Protect)); /* OS could merge w/ another writable region so may not end at end */ ASSERT(end <= start + mbi.RegionSize); /* we only call this on DR data sections right now */ ASSERT(dynamo_dll_end == NULL || /* FIXME: init it earlier */ (is_in_dynamo_dll(start) && is_in_dynamo_dll(end))); LOG(GLOBAL, LOG_VMAREAS, 2, "DR regions post-merger:\n"); DOLOG(1, LOG_VMAREAS, { print_dynamo_regions(); LOG(GLOBAL, LOG_VMAREAS, 2, "\n"); }); } int find_dynamo_library_vm_areas() { /* walk through memory regions in our own dll */ size_t len; PBYTE pb; MEMORY_BASIC_INFORMATION mbi; int num_regions = 0; get_dynamorio_library_path(); /* just to preserve side effects */ LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME" dll path: %s\n", get_dynamorio_library_path()); get_dynamorio_dll_start(); /* for side effects: probably already called though */ ASSERT(dynamo_dll_start != NULL); pb = dynamo_dll_start; len = query_virtual_memory(pb, &mbi, sizeof(mbi)); ASSERT(len == sizeof(mbi)); ASSERT(mbi.State != MEM_FREE); LOG(GLOBAL, LOG_VMAREAS, 1, "\nOur regions:\n"); do { if (mbi.State == MEM_FREE || (app_pc) mbi.AllocationBase != dynamo_dll_start) break; if (mbi.State == MEM_COMMIT) { /* only look at committed regions */ LOG(GLOBAL, LOG_VMAREAS, 1, PFX"-"PFX" %s\n", mbi.BaseAddress, ((app_pc)mbi.BaseAddress) + mbi.RegionSize, prot_string(mbi.Protect)); num_regions++; #ifndef STATIC_LIBRARY /* For static library 
               builds, DR's code is in the exe and isn't considered
             * to be a DR area.
             */
            add_dynamo_vm_area(mbi.BaseAddress,
                               ((app_pc)mbi.BaseAddress) + mbi.RegionSize,
                               osprot_to_memprot(mbi.Protect),
                               true /* from image */
                               _IF_DEBUG(prot_string(mbi.Protect)));
            /* we need all writable regions to be inside the
             * sections that we protect */
            ASSERT(!prot_is_writable(mbi.Protect) ||
                   data_sections_enclose_region((app_pc)mbi.BaseAddress,
                                                ((app_pc)mbi.BaseAddress) +
                                                mbi.RegionSize));
#endif
        }
        if (POINTER_OVERFLOW_ON_ADD(pb, mbi.RegionSize))
            break;
        pb += mbi.RegionSize;
    } while (query_virtual_memory(pb, &mbi, sizeof(mbi)) == sizeof(mbi));
    dynamo_dll_end = (app_pc) pb;
    LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME" dll: from "PFX" to "PFX"\n\n",
        dynamo_dll_start, dynamo_dll_end);
    return num_regions;
}

void
print_dynamo_regions()
{
    /* walk through memory regions in our own dll */
    size_t len;
    PBYTE pb;
    MEMORY_BASIC_INFORMATION mbi;
    /* dynamo_dll_start is a global defined in find_dynamo_library_vm_areas */
    ASSERT(dynamo_dll_start != NULL);
    pb = dynamo_dll_start;
    len = query_virtual_memory(pb, &mbi, sizeof(mbi));
    ASSERT(len == sizeof(mbi));
    ASSERT(mbi.State != MEM_FREE);
    do {
        if (mbi.State == MEM_FREE ||
            (app_pc) mbi.AllocationBase != dynamo_dll_start)
            break;
        if (mbi.State == MEM_COMMIT) {
            /* only look at committed regions */
            LOG(GLOBAL, LOG_ALL, 1, PFX"-"PFX" %s\n", mbi.BaseAddress,
                ((app_pc)mbi.BaseAddress) + mbi.RegionSize,
                prot_string(mbi.Protect));
        }
        if (POINTER_OVERFLOW_ON_ADD(pb, mbi.RegionSize))
            break;
        pb += mbi.RegionSize;
    } while (query_virtual_memory(pb, &mbi, sizeof(mbi)) == sizeof(mbi));
}

#ifdef DEBUG /* MEMORY STATS ****************************************/

/* to make it easy to control log statement */
/* note: no trailing semicolon inside the macro -- callers supply it, and a
 * stray one would break if/else chains */
# define MEM_STATS_ADD(stat, sz)                                        \
    do {                                                                \
        if ((sz) != 0) {                                                \
            STATS_ADD(stat, sz);                                        \
            LOG(GLOBAL, LOG_MEMSTATS, 4, #stat" sz "SZFMT"\n", sz);     \
        }                                                               \
    } while (0)

/* N.B.: "reserved" here means reserved but not committed, so reserved
 * and committed are disjoint; returns whether or not it was our memory
 */
static bool
add_mem_stats(app_pc region, size_t r_commit, size_t r_reserve, bool r_is_stack,
              uint r_type, size_t r_exec, size_t r_ro, size_t r_rw)
{
    bool ours = false;
    /* add region to stats */
    if (r_type == MEM_IMAGE) {
        if (is_in_dynamo_dll(region)) {
            ours = true;
            MEM_STATS_ADD(dr_library_space, r_commit);
            ASSERT(r_reserve == 0);
        } else {
            /* an image can have reserve-only sections (e.g., mscorlib has 2!) */
            MEM_STATS_ADD(app_image_capacity, r_commit+r_reserve);
        }
    } else {
        if (is_dynamo_address(region)) {
            ours = true;
        } else if (r_type == MEM_MAPPED) {
            MEM_STATS_ADD(app_mmap_capacity, r_commit);
        } else {
            if (r_is_stack) {
                MEM_STATS_ADD(app_stack_capacity, r_commit);
            } else {
                MEM_STATS_ADD(app_heap_capacity, r_commit);
            }
        }
    }
    LOG(GLOBAL, LOG_MEMSTATS, 4,
        "Region "PFX"-"PFX" commit="SZFMT" reserve="SZFMT" stack=%d ours=%d\n",
        region, region+r_commit+r_reserve, r_commit, r_reserve, r_is_stack, ours);
    if (ours) {
        MEM_STATS_ADD(dr_commited_capacity, r_commit);
        MEM_STATS_ADD(dr_reserved_capacity, r_reserve);
        MEM_STATS_ADD(dr_vsize, r_commit + r_reserve);
    } else {
        MEM_STATS_ADD(app_reserved_capacity, r_reserve);
        MEM_STATS_ADD(app_committed_capacity, r_commit);
        MEM_STATS_ADD(app_vsize, r_commit + r_reserve);
        MEM_STATS_ADD(app_exec_capacity, r_exec);
        MEM_STATS_ADD(app_rw_capacity, r_rw);
        MEM_STATS_ADD(app_ro_capacity, r_ro);
    }
    /* yes, on windows vsize includes reserved */
    MEM_STATS_ADD(total_vsize, r_commit + r_reserve);
    /* count unaligned allocations (PEB TEB etc.
       see inside win2k pg 420) */
    if (!ALIGNED(region, OS_ALLOC_GRANULARITY)) {
        STATS_INC(unaligned_allocations);
    }
    return ours;
}

/* Since incremental app memory stats are too hard, we use snapshots */
void
mem_stats_snapshot()
{
    PBYTE pb = NULL;
    MEMORY_BASIC_INFORMATION mbi;
    /* stats strategy: have to wait until end of region to know everything,
     * so locally cache sum-of-block values until then */
    size_t r_commit = 0, r_reserve = 0, r_exec = 0, r_ro = 0, r_rw = 0;
    bool r_is_stack = false;
    uint r_type = 0;
    app_pc r_start = NULL;
    if (!dynamo_initialized) {
        /* Now that vm_areas_init() is after dynamo_thread_init()'s call to
         * dump_global_stats() we come here prior to dynamo_areas or DR's
         * library bounds being set up: best to just abort until we can
         * gather accurate stats.
         */
        return;
    }
    /* It's too hard to keep track of these incrementally -- would have to
     * record prior to NtAllocateVirtualMemory all of the reserved regions to
     * know which went from reserved to committed, and on freeing to know what
     * was committed and what reserved, etc., so we only do complete snapshots,
     * resetting the stats to 0 each time.
     */
    mutex_lock(&snapshot_lock);
    STATS_RESET(unaligned_allocations);
    STATS_RESET(dr_library_space);
    STATS_RESET(dr_commited_capacity);
    STATS_RESET(dr_reserved_capacity);
    STATS_RESET(total_wasted_vsize);
    STATS_RESET(dr_wasted_vsize);
    STATS_RESET(app_wasted_vsize);
    STATS_RESET(total_vsize);
    STATS_RESET(dr_vsize);
    STATS_RESET(app_vsize);
    STATS_RESET(app_reserved_capacity);
    STATS_RESET(app_committed_capacity);
    STATS_RESET(app_stack_capacity);
    STATS_RESET(app_heap_capacity);
    STATS_RESET(app_image_capacity);
    STATS_RESET(app_mmap_capacity);
    STATS_RESET(app_exec_capacity);
    STATS_RESET(app_ro_capacity);
    STATS_RESET(app_rw_capacity);
    /* walk through every block in memory */
    while (query_virtual_memory(pb, &mbi, sizeof(mbi)) == sizeof(mbi)) {
        /* Standard block iteration that groups blocks with the same
         * allocation base into a single region */
        if (mbi.State == MEM_FREE || mbi.AllocationBase == mbi.BaseAddress) {
            bool ours = false;
            if (r_start != NULL) {
                ours = add_mem_stats(r_start, r_commit, r_reserve, r_is_stack,
                                     r_type, r_exec, r_ro, r_rw);
            }
            /* reset for next region */
            r_commit = r_reserve = r_exec = r_ro = r_rw = 0;
            r_is_stack = false;
            r_type = mbi.Type;
            if (mbi.State == MEM_FREE) {
                LOG(GLOBAL, LOG_MEMSTATS, 4, "Free "PFX"-"PFX"\n",
                    mbi.BaseAddress, ((app_pc)mbi.BaseAddress)+mbi.RegionSize);
                if (r_start != NULL &&
                    !ALIGNED(mbi.BaseAddress, OS_ALLOC_GRANULARITY)) {
                    /* wasted virtual address space, at least part of this free
                     * region is unusable */
                    size_t wasted =
                        ALIGN_FORWARD(mbi.BaseAddress, OS_ALLOC_GRANULARITY) -
                        (ptr_uint_t)mbi.BaseAddress;
                    if (ours) {
                        /* last region is ours, we are wasting */
                        MEM_STATS_ADD(dr_wasted_vsize, (stats_int_t)wasted);
                    } else {
                        /* last region is the app's, it is wasting */
                        MEM_STATS_ADD(app_wasted_vsize, (stats_int_t)wasted);
                    }
                    MEM_STATS_ADD(total_wasted_vsize, (stats_int_t)wasted);
                }
                r_start = NULL;
            } else {
                r_start = mbi.AllocationBase;
            }
        }
        /* incremental work until have end of region */
        if (mbi.State == MEM_RESERVE) {
            r_reserve += mbi.RegionSize;
        } else if (mbi.State == MEM_COMMIT) {
            r_commit += mbi.RegionSize;
            if (TEST(PAGE_GUARD, mbi.Protect)) {
                /* if any guard blocks inside region, assume entire region
                 * is a stack */
                r_is_stack = true;
            }
            /* protection stats could be incremental but that would duplicate
             * checks for being DR memory.
             * mbi.Protect is invalid for reserved memory, only useful for
             * committed.
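             * E.g., a PAGE_EXECUTE_READWRITE block counts only toward r_exec
             * below, not r_rw, since the first matching test wins in the
             * if/else chain.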
             */
            if (prot_is_executable(mbi.Protect))
                r_exec += mbi.RegionSize;
            else if (prot_is_writable(mbi.Protect))
                r_rw += mbi.RegionSize;
            else if (prot_is_readable(mbi.Protect))
                r_ro += mbi.RegionSize;
            /* we don't add up no-access memory! */
        }
        if (POINTER_OVERFLOW_ON_ADD(pb, mbi.RegionSize))
            break;
        pb += mbi.RegionSize;
    }
    if (r_start != NULL) {
        add_mem_stats(r_start, r_commit, r_reserve, r_is_stack, r_type,
                      r_exec, r_ro, r_rw);
    }
    STATS_PEAK(unaligned_allocations);
    STATS_PEAK(dr_commited_capacity);
    STATS_PEAK(dr_reserved_capacity);
    STATS_PEAK(total_wasted_vsize);
    STATS_PEAK(dr_wasted_vsize);
    STATS_PEAK(app_wasted_vsize);
    STATS_PEAK(total_vsize);
    STATS_PEAK(dr_vsize);
    STATS_PEAK(app_vsize);
    STATS_PEAK(app_reserved_capacity);
    STATS_PEAK(app_committed_capacity);
    STATS_PEAK(app_stack_capacity);
    STATS_PEAK(app_heap_capacity);
    STATS_PEAK(app_image_capacity);
    STATS_PEAK(app_mmap_capacity);
    STATS_PEAK(app_exec_capacity);
    STATS_PEAK(app_ro_capacity);
    STATS_PEAK(app_rw_capacity);
    mutex_unlock(&snapshot_lock);
}
#endif /* DEBUG (MEMORY STATS) ****************************************/

/* update our data structures that record info on PE modules */
/* rewalking is set when walking existing memory mappings, and is
 * unset if called when processing a system call for (un)map.
 * Returns true if this mapped image is a library.
 */
static bool
process_image(app_pc base, size_t size, uint prot, bool add, bool rewalking,
              const char *filepath)
{
    const char *name = NULL;
    bool module_is_native_exec = false;
    bool already_added_native_exec = false;
    size_t image_size;
    /* ensure header is readable */
    ASSERT(prot_is_readable(prot));
    ASSERT(!rewalking || add); /* when rewalking can only add */

    /* FIXME: we only know that we are in a MEM_IMAGE
     * we still need to be careful to check it is a real PE
     * We could optimize out these system calls, but for now staying safe
     */
    if (!is_readable_pe_base(base)) {
        DOCHECK(1, {
            wchar_t buf[MAXIMUM_PATH];
            NTSTATUS res = get_mapped_file_name(base, buf, BUFFER_SIZE_BYTES(buf));
            if (NT_SUCCESS(res)) {
                LOG(GLOBAL, LOG_VMAREAS, 2,
                    "\t%s: WARNING: image but non-PE mapping @"PFX" backed by \"%S\"\n",
                    __FUNCTION__, base, buf);
            }
            /* This happens on win7 so it is not an assert curiosity:
             * \Device\HarddiskVolume1\Windows\System32\apisetschema.dll
             */
            if (!NT_SUCCESS(res) || wcsstr(buf, L"apisetschema") == NULL)
                SYSLOG_INTERNAL_WARNING_ONCE("image but non-PE mapping found");
        });
        return false;
    }
    /* Our WOW64 design for 32-bit DR involves ignoring all 64-bit dlls
     * (several are visible: wow64cpu.dll, wow64win.dll, wow64.dll, and ntdll.dll)
     * For 64-bit DR both should be handled.
     */
#ifdef X64
    DODEBUG({
        if (module_is_32bit(base)) {
            LOG(GLOBAL, LOG_VMAREAS, 1,
                "image "PFX"-"PFX" is 32-bit dll (wow64 process?)\n",
                base, base+size);
            /* This happens in a 64-bit process when creating a 32-bit
             * child: CreateProcess maps in the child executable in
             * this process first (i#817)
             */
            ASSERT_CURIOSITY(is_wow64_process(NT_CURRENT_PROCESS) ||
                             !TEST(IMAGE_FILE_DLL,
                                   get_module_characteristics(base)));
        }
    });
#else
    if (module_is_64bit(base)) {
        LOG(GLOBAL, LOG_VMAREAS, 1,
            "image "PFX"-"PFX" is 64-bit dll (wow64 process?): ignoring it!\n",
            base, base+size);
        ASSERT(is_wow64_process(NT_CURRENT_PROCESS));
        return false;
    }
#endif
    /* i#1172: do not treat partially-mapped images as "modules" as they are
     * not normal libraries loaded by the system loader but instead are
     * usually mapped in to read resources or other data from the file.
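     * (One presumed example, for illustration only: a resource-only
     * mapping, e.g. a LoadLibraryEx data-file/image-resource style load,
     * which maps just part of the PE.)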
     * If code is executed from a partial map, DR will still perform proper
     * cache consistency as that's done in the caller.
     * Having native_exec not apply seems ok: we'll err on the side of executing
     * it, which is the conservative side.  Hot patches and
     * patch-proof list should only apply to system-loaded libs.
     */
    if (!get_module_info_pe(base, NULL, NULL, &image_size, NULL, NULL) ||
        size < image_size) {
        LOG(GLOBAL, LOG_VMAREAS, 2,
            "not treating partially-mapped ("PIFX" < "PIFX") image @"PFX" as module\n",
            size, image_size, base);
        return false;
    }
    /* Track loaded module list.  Needs to be done before
     * hotp_process_image() and any caller of get_module_short_name()
     * or other data that we cache in the list.
     */
    if (add) /* add first */
        module_list_add(base, size, !rewalking /* !rewalking <=> at_map */,
                        filepath);
    else
        os_module_set_flag(base, MODULE_BEING_UNLOADED);
    /* DYNAMO_OPTION(native_exec) and DYNAMO_OPTION(use_moduledb) are the
     * primary users of module name */
    name = os_get_module_name_strdup(base HEAPACCT(ACCT_VMAREAS));
    LOG(GLOBAL, LOG_VMAREAS, 1, "image %-15s %smapped @ "PFX"-"PFX"\n",
        name == NULL ? "<no name>" : name, add ? "" : "un", base, base+size);
    /* Check if module_list_add added the module to native_exec_areas.  If we're
     * removing the module, it will also be there from the load earlier. */
    if (DYNAMO_OPTION(native_exec) &&
        vmvector_overlap(native_exec_areas, base, base+size)) {
        LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 1,
            "module %s is on native_exec list\n", name);
        module_is_native_exec = true;
        already_added_native_exec = true;
#ifdef GBOP
        /* FIXME: if someone just loads a vm, our gbop would become useless;
         * need better dgc identification for gbop; see case 8087.
         */
        if (add && TEST(GBOP_IS_DGC, DYNAMO_OPTION(gbop)) && !gbop_vm_loaded) {
            /* !gbop_vm_loaded in the check above prevents this memory
             * protection change from happening for each vm load, not that any
             * process loads a vm multiple times or loads multiple vms.
             */
            SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
            gbop_vm_loaded = true;
            SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
        }
#endif
    }
    moduledb_process_image(name, base, add);
    /* Case 7266: add all exes and dlls with managed code to native_exec_areas,
     * for now.
     * FIXME: should try to execute non-managed code under DR, when possible.
     */
    if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_managed_code) &&
        module_has_cor20_header(base)) {
        DODEBUG({
            if (add) {
                LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 1,
                    "process_image: module=%s, base="PFX" has cor20 header, "
                    "adding to native exec areas\n",
                    name ? name : "<noname>", base);
                SYSLOG_INTERNAL_INFO_ONCE("cor20 module %s added to native "
                                          "exec area", name ? name : "<noname>");
            }
        });
        module_is_native_exec = true;
    }
    /* xref case 10998 - we native exec modules with .pexe sections to handle all the
     * int 3 strangeness.  FIXME - restrict further? only observed on Vista, known .pexe
     * sections from problematic dlls all begin with mostly the same 0x60 first bytes,
     * .pexe is observed to always be the first section, etc.
     */
    if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_dot_pexe) &&
        get_named_section_bounds(base, ".pexe", NULL, NULL)) {
        DODEBUG({
            if (add) {
                LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 1,
                    "process_image: module=%s, base="PFX" has .pexe section, "
                    "adding to native exec areas\n",
                    name ? name : "<noname>", base);
                SYSLOG_INTERNAL_INFO(".pexe module %s added to native exec area",
                                     name ?
name : "<noname>"); /* check is one of the known .pexe dlls */ ASSERT_CURIOSITY (name != NULL && check_filter ("AuxiliaryDisplayCpl.dll;AuxiliaryDisplayDriverLib.dll;" "AuxiliaryDisplayServices.dll;NetProjW.dll;P2PGraph.dll;" "localspl.dll;lsasrv.dll;mssrch.dll;p2psvc.dll;pmcsnap.dll;" "shell32.dll;spoolss.dll;uDWM.dll", name)); } }); module_is_native_exec = true; } if (module_is_native_exec && add && !already_added_native_exec) { RSTATS_INC(num_native_module_loads); vmvector_add(native_exec_areas, base, base+size, NULL); } else if (!already_added_native_exec) { /* For safety we'll just always remove the region (even if add==true) to avoid * any possibility of having stale entries in the vector overlap into new * non-native regions. Also see case 7628. */ ASSERT(!module_is_native_exec || /* if not native_exec shouldn't be in vector */ !vmvector_overlap(native_exec_areas, base, base+size)); vmvector_remove(native_exec_areas, base, base+size); } if (!IS_STRING_OPTION_EMPTY(patch_proof_list) || !IS_STRING_OPTION_EMPTY(patch_proof_default_list)) { /* even if name is not valid we should match ALL */ if ((IS_LISTSTRING_OPTION_FORALL(patch_proof_list) || IS_LISTSTRING_OPTION_FORALL(patch_proof_default_list)) || (name != NULL && check_list_default_and_append(dynamo_options.patch_proof_default_list, dynamo_options.patch_proof_list, name))) { if (add) { LOG(GLOBAL, LOG_INTERP|LOG_VMAREAS, 1, "module %s is on patch proof list\n", name ? name : "<noname>"); STATS_INC(num_patch_proof_module_loads); /* assuming code sections are added as non-writable we * will prevent them from becoming writable */ /* Note adding full module region here, * app_memory_protection_change() will filter out only * CODE. FIXME: [minor perf] alternatively could walk * module and add only code sections here. */ vmvector_add(patch_proof_areas, base, base+size, NULL); } else { /* remove all areas in range */ vmvector_remove(patch_proof_areas, base, base+size); } } } #ifdef HOT_PATCHING_INTERFACE if (DYNAMO_OPTION(hot_patching)) { if (!DYNAMO_OPTION(hotp_only)) { hotp_process_image(base, add, false, false, NULL, NULL, 0); } else { bool needs_processing = false; int num_threads = 0; thread_record_t **all_threads = NULL; /* For hotp_only, image processing is done in two steps. The * first one is done without suspending all threads (expensive if * done for each dll load or unload). Only if the first step * identified a module match, all threads (known to the core, of * course) are suspended and the image is processed, i.e., hot * patches are either injected or removed both of which in * hotp_only need all threads to be suspended. */ hotp_process_image(base, add, false/*no locks*/, /* Do single-step at init: assume no other threads. * Risk is low; rest of DR assumes it as well. * Can't do two-step since have no dcontext yet * and hit synch_with_all_threads assert. */ dynamo_initialized/*just check?*/, dynamo_initialized ? &needs_processing : NULL, NULL, 0); if (needs_processing) { DEBUG_DECLARE(bool ok =) synch_with_all_threads(THREAD_SYNCH_SUSPENDED, &all_threads, /* Case 6821: other synch-all-thread uses that * only care about threads carrying fcache * state can ignore us */ &num_threads, THREAD_SYNCH_NO_LOCKS_NO_XFER, /* if we fail to suspend a thread (e.g., * privilege problems) ignore it. * FIXME: retry instead? 
                                    */
                                   THREAD_SYNCH_SUSPEND_FAILURE_IGNORE);
                ASSERT(ok);
                hotp_process_image(base, add, false, false, NULL,
                                   all_threads, num_threads);
                end_synch_with_all_threads(all_threads, num_threads,
                                           true/*resume*/);
            }
        }
    }
#endif
    if (DYNAMO_OPTION(IAT_convert)) { /* case 85 */
        /* add IAT areas to a vmarea for faster lookup */
        app_pc IAT_start, IAT_end;
        bool valid = get_IAT_section_bounds(base, &IAT_start, &IAT_end);
        if (valid && IAT_start != IAT_end) {
            LOG(GLOBAL, LOG_INTERP, 2,
                "module %s IAT("PFX","PFX") %s\n", name ? name : "<noname>",
                IAT_start, IAT_end, add ? "added" : "removed");
            ASSERT_CURIOSITY(IAT_start != NULL && IAT_end != NULL);
            ASSERT(IAT_start < IAT_end);
            if (add) {
                ASSERT(!vmvector_overlap(IAT_areas, IAT_start, IAT_end));
                STATS_INC(num_IAT_areas);
                if (!module_is_native_exec) {
                    LOG(GLOBAL, LOG_INTERP, 1,
                        "module %s IAT("PFX","PFX") added\n",
                        name ? name : "<noname>", IAT_start, IAT_end);
                    vmvector_add(IAT_areas, IAT_start, IAT_end, NULL);
                } else {
                    LOG(GLOBAL, LOG_INTERP, 1,
                        "skipping native module %s IAT("PFX","PFX"), "
                        "native modules seen\n",
                        name ? name : "<noname>", IAT_start, IAT_end);
                }
            } else {
                STATS_DEC(num_IAT_areas);
                vmvector_remove(IAT_areas, IAT_start, IAT_end);
            }
        } else {
            ASSERT(!valid || IAT_start == base);
            ASSERT_CURIOSITY(valid && "bad module");
        }
    }
#ifdef RETURN_AFTER_CALL
    DODEBUG({
        if (!add && DYNAMO_OPTION(ret_after_call)) {
            /* case 5329 (see comments in process_image_post_vmarea()) --
             * here we just check for exec areas before we flush them */
            /* although some have no .text section
             * e.g. hpzst3zm.dll from case 9121 */
            if (!executable_vm_area_overlap(base, base + size,
                                            false/*have no lock*/)) {
                SYSLOG_INTERNAL_WARNING_ONCE("DLL with no executable areas "
                                             PFX"-"PFX"\n", base, base + size);
            }
        }
    });
#endif /* RETURN_AFTER_CALL */

    /* add module and its export symbols to our list only if logging */
    DOLOG(1, LOG_SYMBOLS, {
        if (add) {
            /* we need to touch memory to check for PE and that doesn't always work
             * FIXME: but, this is MEM_IMAGE, and above we verify the header
             * is readable, so we can get rid of all of these system calls here
             */
            add_module_info((app_pc)base, size);
        } else {
            /* remove module if we have it added to our list */
            remove_module_info((app_pc)base, size);
        }
    });

    if (name != NULL)
        dr_strfree(name HEAPACCT(ACCT_VMAREAS));
    return true;
}

/* Image processing that must be done after vmarea processing (mainly
 * persisted cache loading)
 */
/* rewalking is set when walking existing memory mappings, and is
 * unset if called when processing a system call for (un)map */
static void
process_image_post_vmarea(app_pc base, size_t size, uint prot, bool add,
                          bool rewalking)
{
    /* Our WOW64 design for 32-bit DR involves ignoring all 64-bit dlls
     * (several are visible: wow64cpu.dll, wow64win.dll, wow64.dll, and ntdll.dll)
     * This includes a 64-bit child process (i#838).
     * For 64-bit DR both should be handled.
*/ #ifndef X64 if (module_is_64bit(base)) return; #endif /* ensure header is readable */ ASSERT(prot_is_readable(prot)); ASSERT(!rewalking || add); /* when rewalking can only add */ /* FIXME: we only know that we are in a MEM_IMAGE * we still need to be careful to check it is a real PE * We could optimize out these system calls, but for now staying safe */ if (!is_readable_pe_base(base)) { /* see comments in process_image() where we SYSLOG */ return; } #ifdef RCT_IND_BRANCH if (TEST(OPTION_ENABLED, DYNAMO_OPTION(rct_ind_call)) || TEST(OPTION_ENABLED, DYNAMO_OPTION(rct_ind_jump))) { /* we need to know about module addition or removal * whether or not we'll act on it right now */ rct_process_module_mmap(base, size, add, rewalking); } #endif /* RCT_IND_BRANCH */ if (!add) /* remove last */ module_list_remove(base, size); } /* returns true if it added an executable region * ok for dcontext to be NULL if init==true and add==true */ static bool process_memory_region(dcontext_t *dcontext, MEMORY_BASIC_INFORMATION *mbi, bool init, bool add) { bool from_image = (mbi->Type == MEM_IMAGE); /* Our WOW64 design involves ignoring all 64-bit dlls * (several are visible: wow64cpu.dll, wow64win.dll, wow64.dll, and ntdll.dll) * We go ahead and track the memory, but we do not treat as an image */ if (is_wow64_process(NT_CURRENT_PROCESS) && from_image && module_is_64bit(mbi->AllocationBase/*NOT BaseAddress*/)) from_image = false; ASSERT(dcontext != NULL || (init && add)); DOLOG(2, LOG_VMAREAS, { if (mbi->State != MEM_FREE) { LOG(GLOBAL, LOG_VMAREAS, prot_is_executable(mbi->Protect) ? 1U : 2U, PFX"-"PFX" %s %s allocbase="PFX"\n", mbi->BaseAddress, ((app_pc)mbi->BaseAddress) + mbi->RegionSize, prot_string(mbi->Protect), (mbi->State == MEM_RESERVE) ? "reserve" : "commit ", mbi->AllocationBase); } }); /* MEM_RESERVE has meaningless mbi->Protect field, so we ignore it here */ if (mbi->State != MEM_COMMIT) return false; /* call these even if not marked as x, esp. the de-alloc, since some policy * could have them on future list or something */ if (add) { return app_memory_allocation(dcontext, mbi->BaseAddress, mbi->RegionSize, osprot_to_memprot(mbi->Protect), from_image _IF_DEBUG(from_image ? 
"module" : "alloc")); } else { app_memory_deallocation(dcontext, mbi->BaseAddress, mbi->RegionSize, false /* don't own thread_initexit_lock */, from_image); } return false; } /* returns the number of executable areas added to DR's list */ int find_executable_vm_areas() { PBYTE pb = NULL; MEMORY_BASIC_INFORMATION mbi; PBYTE image_base = NULL; size_t view_size = 0; uint image_prot = 0; int num_executable = 0; LOG(GLOBAL, LOG_VMAREAS, 2, "Executable regions:\n"); DOLOG(1, LOG_MEMSTATS, { mem_stats_snapshot(); }); /* Strategy: walk through every block in memory */ while (query_virtual_memory(pb, &mbi, sizeof(mbi)) == sizeof(mbi)) { /* Skip client lib and any other privately loaded libs: we don't want them * on our mod list or executable area list */ bool skip = dynamo_vm_area_overlap(pb, pb + mbi.RegionSize) && !is_in_dynamo_dll(pb) /* our own text section is ok */ /* client lib text section is ok (xref i#487) */ IF_CLIENT_INTERFACE(&& !is_in_client_lib(pb)); bool full_image = true; ASSERT(pb == mbi.BaseAddress); DOLOG(2, LOG_VMAREAS, { if (skip) { LOG(GLOBAL, LOG_VMAREAS, 2, PFX"-"PFX" skipping: internal DR region\n", pb, pb + mbi.RegionSize); } }); if (!skip && mbi.State != MEM_FREE && mbi.Type == MEM_IMAGE && pb == mbi.AllocationBase) { /* first region in an image */ MEMORY_BASIC_INFORMATION mbi_image; PBYTE pb_image = pb + mbi.RegionSize; image_base = pb; image_prot = mbi.Protect; /* We want to add to our module list right away so we can use it to * obtain info when processing each +x region. We need the view size * to call process_image with so we walk the image here. */ /* FIXME - if it ever becomes a perf issue we can prob. change process_image * to not require the view size (by moving more things into * process_image_post_vmarea or remembering the queryies). */ while (query_virtual_memory(pb_image, &mbi_image, sizeof(mbi_image)) == sizeof(mbi_image) && mbi_image.State != MEM_FREE && mbi_image.AllocationBase == pb) { ASSERT(mbi_image.Type == MEM_IMAGE); pb_image += mbi_image.RegionSize; } view_size = pb_image - pb; full_image = process_image(image_base, view_size, image_prot, true /* add */, true /* rewalking */, NULL); } if (!skip && process_memory_region(NULL, &mbi, true/*init*/, true/*add*/)) num_executable++; if (POINTER_OVERFLOW_ON_ADD(pb, mbi.RegionSize)) break; pb += mbi.RegionSize; if (!skip && image_base != NULL && pb >= image_base + view_size) { ASSERT(pb == image_base + view_size); if (full_image) { process_image_post_vmarea(image_base, view_size, image_prot, true /* add */, true /* rewalking */); } image_base = NULL; } } ASSERT(image_base == NULL); /* check we don't have outstanding post call */ LOG(GLOBAL, LOG_VMAREAS, 2, "\n"); STATS_ADD(num_app_code_modules, num_executable); return num_executable; } /* all_memory_areas is linux only, dummy on win32 */ void all_memory_areas_lock() { /* do nothing */ } void all_memory_areas_unlock() { /* do nothing */ } void update_all_memory_areas(app_pc start, app_pc end, uint prot, int type) { /* do nothing */ } bool remove_from_all_memory_areas(app_pc start, app_pc end) { return true; } /* Processes a mapped-in section, which may or may not be an image. * if add is false, assumes caller has already called flush_fragments_and_remove_region * for all executable areas in region (probably just for entire super-region). 
* returns the number of executable areas added to DR's list */ int process_mmap(dcontext_t *dcontext, app_pc pc, size_t size, bool add, const char *filepath) { PBYTE pb; MEMORY_BASIC_INFORMATION mbi; app_pc region_base; int num_executable = 0; bool image = false; uint image_prot = 0; ASSERT(!DYNAMO_OPTION(thin_client)); LOG(GLOBAL, LOG_VMAREAS, 2, "%s exec areas in region "PFX"\n", add ? "adding" : "removing", pc); pb = (PBYTE) pc; if (query_virtual_memory(pb, &mbi, sizeof(mbi)) != sizeof(mbi)) ASSERT(false); if (mbi.State == MEM_FREE) return num_executable; region_base = (app_pc) mbi.AllocationBase; if (mbi.Type == MEM_IMAGE) { image = process_image(region_base, size, mbi.Protect, add, false /* not rewalking */, filepath); image_prot = mbi.Protect; } /* Now update our vm areas executable region lists. * The protection flag doesn't tell us if there are executable areas inside, * must walk all the individual regions. * FIXME: for remove, optimize to do single flush but multiple area removals? */ while (query_virtual_memory(pb, &mbi, sizeof(mbi)) == sizeof(mbi)) { if (mbi.State == MEM_FREE || mbi.AllocationBase != region_base) break; if (process_memory_region(dcontext, &mbi, false/*!init*/, add)) { num_executable++; STATS_INC(num_app_code_modules); } if (POINTER_OVERFLOW_ON_ADD(pb, mbi.RegionSize)) break; pb += mbi.RegionSize; } if (image) { process_image_post_vmarea(region_base, size, image_prot, add, false /* not rewalking */); } LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 3, "Executable areas are now:\n"); DOLOG(3, LOG_SYSCALLS|LOG_VMAREAS, { print_executable_areas(GLOBAL); }); return num_executable; } app_pc get_application_base(void) { static app_pc app_start = NULL; if (app_start == NULL) { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); app_start = get_own_peb()->ImageBaseAddress; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } return app_start; } app_pc get_application_end(void) { static app_pc app_end = NULL; if (app_end == NULL) { app_pc start; SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); start = get_own_peb()->ImageBaseAddress; app_end = start + get_allocation_size(start, NULL); SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } return app_end; } app_pc get_image_entry(void) { static app_pc image_entry_point = NULL; if (image_entry_point == NULL) { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); /* Note that peb->ImageBaseAddress = GetModuleHandle(NULL) */ image_entry_point = get_module_entry(get_own_peb()->ImageBaseAddress); SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } return image_entry_point; } /* converts a local_state_t offset to a segment offset */ ushort os_tls_offset(ushort tls_offs) { ASSERT_TRUNCATE(tls_offs, ushort, tls_local_state_offs + tls_offs); return (ushort) (tls_local_state_offs + tls_offs); } local_state_t * get_local_state() { byte *teb_addr = (byte *) get_own_teb(); return (local_state_t *) (teb_addr + tls_local_state_offs); } local_state_extended_t * get_local_state_extended() { ASSERT(DYNAMO_OPTION(ibl_table_in_tls)); return (local_state_extended_t *) get_local_state(); } /* returns the thread-private dcontext pointer for the calling thread */ dcontext_t* get_thread_private_dcontext(void) { /* This routine cannot be used before processwide os_init sets up the TLS index. */ if (tls_dcontext_offs == TLS_UNINITIALIZED) return (IF_CLIENT_INTERFACE(standalone_library ? GLOBAL_DCONTEXT :) NULL); /* * We don't need to check whether this thread has been initialized under us - * Windows sets the value to 0 for us, so we'll just return NULL. 
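     * (For illustration: the non-NULL value is presumably installed during
     * thread initialization via set_thread_private_dcontext() below.)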
     */
    return (dcontext_t *) get_tls(tls_dcontext_offs);
}

/* sets the thread-private dcontext pointer for the calling thread */
void
set_thread_private_dcontext(dcontext_t *dcontext)
{
    set_tls(tls_dcontext_offs, dcontext);
}

#ifdef WINDOWS_PC_SAMPLE
/* routines for pc sampling on windows */

profile_t *
create_profile(void *start, void *end, uint bucket_shift, dcontext_t *dcontext)
{
    profile_t *profile;
    size_t buffer_size =
        ((((ptr_uint_t)end - (ptr_uint_t)start) >> bucket_shift) + 1) *
        sizeof(uint);
    if (dcontext == NULL) {
        LOG(GLOBAL, LOG_PROFILE, 1,
            "Creating global profile from "PFX" to "PFX" with shift %d "
            "for buffer size "SZFMT" bytes\n", start, end, bucket_shift,
            buffer_size);
        profile = (profile_t *)
            global_heap_alloc(sizeof(*profile) HEAPACCT(ACCT_STATS));
        profile->buffer = (uint *)
            UNPROTECTED_GLOBAL_ALLOC(buffer_size HEAPACCT(ACCT_STATS));
    } else {
        LOG(THREAD, LOG_PROFILE, 1,
            "Creating local profile from "PFX" to "PFX" with shift %d "
            "(buffer size "SZFMT" bytes)\n", start, end, bucket_shift,
            buffer_size);
        profile = (profile_t *)
            heap_alloc(dcontext, sizeof(*profile) HEAPACCT(ACCT_STATS));
        profile->buffer = (uint *)
            UNPROTECTED_LOCAL_ALLOC(dcontext, buffer_size HEAPACCT(ACCT_STATS));
    }
    memset(profile->buffer, 0, buffer_size);
    profile->start = start;
    profile->end = end;
    profile->bucket_shift = bucket_shift;
    profile->buffer_size = buffer_size;
    profile->enabled = false;
    profile->dcontext = dcontext;
    IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((byte *)end - (byte *)start)));
    IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(buffer_size)));
    profile->handle = nt_create_profile(NT_CURRENT_PROCESS, start,
                                        (uint)((byte *)end - (byte *)start),
                                        profile->buffer,
                                        (uint)buffer_size, bucket_shift);
    return profile;
}

void
free_profile(profile_t *profile)
{
    ASSERT(!profile->enabled);
    close_handle(profile->handle);
    if (profile->dcontext == NULL) {
        LOG(GLOBAL, LOG_PROFILE, 2,
            "Freeing global profile from "PFX" to "PFX" with shift %d "
            "(buffer size "SZFMT" bytes)\n", profile->start, profile->end,
            profile->bucket_shift, profile->buffer_size);
        UNPROTECTED_GLOBAL_FREE(profile->buffer, profile->buffer_size
                                HEAPACCT(ACCT_STATS));
        global_heap_free(profile, sizeof(*profile) HEAPACCT(ACCT_STATS));
    } else {
        dcontext_t *dcontext = profile->dcontext;
        LOG(THREAD, LOG_PROFILE, 2,
            "Freeing local profile from "PFX" to "PFX" with shift %d "
            "(buffer size "SZFMT" bytes)\n", profile->start, profile->end,
            profile->bucket_shift, profile->buffer_size);
        UNPROTECTED_LOCAL_FREE(dcontext, profile->buffer, profile->buffer_size
                               HEAPACCT(ACCT_STATS));
        heap_free(dcontext, profile, sizeof(*profile) HEAPACCT(ACCT_STATS));
    }
}

void
start_profile(profile_t *profile)
{
    ASSERT(!profile->enabled);
    nt_start_profile(profile->handle);
    profile->enabled = true;
}

void
stop_profile(profile_t *profile)
{
    ASSERT(profile->enabled);
    nt_stop_profile(profile->handle);
    profile->enabled = false;
}

void
dump_profile_range(file_t file, profile_t *profile, byte *start, byte *end)
{
    uint i, step = (1 << profile->bucket_shift);
    uint start_i = (uint) (start - (byte *)profile->start) / step;
    uint end_i = (uint) (end - (byte *)profile->start) / step;
    IF_X64(ASSERT_TRUNCATE(start_i, uint,
                           (start - (byte *)profile->start) / step));
    IF_X64(ASSERT_TRUNCATE(end_i, uint,
                           (end - (byte *)profile->start) / step));
    print_file(file, "Profile Dump\nRange "PFX"-"PFX"\nStep "PFX
               " (%u-%u)\n", start, end, step, start_i, end_i);
    ASSERT(start_i < profile->buffer_size / sizeof(uint) &&
           end_i < profile->buffer_size / sizeof(uint));
    for (i = start_i; i <= end_i; i++) {
        if (profile->buffer[i] >
            0) {
            print_file(file, PFX" %10d\n",
                       (byte *)profile->start + i * step, profile->buffer[i]);
        }
    }
    print_file(file, "Finished Profile Dump\n");
}

void
dump_profile(file_t file, profile_t *profile)
{
    dump_profile_range(file, profile, (byte *) profile->start,
                       (byte *) profile->end);
}

uint
sum_profile_range(profile_t *profile, byte *start, byte *end)
{
    uint i, ret = 0, step = (1 << profile->bucket_shift);
    uint start_i = (uint) (start - (byte *)profile->start) / step;
    uint end_i = (uint) (end - (byte *)profile->start) / step;
    IF_X64(ASSERT_TRUNCATE(start_i, uint,
                           (start - (byte *)profile->start) / step));
    IF_X64(ASSERT_TRUNCATE(end_i, uint,
                           (end - (byte *)profile->start) / step));
    ASSERT(start_i < profile->buffer_size / sizeof(uint) &&
           end_i < profile->buffer_size / sizeof(uint));
    for (i = start_i; i <= end_i; i++) {
        if (profile->buffer[i] > 0)
            ret += profile->buffer[i];
    }
    return ret;
}

uint
sum_profile(profile_t *profile)
{
    return sum_profile_range(profile, (byte *) profile->start,
                             (byte *) profile->end);
}

void
reset_profile(profile_t *profile)
{
    memset(profile->buffer, 0, profile->buffer_size);
}
#endif

/* free memory allocated from os_raw_mem_alloc */
bool
os_raw_mem_free(void *p, size_t size, uint flags, heap_error_code_t *error_code)
{
    ASSERT(error_code != NULL);
    ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
    if (!TEST(RAW_ALLOC_RESERVE_ONLY, flags)) {
        *error_code = nt_decommit_virtual_memory(p, size);
        if (!NT_SUCCESS(*error_code))
            return false;
    }
    if (!TEST(RAW_ALLOC_COMMIT_ONLY, flags))
        *error_code = nt_free_virtual_memory(p);
    LOG(GLOBAL, LOG_HEAP, 2, "os_raw_mem_free: "SZFMT" bytes @ "PFX"\n",
        size, p);
    return NT_SUCCESS(*error_code);
}

void *
os_raw_mem_alloc(void *preferred, size_t size, uint prot, uint flags,
                 heap_error_code_t *error_code)
{
    void *p = preferred;
    uint os_prot = memprot_to_osprot(prot);
    ASSERT(error_code != NULL);
    /* should only be used on aligned pieces */
    ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
    *error_code = nt_allocate_virtual_memory(&p, size, os_prot,
                                             TEST(RAW_ALLOC_RESERVE_ONLY, flags) ?
                                             MEMORY_RESERVE_ONLY :
                                             (TEST(RAW_ALLOC_COMMIT_ONLY, flags) ?
                                              MEM_COMMIT : MEMORY_COMMIT));
    if (!NT_SUCCESS(*error_code)) {
        LOG(GLOBAL, LOG_HEAP, 3, "os_raw_mem_alloc "SZFMT" bytes failed @ "PFX"\n",
            size, p);
        return NULL;
    }
    LOG(GLOBAL, LOG_HEAP, 2, "os_raw_mem_alloc: "SZFMT" bytes @ "PFX"\n",
        size, p);
    return p;
}

/* caller is required to handle thread synchronization */
/* see inject.c, this must be able to free an nt_allocate_virtual_memory
 * pointer */
void
os_heap_free(void *p, size_t size, heap_error_code_t *error_code)
{
    ASSERT(error_code != NULL);
    DOSTATS({
        if (!dynamo_exited_log_and_stats)
            LOG(GLOBAL, LOG_HEAP, 4, "os_heap_free: "SZFMT" bytes @ "PFX"\n",
                size, p);
    });
    *error_code = nt_free_virtual_memory(p);
    ASSERT(NT_SUCCESS(*error_code));
}

/* reserve virtual address space without committing swap space for it,
 * and of course no physical pages since it will never be touched.
 * executable arg is ignored.
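 * For illustration: os_heap_reserve(NULL, sz, &ec, false) lets the kernel
 * pick the address, while a non-NULL preferred base demands that exact
 * spot (the ASSERT below verifies we got it).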
*/ void * os_heap_reserve(void *preferred, size_t size, heap_error_code_t *error_code, bool executable) { void *p = preferred; ASSERT(error_code != NULL); /* should only be used on aligned pieces */ ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE)); *error_code = nt_allocate_virtual_memory(&p, size, PAGE_NOACCESS, MEMORY_RESERVE_ONLY); if (!NT_SUCCESS(*error_code)) return NULL; LOG(GLOBAL, LOG_HEAP, 2, "os_heap_reserve: "SZFMT" bytes @ "PFX"\n", size, p); ASSERT(preferred == NULL || p == preferred); /* verify correct location */ return p; } static bool find_free_memory_in_region(byte *start, byte *end, size_t size, byte **found_start OUT, byte **found_end OUT) { byte *cur; MEMORY_BASIC_INFORMATION mbi; /* walk bounds to find a suitable location */ cur = (byte *)ALIGN_FORWARD(start, VM_ALLOCATION_BOUNDARY); /* avoid returning NULL (i#1431) */ if (cur == NULL) cur = (byte *)(ptr_uint_t) VM_ALLOCATION_BOUNDARY; while (cur + size <= (byte *)end && query_virtual_memory(cur, &mbi, sizeof(mbi)) == sizeof(mbi)) { if (mbi.State == MEM_FREE && mbi.RegionSize - (cur - (byte *)mbi.BaseAddress) >= size) { /* we have a slot */ if (found_start != NULL) *found_start = cur; if (found_end != NULL) *found_end = (byte *)mbi.BaseAddress + mbi.RegionSize; return true; } cur = (byte *)ALIGN_FORWARD((byte *)mbi.BaseAddress + mbi.RegionSize, VM_ALLOCATION_BOUNDARY); /* check for overflow or 0 region size to prevent infinite loop */ if (cur <= (byte *)mbi.BaseAddress) break; /* give up */ } return false; } /* executable arg is ignored */ void * os_heap_reserve_in_region(void *start, void *end, size_t size, heap_error_code_t *error_code, bool executable) { byte *try_start, *p = NULL; uint iters = 0; # define MAX_REGION_ITERS 100 ASSERT(ALIGNED(start, PAGE_SIZE) && ALIGNED(end, PAGE_SIZE)); ASSERT(ALIGNED(size, PAGE_SIZE)); ASSERT(start < end); LOG(GLOBAL, LOG_HEAP, 3, "os_heap_reserve_in_region: "SZFMT" bytes in "PFX"-"PFX"\n", size, start, end); /* if no restriction on location use regular os_heap_reserve() */ if (start == (void *)PTR_UINT_0 && end == (void *)POINTER_MAX) return os_heap_reserve(NULL, size, error_code, executable); *error_code = HEAP_ERROR_CANT_RESERVE_IN_REGION; /* loop to handle races */ while (find_free_memory_in_region(start, end, size, &try_start, NULL)) { p = (byte *)os_heap_reserve(try_start, size, error_code, executable); /* note - p could be NULL if someone grabbed some of the memory first */ LOG(GLOBAL, LOG_HEAP, (p == NULL) ? 1U : 3U, "os_heap_reserve_in_region: got "PFX" reserving "SZFMT" byte @ "PFX"\n", p, size, try_start); if (p != NULL) break; if (++iters > MAX_REGION_ITERS) { ASSERT_NOT_REACHED(); break; } } LOG(GLOBAL, LOG_HEAP, 2, "os_heap_reserve_in_region: reserved "SZFMT" bytes @ "PFX" in "PFX"-"PFX"\n", size, p, start, end); return p; } /* commit previously reserved with os_heap_reserve pages */ /* returns false when out of memory */ /* A replacement of os_heap_alloc can be constructed by using os_heap_reserve and os_heap_commit on a subset of the reserved pages. 
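 * A minimal sketch of that pairing (hypothetical caller; error handling
 * elided):
 *   heap_error_code_t ec;
 *   byte *p = (byte *) os_heap_reserve(NULL, 16 * PAGE_SIZE, &ec, false);
 *   if (p != NULL &&
 *       os_heap_commit(p, PAGE_SIZE, MEMPROT_READ|MEMPROT_WRITE, &ec)) {
 *       ...use the first committed page...
 *       os_heap_decommit(p, PAGE_SIZE, &ec);
 *   }
 *   if (p != NULL)
 *       os_heap_free(p, 16 * PAGE_SIZE, &ec);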
*/ /* caller is required to handle thread synchronization */ bool os_heap_commit(void *p, size_t size, uint prot, heap_error_code_t *error_code) { uint os_prot = memprot_to_osprot(prot); ASSERT(error_code != NULL); /* should only be used on aligned pieces */ ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE)); ASSERT(p); LOG(GLOBAL, LOG_HEAP, 4, "os_heap_commit attempt: "SZFMT" bytes @ "PFX"\n", size, p); *error_code = nt_commit_virtual_memory(p, size, os_prot); if (!NT_SUCCESS(*error_code)) return false; /* out of memory */ LOG(GLOBAL, LOG_HEAP, 3, "os_heap_commit: "SZFMT" bytes @ "PFX"\n", size, p); return true; } /* caller is required to handle thread synchronization and to update dynamo vm areas */ void os_heap_decommit(void *p, size_t size, heap_error_code_t *error_code) { ASSERT(error_code != NULL); if (!dynamo_exited) LOG(GLOBAL, LOG_HEAP, 3, "os_heap_decommit: "SZFMT" bytes @ "PFX"\n", size, p); *error_code = nt_decommit_virtual_memory(p, size); ASSERT(NT_SUCCESS(*error_code)); } bool os_heap_systemwide_overcommit(heap_error_code_t last_error_code) { /* some error_codes may be worth retrying, * e.g. for win32/ STATUS_COMMITMENT_MINIMUM may be a good one * to retry, and maybe worth trying if systemwide memory * pressure has brought us to the limit * * FIXME: case 7032 covers detecting this. In fact a pagefile resize, * will also cause an allocation failure, and TotalCommitLimit seems to be * the current pagefile size + physical memory not used by the OS. * * PeakCommitment should be close to TotalCommitLimit, unless * the pagefile has been resized, or if the OS has trimmed the * system cache and has made it available in the * TotalCommitLimit */ /* FIXME: conservative answer yes */ return true; } bool os_heap_get_commit_limit(size_t *commit_used, size_t *commit_limit) { NTSTATUS res; SYSTEM_PERFORMANCE_INFORMATION sperf_info; STATS_INC(commit_limit_queries); res = query_system_info(SystemPerformanceInformation, sizeof(sperf_info), &sperf_info); if (NT_SUCCESS(res)) { *commit_used = sperf_info.TotalCommittedPages; *commit_limit = sperf_info.TotalCommitLimit; return true; } else { LOG(GLOBAL, LOG_ALL, 1, "ERROR: query_system_info failed 0x%x\n", res); ASSERT_NOT_REACHED(); return false; } } /* i#939: for win8 wow64, x64 ntdll is up high but the kernel won't let us * allocate new memory within rel32 distance. Thus we clobber the padding at * the end of x64 ntdll.dll's +rx section. For typical x64 landing pads w/ * returned memory that need 5 bytes for displaced code, we need 19+5=24 bytes * each. We use 35 landing pads in a normal run. That's 0x348 bytes, so we * will fail if a new version of x64 ntdll uses more than 0xcb8 of its final +rx * page (FTR, only the win2003 versions of x64 ntdll have ever done this). * * Currently looks for one contiguous piece of executable memory and returns it. * Does not mark it as used so will return the same piece to subsequent callers! * * XXX: If this isn't enough space, we should support multiple regions * (end of .text has its own padding, separate from end of "RT" which * this returns), look for padding inside .text (have to be careful * there), and/or split the landing pads up to do 6-byte hooks with * only an 8-byte target and give up on hook chaining robustness. 
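 * Illustrative (hypothetical) consumer sketch:
 *   void *pad_start, *pad_end;
 *   if (os_find_free_code_space_in_libs(&pad_start, &pad_end))
 *       ...carve rel32-reachable landing pads out of [pad_start, pad_end)...
 * Since nothing is marked as used, callers must coordinate with each other.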
*/ bool os_find_free_code_space_in_libs(void **start OUT, void **end OUT) { app_pc rx_end_nopad, rx_end_padded; ASSERT_CURIOSITY(get_os_version() >= WINDOWS_VERSION_8 && is_wow64_process(NT_CURRENT_PROCESS) && IF_X64_ELSE(true, false)); if (!get_executable_segment(get_ntdll_base(), NULL, &rx_end_padded, &rx_end_nopad)) return false; if (start != NULL) *start = rx_end_nopad; if (end != NULL) *end = rx_end_padded; return true; } /* yield the current thread */ void os_thread_yield() { /* main use in the busy path in mutex_lock */ nt_yield(); } void os_thread_sleep(uint64 milliseconds) { LARGE_INTEGER liDueTime; /* negative == relative */ liDueTime.QuadPart= - (int64)milliseconds * TIMER_UNITS_PER_MILLISECOND; nt_sleep(&liDueTime); } /* probably should have an option to stop all threads and then nt_sleep() */ int os_timeout(int time_in_milliseconds) { int res; LARGE_INTEGER liDueTime; liDueTime.QuadPart= - time_in_milliseconds * TIMER_UNITS_PER_MILLISECOND; LOG(THREAD_GET, LOG_ALL, 2, "os_timeout(%d)\n", time_in_milliseconds); res = nt_sleep(&liDueTime); LOG(THREAD_GET, LOG_ALL, 2, "Timeout expired res=%d.\n", res); return res; } bool os_thread_suspend(thread_record_t *tr, int timeout_ms) { return nt_thread_suspend(tr->handle, NULL); } bool os_thread_resume(thread_record_t *tr) { return nt_thread_resume(tr->handle, NULL); } bool os_thread_terminate(thread_record_t *tr) { return nt_terminate_thread(tr->handle, 0); } bool thread_get_mcontext(thread_record_t *tr, priv_mcontext_t *mc) { char buf[MAX_CONTEXT_SIZE]; CONTEXT *cxt = nt_initialize_context(buf, CONTEXT_DR_STATE); if (thread_get_context(tr, cxt)) { context_to_mcontext(mc, cxt); return true; } return false; } bool thread_set_mcontext(thread_record_t *tr, priv_mcontext_t *mc) { char buf[MAX_CONTEXT_SIZE]; CONTEXT *cxt = nt_initialize_context(buf, CONTEXT_DR_STATE); /* i#1033: get the context from the dst thread to make sure * segments are correctly set. */ thread_get_context(tr, cxt); mcontext_to_context(cxt, mc, false /* !set_cur_seg */); return thread_set_context(tr, cxt); } bool thread_get_context(thread_record_t *tr, CONTEXT *context) { return NT_SUCCESS(nt_get_context(tr->handle, context)); } bool thread_set_context(thread_record_t *tr, CONTEXT *context) { return NT_SUCCESS(nt_set_context(tr->handle, context)); } /* Takes an os-specific context */ void thread_set_self_context(void *cxt) { /* We use NtContinue to avoid privilege issues with NtSetContext */ nt_continue((CONTEXT *)cxt); ASSERT_NOT_REACHED(); } /* Takes a priv_mcontext_t */ void thread_set_self_mcontext(priv_mcontext_t *mc) { char buf[MAX_CONTEXT_SIZE]; CONTEXT *cxt = nt_initialize_context(buf, CONTEXT_DR_STATE); /* need ss and cs for setting my own context */ mcontext_to_context(cxt, mc, true /* set_cur_seg */); thread_set_self_context(cxt); ASSERT_NOT_REACHED(); } DR_API bool dr_mcontext_to_context(CONTEXT *dst, dr_mcontext_t *src) { /* XXX: should we make it easy to create an artificial CONTEXT by * exposing nt_initialize_context()? * XXX: should we add the reverse, dr_context_to_mcontext()? */ if (src->size != sizeof(dr_mcontext_t)) return false; /* mcontext_to_context() asserts that we have both INTEGER and CONTROL. * We want to keep the assert to catch invalid internal uses, so we just * fill it all in and then adjust the flags. 
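     * E.g., a caller passing only DR_MC_CONTROL still gets every CONTEXT
     * field written here, but ContextFlags is narrowed below so consumers
     * ignore the parts that were not requested.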
*/ if (TEST(DR_MC_MULTIMEDIA, src->flags)) dst->ContextFlags = CONTEXT_DR_STATE; else dst->ContextFlags = CONTEXT_DR_STATE_NO_YMM; mcontext_to_context(dst, dr_mcontext_as_priv_mcontext(src), true/*cur segs, which we document*/); /* XXX: CONTEXT_CONTROL includes xbp, while that's under DR_MC_INTEGER. * We document this difference and recommend passing both to avoid problems. */ if (!TEST(DR_MC_INTEGER, src->flags)) dst->ContextFlags &= ~(CONTEXT_INTEGER); if (!TEST(DR_MC_CONTROL, src->flags)) dst->ContextFlags &= ~(CONTEXT_CONTROL); return true; } /* CONTEXT_CONTROL includes xbp, but it's under DR_MC_INTEGER: callers beware! */ static dr_mcontext_flags_t match_mcontext_flags_to_context_flags(dr_mcontext_flags_t mc_flags, DWORD cxt_flags) { if (TEST(DR_MC_INTEGER, mc_flags) && !TESTALL(CONTEXT_INTEGER, cxt_flags)) mc_flags &= ~DR_MC_INTEGER; if (TEST(DR_MC_CONTROL, mc_flags) && !TESTALL(CONTEXT_CONTROL, cxt_flags)) mc_flags &= ~DR_MC_CONTROL; if (TEST(DR_MC_MULTIMEDIA, mc_flags) && !TESTALL(CONTEXT_DR_STATE & ~(CONTEXT_INTEGER|CONTEXT_CONTROL), cxt_flags)) mc_flags &= ~DR_MC_MULTIMEDIA; return mc_flags; } /* Only one of mc and dmc can be non-NULL. */ bool os_context_to_mcontext(dr_mcontext_t *dmc, priv_mcontext_t *mc, os_cxt_ptr_t osc) { if (dmc != NULL) { /* We have to handle mismatches between dmc->flags and osc->ContextFlags. We * come here on NtContinue where often only CONTROL|INTEGER|SEGMENTS are * available. Our general strategy: keep context_to_mcontext() happy and fix * up here. We assume it's ok to clobber parts of dmc not requested by its * flags, and ok to temporarily write to osc, even though it may be app * memory. */ DWORD orig_flags = osc->ContextFlags; if (!TESTALL(CONTEXT_DR_STATE_NO_YMM, osc->ContextFlags)) osc->ContextFlags = CONTEXT_DR_STATE_NO_YMM; context_to_mcontext(dr_mcontext_as_priv_mcontext(dmc), osc); osc->ContextFlags = orig_flags; /* We document the xbp difference: clients who care are advised to use syscall * events instead of the kernel xfer events that come through here. */ dmc->flags = match_mcontext_flags_to_context_flags(dmc->flags, orig_flags); } else if (mc != NULL) { /* We don't support coming here with an incomplete CONTEXT: it doesn't * happen in the code base currently. */ ASSERT(TESTALL(CONTEXT_DR_STATE_NO_YMM, osc->ContextFlags)); context_to_mcontext(mc, osc); } else return false; return true; } /* Only one of mc and dmc can be non-NULL. */ bool mcontext_to_os_context(os_cxt_ptr_t osc, dr_mcontext_t *dmc, priv_mcontext_t *mc) { if (dmc != NULL) { /* We document the xbp difference: clients who care are advised to use syscall * events instead of the kernel xfer events that come through here. 
*/ dmc->flags = match_mcontext_flags_to_context_flags(dmc->flags, osc->ContextFlags); dr_mcontext_to_context(osc, dmc); } else if (mc != NULL) mcontext_to_context(osc, mc, true/*cur segs*/); else return false; return true; } int get_num_processors() { static uint num_cpu = 0; /* cached value */ if (!num_cpu) { SYSTEM_BASIC_INFORMATION sbasic_info; NTSTATUS result = query_system_info(SystemBasicInformation, sizeof(SYSTEM_BASIC_INFORMATION), &sbasic_info); if (!NT_SUCCESS(result)) num_cpu = 1; /* assume single CPU */ else num_cpu = sbasic_info.NumberProcessors; ASSERT(num_cpu); } return num_cpu; } /* Static to save stack space, is initialized at first call to debugbox or * at os_init (whichever is earlier), we are guaranteed to be single threaded * at os_init so no race conditions even though there shouldn't be any anyways * unless snwprintf does something funny with the buffer. This also ensures * that the static buffers in get_application_name and get_application_pid * get initialized while we are still single threaded. */ static wchar_t debugbox_title_buf[MAXIMUM_PATH+64]; void debugbox_setup_title(void) { snwprintf(debugbox_title_buf, BUFFER_SIZE_ELEMENTS(debugbox_title_buf), L"%hs Notice: %hs(%hs)", exception_label_core, get_application_name(), get_application_pid()); NULL_TERMINATE_BUFFER(debugbox_title_buf); } const wchar_t * debugbox_get_title(void) { return debugbox_title_buf; } /* Static buffer for debugbox. If stack-allocated, debugbox is one of * the big space hogs when reporting a crash and we risk exhausting * the stack. */ DECLARE_NEVERPROT_VAR(static wchar_t debugbox_msg_buf[MAX_LOG_LENGTH], {0}); /* draw a message box on the screen with msg */ int debugbox(char *msg) { int res; if (debugbox_title_buf[0] == 0) debugbox_setup_title(); /* FIXME: If we hit an assert in nt_messagebox, we'll deadlock when * we come back here. */ mutex_lock(&debugbox_lock); snwprintf(debugbox_msg_buf, BUFFER_SIZE_ELEMENTS(debugbox_msg_buf), L"%hs", msg); NULL_TERMINATE_BUFFER(debugbox_msg_buf); res = nt_messagebox(debugbox_msg_buf, debugbox_title_buf); mutex_unlock(&debugbox_lock); return res; } #ifdef FANCY_COUNTDOWN /* NOT IMPLEMENTED */ /* Fancy countdown box for a message with timeout */ // This is STATIC window control ID for a message box #define ID_MSGBOX_STATIC_TEXT 0x0000ffff typedef struct { char *message; char *title; HANDLE timer; int seconds_left; bool done; } timeout_context_t; #define print_timeout_message(buf, context) \ snprintf(buf, sizeof(buf), "%s\n""You have %d seconds to respond", \ ((timeout_context_t*)context)->message, \ ((timeout_context_t*)context)->seconds_left); /* FIXME: Be careful about creating a thread -- make sure we don't intercept its asynch events. Not clear how to do that -- you can turn off interception once it's created, but to not intercept its init APC, currently all you can do is globally turn off event interception, or else try to identify it when we see the init APC. 
 */
/* based on Richter's 11-TimedMsgBox */
DWORD WINAPI
message_box_timeout_thread(void *context)
{
    timeout_context_t *tcontext = (timeout_context_t*)context;
    LOG(GLOBAL, LOG_ALL, 2, "message_box_timeout_thread(%d)\n",
        tcontext->seconds_left);
    do {
        WaitForSingleObject(tcontext->timer, tcontext->seconds_left * 1000);
        {
            HWND hwnd = FindWindow(NULL, tcontext->title);
            LOG(THREAD_GET, LOG_ALL, 2,
                "message_box_timeout_thread(%d) hwnd="PIFX"\n",
                tcontext->seconds_left, hwnd);
            if (hwnd != NULL) {
                char countdown[MAX_LOG_LENGTH];
                tcontext->seconds_left--;
                print_timeout_message(countdown, context);
                SetDlgItemText(hwnd, ID_MSGBOX_STATIC_TEXT, countdown);
                if (tcontext->seconds_left == 0) {
                    /* timeout */
                    EndDialog(hwnd, IDOK);
                    return 1;
                }
            }
        }
    } while (!tcontext->done);
    return 0;
}

int
os_countdown_messagebox(char *message, int time_in_milliseconds)
{
    char title[MAXIMUM_PATH+64];
    char buf[MAX_LOG_LENGTH];
    LONG update_period = 1000; /* milliseconds = 1s */
    uint seconds_left = time_in_milliseconds / update_period;
    LARGE_INTEGER liDueTime;
    HANDLE htimer;
    HANDLE hthread;
    timeout_context_t context = {message, title, 0, seconds_left, false};
    int res;

    LOG(THREAD_GET, LOG_ALL, 2, "os_countdown_messagebox(%s, %d)\n",
        message, time_in_milliseconds);
    ASSERT_NOT_IMPLEMENTED(false);

    get_debugbox_title(title, sizeof(title));
    print_timeout_message(buf, &context);

    liDueTime.QuadPart = - update_period * TIMER_UNITS_PER_MILLISECOND;
    /* create a waitable timer to get signaled periodically */
    htimer = nt_create_and_set_timer(&liDueTime, update_period);
    context.timer = htimer;

    /* the timeout thread needs the context to find and update the dialog */
    hthread = CreateThread(NULL, 0, &message_box_timeout_thread, &context,
                           0, NULL);
    LOG(THREAD_GET, LOG_ALL, 2, "os_countdown_messagebox(%s, %d)\n",
        message, time_in_milliseconds);

    debugbox(buf);
    context.done = true;

    WaitForSingleObject(hthread, INFINITE);

    close_handle(htimer);
    close_handle(hthread);
    return 0;
}
#else
int
os_countdown_messagebox(char *message, int time_in_milliseconds)
{
    char buf[MAX_LOG_LENGTH];
    snprintf(buf, sizeof(buf), "%sTimeout ignored", message);
    NULL_TERMINATE_BUFFER(buf);
    debugbox(buf);
    return 0;
}
#endif /* FANCY_COUNTDOWN */

#if defined(CLIENT_INTERFACE) || defined(HOT_PATCHING_INTERFACE)
shlib_handle_t
load_shared_library(const char *name, bool client)
{
# ifdef STATIC_LIBRARY
    if (strcmp(name, get_application_name()) == 0) {
        wchar_t wname[MAX_PATH];
        snwprintf(wname, BUFFER_SIZE_ELEMENTS(wname), L"%hs", name);
        NULL_TERMINATE_BUFFER(wname);
        return get_module_handle(wname);
    }
# endif
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
        /* We call locate_and_load_private_library() to support searching for
         * a pathless name. */
        return (shlib_handle_t) locate_and_load_private_library(name, client);
    } else {
        wchar_t buf[MAX_PATH];
        snwprintf(buf, BUFFER_SIZE_ELEMENTS(buf), L"%hs", name);
        NULL_TERMINATE_BUFFER(buf);
        return load_library(buf);
    }
}
#endif

#if defined(CLIENT_INTERFACE)
shlib_routine_ptr_t
lookup_library_routine(shlib_handle_t lib, const char *name)
{
    return (shlib_routine_ptr_t)get_proc_address(lib, name);
}

void
unload_shared_library(shlib_handle_t lib)
{
    if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
        unload_private_library((app_pc)lib);
    } else {
        free_library(lib);
    }
}

void
shared_library_error(char *buf, int maxlen)
{
    /* FIXME : this routine does nothing.  It used to use kernel32
     * FormatMessage to report errors, but now that we are kernel32
     * independent that will no longer work.
Would be nice if we could do something with the nt status * codes, but unclear how to propagate them to here. */ buf[0] = '\0'; } /* addr is any pointer known to lie within the library * for linux, one of addr or name is needed; for windows, neither is needed. */ bool shared_library_bounds(IN shlib_handle_t lib, IN byte *addr, IN const char *name, OUT byte **start, OUT byte **end) { size_t sz = get_allocation_size(lib, start); ASSERT(start != NULL && end != NULL); *end = *start + sz; ASSERT(addr == NULL || (addr >= *start && addr < *end)); return true; } #endif /* defined(CLIENT_INTERFACE) */ /* Returns base of the "allocation region" containing pc for allocated memory, * Note the current protection settings may not be uniform in the whole region. * Returns NULL for free memory or invalid user mode addresses. * Use get_allocation_size() when size is also needed. */ byte * get_allocation_base(byte *pc) { MEMORY_BASIC_INFORMATION mbi; size_t res = query_virtual_memory(pc, &mbi, sizeof(mbi)); if (res != sizeof(mbi)) { /* invalid address given, e.g. POINTER_MAX */ return NULL; } if (mbi.State == MEM_FREE) { ASSERT_CURIOSITY(mbi.BaseAddress == (byte*)ALIGN_BACKWARD(pc, PAGE_SIZE)); return NULL; } return mbi.AllocationBase; } /* See comments below -- this max will go away once we're sure * we won't infinite loop. Until then we keep it very large * (we've seen 128MB with a ton of single-page regions inside in case 4502) * such that should never hit it (@ 1 block per page will hit 4GB first) */ enum { MAX_QUERY_VM_BLOCKS = 512*1024 }; /* Returns size of the "allocation region" containing pc * Note that this may include several pieces of memory with different * protection and state attributes. * If base_pc != NULL returns base pc as well * If memory is free we set base_pc to NULL, but return free region * size - note that we can't efficiently go backwards to find the * maximum possible allocation size in a free hole. */ static size_t get_allocation_size_ex(HANDLE process, byte *pc, byte **base_pc) { PBYTE pb = (PBYTE) pc; MEMORY_BASIC_INFORMATION mbi; PVOID region_base; PVOID pb_base; size_t pb_size; NTSTATUS res; int num_blocks = 0; size_t size, got; res = nt_remote_query_virtual_memory(process, pb, &mbi, sizeof(mbi), &got); if (!NT_SUCCESS(res) || got != sizeof(mbi)) { /* invalid address given, e.g. 
       POINTER_MAX */
        LOG(THREAD_GET, LOG_VMAREAS, 3, "%s failed to query "PFX"\n",
            __FUNCTION__, pb);
        if (base_pc != NULL)
            *base_pc = NULL;
        return 0;
    }
    if (mbi.State == MEM_FREE /* free memory doesn't have AllocationBase */) {
        LOG(THREAD_GET, LOG_VMAREAS, 3, "%s memory is free "PFX"\n",
            __FUNCTION__, pb);
        if (base_pc != NULL)
            *base_pc = NULL;
        /* note: this is the size of the free region measured from
         * ALIGN_BACKWARD(pc, PAGE_SIZE), not from the start of the free hole */
        return mbi.RegionSize;
    }
    pb_base = mbi.BaseAddress;
    pb_size = mbi.RegionSize;
    region_base = mbi.AllocationBase;
    /* start beyond queried region */
    pb = (byte *) pb_base + mbi.RegionSize;
    size = (app_pc)pb - (app_pc)region_base;

    /* must keep querying contiguous blocks until reach next region
     * to find this region's size */
    LOG(THREAD_GET, LOG_VMAREAS, 3,
        "%s pc="PFX" base="PFX" region="PFX" size="PIFX"\n",
        __FUNCTION__, pc, pb_base, region_base, mbi.RegionSize);
    do {
        res = nt_remote_query_virtual_memory(process, pb, &mbi, sizeof(mbi),
                                             &got);
        LOG(THREAD_GET, LOG_VMAREAS, 4,
            "%s pc="PFX" base="PFX" type="PIFX" region="PFX" size="PIFX"\n",
            __FUNCTION__, pb, mbi.BaseAddress, mbi.State, mbi.AllocationBase,
            mbi.RegionSize);
        if (!NT_SUCCESS(res) || got != sizeof(mbi) ||
            mbi.State == MEM_FREE || mbi.AllocationBase != region_base)
            break;
        ASSERT(mbi.RegionSize > 0); /* if > 0, we will NOT infinite loop */
        size += mbi.RegionSize;
        if (POINTER_OVERFLOW_ON_ADD(pb, mbi.RegionSize))
            break;
        pb += mbi.RegionSize;
        /* WARNING: if app is changing memory at same time as we're examining
         * it, we could have problems: but, if region becomes free, we'll break,
         * and so long as RegionSize > 0, we should make progress and hit
         * end of address space in worst case -- so we shouldn't need this
         * num_blocks max, but we'll keep it for now.  FIXME.
         */
        num_blocks++;
    } while (num_blocks < MAX_QUERY_VM_BLOCKS);
    ASSERT_CURIOSITY(num_blocks < MAX_QUERY_VM_BLOCKS);
    /* size may push to overflow to 0 if at end of address space */
    ASSERT((ptr_uint_t)region_base + size > (ptr_uint_t)pc ||
           (app_pc)region_base + size == NULL);
    if (base_pc != NULL)
        *base_pc = (byte *) region_base;
    return size;
}

size_t
get_allocation_size(byte *pc, byte **base_pc)
{
    return get_allocation_size_ex(NT_CURRENT_PROCESS, pc, base_pc);
}

static void
set_memtype_from_mbi(MEMORY_BASIC_INFORMATION *mbi, OUT dr_mem_info_t *info)
{
    if (mbi->State == MEM_FREE) {
        info->type = DR_MEMTYPE_FREE;
        info->prot = osprot_to_memprot(mbi->Protect);
    } else if (mbi->State == MEM_RESERVE) {
        /* We don't distinguish reserved-{image,mapped,private} (i#1177) */
        info->type = DR_MEMTYPE_RESERVED;
        info->prot = DR_MEMPROT_NONE; /* mbi->Protect is undefined */
    } else {
        info->prot = osprot_to_memprot(mbi->Protect);
        if (mbi->Type == MEM_IMAGE)
            info->type = DR_MEMTYPE_IMAGE;
        else
            info->type = DR_MEMTYPE_DATA;
    }
}

/* Returns information about the memory area (not allocation region)
 * containing pc.  This is a single memory area all from the same allocation
 * region and all with the same protection and state attributes.
 */
static bool
query_memory_internal(const byte *pc, OUT dr_mem_info_t *info,
                      /* i#345, i#1462: this is expensive so we make it optional */
                      bool get_real_base)
{
    MEMORY_BASIC_INFORMATION mbi;
    byte *pb = (byte *) pc;
    byte *alloc_base;
    int num_blocks = 0;
    ASSERT(info != NULL);
    if (query_virtual_memory(pb, &mbi, sizeof(mbi)) != sizeof(mbi)) {
        /* Kernel memory returns STATUS_INVALID_PARAMETER.  We want to
         * distinguish that from some other failure (i#1538).
*/ if (!is_user_address((app_pc)pc)) info->type = DR_MEMTYPE_ERROR_WINKERNEL; else info->type = DR_MEMTYPE_ERROR; return false; } if (mbi.State == MEM_FREE /* free memory doesn't have AllocationBase */ || !get_real_base) { info->base_pc = mbi.BaseAddress; info->size = mbi.RegionSize; set_memtype_from_mbi(&mbi, info); return true; } else { /* BaseAddress is just PAGE_START(pc) and so is not the base_pc we * want: we have to loop for that information (i#345) */ byte *forward_query_start; alloc_base = (byte *) mbi.AllocationBase; forward_query_start = alloc_base; /* i#1462: the forward loop can be very expensive for large regions (we've * seen 10,000+ subregions), so we first try to walk backward and find * a different region to start from instead of the alloc base. * Experimentally this is worthwhile for even just >PAGE_SIZE differences * and not just OS_ALLOC_GRANULARITY or larger. * We subtract exponentially larger amounts, up to 2^13 to cover large * reservations. */ # define MAX_BACK_QUERY_HEURISTIC 14 if ((size_t)(pc - alloc_base) > PAGE_SIZE) { uint exponential = 1; /* The sub can't underflow b/c of the if() above */ pb = (byte *) ALIGN_BACKWARD(pc - PAGE_SIZE, PAGE_SIZE); do { /* sanity checks */ if (query_virtual_memory(pb, &mbi, sizeof(mbi)) != sizeof(mbi) || mbi.State == MEM_FREE || mbi.AllocationBase != alloc_base || mbi.RegionSize == 0) break; if ((byte *)mbi.BaseAddress + mbi.RegionSize <= pc) { forward_query_start = (byte *)mbi.BaseAddress + mbi.RegionSize; break; } if (POINTER_UNDERFLOW_ON_SUB(pb, PAGE_SIZE*exponential)) break; pb -= PAGE_SIZE * exponential; num_blocks++; exponential *= 2; } while (pb > alloc_base && num_blocks < MAX_BACK_QUERY_HEURISTIC); } /* XXX perf: if mbi.AllocationBase == mbi.BaseAddress avoid extra syscall */ pb = forward_query_start; num_blocks = 0; do { if (query_virtual_memory(pb, &mbi, sizeof(mbi)) != sizeof(mbi)) break; if (mbi.State == MEM_FREE || mbi.AllocationBase != alloc_base) break; ASSERT(mbi.RegionSize > 0); /* if > 0, we will NOT infinite loop */ if ((byte *)mbi.BaseAddress + mbi.RegionSize > pc) { /* We found the region containing the asked-for address, * and this time mbi.BaseAddress is the real lowest base of * that all-same-prot region */ ASSERT(pc >= (byte *)mbi.BaseAddress); info->base_pc = mbi.BaseAddress; info->size = mbi.RegionSize; set_memtype_from_mbi(&mbi, info); return true; } if (POINTER_OVERFLOW_ON_ADD(pb, mbi.RegionSize)) break; pb += mbi.RegionSize; /* WARNING: if app is changing memory at same time as we're examining * it, we could have problems: but, if region becomes free, we'll break, * and so long as RegionSize > 0, we should make progress and hit * end of address space in worst case -- so we shouldn't need this * num_blocks max, but we'll keep it for now. */ num_blocks++; DODEBUG({ if (num_blocks > 10) { /* Try to identify any further perf problems (xref i#1462) */ SYSLOG_INTERNAL_WARNING_ONCE("i#1462: >10 queries!"); } }); } while (num_blocks < MAX_QUERY_VM_BLOCKS); ASSERT_CURIOSITY(num_blocks < MAX_QUERY_VM_BLOCKS); } info->type = DR_MEMTYPE_ERROR; return false; } /* Returns information about the memory area (not allocation region) * containing pc. This is a single memory area all from the same allocation * region and all with the same protection and state attributes. */ bool query_memory_ex(const byte *pc, OUT dr_mem_info_t *info) { return query_memory_internal(pc, info, true/*get real base*/); } /* We provide this b/c getting the bounds is expensive on Windows (i#1462). 
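 *
 * For illustration, a sketch of the intended iteration pattern (the
 * process_region() callback here is hypothetical):
 *   dr_mem_info_t info;
 *   byte *pc = NULL;
 *   while (query_memory_cur_base(pc, &info)) {
 *       process_region(info.base_pc, info.size, info.type);
 *       if (POINTER_OVERFLOW_ON_ADD(info.base_pc, info.size))
 *           break;
 *       pc = info.base_pc + info.size;
 *   }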
* This does not look backward to find the real base of this memory region but instead * returns the cur page as the base. The size can still be used to locate the * subsequent memory region. */ bool query_memory_cur_base(const byte *pc, OUT dr_mem_info_t *info) { return query_memory_internal(pc, info, false/*don't need real base*/); } /* Returns size and writability of the memory area (not allocation region) * containing pc. This is a single memory area all from the same allocation * region and all with the same protection and state attributes. * If base_pc != NULL returns base pc of the area. */ bool get_memory_info(const byte *pc, byte **base_pc, size_t *size, uint *prot) { if (base_pc != NULL || size != NULL) { /* BaseAddress is just PAGE_START(pc) and so is not the base_pc we * want: we have to loop for that information (i#345) */ dr_mem_info_t info; if (!query_memory_internal(pc, &info, base_pc != NULL || size != NULL) || info.type == DR_MEMTYPE_FREE) return false; if (base_pc != NULL) *base_pc = info.base_pc; if (size != NULL) *size = info.size; if (prot != NULL) *prot = info.prot; return true; } else { /* Single query is sufficient for prot or just to test whether free */ MEMORY_BASIC_INFORMATION mbi; size_t res = query_virtual_memory(pc, &mbi, sizeof(mbi)); if (res != sizeof(mbi) || mbi.State == MEM_FREE) return false; if (prot != NULL) *prot = osprot_to_memprot(mbi.Protect); } return true; } DR_API /* Calls NtQueryVirtualMemory. */ size_t dr_virtual_query(const byte *pc, MEMORY_BASIC_INFORMATION *mbi, size_t mbi_size) { size_t res = query_virtual_memory(pc, mbi, mbi_size); if (is_pretend_or_executable_writable((app_pc)pc)) { /* We can't assert !prot_is_writable(mbi->Protect) b/c we mark selfmod * as executable-but-writable and we'll come here. */ /* We can't easily add an analogue of DR_MEMPROT_PRETEND_WRITE b/c * users won't expect it due to the bulk of the flags not being * bitmasks. Should we not pretend these regions are writable, then? * User can always call dr_query_memory(). */ mbi->Protect = osprot_add_write(mbi->Protect); } return res; } /* It is ok to pass NULL for dcontext */ bool get_stack_bounds(dcontext_t *dcontext, byte **base, byte **top) { os_thread_data_t *ostd = NULL; if (dcontext != NULL) { ostd = (os_thread_data_t *) dcontext->os_field; if (ostd->teb_stack_no_longer_valid) { /* Typically this means we are on NT or 2k and the TEB is being used * as the stack for ExitThread. Xref fixme in check_for_stack_free() * about possibly handling this differently. */ return false; } if (IS_CLIENT_THREAD(dcontext) && dcontext->nudge_target == NULL) { ostd->stack_base = dcontext->dstack - DYNAMORIO_STACK_SIZE; ostd->stack_top = dcontext->dstack; } } if (dcontext == NULL || ostd->stack_base == NULL) { byte * stack_base = NULL; byte * stack_top = NULL; /* This only works if the dcontext is for the current thread. 
*/ ASSERT(dcontext == NULL || dcontext == get_thread_private_dcontext()); /* use the TIB fields: * PVOID pvStackUserTop; // 04h Top of user stack * PVOID pvStackUserBase; // 08h Base of user stack * and assume fs is always a valid TIB pointer when called here */ stack_top = (byte *) get_tls(TOP_STACK_TIB_OFFSET); stack_base = (byte *) get_tls(BASE_STACK_TIB_OFFSET); LOG(THREAD, LOG_THREADS, 1, "app stack now is "PFX"-"PFX"\n", stack_base, stack_top); /* NULL dcontext => nop */ /* we only have current base, we need to find reserved base */ stack_base = get_allocation_base(stack_base); LOG(THREAD, LOG_THREADS, 1, "app stack region is "PFX"-"PFX"\n", stack_base, stack_top); /* NULL dcontext => nop */ /* FIXME - make curiosity? prob. could create a thread with no official * stack and we would largely be fine with that. */ ASSERT(stack_base != NULL); ASSERT(stack_base < stack_top); ASSERT((get_allocation_base(stack_top - 1) == stack_base && (get_allocation_base(stack_top) != stack_base || /* PR 252008: for WOW64 nudges we allocate an extra page. * We would test dcontext->nudge_thread but that's not set yet. */ is_wow64_process(NT_CURRENT_PROCESS))) /* client threads use dstack as sole stack */ IF_CLIENT_INTERFACE(|| is_dynamo_address(stack_base))); if (dcontext == NULL) { if (base != NULL) *base = stack_base; if (top != NULL) *top = stack_top; return true; } ostd->stack_base = stack_base; ostd->stack_top = stack_top; } if (base != NULL) *base = ostd->stack_base; if (top != NULL) *top = ostd->stack_top; return true; } /* winnt.h:#define PAGE_READONLY 2 winnt.h:#define PAGE_READWRITE 4 winnt.h:#define PAGE_WRITECOPY 8 winnt.h:#define PAGE_EXECUTE 16 winnt.h:#define PAGE_EXECUTE_READ 32 winnt.h:#define PAGE_EXECUTE_READWRITE 64 winnt.h:#define PAGE_GUARD 256 winnt.h:#define PAGE_NOACCESS 1 winnt.h:#define PAGE_NOCACHE 512 winnt.h:#define PAGE_EXECUTE_WRITECOPY 128 */ /* is_readable_without_exception checks to see that all bytes with addresses * from pc to pc+size-1 are readable and that reading from there won't * generate an exception. this is a stronger check than * !not_readable() below. * FIXME : beware of multi-thread races, just because this returns true, * doesn't mean another thread can't make the region unreadable between the * check here and the actual read later. See safe_read() as an alt. */ static bool query_is_readable_without_exception(byte *pc, size_t size) { MEMORY_BASIC_INFORMATION mbi; byte *check_pc = (byte *) ALIGN_BACKWARD(pc, PAGE_SIZE); size_t res; if (size > (size_t)((byte *)POINTER_MAX - pc)) size = (byte *)POINTER_MAX - pc; do { res = query_virtual_memory(check_pc, &mbi, sizeof(mbi)); if (res != sizeof(mbi)) { return false; } else { if (mbi.State != MEM_COMMIT || TEST(PAGE_GUARD, mbi.Protect) || !prot_is_readable(mbi.Protect)) return false; } /* FIXME: this routine can walk by mbi.RegionSize instead of pages */ check_pc += PAGE_SIZE; } while (check_pc != 0/*overflow*/ && check_pc < pc+size); return true; } /* On Windows, same as is_readable_without_exception */ bool is_readable_without_exception_query_os(byte *pc, size_t size) { return is_readable_without_exception(pc, size); } bool is_readable_without_exception_query_os_noblock(byte *pc, size_t size) { return is_readable_without_exception_query_os(pc, size); } /* Reads size bytes starting at base and puts them in out_buf, this is safe * to call even if the memory at base is unreadable, returns true if the * read succeeded */ /* FIXME : This avoids the races with an is_readable_without_exception followed * by a read. 
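 * (The racy pattern this replaces would be, schematically:
 *   if (is_readable_without_exception(addr, sz))
 *       memcpy(buf, addr, sz);   <-- addr may become unreadable in between
 * whereas here the kernel performs the copy and reports any failure to us.)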
We get the OS to do the read for us via NtReadVirtualMemory;
 * however, this is still much slower than a structured exception handling
 * solution since we expect this to succeed most of the time.  Ref PR 206278
 * and 208562 on using the faster TRY/EXCEPT. */
static bool
safe_read_syscall(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
    if (bytes_read != NULL)
        *bytes_read = 0;
    return nt_read_virtual_memory(NT_CURRENT_PROCESS, base, out_buf, size, bytes_read);
}

bool
safe_read_ex(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
    STATS_INC(num_safe_reads);
    /* XXX i#350: we'd like to always use safe_read_fast() and remove this extra
     * call layer, but safe_read_fast() requires fault handling to be set up.
     * There are complications with moving windows fault handling earlier in
     * the init process, so we just fall back to the syscall during init.
     */
    if (!dynamo_initialized) {
        return safe_read_syscall(base, size, out_buf, bytes_read);
    } else {
        return safe_read_fast(base, size, out_buf, bytes_read);
    }
}

/* FIXME - fold this together with safe_read_ex() (there are a lot of places
 * to update) */
bool
safe_read(const void *base, size_t size, void *out_buf)
{
    size_t bytes_read = 0;
    return (safe_read_ex(base, size, out_buf, &bytes_read) && bytes_read == size);
}

/* Writes size bytes starting at base from in_buf.  This is safe
 * to call even if the memory at base is unwritable; returns true if the
 * write succeeded.  See safe_read_ex() on using the more performant TRY/EXCEPT. */
bool
safe_write_ex(void *base, size_t size, const void *in_buf, size_t *bytes_written)
{
    if (bytes_written != NULL)
        *bytes_written = 0;
    STATS_INC(num_safe_writes);
    /* i#2224: on win10, NtWriteVirtualMemory no longer returns the number of
     * bytes written and instead returns -1!  Thus if the caller cares we fall
     * back to a try-except version.  This also means that callers who want to
     * fail on partial writes should pass in NULL for bytes_written!
     */
    if (get_os_version() >= WINDOWS_VERSION_10 && bytes_written != NULL)
        return safe_write_try_except(base, size, in_buf, bytes_written);
    return nt_write_virtual_memory(NT_CURRENT_PROCESS, base, in_buf, size,
                                   bytes_written);
}

/* FIXME - fold this together with safe_write_ex() (there are a lot of places
 * to update) */
bool
safe_write(void *base, size_t size, const void *in_buf)
{
    return safe_write_ex(base, size, in_buf, NULL);
}

/* unlike get_memory_info() we return osprot, preserving complete
 * protection info.  Note that errors or bad addresses are ignored and
 * PAGE_NOACCESS is returned instead.
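 * (E.g., a caller interested only in guard-page status might do:
 *   if (TEST(PAGE_GUARD, get_current_protection(pc)))
 *       ...
 * but see the caveat that follows.)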
 * If the difference between an invalid address and PAGE_NOACCESS is
 * essential, users must use query_virtual_memory().
 */
uint
get_current_protection(byte *pc)
{
    PBYTE pb = (PBYTE) pc;
    MEMORY_BASIC_INFORMATION mbi;
    size_t res = query_virtual_memory(pb, &mbi, sizeof(mbi));
    ASSERT(res == sizeof(mbi));
    ASSERT(mbi.State != MEM_FREE); /* caller assumes this is a valid page */
    if (res != sizeof(mbi) || mbi.State == MEM_FREE) {
        /* note we could also return 0 since PAGE_NOACCESS is 1 */
        ASSERT_CURIOSITY(false && "in get_current_protection");
        return PAGE_NOACCESS;
    }
    return mbi.Protect;
}

/* See the note on is_readable_without_exception for differences between the two.
 * Returns true if any byte with address from pc to pc+size-1 is not readable.
 * FIXME: reverse the logic to make this is_readable.
 * Also CHECK that we actually need this routine.
 */
bool
not_readable(byte *pc, size_t size)
{
    MEMORY_BASIC_INFORMATION mbi;
    byte *check_pc = (byte *) ALIGN_BACKWARD(pc, PAGE_SIZE);
    size_t res;
    if (size > (size_t)((byte *)POINTER_MAX - pc))
        size = (byte *)POINTER_MAX - pc;
    while (check_pc != 0/*overflow*/ && check_pc < pc+size) {
        res = query_virtual_memory(check_pc, &mbi, sizeof(mbi));
        if (res != sizeof(mbi) || mbi.State == MEM_FREE)
            return true;
        else if (!prot_is_readable(mbi.Protect))
            return true;
        check_pc += PAGE_SIZE;
    }
    return false;
}

void
mark_page_as_guard(byte *pc)
{
    uint old_prot;
    int res;
    /* NOACCESS combined w/ GUARD is invalid -- apparently you specify what you want
     * after the guard triggers */
    uint flags = PAGE_READWRITE | PAGE_GUARD;
    ASSERT(ALIGNED(pc, PAGE_SIZE));
    res = protect_virtual_memory((void *) pc, PAGE_SIZE, flags, &old_prot);
    ASSERT(res);
}

/* Removes guard protection from the page containing pc */
bool
unmark_page_as_guard(byte *pc, uint prot)
{
    uint old_prot;
    int res;
    byte *start_page = (byte *)ALIGN_BACKWARD(pc, PAGE_SIZE);
    uint flags = memprot_to_osprot(prot & ~MEMPROT_GUARD);
    res = protect_virtual_memory(start_page, PAGE_SIZE, flags, &old_prot);
    ASSERT(res);
    /* It is possible that another thread accessed the guarded page
     * while we were trying to remove this protection.  The returned value
     * can be checked for such a case.
     */
    return TEST(PAGE_GUARD, old_prot);
}

/* Change page protection for pc:pc+size.
 * If set is false, makes the region [un]writable depending on the writable
 * argument, preserving other flags; else, sets protection to new_prot.
 * If cow is true and set is false and writable is true, sets the region to
 * be not only writable but copy-on-write.
 * Requires that start and requested_size are multiples of PAGE_SIZE.
 *
 * Returns true if all protection requests succeeded, false if
 * protection on any subregion fails: all callers that make memory
 * writable should be able to handle failure when out of commit memory.
 * changed_protection is set to true if changes were necessary, or
 * false if protection already meets requirements.  Note that any
 * reserved yet uncommitted subregion will be skipped (a change of
 * protection there is not needed).
 */
static bool
internal_change_protection(byte *start, size_t requested_size, bool set,
                           bool writable, bool cow, uint new_prot,
                           bool *changed_protection /* OUT */)
{
    byte *pc = start;
    size_t remaining_size = requested_size;
    bool changed_permissions = false;
    bool subregions_failed = false;
    /* i#936: prevent cl v16 (VS2010) from combining the two
     * stats incs into one prior to the actual protection change.
     * Though note that code movement was not sufficient for i#936.
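 * (Schematically, the sequence the volatile copy below guards is:
 *   if (!writable) STATS_INC(...);
 *   protect_virtual_memory(...);
 *   if (writable) STATS_INC(...);
 * i.e., one increment on the proper side of the actual protection change.)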
* Fortunately here it's only debug-build stats and our debug build * shouldn't hit that high-optimization: but if we make these RSTATS * we should be careful. */ volatile bool writable_volatile = writable; /* while this routine may allow crossing allocation bases * it is supposed to be in error, a MEM_FREE block would terminate it */ DEBUG_DECLARE(app_pc first_allocation_base = NULL;) /* we no longer allow you to pass in 0 */ ASSERT(requested_size > 0); ASSERT(ALIGNED(start, PAGE_SIZE) && ALIGNED(requested_size, PAGE_SIZE)); /* we can call protect_virtual_memory only on regions that have * the same attributes, we have to split the requested region into * multiple proper subregions */ do { MEMORY_BASIC_INFORMATION mbi; uint old_prot; size_t res; uint flags, new_flags; size_t allow_size; /* remaining size with same privileges */ size_t subregion_size; /* should be a subregion <= allow_size */ ASSERT(remaining_size > 0); /* FIXME: note that a faster version of this routine when we * know the desired flags can do without the * query_virtual_memory() calls and only needs to process the * results of protect_virtual_memory() to decide whether needs * more iterations. */ /* needed for current flags and region size */ res = query_virtual_memory((PBYTE)pc, &mbi, sizeof(mbi)); if (res != sizeof(mbi)) { /* can get here if executing from kernel address space - case 9022 */ goto finish; } ASSERT(res == sizeof(mbi)); ASSERT(mbi.State != MEM_FREE); ASSERT(mbi.State == MEM_COMMIT || mbi.State == MEM_RESERVE); ASSERT(ALIGNED(pc, PAGE_SIZE) && ALIGNED(remaining_size, PAGE_SIZE)); ASSERT(first_allocation_base == NULL || first_allocation_base == mbi.AllocationBase); DODEBUG({first_allocation_base = mbi.AllocationBase;}); ASSERT(pc == mbi.BaseAddress); /* if pc is page aligned, but just in case */ allow_size = mbi.RegionSize - (pc - (byte *)mbi.BaseAddress); /* to maintain old prot flags, * we have to do each os region separately */ if (remaining_size > allow_size) { LOG(THREAD_GET, LOG_VMAREAS, 2, "WARNING: make_%swritable "PFX": param size "PIFX" vs. " "mbi size "PIFX" base "PFX"\n", writable ? "" : "un", pc, remaining_size, mbi.RegionSize, mbi.BaseAddress); /* we see this on make_writable when we've merged regions * that we made read-only and we go to restore their permissions. * we can see it for the same region many times in a row * (e.g., on javac in SPECJVM98), */ /* flag in GLOBAL LOG */ LOG(GLOBAL, LOG_VMAREAS, pc == start ? 1U : 2U, "make_%swritable called with size "PFX "> region size "PFX" at pc "PFX"\n", writable ? "" : "un", remaining_size, allow_size, pc); /* needed most commonly when a PAGE_WRITECOPY breaks up a * region or when MEM_RESERVE subregion is processed, * for the time being adding a curiosity on any other use */ /* for this invocation, just do region size */ subregion_size = allow_size; } else { subregion_size = remaining_size; } ASSERT( subregion_size <= allow_size); LOG(THREAD_GET, LOG_VMAREAS, 3, "make_%swritable: pc "PFX"-"PFX ", currently %s %s\n", writable ? "" : "un", pc, pc+subregion_size, prot_string(mbi.Protect), mbi.State == MEM_COMMIT ? "committed" : "reserved"); /* mbi.Protect is defined only for mbi.State == MEM_COMMIT * we use gratuitously in this LOG */ if (mbi.State == MEM_RESERVE) { LOG(THREAD_GET, LOG_VMAREAS, 2, "make_%swritable: WARNING skipping reserved region "PFX"-"PFX"\n", pc, pc+subregion_size); /* There is nothing we can do about reserved memory. 
* Assume nobody will really reference this uncomitted * memory, and in case it is caller error, that we'll find * out on write. */ goto skip; } if (mbi.State == MEM_FREE) { /* now this is always supposed to be an error */ ASSERT_NOT_REACHED(); subregions_failed = true; goto finish; } flags = mbi.Protect & ~PAGE_PROTECTION_QUALIFIERS; if (set) { new_flags = new_prot; } else if (writable) { switch (flags) { case PAGE_NOACCESS: new_flags = PAGE_READWRITE; break; case PAGE_READONLY: new_flags = PAGE_READWRITE; break; case PAGE_READWRITE: goto skip; case PAGE_WRITECOPY: goto skip; case PAGE_EXECUTE: new_flags = PAGE_EXECUTE_READWRITE; break; case PAGE_EXECUTE_READ: new_flags = PAGE_EXECUTE_READWRITE; break; case PAGE_EXECUTE_READWRITE: goto skip; case PAGE_EXECUTE_WRITECOPY: goto skip; default: ASSERT_NOT_REACHED(); /* not possible since we handle MEM_RESERVE earlier */ /* do not attempt changing permissions to be robust */ goto skip; } if (cow) new_flags = osprot_add_writecopy(new_flags); } else { switch (flags) { case PAGE_NOACCESS: goto skip; case PAGE_READONLY: goto skip; case PAGE_READWRITE: new_flags = PAGE_READONLY; break; case PAGE_WRITECOPY: new_flags = PAGE_READONLY; break; case PAGE_EXECUTE: goto skip; case PAGE_EXECUTE_READ: goto skip; case PAGE_EXECUTE_READWRITE: new_flags = PAGE_EXECUTE_READ; break; case PAGE_EXECUTE_WRITECOPY: new_flags = PAGE_EXECUTE_READ; break; default: ASSERT_NOT_REACHED(); goto skip; } } /* preserve other flags */ new_flags = (mbi.Protect & ~flags) | new_flags; DOSTATS({ /* once on each side of prot, to get on right side of writability */ if (!writable_volatile) { STATS_INC(protection_change_calls); STATS_ADD(protection_change_pages, subregion_size / PAGE_SIZE); } }); res = protect_virtual_memory((void *)pc, subregion_size, new_flags, &old_prot); if (!res) { /* FIXME: we may want to really make sure that we are out * of commit memory, if we are marking this up as failure * here */ subregions_failed = true; /* FIXME: case 10551 we may want to use the techniques in * vmm_heap_commit to wait a little for someone else to * free up memory, or free any of our own. */ } /* we ignore any failures due to TOCTOU races on subregion protection */ ASSERT_CURIOSITY(res && "protect_virtual_memory failed"); DOSTATS({ /* once on each side of prot, to get on right side of writability */ if (writable_volatile) { STATS_INC(protection_change_calls); STATS_ADD(protection_change_pages, subregion_size / PAGE_SIZE); } }); changed_permissions = true; skip: pc += subregion_size; remaining_size -= subregion_size; } while ( remaining_size > 0); finish: if (changed_protection != NULL) *changed_protection = changed_permissions; return !subregions_failed; } /* change protections on memory region starting at pc of length length * this does not update the all memory area info */ bool os_set_protection(byte *pc, size_t size, uint prot/*MEMPROT_*/) { return set_protection(pc, size, prot); } /* Set protections on memory region starting at pc of length size * (padded to page boundaries). * returns false on failure, e.g. out of commit memory */ bool set_protection(byte *pc, size_t size, uint prot) { byte *start_page = (byte *)ALIGN_BACKWARD(pc, PAGE_SIZE); size_t num_bytes = ALIGN_FORWARD(size + (pc - start_page), PAGE_SIZE); return internal_change_protection(start_page, num_bytes, true/*set*/, false/*ignored*/, false/*ignored*/, memprot_to_osprot(prot), NULL); } /* Change protections on memory region starting at pc of length size * (padded to page boundaries). 
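 * (Usage sketch: to temporarily unprotect two bytes at pc, a caller might
 *   change_protection(pc, 2, true);
 *   ...patch...
 *   change_protection(pc, 2, false);
 * with page alignment and padding handled here.)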
 * This method is meant to be used on DR memory as part of protect-from-app,
 * and is safe with respect to stats and the data segment.
 */
bool
change_protection(byte *pc, size_t size, bool writable)
{
    byte *start_page = (byte *)ALIGN_BACKWARD(pc, PAGE_SIZE);
    size_t num_bytes = ALIGN_FORWARD(size + (pc - start_page), PAGE_SIZE);
    return internal_change_protection(start_page, num_bytes, false/*relative*/,
                                      writable, false/*not cow*/, 0, NULL);
}

/* makes pc:pc+size (page padded) writable, preserving other flags */
bool
make_hookable(byte *pc, size_t size, bool *changed_prot)
{
    byte *start_pc = (byte *)ALIGN_BACKWARD(pc, PAGE_SIZE);
    size_t num_bytes = ALIGN_FORWARD(size + (pc - start_pc), PAGE_SIZE);
    return internal_change_protection(start_pc, num_bytes, false/*relative*/,
                                      true, false/*not cow*/, 0, changed_prot);
}

/* if changed_prot is true, makes pc:pc+size (page padded) unwritable,
 * preserving other flags */
void
make_unhookable(byte *pc, size_t size, bool changed_prot)
{
    if (changed_prot) {
        byte *start_pc = (byte *)ALIGN_BACKWARD(pc, PAGE_SIZE);
        size_t num_bytes = ALIGN_FORWARD(size + (pc - start_pc), PAGE_SIZE);
        internal_change_protection(start_pc, num_bytes, false/*relative*/,
                                   false, false/*ignored*/, 0, NULL);
    }
}

/* requires that pc is page aligned and size is a multiple of the page size;
 * marks that memory writable, preserving other flags */
/* returns false if out of commit memory! */
bool
make_writable(byte *pc, size_t size)
{
    return internal_change_protection(pc, size, false/*relative*/, true,
                                      false/*not cow*/, 0, NULL);
}

/* requires that pc is page aligned and size is a multiple of the page size;
 * marks that memory writable and copy-on-write, preserving other flags.
 * note: only usable if allocated COW */
bool
make_copy_on_writable(byte *pc, size_t size)
{
    return internal_change_protection(pc, size, false/*relative*/, true,
                                      true/*cow*/, 0, NULL);
}

/* requires that pc is page aligned and size is a multiple of the page size;
 * marks that memory NOT writable, preserving other flags */
void
make_unwritable(byte *pc, size_t size)
{
    internal_change_protection(pc, size, false/*relative*/, false,
                               false/*ignored*/, 0, NULL);
}

#endif /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */

bool
convert_NT_to_Dos_path(OUT wchar_t *buf, IN const wchar_t *fname,
                       IN size_t buf_len/*# elements*/)
{
    /* RtlNtPathNameToDosPathName is only available on XP+ */
    HANDLE objdir;
    UNICODE_STRING ustr;
    wchar_t drive[3] = { L'x', L':', L'\0' };
    PROCESS_DEVICEMAP_INFORMATION map;
    uint i, len;
    NTSTATUS res;
    bool ans = false;
    const wchar_t *lanman = L"\\Device\\LanmanRedirector\\";
    LOG(THREAD_GET, LOG_NT, 3, "%s: converting %S\n", __FUNCTION__, fname);
    /* Network paths: FIXME: what other forms do they take?
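 * (E.g., \Device\LanmanRedirector\server\share\foo.dll is rewritten just
 * below to \\server\share\foo.dll.)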
*/ if (wcsstr(fname, lanman) == fname) { _snwprintf(buf, buf_len, L"\\\\%s", fname + wcslen(lanman)); buf[buf_len - 1] = L'\0'; LOG(THREAD_GET, LOG_NT, 3, "%s: result %S\n", __FUNCTION__, buf); return true; } /* Plan for local files: * 1) NtQueryInformationProcess ProcessDeviceMap => list of valid drive * letter symlinks (emulating kernel32!GetLogicalDriveStrings) * 2) loop through each drive symlink, calling NtOpenSymbolicLinkObject * to get the target (emulating kernel32!QueryDosDevice) * 3) when find a match, replace \Device\HarddiskVolumeX with drive letter */ /* We could cache the drive map but it can change so we re-create every time */ IF_X64(map.Flags = 0); /* required: i#419 */ res = nt_get_drive_map(NT_CURRENT_PROCESS, &map); if (!NT_SUCCESS(res)) { LOG(THREAD_GET, LOG_NT, 2, "%s: drive map error 0x%x\n", __FUNCTION__, res); return false; } /* Open the \?? Dos devices dir, which is where the drive symlinks live. * FIXME: via NtSetInformationProcess ProcessDeviceMap, can the device * dir be different from "\??"? How do we know? */ res = nt_open_object_directory(&objdir, L"\\??", false); if (!NT_SUCCESS(res)) { LOG(THREAD_GET, LOG_NT, 2, "%s: \\?? error 0x%x\n", __FUNCTION__, res); return false; } LOG(THREAD_GET, LOG_NT, 2, "%s: DriveMap=%d\n", __FUNCTION__, map.Query.DriveMap); /* We use buf for our temporary buffer as well as final result */ ustr.Length = 0; ustr.MaximumLength = (USHORT) buf_len*sizeof(wchar_t); ustr.Buffer = buf; for (i = 0; i < sizeof(map.Query.DriveType)/sizeof(UCHAR); i++) { if (map.Query.DriveType[i] != DRIVE_UNKNOWN) { drive[0] = L'A' + (wchar_t)i; res = nt_get_symlink_target(objdir, drive, &ustr, &len); if (NT_SUCCESS(res)) { /* i#845: ustr.Buffer might not be null-terminated */ ustr.Buffer[MIN(ustr.Length / sizeof(ustr.Buffer[0]), ustr.MaximumLength / sizeof(ustr.Buffer[0]) - 1)] = L'\0'; LOG(THREAD_GET, LOG_NT, 3, "%s: drive %d=%c: type=%d => %S\n", __FUNCTION__, i, 'A'+(wchar_t)i, map.Query.DriveType[i], ustr.Buffer); } else { LOG(THREAD_GET, LOG_NT, 3, "%s: failed to query symlink: 0x%x\n", __FUNCTION__, res); } if (wcsstr(fname, ustr.Buffer) == fname) { /* We start with the \\ so we don't need to add one */ _snwprintf(buf, buf_len, L"%s%s", drive, fname+wcslen(ustr.Buffer)); buf[buf_len - 1] = L'\0'; LOG(THREAD_GET, LOG_NT, 3, "%s: result %S\n", __FUNCTION__, buf); ans = true; break; } } } close_handle(objdir); return ans; } #ifndef NOT_DYNAMORIO_CORE_PROPER /* b/c of global_heap_* */ /* If the conversion succeeds and fits in fixedbuf, returns fixedbuf. * If the conversion won't fit in fixedbuf, allocates memory and * returns that memory, along with its size in allocbuf_sz. * In that case, the memory should be freed by calling * convert_to_NT_file_path_wide_free(); * Always null-terminates when it returns non-NULL. */ wchar_t * convert_to_NT_file_path_wide(OUT wchar_t *fixedbuf, IN const wchar_t *fname, IN size_t fixedbuf_len/*# elements*/, OUT size_t *allocbuf_sz/*#bytes*/) { /* XXX: we could templatize this to share code w/ convert_to_NT_file_path(), * but a lot of the extra stuff there is curiosities for use within DR, * while this routine is mainly used by drwinapi. * If you change the logic here, change convert_to_NT_file_path(). */ bool is_UNC = false; bool is_device = false; size_t relative_sz = 0; const wchar_t *name = fname; wchar_t *buf; int i, size; size_t wchars_needed, buf_len; ASSERT(fixedbuf != NULL && fixedbuf_len != 0); if (name[0] == L'\\') { name += 1; /* eat the first \ */ if (name[0] == L'\\') { if (name[1] == L'.' 
&& name[2] == L'\\') { /* convert \\.\foo to \??\foo (i#499) */ is_UNC = false; is_device = true; name += 3; } else if (name[1] == L'?' && name[2] == L'\\') { /* convert \\?\foo to \??\foo */ name += 3; } else { /* is \\server type */ is_UNC = true; } } else { /* \??\UNC\server or \??\c:\ */ if (name[0] != L'\0' && name[1] != L'\0' && name[2] != L'\0') { name += 3; } else { return NULL; } } if (!is_UNC && !is_device) { /* we've eaten the initial \\?\ or \??\ check for UNC */ if ((name[0] == L'U' || name[0] == L'u') && (name[1] == L'N' || name[1] == L'n') && (name[2] == L'C' || name[2] == L'c')) { is_UNC = true; name += 3; } } } else if (name[1] == L':' && (name[2] == L'/' || name[2] == L'\\')) { /* something like "c:\" */ } else if (name[0] != '/' && name[0] != '\\') { #ifndef NOT_DYNAMORIO_CORE_PROPER /* i#298: support relative paths. * We don't support absolute for the current drive ("\foo.txt"). * We also don't support relative for other drives ("c:foo.txt"). */ char *cwd_end = cwd + strlen(cwd) - 1; relative_sz = strlen(cwd); if (name[0] == L'.' && (name[1] == L'/' || name[1] == L'\\')) { name += 2; } else { while (name[0] == L'.' && name[1] == L'.' && (name[2] == L'/' || name[2] == L'\\')) { name += 3; /* Walk backward in cwd past the next backslash. We assume cwd * has no trailing slash and is all backslashes (no forward slashes). */ while (relative_sz > 0 && *(cwd_end+1) != '\\') { cwd_end--; relative_sz--; } if (relative_sz == 0) return false; } } #endif } /* should now have either ("c:\" and !is_UNC) or ("\server" and is_UNC) */ wchars_needed = (wcslen(name) + wcslen(L"\\??\\") + (is_UNC ? wcslen(L"UNC") : 0) + 1/*null*/); if (fixedbuf_len >= wchars_needed) { buf = fixedbuf; buf_len = fixedbuf_len; } else { /* We allocate regardless of the path contents to handle * larger-than-MAX_PATH paths (technically drwinapi only has to do * that for "\\?\" paths). */ buf = (wchar_t *) global_heap_alloc(wchars_needed * sizeof(wchar_t) HEAPACCT(ACCT_OTHER)); buf_len = wchars_needed; *allocbuf_sz = wchars_needed * sizeof(wchar_t); } size = snwprintf(buf, buf_len, L"\\??\\%s%.*hs%s%s", is_UNC ? L"UNC" : L"", #ifdef NOT_DYNAMORIO_CORE_PROPER 0, "", L"", #else relative_sz, cwd, (relative_sz > 0) ? L"\\" : L"", #endif name); buf[buf_len-1] = L'\0'; if (size < 0 || size == (int)buf_len) { if (buf != fixedbuf) global_heap_free(buf, *allocbuf_sz HEAPACCT(ACCT_OTHER)); return NULL; } /* change / to \ */ for (i = 0; i < size; i++) { if (buf[i] == L'/') buf[i] = L'\\'; } return buf; } void convert_to_NT_file_path_wide_free(wchar_t *buf, size_t alloc_sz) { global_heap_free(buf, alloc_sz HEAPACCT(ACCT_OTHER)); } #endif /* NOT_DYNAMORIO_CORE_PROPER, b/c of global_heap_* */ /* Always null-terminates when it returns true. */ bool convert_to_NT_file_path(OUT wchar_t *buf, IN const char *fname, IN size_t buf_len/*# elements*/) { bool is_UNC = false; bool is_device = false; size_t relative_sz = 0; const char *name = fname; int i, size; ASSERT(buf != NULL && buf_len != 0); /* need nt file path, prepend \??\ so is \??\c:\.... make sure everyone * gives us a fullly qualified absolute path, no . .. relative etc. * For UNC names(//server/name), the path should be \??\UNC\server\name. */ /* NOTE - for process control we use an app path (image location) with this routine * so we should handle all possible file name prefixes, we've seen - * c:\ \??\c:\ \\?\c:\ \\server \??\UNC\server \\?\UNC\server */ /* FIXME - could we ever get any other path formats here (xref case 9146 and the * reactos src. 
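 * Examples of the rewrites performed below:
 *   c:\foo\bar.dll          becomes  \??\c:\foo\bar.dll
 *   \\?\c:\foo\bar.dll      becomes  \??\c:\foo\bar.dll
 *   \\server\share\bar.dll  becomes  \??\UNC\server\share\bar.dll
 *   \\.\pipe\mypipe         becomes  \??\pipe\mypipe  (i#499)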
 * See DEVICE_PATH \\.\foo, UNC_DOT_PATH \\., etc.
 * For i#499 we now convert \\.\foo to \??\foo. */
    /* CHECK - at the api level, paths longer than MAX_PATH require the \\?\
     * prefix; unclear if we would need to use that at this level instead of
     * \??\ for long paths (not that it matters since our buffer in this
     * routine limits us to MAX_PATH anyways). */
    /* FIXME - handle . and .. */
    /* FIXME: there is also ntdll!RtlDosPathNameToNtPathName_U that does the
     * translation for us, used by CreateDirectory, CreateFile, etc., but
     * looking at the disassembly it grabs the loader lock!  Why does it need
     * to do that?  Is it to translate . or ..?  Better just to do the
     * translation here where we know what's going on. */
    /* XXX: if you change the logic here, change convert_to_NT_file_path_wide() */
    if (name[0] == '\\') {
        name += 1; /* eat the first \ */
        if (name[0] == '\\') {
            if (name[1] == '.' && name[2] == '\\') {
                /* convert \\.\foo to \??\foo (i#499) */
                is_UNC = false;
                is_device = true;
                name += 3;
            } else if (name[1] == '?') {
                /* is \\?\UNC\server or \\?\c:\ type,
                 * chop off the \\?\ and we'll check for the UNC later */
                ASSERT_CURIOSITY(CLIENT_OR_STANDALONE() ||
                                 (name[2] == '\\' && "create file invalid name"));
                /* safety check, don't go beyond end of string */
                if (name[2] != '\0') {
                    name += 3;
                } else {
                    return false;
                }
            } else {
                /* is \\server type */
                is_UNC = true;
            }
        } else {
            /* is \??\UNC\server or \??\c:\ type,
             * chop off the \??\ and we'll check for the UNC later */
            ASSERT_CURIOSITY(CLIENT_OR_STANDALONE() ||
                             (name[0] == '?' && name[1] == '?' &&
                              name[2] == '\\' && "create file invalid name"));
            /* safety check, don't go beyond end of string */
            if (name[0] != '\0' && name[1] != '\0' && name[2] != '\0') {
                name += 3;
            } else {
                return false;
            }
        }
        if (!is_UNC && !is_device) {
            /* we've eaten the initial \\?\ or \??\; check for UNC */
            if ((name[0] == 'U' || name[0] == 'u') &&
                (name[1] == 'N' || name[1] == 'n') &&
                (name[2] == 'C' || name[2] == 'c')) {
                /* is \??\UNC\server or \\?\UNC\server type, chop off the UNC
                 * (we'll re-add it below)
                 * NOTE '/' is not a legal separator for a \??\ or \\?\ path */
                ASSERT_CURIOSITY(CLIENT_OR_STANDALONE() ||
                                 (name[3] == '\\' && "create file invalid name"));
                is_UNC = true;
                name += 3;
            } else {
                /* is \??\c:\ or \\?\c:\ type,
                 * NOTE '/' is not a legal separator for a \??\ or \\?\ path */
                ASSERT_CURIOSITY(CLIENT_OR_STANDALONE() ||
                                 (name[1] == ':' && name[2] == '\\' &&
                                  "create file invalid name"));
            }
        }
    } else if (name[1] == ':' && (name[2] == '/' || name[2] == '\\')) {
        /* is c:\ type; NOTE case 9329: c:/ is also legal */
    } else if (name[0] != '/' && name[0] != '\\') {
#ifndef NOT_DYNAMORIO_CORE_PROPER
        /* i#298: support relative paths.
         * We don't support absolute for the current drive ("\foo.txt").
         * We also don't support relative for other drives ("c:foo.txt").
         */
        char *cwd_end = cwd + strlen(cwd) - 1;
        relative_sz = strlen(cwd);
        if (name[0] == '.' && (name[1] == '/' || name[1] == '\\')) {
            name += 2;
        } else {
            while (name[0] == '.' && name[1] == '.' &&
                   (name[2] == '/' || name[2] == '\\')) {
                name += 3;
                /* Walk backward in cwd past the next backslash.  We assume cwd
                 * has no trailing slash and is all backslashes (no forward
                 * slashes). */
                while (relative_sz > 0 && *(cwd_end+1) != '\\') {
                    cwd_end--;
                    relative_sz--;
                }
                if (relative_sz == 0)
                    return false;
            }
        }
#endif
    }
    /* should now have either ("c:\" and !is_UNC) or ("\server" and is_UNC) */
    size = snwprintf(buf, buf_len, L"\\??\\%ls%.*hs%ls%hs",
                     is_UNC ? L"UNC" : L"",
#ifdef NOT_DYNAMORIO_CORE_PROPER
                     0, "", L"",
#else
                     relative_sz, cwd, (relative_sz > 0) ?
L"\\" : L"", #endif name); buf[buf_len-1] = L'\0'; if (size < 0 || size == (int)buf_len) return false; /* change / to \ */ for (i = 0; i < size; i++) { if (buf[i] == L'/') buf[i] = L'\\'; /* Eliminate double slashes as we'll get STATUS_OBJECT_NAME_INVALID (i#1559) */ if (i > 1 && buf[i] == L'\\' && buf[i-1] == L'\\') { int j; for (j = i; j < size; j++) buf[j] = buf[j+1]; ASSERT(buf[j] == L'\0'); } } return true; } static file_t os_internal_create_file(const char *fname, bool is_dir, ACCESS_MASK rights, uint sharing, uint create_disposition) { wchar_t buf[MAX_PATH]; if (!convert_to_NT_file_path(buf, fname, BUFFER_SIZE_ELEMENTS(buf))) return INVALID_FILE; NULL_TERMINATE_BUFFER(buf); /* be paranoid */ return create_file(buf, is_dir, rights, sharing, create_disposition, true); } static bool os_internal_create_file_test(const char *fname, bool is_dir, ACCESS_MASK rights, uint sharing, uint create_disposition) { HANDLE file = os_internal_create_file(fname, is_dir, rights, sharing, create_disposition); if (INVALID_FILE == file) { return false; } os_close(file); return true; } bool os_file_exists(const char *fname, bool is_dir) { /* Perhaps we should use the simpler NtQueryAttributesFile? */ return os_internal_create_file_test(fname, is_dir, 0, /* We can get sharing violations if we don't * include write (drmem i#1025) */ FILE_SHARE_READ|FILE_SHARE_WRITE, FILE_OPEN); } /* Returns true and sets 'size' of file on success; returns false on failure. * Note: This size is different from the allocation size of the file, which can * be larger or smaller (if file compression is turned on - case 8272). */ bool os_get_file_size(const char *file, uint64 *size) { wchar_t filename[MAXIMUM_PATH + 1]; FILE_NETWORK_OPEN_INFORMATION file_info; ASSERT(file != NULL && size != NULL); if (file == NULL || size == NULL) return false; /* See FIXME in os_internal_create_file() about prepending \??\ to the path * directly. */ /* FIXME: case 9182 this won't work for remote files */ _snwprintf(filename, BUFFER_SIZE_ELEMENTS(filename), L"\\??\\%hs", file); NULL_TERMINATE_BUFFER(filename); if (query_full_attributes_file(filename, &file_info)) { ASSERT(sizeof(*size) == sizeof(file_info.EndOfFile.QuadPart)); *size = file_info.EndOfFile.QuadPart; return true; } return false; } bool os_get_file_size_by_handle(IN HANDLE file_handle, uint64 *end_of_file /* OUT */) { FILE_STANDARD_INFORMATION standard_info; NTSTATUS res = nt_query_file_info(file_handle, &standard_info, sizeof(standard_info), FileStandardInformation); /* should always be able to get this */ ASSERT(NT_SUCCESS(res) && "bad file handle?"); if (!NT_SUCCESS(res)) { return false; } *end_of_file = standard_info.EndOfFile.QuadPart; return true; } bool os_set_file_size(IN HANDLE file_handle, uint64 end_of_file) { NTSTATUS res; FILE_END_OF_FILE_INFORMATION file_end_info; ASSERT_CURIOSITY(end_of_file != 0); file_end_info.EndOfFile.QuadPart = end_of_file; res = nt_set_file_info(file_handle, &file_end_info, sizeof(file_end_info), FileEndOfFileInformation); ASSERT(NT_SUCCESS(res) && "can't set size: bad handle?"); return NT_SUCCESS(res); } /* returns available and total quota for the current thread's user (if * impersonated), as well as total available on the volume * Note that any valid handle on the volume can be used. 
*/ bool os_get_disk_free_space(IN HANDLE file_handle, OUT uint64 *AvailableQuotaBytes OPTIONAL, OUT uint64 *TotalQuotaBytes OPTIONAL, OUT uint64 *TotalVolumeBytes OPTIONAL) { /* FIXME: considering that we don't usually care about the actual * bytes available on the volume, we may use just * FILE_FS_SIZE_INFORMATION instead of FILE_FS_FULL_SIZE_INFORMATION * case 9000: need to check if both are available on NT */ /* Windows Driver Kit: Installable File System Drivers :: * FILE_FS_FULL_SIZE_INFORMATION * * "The size of the buffer passed ... must be at least sizeof * (FILE_FS_FULL_SIZE_INFORMATION). This structure must be * aligned on a LONGLONG (8-byte) boundary. " * * Although on XP SP2 this call succeeds even on a non-aligned * value, to be sure we'll follow the recommendation. */ FILE_FS_FULL_SIZE_INFORMATION unaligned_fs_full_size[2]; FILE_FS_FULL_SIZE_INFORMATION *FileFsFullSize = (FILE_FS_FULL_SIZE_INFORMATION *) ALIGN_FORWARD(unaligned_fs_full_size, sizeof(LONGLONG)); uint64 BytesPerUnit; NTSTATUS res; ASSERT(sizeof(LONGLONG) < sizeof(FILE_FS_FULL_SIZE_INFORMATION)); ASSERT(ALIGNED(FileFsFullSize, sizeof(LONGLONG))); res = nt_query_volume_info(file_handle, FileFsFullSize, sizeof(*FileFsFullSize), FileFsFullSizeInformation); if (!NT_SUCCESS(res)) return false; BytesPerUnit = FileFsFullSize->SectorsPerAllocationUnit * FileFsFullSize->BytesPerSector; if (AvailableQuotaBytes != NULL) { *AvailableQuotaBytes = FileFsFullSize-> CallerAvailableAllocationUnits.QuadPart * BytesPerUnit; } if (TotalQuotaBytes != NULL) { *TotalQuotaBytes = FileFsFullSize-> TotalAllocationUnits.QuadPart * BytesPerUnit; } if (TotalVolumeBytes != NULL) { *TotalVolumeBytes = FileFsFullSize-> ActualAvailableAllocationUnits.QuadPart * BytesPerUnit; } return true; } /* NYI: os_copy_file - copies a portion of a file onto another. Note * that if new_file is non-empty we are overwriting only the * appropriate subregion. os_copy_file() can be used as a full file * copy (with offset 0 in both files). With an offset os_copy_file() * can be used to overwrite the portions of a file that are not mapped * in memory or are suffixes not at all covered by the PE format. */ /* NOTE: cf CopyFileEx which also claims to be doing something special * to preserve OLE structured storage? * * NOTE: we do don't support NTFS alternate data streams, * e.g. downloaded.dll:Zone.Identifier since we would expect that any * checks by say Software Restriction Policies are done on the * original file, not on what we really open. * * NOTE we don't preserve extended attributes, file attributes. If we * care to have these should see see * kernel32!CreateFile(,hTemplateFile) which supplies file attributes * and extended attributes for the new file. * * Note we don't preserve security attributes - see * shell32!SHFileOperation if we need this. * * We don't deal in any way with encrypted files - they are opened * raw. FIXME: may want to at least make sure that encrypted files * aren't shared. * * FIXME: testing: doublecheck compressed file offsets are properly * used - test both encrypted and compressed folders. */ bool os_copy_file(HANDLE new_file, HANDLE original_file, uint64 new_file_offset, uint64 original_file_offset) { /* we don't care to have the fastest filecopy implementation * current uses are rare enough. See p.64 and 02 FileCopy from * Richter&Clark if a fast one is needed. 
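 * (If implemented, a simple version could os_seek() each handle to its
 * given offset and then loop os_read()/os_write() through a fixed-size
 * buffer; left NYI until a caller actually needs it.)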
*/ /* Note that NTFS will make the calls synchronously */ /* FIXME: it may be useful to set the expected total file size * right away with os_set_file_size(), but that should be done * only in case the current size is smaller (e.g. we shouldn't * truncate if trying to overwrite a subsection ) */ ASSERT_NOT_IMPLEMENTED(false); return false; } bool os_create_dir(const char *fname, create_directory_flags_t create_dir_flags) { bool require_new = TEST(CREATE_DIR_REQUIRE_NEW, create_dir_flags); bool force_owner = TEST(CREATE_DIR_FORCE_OWNER, create_dir_flags); /* case 9057 note that hard links are only between files but not directories */ /* upcoming symlinks can be between either, for consistency should * always require_new, * FIXME: not all current users do this properly */ return os_internal_create_file_test(fname, true, 0, FILE_SHARE_READ, (require_new ? FILE_CREATE : FILE_OPEN_IF) | (force_owner ? FILE_DISPOSITION_SET_OWNER : 0)); } file_t os_open_directory(const char *fname, int os_open_flags) { uint sharing = FILE_SHARE_READ /* case 10255: allow persisted cache file renaming in * directory */ | FILE_SHARE_WRITE; uint access = READ_CONTROL; /* FIXME: only 0 is allowed by create_file for now */ if (TEST(OS_OPEN_READ, os_open_flags)) access |= FILE_GENERIC_READ; return os_internal_create_file(fname, true, access, sharing, FILE_OPEN); } /* FIXME : investigate difference between GENERIC_* and FILE_GENERIC_* * both seem to work as expected (and CreateFile uses the GENERIC_* while * the ddk uses FILE_GENERIC_*) but they resolve differently, some confusion. * ntddk.h has GENERIC_* as a single bit flag while FILE_GENERIC_* is * a combination including FILE_{READ,WRITE}_DATA, so going with the latter. */ file_t os_open(const char *fname, int os_open_flags) { uint access = 0; /* FIXME case 8865: should default be no sharing? */ uint sharing = FILE_SHARE_READ; if (TEST(OS_EXECUTE, os_open_flags)) access |= FILE_GENERIC_EXECUTE; if (TEST(OS_OPEN_READ, os_open_flags)) access |= FILE_GENERIC_READ; if (TEST(OS_SHARE_DELETE, os_open_flags)) sharing |= FILE_SHARE_DELETE; if (!TEST(OS_OPEN_WRITE, os_open_flags)) return os_internal_create_file(fname, false, access, sharing, FILE_OPEN); /* We ignore OS_OPEN_WRITE_ONLY: Linux-only */ /* clients are allowed to open the file however they want, xref PR 227737 */ ASSERT_CURIOSITY_ONCE((TEST(OS_OPEN_REQUIRE_NEW, os_open_flags) || standalone_library IF_CLIENT_INTERFACE(|| CLIENTS_EXIST())) && "symlink risk PR 213492"); return os_internal_create_file(fname, false, access | (TEST(OS_OPEN_APPEND, os_open_flags) ? /* FILE_GENERIC_WRITE minus * FILE_WRITE_DATA, so we get auto-append */ (STANDARD_RIGHTS_WRITE | FILE_APPEND_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA) : FILE_GENERIC_WRITE), sharing, (TEST(OS_OPEN_REQUIRE_NEW, os_open_flags) ? FILE_CREATE : (TEST(OS_OPEN_APPEND, os_open_flags) ? FILE_OPEN_IF : FILE_OVERWRITE_IF)) | (TEST(OS_OPEN_FORCE_OWNER, os_open_flags) ? 
FILE_DISPOSITION_SET_OWNER : 0)); } void os_close(file_t f) { close_handle(f); } /* not isolating files on windows */ file_t os_open_protected(const char *fname, int os_open_flags) { return os_open(fname, os_open_flags); } void os_close_protected(file_t f) { os_close(f); } bool os_get_current_dir(char *buf, size_t bufsz) { int len = snprintf(buf, bufsz, "%S", get_own_peb()->ProcessParameters->CurrentDirectoryPath.Buffer); buf[bufsz-1] = '\0'; if (len < 0 || (size_t)len == bufsz) return false; /* for consistency with Linux we remove the trailing separator */ if (buf[len-1] == '\\') buf[len-1] = '\0'; return true; } #ifndef NOT_DYNAMORIO_CORE_PROPER /* so drinject can use drdecode's copy */ /* We take in size_t count to match linux, but Nt{Read,Write}File only * takes in a ULONG (==uint), though they return a ULONG_PTR (size_t) */ ssize_t os_write(file_t f, const void *buf, size_t count) { /* file_t is HANDLE opened with CreateFile */ size_t written = 0; ssize_t out = -1; bool ok; if (f == INVALID_FILE) return out; IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(count))); ok = write_file(f, buf, (uint) count, NULL, &written); if (ok) { ASSERT(written <= INT_MAX && written <= count); out = (ssize_t)written; } else { ASSERT(written == 0); } return out; } #endif /* We take in size_t count to match linux, but Nt{Read,Write}File only * takes in a ULONG (==uint), though they return a ULONG_PTR (size_t) */ ssize_t os_read(file_t f, void *buf, size_t count) { /* file_t is HANDLE opened with CreateFile */ size_t read = 0; ssize_t out = -1; bool ok; if (f == INVALID_FILE) return out; IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(count))); ok = read_file(f, buf, (uint) count, NULL, &read); if (ok) { ASSERT(read <= INT_MAX && read <= count); out = (ssize_t)read; } else { ASSERT(read == 0); } return out; } void os_flush(file_t f) { nt_flush_file_buffers(f); } /* seek current file position to offset bytes from origin, return true if successful */ bool os_seek(file_t f, int64 offset, int origin) { FILE_POSITION_INFORMATION info; NTSTATUS res; int64 abs_offset = offset; switch (origin) { case OS_SEEK_SET: break; case OS_SEEK_CUR: { int64 cur_pos = os_tell(f); ASSERT(cur_pos != -1 && "bad file handle?"); /* shouldn't fail */ abs_offset += cur_pos; } break; case OS_SEEK_END: { uint64 file_size = 0; bool res = os_get_file_size_by_handle(f, &file_size); ASSERT(res && "bad file handle?"); /* shouldn't fail */ abs_offset += file_size; } break; default: ASSERT(false && "os_seek: invalid origin"); return false; } info.CurrentByteOffset.QuadPart = abs_offset; res = nt_set_file_info(f, &info, sizeof(info), FilePositionInformation); /* can fail if invalid seek (past end of read only file for ex.) */ return NT_SUCCESS(res); } /* return the current file position, -1 on failure */ int64 os_tell(file_t f) { FILE_POSITION_INFORMATION info; NTSTATUS res = nt_query_file_info(f, &info, sizeof(info), FilePositionInformation); /* should always be able to get this */ ASSERT(NT_SUCCESS(res) && "bad file handle?"); if (!NT_SUCCESS(res)) { return -1; } return info.CurrentByteOffset.QuadPart; } #ifndef NOT_DYNAMORIO_CORE_PROPER /* around most of file, to exclude preload */ /* Tries to delete a file that may be mapped in by this or another process. * We use FILE_DELETE_ON_CLOSE, which works only on SEC_COMMIT, not on SEC_IMAGE. * There is no known way to immediately delete a mapped-in SEC_IMAGE file. * Xref case 9964. 
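 * Outline of the strategy below:
 *   1) open the file with FILE_DELETE_ON_CLOSE, so that our close deletes it;
 *   2) also try FileDispositionInformation for immediate deletion, which
 *      fails with STATUS_CANNOT_DELETE (0xc0000121) if the file is mapped in;
 *   3) re-open afterward: STATUS_DELETE_PENDING or
 *      STATUS_OBJECT_NAME_NOT_FOUND confirms the deletion took effect.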
*/ bool os_delete_mapped_file(const char *filename) { NTSTATUS res; HANDLE hf; FILE_DISPOSITION_INFORMATION file_dispose_info; bool deleted = false; wchar_t wname[MAX_FILE_NAME_LENGTH]; if (!convert_to_NT_file_path(wname, filename, BUFFER_SIZE_ELEMENTS(wname))) return false; NULL_TERMINATE_BUFFER(wname); /* be paranoid */ res = nt_create_file(&hf, wname, NULL, 0, SYNCHRONIZE | DELETE, FILE_ATTRIBUTE_NORMAL, FILE_SHARE_DELETE | /* if already deleted */ FILE_SHARE_READ, FILE_OPEN, FILE_SYNCHRONOUS_IO_NONALERT | FILE_DELETE_ON_CLOSE /* This should open a handle on a symlink rather * than its target, and avoid other reparse code. * Otherwise the FILE_DELETE_ON_CLOSE would cause * us to delete the target of a symlink! * FIXME: fully test this: case 10067 */ | FILE_OPEN_REPARSE_POINT); if (!NT_SUCCESS(res)) { LOG(GLOBAL, LOG_NT, 2, "os_delete_mapped_file: unable to open handle to %s: "PFX"\n", filename, res); return false; } /* Try to delete immediately. If the file is mapped in, this will fail * with STATUS_CANNOT_DELETE 0xc0000121. */ file_dispose_info.DeleteFile = TRUE; res = nt_set_file_info(hf, &file_dispose_info, sizeof(file_dispose_info), FileDispositionInformation); if (NT_SUCCESS(res)) deleted = true; else { LOG(GLOBAL, LOG_NT, 2, "os_delete_mapped_file: unable to mark for deletion %s: "PFX"\n", filename, res); /* continue on */ } close_handle(hf); if (!deleted) { /* We can't accurately tell if FILE_DELETE_ON_CLOSE worked but we can try to * open and assume nobody created a new file of the same name. */ res = nt_create_file(&hf, wname, NULL, 0, SYNCHRONIZE, FILE_ATTRIBUTE_NORMAL, FILE_SHARE_DELETE | FILE_SHARE_READ, FILE_OPEN, FILE_SYNCHRONOUS_IO_NONALERT | FILE_OPEN_REPARSE_POINT); LOG(GLOBAL, LOG_NT, 2, "os_delete_mapped_file: opening after close %s: "PFX"\n", filename, res); if (NT_SUCCESS(res)) close_handle(hf); else if (res == STATUS_DELETE_PENDING || res == STATUS_OBJECT_NAME_NOT_FOUND) { deleted = true; } else ASSERT_CURIOSITY(false && "unable to confirm close-on-delete"); } /* FIXME case 10048: if failure here, schedule for smss-on-boot deletion */ return deleted; } /* file_name must already be in NT format */ bool os_delete_file_w(const wchar_t *file_name, HANDLE directory_handle) { NTSTATUS res; HANDLE hf; FILE_DISPOSITION_INFORMATION file_dispose_info; /* XXX: we should be able to use nt_delete_file() but it doesn't take * in a base dir: need to examine all callers. 
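 * (nt_delete_file() takes just an NT path -- see os_delete_dir() below for
 * such a use -- so callers that pass directory_handle-relative names would
 * first have to convert them to absolute NT paths.)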
*/ res = nt_create_module_file(&hf, file_name, directory_handle, DELETE, FILE_ATTRIBUTE_NORMAL, FILE_SHARE_DELETE | /* if already deleted */ FILE_SHARE_READ | FILE_SHARE_WRITE, FILE_OPEN, 0); /* note that FILE_DELETE_ON_CLOSE will act on the target of a * symbolic link (in Longhorn), while we want to act on the link * itself */ /* this is expected to be called only when a file is in the way */ ASSERT_CURIOSITY(NT_SUCCESS(res) && "can't open for deletion"); if (!NT_SUCCESS(res)) return false; file_dispose_info.DeleteFile = TRUE; res = nt_set_file_info(hf, &file_dispose_info, sizeof(file_dispose_info), FileDispositionInformation); /* close regardless of success */ close_handle(hf); ASSERT_CURIOSITY(NT_SUCCESS(res) && "couldn't mark for deletion"); /* file may have sections mapped (the usual case for DLLs in ASLR cache) */ /* we don't expect to be deleting files that are in use by others */ /* if we had the only handle, the file should be deleted by now */ return NT_SUCCESS(res); } bool os_delete_file(const char *name) { wchar_t wname[MAX_FILE_NAME_LENGTH]; if (!convert_to_NT_file_path(wname, name, BUFFER_SIZE_ELEMENTS(wname))) return false; NULL_TERMINATE_BUFFER(wname); /* be paranoid */ return os_delete_file_w(wname, NULL); } bool os_delete_dir(const char *name) { /* os_delete_file_w() assumes it's not passed a dir so we use nt_delete_file */ wchar_t wname[MAX_FILE_NAME_LENGTH]; NTSTATUS res; if (!convert_to_NT_file_path(wname, name, BUFFER_SIZE_ELEMENTS(wname))) return false; NULL_TERMINATE_BUFFER(wname); /* be paranoid */ res = nt_delete_file(wname); return NT_SUCCESS(res); } /* We take in orig_name instead of a file handle so that we can abstract * away the privileges required to rename a file when opening the handle. * We also do not take in a rootdir handle to be parallel to the linux * system call, so caller must specify full path. * This will not rename a file across volumes. * * see os_rename_file_in_directory() for a Win32-specific interface */ bool os_rename_file(const char *orig_name, const char *new_name, bool replace) { file_t fd = INVALID_FILE; NTSTATUS res; FILE_RENAME_INFORMATION info; wchar_t worig[MAX_FILE_NAME_LENGTH]; if (!convert_to_NT_file_path(info.FileName, new_name, BUFFER_SIZE_ELEMENTS(info.FileName))) return false; NULL_TERMINATE_BUFFER(info.FileName); /* be paranoid */ /* We could use os_open if we added OS_DELETE => DELETE+FILE_OPEN, * but then we couldn't rename directories; ditto for create_file, * so we directly call nt_create_file. 
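 * (E.g., os_rename_file("c:\\dir\\old.log", "c:\\dir\\new.log", false)
 * renames only if new.log does not already exist; passing replace=true
 * requests ReplaceIfExists semantics instead.)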
*/ if (!convert_to_NT_file_path(worig, orig_name, BUFFER_SIZE_ELEMENTS(worig))) return false; NULL_TERMINATE_BUFFER(worig); /* be paranoid */ res = nt_create_file(&fd, worig, NULL, 0, DELETE | SYNCHRONIZE, FILE_ATTRIBUTE_NORMAL, /* need F_S_READ if currently open w/ F_S_READ */ FILE_SHARE_READ | FILE_SHARE_DELETE, FILE_OPEN, /* FILE_SUPERSEDE fails */ /* no FILE_{NON_,}DIRECTORY_FILE */ FILE_SYNCHRONOUS_IO_NONALERT); if (!NT_SUCCESS(res) || fd == INVALID_FILE) { LOG(GLOBAL, LOG_NT, 2, "os_rename_file: unable to open handle to %s: "PFX"\n", orig_name, res); return false; } /* I tried three rename options with NtSetFileInformation: * 1) set FileRenameInformation: works on FAT, NTFS, all platforms * 2) set FileNameInformation: not allowed; only for get * 3) set FileShortNameInformation: I couldn't get this to work, but * was probably missing some privilege; but, only available on NTFS XP+ */ info.ReplaceIfExists = (BOOLEAN) replace; info.RootDirectory = NULL; IF_X64(ASSERT_TRUNCATE(info.FileNameLength, uint, wcslen(info.FileName) * sizeof(wchar_t))); info.FileNameLength = (uint) (wcslen(info.FileName) * sizeof(wchar_t)); res = nt_set_file_info(fd, &info, sizeof(info), FileRenameInformation); /* Renaming will fail if a file handle (other than this one) is open */ if (!NT_SUCCESS(res)) { LOG(GLOBAL, LOG_NT, 2, "os_rename_file: NtSetFileInformation error "PFX"\n", res); } close_handle(fd); return NT_SUCCESS(res); } /* similar to os_rename_file(), but more geared to Windows users * We take in orig_name instead of a file handle, so that we can abstract * away the privileges required to rename a file when opening the handle. * Note however, that any other handle must be closed before calling. * Both names are relative to rootdir handle, since renaming files in * same directory is our primary use. 
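 * (Sketch: with rootdir an open directory handle, something like
 *   os_rename_file_in_directory(rootdir, L"lib.dll.tmp", L"lib.dll", true);
 * renames within that directory without rebuilding full paths.)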
*/ bool os_rename_file_in_directory(IN HANDLE rootdir, const wchar_t *orig_name, const wchar_t *new_name, bool replace) { file_t fd = INVALID_FILE; NTSTATUS res; FILE_RENAME_INFORMATION info; res = nt_create_file(&fd, orig_name, rootdir, 0, DELETE | SYNCHRONIZE, FILE_ATTRIBUTE_NORMAL, /* need F_S_READ if currently open w/ F_S_READ */ FILE_SHARE_READ | FILE_SHARE_DELETE, FILE_OPEN, /* FILE_SUPERSEDE fails */ /* no FILE_{NON_,}DIRECTORY_FILE */ FILE_SYNCHRONOUS_IO_NONALERT); if (!NT_SUCCESS(res) || fd == INVALID_FILE) { LOG(GLOBAL, LOG_NT, 2, "os_rename_file: unable to open handle to %s: "PFX"\n", orig_name, res); return false; } info.ReplaceIfExists = (BOOLEAN) replace; info.RootDirectory = rootdir; wcsncpy(info.FileName, new_name, BUFFER_SIZE_ELEMENTS(info.FileName)); NULL_TERMINATE_BUFFER(info.FileName); /* be paranoid */ IF_X64(ASSERT_TRUNCATE(info.FileNameLength, uint, wcslen(info.FileName) * sizeof(wchar_t))); info.FileNameLength = (uint) (wcslen(info.FileName) * sizeof(wchar_t)); res = nt_set_file_info(fd, &info, sizeof(info), FileRenameInformation); /* Renaming will fail if a file handle (other than this one) is open */ if (!NT_SUCCESS(res)) { LOG(GLOBAL, LOG_NT, 2, "os_rename_file_in_directory: NtSetFileInformation error "PFX"\n", res); } close_handle(fd); return NT_SUCCESS(res); } byte * os_map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot, map_flags_t map_flags) { NTSTATUS res; HANDLE section; byte *map = addr; uint osprot = memprot_to_osprot(prot); #ifdef X64 bool loop = false; byte *region_start = NULL, *region_end = NULL; uint iters = 0; # define MAX_MAP_LOOP_ITERS 100 #endif LARGE_INTEGER li_offs; li_offs.QuadPart = offs; if (TEST(MAP_FILE_COPY_ON_WRITE, map_flags) && TEST(MEMPROT_WRITE, prot)) { /* Ask for COW for both the section and the view, though we should only * need it for the view (except on win98, according to Richter p604) */ osprot = osprot_add_writecopy(osprot); } res = nt_create_section(&section, SECTION_ALL_ACCESS, /* FIXME: maybe less privileges needed */ NULL, /* full file size, even if partial view map */ osprot, /* can only be SEC_IMAGE if a PE file */ /* FIXME: SEC_RESERVE shouldn't work w/ COW yet * it did in my test */ TEST(MAP_FILE_IMAGE, map_flags) ? SEC_IMAGE : SEC_COMMIT, f, /* process private - no security needed */ /* object name attributes */ NULL /* unnamed */, 0, NULL, NULL); if (!NT_SUCCESS(res)) { LOG(GLOBAL, LOG_NT, 2, "os_map_file: NtCreateSection error "PFX"\n", res); return NULL; } #ifdef X64 if (TEST(MAP_FILE_REACHABLE, map_flags)) { loop = true; vmcode_get_reachable_region(&region_start, &region_end); /* addr need not be NULL: we'll use it if it's in the region */ } while (!loop || (map != NULL && map >= region_start && map+*size <= region_end) || find_free_memory_in_region(region_start, region_end, *size, &map, NULL)) { #endif res = nt_raw_MapViewOfSection(section, /* 0 */ NT_CURRENT_PROCESS, /* 1 */ &map, /* 2 */ 0, /* 3 */ 0 /* not page-file-backed */, /* 4 */ &li_offs, /* 5 */ (PSIZE_T) size, /* 6 */ ViewUnmap /* FIXME: expose? 
*/, /* 7 */ 0 /* no special top-down or anything */, /* 8 */ osprot); /* 9 */ #ifdef X64 if (!loop || NT_SUCCESS(res)) break; if (++iters > MAX_MAP_LOOP_ITERS) { ASSERT_NOT_REACHED(); break; } map = NULL; /* pick a new one */ } if (NT_SUCCESS(res) && TEST(MAP_FILE_REACHABLE, map_flags)) ASSERT(map >= region_start && map+*size <= region_end); #endif /* We do not need to keep the section handle open */ close_handle(section); if (!NT_SUCCESS(res)) { LOG(GLOBAL, LOG_NT, 2, "os_map_file: NtMapViewOfSection error "PFX"\n", res); return NULL; } return map; } bool os_unmap_file(byte *map, size_t size/*unused*/) { int res = nt_raw_UnmapViewOfSection(NT_CURRENT_PROCESS, map); return NT_SUCCESS(res); } /* FIXME : should check context flags, what if only integer or only control! */ /* Translates the context cxt for the given thread trec * Like any instance where a thread_record_t is used by a thread other than its * owner, the caller must hold the thread_initexit_lock to ensure that it * remains valid. * Requires thread trec is at_safe_spot(). * We assume that the segments CS and SS have been set in the cxt properly. */ bool translate_context(thread_record_t *trec, CONTEXT *cxt, bool restore_memory) { priv_mcontext_t mc; bool res; /* ensure we have eip and esp */ ASSERT(TESTALL(CONTEXT_CONTROL/*2 bits so ALL*/, cxt->ContextFlags)); /* really we should have the full state */ ASSERT(TESTALL(CONTEXT_DR_STATE, cxt->ContextFlags)); context_to_mcontext(&mc, cxt); res = translate_mcontext(trec, &mc, restore_memory, NULL); if (res) { /* assuming cs/ss has been set properly */ mcontext_to_context(cxt, &mc, false /* set_cur_seg */); } return res; } /* be careful about args: for windows different versions have different offsets * see SYSCALL_PARAM_OFFSET in win32/os.c. * * This routine is assumed to only be used for NtRaiseException, where changes * to regs or even the stack will be unrolled or else the app will exit: * i.e., there is no need to restore the changes ourselves. */ static void set_mcontext_for_syscall(dcontext_t *dcontext, int sys_enum, #ifdef X64 reg_t arg1, reg_t arg2, reg_t arg3 #else reg_t sys_arg, size_t args_size #endif ) { priv_mcontext_t *mc = get_mcontext(dcontext); #ifdef X64 LOG(THREAD, LOG_SYSCALLS, 2, "issue_last_system_call_from_app(0x%x, "PFX" "PFX" "PFX")\n", syscalls[sys_enum], arg1, arg2, arg3); #else LOG(THREAD, LOG_SYSCALLS, 2, "issue_last_system_call_from_app(0x%x, "PFX")\n", syscalls[sys_enum], sys_arg); #endif mc->xax = syscalls[sys_enum]; if (get_syscall_method() == SYSCALL_METHOD_WOW64 && syscall_uses_wow64_index()) { mc->xcx = wow64_index[sys_enum]; } #ifdef X64 mc->xcx = arg1; mc->xdx = arg2; mc->r8 = arg3; #else if (syscall_uses_edx_param_base()) mc->xdx = sys_arg; else { /* The syscall itself is going to write to the stack for its call * so go ahead and push the args. See comment up top about not * needing to restore the stack. 
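         * E.g., for NtRaiseException on x86 the three word-sized arguments
         * (ExceptionRecord, Context, SearchFrames) built by os_raise_exception()
         * below are exactly what gets copied to the new xsp here.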
         */
        mc->xsp -= args_size;
        if (!safe_write((byte *)mc->xsp, args_size, (const void *)sys_arg)) {
            SYSLOG_INTERNAL_WARNING("failed to store args for NtRaiseException");
            /* just keep going I suppose: going to crash though w/ uninit args */
        }
    }
#endif
}

/* raise an exception in the application context */
/* FIXME : see os_forge_exception's call of this function for issues */
void
os_raise_exception(dcontext_t *dcontext,
                   EXCEPTION_RECORD* pexcrec, CONTEXT* pcontext)
{
#ifdef X64
    set_mcontext_for_syscall(dcontext, SYS_RaiseException,
                             (reg_t)pexcrec, (reg_t)pcontext, (reg_t)true);
#else
    /* ZwRaiseException arguments */
    struct _raise_exception_arguments_t {
        PEXCEPTION_RECORD ExceptionRecord;
        PCONTEXT Context;
        DWORD SearchFrames;
    } raise_exception_arguments = {pexcrec, pcontext, true};
    /* NOTE this struct stays on dstack when the syscall is executed! */

    /* args are on our stack so offset bytes are valid, we won't return
     * here so is ok if os clobbers them, though it won't since natively
     * they hold return addresses */
    reg_t arg_pointer = (reg_t)
        ((ptr_uint_t)&raise_exception_arguments) - SYSCALL_PARAM_OFFSET();

    set_mcontext_for_syscall(dcontext, SYS_RaiseException, arg_pointer,
                             sizeof(raise_exception_arguments) +
                             SYSCALL_PARAM_OFFSET());
#endif
    issue_last_system_call_from_app(dcontext);
    ASSERT_NOT_REACHED();
}

/***************************************************************************
 * CORE DUMPS
 */
/* all static vars here are not persistent across cache execution, so unprot */
START_DATA_SECTION(NEVER_PROTECTED_SECTION, "w");

static char dump_core_buf[256] VAR_IN_SECTION(NEVER_PROTECTED_SECTION)
    = {0,}; /* protected by dumpcore_lock */
static char dump_core_file_name[MAXIMUM_PATH]
    VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = {0,}; /* protected by dumpcore_lock */

static void
os_dump_core_segment_info(file_t file, HANDLE h, ULONG selector, const char *name)
{
    NTSTATUS res;
    DESCRIPTOR_TABLE_ENTRY entry = {0,};

    entry.Selector = selector;
    res = query_seg_descriptor(h, &entry);
    /* This feature from PR 212905 does not work on x64 b/c there is no
     * support for the underlying system call: we get STATUS_NOT_IMPLEMENTED.
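     * When the query succeeds, each selector is printed together with its raw
     * descriptor bits, e.g. (values purely illustrative):
     *   Cs=0x001b (0x0000ffff 0x00cff300)
     * while on failure only the selector value itself is printed.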
*/ if (NT_SUCCESS(res)) { snprintf(dump_core_buf, BUFFER_SIZE_ELEMENTS(dump_core_buf), "%s=0x%04x (0x%08x 0x%08x)\n", name, entry.Selector, /* print the raw bits in the descriptor */ *((PULONG)&entry.Descriptor), *(((PULONG)&entry.Descriptor)+1)); } else { snprintf(dump_core_buf, BUFFER_SIZE_ELEMENTS(dump_core_buf), "%s=0x%04x\n", name, entry.Selector); } NULL_TERMINATE_BUFFER(dump_core_buf); os_write(file, dump_core_buf, strlen(dump_core_buf)); } static void os_dump_core_dump_thread(file_t file, thread_id_t tid, TEB *teb, HANDLE h, int handle_rights, CONTEXT *cxt, dcontext_t *dcontext) { app_pc win32_start_addr = 0; /* for x64, FIXME PR 249988: need to coordinate w/ ldmp.c */ snprintf(dump_core_buf, BUFFER_SIZE_ELEMENTS(dump_core_buf), "Thread="PFX"\nTEB="PFX"\n" "HandleRights=0x%08x\n" "Eax="PFX", Ebx="PFX", Ecx="PFX", Edx="PFX"\n" "Esi="PFX", Edi="PFX", Esp="PFX", Ebp="PFX"\n" "EFlags="PFX", Eip="PFX"\n", tid, teb, handle_rights, cxt->CXT_XAX, cxt->CXT_XBX, cxt->CXT_XCX, cxt->CXT_XDX, cxt->CXT_XSI, cxt->CXT_XDI, cxt->CXT_XSP, cxt->CXT_XBP, cxt->CXT_XFLAGS, cxt->CXT_XIP); NULL_TERMINATE_BUFFER(dump_core_buf); os_write(file, dump_core_buf, strlen(dump_core_buf)); /* print segment selectors and associated descriptors */ os_dump_core_segment_info(file, h, cxt->SegCs, "Cs"); os_dump_core_segment_info(file, h, cxt->SegSs, "Ss"); os_dump_core_segment_info(file, h, cxt->SegDs, "Ds"); os_dump_core_segment_info(file, h, cxt->SegEs, "Es"); os_dump_core_segment_info(file, h, cxt->SegFs, "Fs"); os_dump_core_segment_info(file, h, cxt->SegGs, "Gs"); /* Print the win32 start address. This is saved away in the * dcontext when the thread is created. */ if (dcontext != NULL) { win32_start_addr = dcontext->win32_start_addr; } /* if the dcontext is unavailable, use the syscall */ else { NTSTATUS res = query_win32_start_addr(h, &win32_start_addr); ASSERT(NT_SUCCESS(res) && "failed to obtain win32 start address"); } snprintf(dump_core_buf, BUFFER_SIZE_ELEMENTS(dump_core_buf), "Win32StartAddr="PFX"\n", win32_start_addr); NULL_TERMINATE_BUFFER(dump_core_buf); os_write(file, dump_core_buf, strlen(dump_core_buf)); } #pragma warning( push ) /* warning is from GET_OWN_CONTEXT: flow in/out of asm code suppresses global opt */ #pragma warning( disable : 4740) static bool os_dump_core_live_dump(const char *msg, char *path OUT, size_t path_sz) { /* like the dump_core_buf, all the locals are protected by the * dumpcore_lock and are static to save stack space (CONTEXT is quite * sizable) */ static file_t dmp_file VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = INVALID_FILE; static thread_record_t *tr VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = NULL; static thread_record_t *my_tr VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = NULL; static int i VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = 0; static thread_id_t my_id VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = 0; static bool have_all_threads_lock VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = false; static PBYTE pb VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = NULL; static MEMORY_BASIC_INFORMATION mbi VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = {0,}; static CONTEXT cxt VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = {0,}; DEBUG_DECLARE(static bool suspend_failures VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = false;) /* initialize */ pb = NULL; have_all_threads_lock = false; my_id = get_thread_id(); my_tr = NULL; /* We should eventually add xmm regs to ldmp and use CONTEXT_DR_STATE here * (xref PR 264138) */ cxt.ContextFlags = CONTEXT_CONTROL|CONTEXT_INTEGER|CONTEXT_SEGMENTS; /* get logfile */ /* use no option 
synch for syslogs to avoid grabbing locks and risking
     * deadlock, caller should have synchronized already anyways */
    if (!get_unique_logfile(".ldmp", dump_core_file_name,
                            sizeof(dump_core_file_name), false, &dmp_file) ||
        dmp_file == INVALID_FILE) {
        SYSLOG_INTERNAL_NO_OPTION_SYNCH(SYSLOG_WARNING,
                                        "Unable to open core dump file");
        return false;
    }

    /* Write message */
    if (msg != NULL) {
        size_t length = strlen(msg);
        /* we start with length of message to make parsing easier */
        snprintf(dump_core_buf, BUFFER_SIZE_ELEMENTS(dump_core_buf),
                 PFX"\n", length+1 /* +1 for the \n */);
        NULL_TERMINATE_BUFFER(dump_core_buf);
        os_write(dmp_file, dump_core_buf, strlen(dump_core_buf));
        os_write(dmp_file, msg, length);
        os_write(dmp_file, "\n", 1);
    }

    /* synch with all threads */
    /* Don't use get_list_of_threads, it grabs a lock and allocates memory,
     * both of which might be dangerous on this path; instead walk table
     * by hand (we try to grab the necessary locks, but we will go ahead
     * and walk the table if we can't FIXME)
     * FIXME : share with dynamo.c
     */
    /* Try to grab locks,
     * NOTE os_dump_core already turned off deadlock_avoidance for us */
#ifdef DEADLOCK_AVOIDANCE
    /* ref case 4174, deadlock avoidance will assert if we try to grab a lock
     * we already own, even if it's only a trylock and even if the option is
     * turned off!  We hack around it here */
    if (all_threads_lock.owner == get_thread_id()) {
        LOG(GLOBAL, LOG_ALL, 1,
            "WARNING : live dump, faulting thread already owns the all_threads lock, "
            "let's hope things are consistent\n");
    } else {
#endif
        for (i=0; i < 100 /* arbitrary num */; i++) {
            if (mutex_trylock(&all_threads_lock)) {
                have_all_threads_lock = true;
                break;
            } else {
                os_thread_yield();
            }
        }
        DODEBUG({
            if (!have_all_threads_lock) {
                LOG(GLOBAL, LOG_ALL, 1,
                    "WARNING : live dump unable to grab all_threads lock, "
                    "continuing without it\n");
            }
        });
#ifdef DEADLOCK_AVOIDANCE
    }
#endif

    /* print out peb address */
    snprintf(dump_core_buf, BUFFER_SIZE_ELEMENTS(dump_core_buf),
             "PEB="PFX"\n", get_own_peb());
    NULL_TERMINATE_BUFFER(dump_core_buf);
    os_write(dmp_file, dump_core_buf, strlen(dump_core_buf));

    /* print out DR address */
    snprintf(dump_core_buf, BUFFER_SIZE_ELEMENTS(dump_core_buf),
             "dynamorio.dll="PFX"\n", get_dynamorio_dll_start());
    NULL_TERMINATE_BUFFER(dump_core_buf);
    os_write(dmp_file, dump_core_buf, strlen(dump_core_buf));

    /* for all threads, suspend and dump context */
    /* FIXME : do we care about segment, sse, float, or debug registers? */
    /* Do current thread first, first get thread record */
    if (all_threads != NULL) {
        for (i = 0; i < HASHTABLE_SIZE(ALL_THREADS_HASH_BITS); i++) {
            for (tr = all_threads[i]; tr != NULL; tr = tr->next) {
                if (tr->id == my_id)
                    my_tr = tr;
            }
        }
    }
    GET_OWN_CONTEXT(&cxt);
    os_dump_core_dump_thread(dmp_file, my_id, get_own_teb(), NT_CURRENT_THREAD,
                             my_tr != NULL ?
                             nt_get_handle_access_rights(my_tr->handle) : 0,
                             &cxt, my_tr != NULL ?
                             my_tr->dcontext : NULL);

    /* now walk all threads, skipping current thread */
    if (all_threads != NULL) {
        for (i = 0; i < HASHTABLE_SIZE(ALL_THREADS_HASH_BITS); i++) {
            for (tr = all_threads[i]; tr != NULL; tr = tr->next) {
                if (tr->id != my_id) {
                    ACCESS_MASK handle_rights =
                        nt_get_handle_access_rights(tr->handle);
                    TEB *teb_addr = get_teb(tr->handle);
                    DEBUG_DECLARE(bool res = )
                        os_thread_suspend(tr, 0);
                    /* we can't assert here (could infinite loop) */
                    DODEBUG({ suspend_failures = suspend_failures || !res; });
                    if (thread_get_context(tr, &cxt)) {
                        os_dump_core_dump_thread(dmp_file, tr->id, teb_addr,
                                                 tr->handle, handle_rights, &cxt,
                                                 tr->dcontext);
                    } else {
                        snprintf(dump_core_buf, BUFFER_SIZE_ELEMENTS(dump_core_buf),
                                 "Thread=0x%08x\nTEB="PFX"\n"
                                 "HandleRights=0x%08x\n"
                                 "<error state not available>\n\n",
                                 tr->id, teb_addr, handle_rights);
                        NULL_TERMINATE_BUFFER(dump_core_buf);
                        os_write(dmp_file, dump_core_buf, strlen(dump_core_buf));
                    }
                }
            }
        }
    } else {
        char *msg = "<error all threads list is already freed>";
        os_write(dmp_file, msg, strlen(msg));
        /* FIXME : if other threads are active (say in the case of detaching)
         * walking the memory below could be racy, what if another thread
         * frees some chunk of memory while we are copying it!  Just live with
         * the race for now. */
    }

    /* dump memory */
    /* FIXME : print_ldr_data() ? */
    while (query_virtual_memory(pb, &mbi, sizeof(mbi)) == sizeof(mbi)) {
        snprintf(dump_core_buf, BUFFER_SIZE_ELEMENTS(dump_core_buf),
                 "\n"
                 "BaseAddress="PFX"\n"
                 "AllocationBase="PFX"\n"
                 "AllocationProtect=0x%08x %s\n"
                 "RegionSize=0x%08x\n"
                 "State=0x%08x %s\n"
                 "Protect=0x%08x %s\n"
                 "Type=0x%08x %s\n",
                 mbi.BaseAddress, mbi.AllocationBase,
                 mbi.AllocationProtect, prot_string(mbi.AllocationProtect),
                 mbi.RegionSize,
                 mbi.State, mem_state_string(mbi.State),
                 mbi.Protect, prot_string(mbi.Protect),
                 mbi.Type, mem_type_string(mbi.Type));
        NULL_TERMINATE_BUFFER(dump_core_buf);
        os_write(dmp_file, dump_core_buf, strlen(dump_core_buf));
        if (mbi.State == MEM_COMMIT && !TEST(PAGE_GUARD, mbi.Protect) &&
            prot_is_readable(mbi.Protect)) {
            os_write(dmp_file, mbi.BaseAddress, mbi.RegionSize);
        }
        if (POINTER_OVERFLOW_ON_ADD(pb, mbi.RegionSize))
            break;
        pb += mbi.RegionSize;
    }

    /* dump handles */
    {
        /* see Nebbett examples 1.2 and 2.1; may not be able to do this
         * in the general case: one methodology requires the debug privilege,
         * the other requires that a global flag is set at boot time
         * FIXME */
    }

    /* end dump, forensics file will have call stacks and module list */

    /* unsynch with threads */
    if (all_threads != NULL) {
        for (i = 0; i < HASHTABLE_SIZE(ALL_THREADS_HASH_BITS); i++) {
            for (tr = all_threads[i]; tr != NULL; tr = tr->next) {
                if (tr->id != my_id) {
                    /* we assume that if a suspend failed, the corresponding
                     * resume will also fail -- o/w we could end up resuming
                     * a thread that a caller suspended!
*/ DEBUG_DECLARE(bool res = ) os_thread_resume(tr); /* we can't assert here (could infinite loop) */ DODEBUG({ suspend_failures = suspend_failures || !res; }); } } } } /* cleanup */ if (have_all_threads_lock) mutex_unlock(&all_threads_lock); close_file(dmp_file); /* write an event indicating the file was created */ SYSLOG_NO_OPTION_SYNCH(SYSLOG_INFORMATION, LDMP, 3, get_application_name(), get_application_pid(), dump_core_file_name); if (path != NULL) { strncpy(path, dump_core_file_name, path_sz); path[path_sz-1] = '\0'; } DODEBUG({ if (suspend_failures) { SYSLOG_INTERNAL_NO_OPTION_SYNCH(SYSLOG_ERROR, "suspend/resume failures during ldmp creation"); } }); return true; } #pragma warning( pop ) #ifdef INTERNAL static void os_dump_core_external_dump() { /* static buffers save stack space, this is do-once anyway, protected by * dumpcore_lock from os_dump_core() */ static char oncrash_var[MAXIMUM_PATH] VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = {0,}; static wchar_t oncrash_cmdline[MAXIMUM_PATH] VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = {0,}; static wchar_t oncrash_exe[MAXIMUM_PATH] VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = {0,}; /* the ONCRASH key tells us exactly what to launch, with our pid appended */ int retval = get_parameter(PARAM_STR(DYNAMORIO_VAR_ONCRASH), oncrash_var, sizeof(oncrash_var)); if (IS_GET_PARAMETER_SUCCESS(retval)) { HANDLE child; /* ASSUMPTION: no spaces in exe name, should be ok since only developers will * specify a name for this key, everyone else will use tools */ char *c = strchr(oncrash_var, ' '); if (c == NULL) c = oncrash_var + strlen(oncrash_var); ASSERT(c - oncrash_var < sizeof(oncrash_exe)/sizeof(wchar_t)); snwprintf(oncrash_exe, c - oncrash_var, L"%hs", oncrash_var); oncrash_exe[c - oncrash_var] = L'\0'; snwprintf(oncrash_cmdline, sizeof(oncrash_cmdline)/sizeof(wchar_t), L"%hs %hs", oncrash_var, get_application_pid()); NULL_TERMINATE_BUFFER(oncrash_cmdline); SYSLOG_INTERNAL_INFO("Thread %d dumping core via \"%ls\"", get_thread_id(), oncrash_cmdline); child = create_process(oncrash_exe, oncrash_cmdline); if (child != INVALID_HANDLE_VALUE) { /* wait for child to exit * FIXME: this makes ntsd have to do a 30-second wait to break in! * plus it causes drwtsn32 to hang, then timeout and kill us * w/o producing a dump file -- and only the header on the log file * BUT, if we don't do this, we only get dumps for -kill_thread! 
*/ nt_wait_event_with_timeout(child, INFINITE_WAIT); close_handle(child); } else SYSLOG_INTERNAL_WARNING("Unable to dump core via \"%ls\"", oncrash_cmdline); } else { SYSLOG_INTERNAL_WARNING("Unable to dump core due to missing parameter"); } } #endif /* INTERNAL */ /* return value is mostly about the ldmp, for dr_create_memory_dump */ static bool os_dump_core_internal(const char *msg, bool live_only, char *path OUT, size_t path_sz) { static thread_id_t current_dumping_thread_id VAR_IN_SECTION(NEVER_PROTECTED_SECTION) = 0; bool res = true; thread_id_t current_id = get_thread_id(); #ifdef DEADLOCK_AVOIDANCE dcontext_t *dcontext = get_thread_private_dcontext(); thread_locks_t * old_thread_owned_locks = NULL; #endif if (current_id == current_dumping_thread_id) return false; /* avoid infinite loop */ /* FIXME : A failure in the mutex_lock or mutex_unlock of the * dump_core_lock could lead to an infinite recursion, also a failure while * holding the eventlog_lock would lead to a deadlock at the syslog in * livedump (but we would likely deadlock later anyways), all other * recursion/deadlock cases should be handled by the above check */ #ifdef DEADLOCK_AVOIDANCE /* first turn off deadlock avoidance for this thread (needed for live dump * to try to grab all_threads and thread_initexit locks) */ if (dcontext != NULL) { old_thread_owned_locks = dcontext->thread_owned_locks; dcontext->thread_owned_locks = NULL; } #endif /* only allow one thread to dumpcore at a time, also protects static * buffers and current_dumping_thread_id */ mutex_lock(&dump_core_lock); current_dumping_thread_id = current_id; if (live_only || DYNAMO_OPTION(live_dump)) { res = os_dump_core_live_dump(msg, path, path_sz); } #ifdef INTERNAL /* not else-if, allow to be composable */ if (!live_only && DYNAMO_OPTION(external_dump)) { os_dump_core_external_dump(); } #endif current_dumping_thread_id = 0; mutex_unlock(&dump_core_lock); #ifdef DEADLOCK_AVOIDANCE /* restore deadlock avoidance for this thread */ if (dcontext != NULL) { dcontext->thread_owned_locks = old_thread_owned_locks; } #endif return res; } void os_dump_core(const char *msg) { os_dump_core_internal(msg, false, NULL, 0); } bool os_dump_core_live(const char *msg, char *path OUT, size_t path_sz) { return os_dump_core_internal(msg, true/*live only*/, path, path_sz); } /* back to normal section */ END_DATA_SECTION() /***************************************************************************/ /***************************************************************************/ /* detaching routines */ static bool internal_detach = false; /* Handle any outstanding callbacks. * * For sysenter system calls the kernel callback return returns to a known fixed * location that does a ret. To regain control we have overwritten the return * address on the stack to point back to the after syscall location and need to restore * the original target here. * * For all other types of system calls the kernel will return the instruction after * the system call which is in our generated code. We allocate a piece of thread * shared code here followed by an array of thread private detach_callback_stack_ts and * an array of the callback return addresses. We redirect all after syscall locations * to that shared code which then dispatches on thread_id to find the proper * detach_callback_stack_t, get the right return address from it and then jmp to it. * * Returns true if there are outstanding non-sysenter callbacks. 
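 * In rough pseudocode (a sketch of what the emitted dispatch code does,
 * not literal source):
 *   dispatch:
 *     tid = current thread id
 *     find the per_thread entry t with t.tid == tid
 *     jmp t.callback_addrs[--t.count]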
 */
/* XXX: should we give each thread private code its own top heap_mmap so
 * that it can be left behind to reduce the memory hit? */
bool
detach_handle_callbacks(int num_threads, thread_record_t **threads,
                        bool *cleanup_tpc /* array of size num_threads */)
{
    int i, num_threads_with_callbacks = 0, num_stacked_callbacks = 0;

    /* First walk counts the number of threads with outstanding callbacks and the number
     * of stacked callbacks (and also fixes the stack for sysenter system calls) so we
     * know how much memory to allocate for non-sysenter system calls. */
    for (i = 0; i < num_threads; i++) {
        dcontext_t *dcontext = threads[i]->dcontext;
        cleanup_tpc[i] = true; /* default to clean up */
        if (dcontext->prev_unused != NULL && dcontext->prev_unused->valid) {
            dcontext_t *tmp_dc = dcontext->prev_unused;
            int count = 0;
            LOG(GLOBAL, LOG_ALL, 1,
                "Detach : thread "TIDFMT" has stacked callbacks\n", threads[i]->id);
            do {
                count++;
                LOG(GLOBAL, LOG_ALL, 1, "callback %d has ret pc "PFX"\n",
                    count, POST_SYSCALL_PC(tmp_dc));
                ASSERT(POST_SYSCALL_PC(tmp_dc) != NULL &&
                       !is_dynamo_address(POST_SYSCALL_PC(tmp_dc)));
                if (get_syscall_method() == SYSCALL_METHOD_SYSENTER &&
                    INTERNAL_OPTION(detach_fix_sysenter_on_stack)) {
                    /* Fix up our stack modifications. Since the kernel returns to a
                     * fixed location this is all we need to do to restore app state.
                     * Note that shared syscall saves xsp for us, so xsp should be
                     * correct. */
                    ASSERT(*((app_pc *)get_mcontext(tmp_dc)->xsp) ==
                           after_do_syscall_code(dcontext) ||
                           *((app_pc *)get_mcontext(tmp_dc)->xsp) ==
                           after_shared_syscall_code(dcontext));
                    /* fix return address */
                    LOG(GLOBAL, LOG_ALL, 1,
                        "callback %d patching stack address "PFX" from "PFX" to "PFX"\n",
                        get_mcontext(tmp_dc)->xsp,
                        *((app_pc *)get_mcontext(tmp_dc)->xsp),
                        POST_SYSCALL_PC(tmp_dc));
                    *((app_pc *)get_mcontext(tmp_dc)->xsp) = POST_SYSCALL_PC(tmp_dc);
                    if (DYNAMO_OPTION(sygate_sysenter)) {
                        *((app_pc *)(get_mcontext(tmp_dc)->xsp+XSP_SZ)) =
                            dcontext->sysenter_storage;
                    }
                }
                tmp_dc = tmp_dc->prev_unused;
            } while (tmp_dc != NULL && tmp_dc->valid);
            num_threads_with_callbacks++;
            num_stacked_callbacks += count;
            /* can't free thread private syscall code if not SYSENTER since kernel
             * will return to there */
            cleanup_tpc[i] = (get_syscall_method() == SYSCALL_METHOD_SYSENTER &&
                              INTERNAL_OPTION(detach_fix_sysenter_on_stack));
            LOG(GLOBAL, LOG_ALL, 1,
                "Detach : thread "TIDFMT" had %d stacked callbacks\n",
                threads[i]->id, count);
        } else {
            /* no saved callback state, done with this thread */
            LOG(GLOBAL, LOG_ALL, 1,
                "Detach : thread "TIDFMT" has no stacked callbacks\n",
                threads[i]->id);
        }
    }

    /* Second walk (only needed for non-sysenter system calls). Allocate and populate
     * the callback dispatch code and data structures. */
    if (num_stacked_callbacks > 0 &&
        (get_syscall_method() != SYSCALL_METHOD_SYSENTER ||
         !INTERNAL_OPTION(detach_fix_sysenter_on_stack))) {
        /* callback handling buf layout
         * {
         *   byte dispatch_code[DETACH_CALLBACK_CODE_SIZE];
         *   detach_callback_stack_t per_thread[num_threads_with_callbacks]
         *   app_pc callback_addrs[num_stacked_callbacks]
         * }
         * Not a real struct since variable size arrays.  Note that nothing requires the
         * above elements to be in that order (or even in the same allocation).  We
         * allocate them together to save memory since we must leak this.  FIXME - find
         * a way to free the allocation once we are finished with it.
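         * E.g. (illustrative numbers): two threads with three stacked callbacks
         * in total need DETACH_CALLBACK_CODE_SIZE +
         * 2*sizeof(detach_callback_stack_t) + 3*sizeof(app_pc) bytes, matching
         * the callback_buf_size computation just below.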
*/ int callback_buf_size = DETACH_CALLBACK_CODE_SIZE + num_threads_with_callbacks * sizeof(detach_callback_stack_t) + num_stacked_callbacks * sizeof(app_pc); /* FIXME - this should (along with any do/shared syscall containing gencode) be * allocated outside of our vmmheap so that we can free the vmmheap reservation * on detach. */ byte *callback_buf = (byte *)heap_mmap(callback_buf_size, VMM_SPECIAL_MMAP); detach_callback_stack_t *per_thread = (detach_callback_stack_t *)(callback_buf + DETACH_CALLBACK_CODE_SIZE); app_pc *callback_addrs = (app_pc *)(&per_thread[num_threads_with_callbacks]); int j = 0; /* per_thread index */ emit_detach_callback_code(GLOBAL_DCONTEXT, callback_buf, per_thread); #ifdef X64 /* we only emit shared/do_syscall in shared_code on 64-bit */ arch_patch_syscall(GLOBAL_DCONTEXT, callback_buf); /* patch the shared syscalls */ #endif for (i = 0; i < num_threads; i++) { dcontext_t *dcontext = threads[i]->dcontext; if (dcontext->prev_unused != NULL && dcontext->prev_unused->valid) { dcontext_t *tmp_dc = dcontext->prev_unused; arch_patch_syscall(dcontext, callback_buf); emit_detach_callback_final_jmp(dcontext, &(per_thread[j])); per_thread[j].callback_addrs = callback_addrs; per_thread[j].tid = dcontext->owning_thread; per_thread[j].count = 0; /* NOTE - we are walking the stacked dcontexts in reverse order * (see callback.c, the last dcontext is considered the top of the * stack). This is ok since our emitted code expects this. */ do { *callback_addrs++ = POST_SYSCALL_PC(tmp_dc); ASSERT((byte *)callback_addrs - (byte *)per_thread <= callback_buf_size); per_thread[j].count++; tmp_dc = tmp_dc->prev_unused; } while (tmp_dc != NULL && tmp_dc->valid); j++; } } ASSERT(j == num_threads_with_callbacks); return true; } return false; } void detach_remove_image_entry_hook(int num_threads, thread_record_t **threads) { /* If we hooked the image entry point and haven't unhooked it yet * we do so now. We can tell from the callback hack: look for a thread with * LOST_CONTROL_AT_CALLBACK in the under_dynamo_control bool. */ bool did_unhook = false; int i; for (i = 0; i < num_threads; i++) { if (IS_UNDER_DYN_HACK(threads[i]->under_dynamo_control)) { LOG(GLOBAL, LOG_ALL, 1, "Detach : unpatching image entry point (from thread "TIDFMT")\n", threads[i]->id); ASSERT(!did_unhook); /* should only happen once, at most! */ did_unhook = true; remove_image_entry_trampoline(); } } if (!did_unhook) { /* case 9347/9475 if detaching before we have taken over the primary thread */ if (dr_injected_secondary_thread && !dr_late_injected_primary_thread) { LOG(GLOBAL, LOG_ALL, 1, "Detach : unpatching image entry point (from primary)\n"); did_unhook = true; /* note that primary thread is unknown and therefore not suspended */ remove_image_entry_trampoline(); } } } bool detach_do_not_translate(thread_record_t *tr) { if (IS_UNDER_DYN_HACK(tr->under_dynamo_control)) { LOG(GLOBAL, LOG_ALL, 1, "Detach : thread "TIDFMT" running natively since lost control at callback " "return and have not regained it, no need to translate context\n", tr->id); /* We don't expect to be at do_syscall (and therefore require translation * even though native) since we should've re-taken over by then. 
         */
        DOCHECK(1, {
            priv_mcontext_t mc;
            bool res = thread_get_mcontext(tr, &mc);
            ASSERT(res);
            ASSERT(!is_at_do_syscall(tr->dcontext, (app_pc)mc.pc, (byte *)mc.xsp));
        });
        return true;
    }
    return false;
}

void
detach_finalize_translation(thread_record_t *tr, priv_mcontext_t *mc)
{
    dcontext_t *dcontext = tr->dcontext;

    /* Handle special case of vsyscall, need to hack the return address
     * on the stack as part of the translation.
     */
    if (get_syscall_method() == SYSCALL_METHOD_SYSENTER &&
        mc->pc == (app_pc) vsyscall_after_syscall) {
        ASSERT(get_os_version() >= WINDOWS_VERSION_XP);
        /* handle special case of vsyscall */
        /* case 5441 Sygate hack means after_syscall will be at
         * esp+4 (esp will point to sysenter_ret_address in ntdll) */
        if (*(cache_pc *)(mc->xsp +
                          (DYNAMO_OPTION(sygate_sysenter) ? XSP_SZ : 0)) ==
            after_do_syscall_code(dcontext) ||
            *(cache_pc *)(mc->xsp +
                          (DYNAMO_OPTION(sygate_sysenter) ? XSP_SZ : 0)) ==
            after_shared_syscall_code(dcontext)) {
            LOG(GLOBAL, LOG_ALL, 1,
                "Detach : thread "TIDFMT" suspended at vsyscall with ret to after "
                "shared syscall, fixing up by changing ret to "PFX"\n",
                tr->id, POST_SYSCALL_PC(dcontext));
            /* need to restore sysenter_storage for Sygate hack */
            if (DYNAMO_OPTION(sygate_sysenter))
                *(app_pc *)(mc->xsp+XSP_SZ) = dcontext->sysenter_storage;
            *(app_pc *)mc->xsp = POST_SYSCALL_PC(dcontext);
        } else {
            LOG(GLOBAL, LOG_ALL, 1,
                "Detach, thread "TIDFMT" suspended at vsyscall with ret to "
                "unknown addr, must be running native!\n", tr->id);
        }
    }
}

void
detach_finalize_cleanup(void)
{
#ifndef DEBUG
    /* for debug, os_slow_exit() will zero the slots for us; else we must do it */
    tls_cfree(true/*need to synch*/, (uint) tls_local_state_offs, TLS_NUM_SLOTS);
#endif
}

/* Note: detaching is not transparent while suspending since suspend count
 * will be different (and the number of threads if a non-app-API-triggered detach).
 */
void
detach_helper(int detach_type)
{
    dcontext_t *my_dcontext = get_thread_private_dcontext();

    /* Caller (generic_nudge_handler) should have already checked these and
     * verified the nudge is valid. */
    ASSERT(my_dcontext != NULL);
    if (my_dcontext == NULL)
        return;

    ASSERT(detach_type < DETACH_NORMAL_TYPE ||
           ((my_dcontext != NULL && my_dcontext->whereami == WHERE_FCACHE) ||
            /* If detaching in thin_client/hotp_only mode, must only be WHERE_APP! */
            (RUNNING_WITHOUT_CODE_CACHE() && my_dcontext->whereami == WHERE_APP)));

    detach_on_permanent_stack(internal_detach,
                              detach_type != DETACH_BAD_STATE_NO_CLEANUP);

    if (detach_type == DETACH_BAD_STATE_NO_CLEANUP) {
        SYSLOG_INTERNAL_WARNING("finished detaching, skipping cleanup");
        /* do a quick exit, skipping all cleanup except eventlog */
        eventlog_fast_exit();
        /* We don't even unload our dll since it's no longer required to unload
         * our dll for proper tools function. */
        /* FIXME : since we reached detach_helper via a clean call out of the
         * cache, if we return we will return back into the cache!
         * It would be cleaner for the thread to die by returning from its
         * start function, but to avoid complications we just kill it here. */
        /* NOTE - ref case 4923 (2k3sp1 doesn't free the LdrLock when the owning
         * thread dies unlike earlier versions).  With the fix for that case we
         * should no longer be holding any application locks at this point. */
        nt_terminate_thread(NT_CURRENT_THREAD, 0);
        ASSERT_NOT_REACHED();
        return;
    }

    /* FIXME : unload dll, be able to have thread continue etc. */

    /* FIXME : since we reached detach_helper via a clean call out of the
     * cache, if we return we will return back into the cache!  It would be
     * cleaner for the thread to die by returning from its start function,
     * but to avoid complications we just kill it here. */
    /* NOTE - ref case 4923 (2k3sp1 doesn't free the LdrLock when the owning
     * thread dies unlike earlier versions).  With the fix for that case we
     * should no longer be holding any application locks at this point. */
    nt_terminate_thread(NT_CURRENT_THREAD, 0);
    ASSERT_NOT_REACHED();
    return;
}

/* FIXME : we create a thread to do the detaching, and all other dlls will
 * be notified of its creation by dll_thread_attach, which is a transparency
 * issue. */
/* sets detach in motion and then returns */
void
detach_internal()
{
    SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    internal_detach = true;
    /* we go ahead and re-protect though detach thread will soon un-prot */
    SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
    LOG(GLOBAL, LOG_ALL, 1, "Starting detach\n");
    nudge_internal(get_process_id(), NUDGE_GENERIC(detach), NULL, 0 /* ignored */, 0);
    LOG(GLOBAL, LOG_ALL, 1, "Created detach thread\n");
}

/* mcontext must be valid, including the pc field (native) and app_errno;
 * must not be holding any locks */
/* sets detach in motion and never returns */
void
detach_internal_synch()
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    detach_internal();
    /* to be safe with flush */
    enter_threadexit(dcontext);
    /* make sure we spin forever */
    adjust_wait_at_safe_spot(dcontext, 1);
    check_wait_at_safe_spot(dcontext, THREAD_SYNCH_VALID_MCONTEXT);
}

bool
is_thread_currently_native(thread_record_t *tr)
{
    return (!tr->under_dynamo_control ||
            /* start/stop doesn't change under_dynamo_control and has its own field */
            (tr->dcontext != NULL && tr->dcontext->currently_stopped) ||
            IS_UNDER_DYN_HACK(tr->under_dynamo_control));
}

/* contended path of mutex operations */

bool
ksynch_var_initialized(HANDLE *event)
{
    return (*event != NULL);
}

static contention_event_t
mutex_get_contended_event(contention_event_t *contended_event, EVENT_TYPE event_type)
{
    contention_event_t ret = *contended_event;
    if (ret == NULL) {
        contention_event_t new_event;
        bool not_yet_created;
        /* not signaled */
        /* EVENT_ALL_ACCESS, although observed access mask 0x100003 (SYNCHRONIZE|0x3) */
        new_event = nt_create_event(event_type);

        not_yet_created =
            atomic_compare_exchange_ptr((ptr_uint_t*)contended_event,
                                        (ptr_uint_t)NULL, (ptr_uint_t)new_event);
        if (not_yet_created) {
            /* we were first to create it */
            ret = new_event;
        } else {
            /* already created by someone else */
            ret = *contended_event;
            close_handle(new_event);
        }
    }
    ASSERT(ksynch_var_initialized(&ret));
    return ret;
}

void
mutex_free_contended_event(mutex_t *lock)
{
    os_close(lock->contended_event);
}

/* common wrapper that also attempts to detect deadlocks.  Returns false on
 * timeout, true on signalled.
 *
 * A 0 timeout_ms means to wait forever.
 * A non-NULL mc will mark this thread safe to suspend and transfer; setting mc
 * requires a non-NULL dcontext to be passed.
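 * E.g., wait_for_event(e, 0) further below ends up waiting forever (modulo
 * the deadlock-timeout diagnostics), while wait_for_event(e, 500) gives up
 * and returns false after roughly 500ms.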
*/ static bool os_wait_event(event_t e, int timeout_ms _IF_CLIENT_INTERFACE(bool set_safe_for_synch) _IF_CLIENT_INTERFACE(dcontext_t *dcontext) _IF_CLIENT_INTERFACE(priv_mcontext_t *mc)) { wait_status_t res; bool reported_timeout = false; LARGE_INTEGER timeout; #ifdef CLIENT_INTERFACE if (mc != NULL) { ASSERT(dcontext != NULL); *get_mcontext(dcontext) = *mc; } #endif KSTART(wait_event); /* we allow using this in release builds as well */ if (timeout_ms == 0 && DYNAMO_OPTION(deadlock_timeout) > 0) { timeout.QuadPart= - ((int)DYNAMO_OPTION(deadlock_timeout)) * TIMER_UNITS_PER_MILLISECOND; #ifdef CLIENT_INTERFACE /* if set_safe_for_synch dcontext must be non-NULL */ ASSERT(!set_safe_for_synch || dcontext != NULL); if (set_safe_for_synch) dcontext->client_data->client_thread_safe_for_synch = true; if (mc != NULL) set_synch_state(dcontext, THREAD_SYNCH_VALID_MCONTEXT); #endif res = nt_wait_event_with_timeout(e, &timeout /* debug timeout */); #ifdef CLIENT_INTERFACE if (set_safe_for_synch) dcontext->client_data->client_thread_safe_for_synch = false; if (mc != NULL) set_synch_state(dcontext, THREAD_SYNCH_NONE); #endif if (res == WAIT_SIGNALED) { KSTOP(wait_event); return true; /* all went well */ } ASSERT(res == WAIT_TIMEDOUT); /* We could use get_own_peb()->BeingDebugged to determine whether * there was a debugger, but we can't just ignore this. It's better * to explicitly overwrite the hidden DO_ONCE variable from a debugging * session if this is getting in the way. */ /* FIXME - instead of DO_ONCE we may want a named static variable that * we can access easily from the debugger. */ DO_ONCE({ reported_timeout = true; report_dynamorio_problem(NULL, DUMPCORE_TIMEOUT, NULL, NULL, "Timeout expired - 1st wait, possible deadlock " "(or you were debugging)"); /* do a 2nd wait so we can get two dumps to compare for progress */ /* FIXME - use shorter timeout for the 2nd wait? */ res = nt_wait_event_with_timeout(e, &timeout /* debug timeout */); if (res == WAIT_SIGNALED) { /* 2nd wait succeeded! We must not have been really deadlocked. * Syslog a warning to ignore the first ldmp and continue. */ /* FIXME - should we reset the DO_ONCE now? */ /* FIXME - should this be a report_dynamorio_problem or some * such so is more useful in release builds? */ SYSLOG_INTERNAL_WARNING("WARNING - 2nd wait after deadlock timeout " "expired succeeded! Not really deadlocked."); KSTOP(wait_event); return true; } ASSERT(res == WAIT_TIMEDOUT); report_dynamorio_problem(NULL, DUMPCORE_TIMEOUT, NULL, NULL, "Timeout expired - 2nd wait, possible deadlock " "(or you were debugging)"); }); } /* fallback to waiting forever */ #ifdef CLIENT_INTERFACE if (set_safe_for_synch) dcontext->client_data->client_thread_safe_for_synch = true; if (mc != NULL) set_synch_state(dcontext, THREAD_SYNCH_VALID_MCONTEXT); #endif if (timeout_ms > 0) timeout.QuadPart= -timeout_ms * TIMER_UNITS_PER_MILLISECOND; res = nt_wait_event_with_timeout(e, timeout_ms > 0 ? &timeout : INFINITE_WAIT); #ifdef CLIENT_INTERFACE if (set_safe_for_synch) dcontext->client_data->client_thread_safe_for_synch = false; if (mc != NULL) set_synch_state(dcontext, THREAD_SYNCH_NONE); #endif if (reported_timeout) { /* Our wait eventually succeeded so not truly a deadlock. Syslog a * warning to that effect. */ /* FIXME - should we reset the DO_ONCE now? */ /* FIXME - should this be a report_dynamorio_problem or some * such so is more useful in release builds? */ SYSLOG_INTERNAL_WARNING("WARNING - Final wait after reporting deadlock timeout " "expired succeeded! 
Not really deadlocked."); } KSTOP(wait_event); return (res == WAIT_SIGNALED); } #endif /* !NOT_DYNAMORIO_CORE_PROPER */ wait_status_t os_wait_handle(HANDLE h, uint64 timeout_ms) { LARGE_INTEGER li; LARGE_INTEGER *timeout; if (timeout_ms == INFINITE) timeout = INFINITE_WAIT; else { li.QuadPart = - (int64)timeout_ms * TIMER_UNITS_PER_MILLISECOND; timeout = &li; } return nt_wait_event_with_timeout(h, timeout); } #ifndef NOT_DYNAMORIO_CORE_PROPER void mutex_wait_contended_lock(mutex_t *lock _IF_CLIENT_INTERFACE(priv_mcontext_t *mc)) { contention_event_t event = mutex_get_contended_event(&lock->contended_event, SynchronizationEvent); #ifdef CLIENT_INTERFACE dcontext_t *dcontext = get_thread_private_dcontext(); bool set_safe_for_sync = (dcontext != NULL && IS_CLIENT_THREAD(dcontext) && (mutex_t *)dcontext->client_data->client_grab_mutex == lock); ASSERT(!set_safe_for_sync || dcontext != NULL); /* set_safe_for_sync can't be true at the same time as passing an mcontext to return into: nothing would be able to reset the client_thread_safe_for_sync flag. */ ASSERT(!(set_safe_for_sync && mc != NULL)); #endif os_wait_event(event, 0 _IF_CLIENT_INTERFACE(set_safe_for_sync) _IF_CLIENT_INTERFACE(dcontext) _IF_CLIENT_INTERFACE(mc)); /* the event was signaled, and this thread was released, the auto-reset event is again nonsignaled for all other threads to wait on */ } void mutex_notify_released_lock(mutex_t *lock) { contention_event_t event = mutex_get_contended_event(&lock->contended_event, SynchronizationEvent); nt_set_event(event); } void rwlock_wait_contended_writer(read_write_lock_t *rwlock) { contention_event_t event = mutex_get_contended_event(&rwlock->writer_waiting_readers, SynchronizationEvent); os_wait_event(event, 0 _IF_CLIENT_INTERFACE(false) _IF_CLIENT_INTERFACE(NULL) _IF_CLIENT_INTERFACE(NULL)); /* the event was signaled, and this thread was released, the auto-reset event is again nonsignaled for all other threads to wait on */ } void rwlock_notify_writer(read_write_lock_t *rwlock) { contention_event_t event = mutex_get_contended_event(&rwlock->writer_waiting_readers, SynchronizationEvent); nt_set_event(event); } /* The current implementation uses auto events and will wake up only a single reader. We then expect each of them to wake up any other ones by properly counting. 
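 * E.g., with three readers blocked here, the writer's single signal wakes
 * one reader; that reader is then expected to signal again so the remaining
 * two are released one at a time.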
 */
void
rwlock_wait_contended_reader(read_write_lock_t *rwlock)
{
    contention_event_t notify_readers =
        mutex_get_contended_event(&rwlock->readers_waiting_writer,
                                  SynchronizationEvent);
    os_wait_event(notify_readers, 0 _IF_CLIENT_INTERFACE(false)
                  _IF_CLIENT_INTERFACE(NULL) _IF_CLIENT_INTERFACE(NULL));
    /* the event was signaled, and only a single thread waiting on this
     * event is released; any remaining readers are woken by the released
     * reader (see above)
     */
}

void
rwlock_notify_readers(read_write_lock_t *rwlock)
{
    contention_event_t notify_readers =
        mutex_get_contended_event(&rwlock->readers_waiting_writer,
                                  SynchronizationEvent);
    /* this will wake up only one since we're using an auto event */
    nt_set_event(notify_readers);
}

/***************************************************************************/

event_t
create_event()
{
    return nt_create_event(SynchronizationEvent);
}

void
destroy_event(event_t e)
{
    nt_close_event(e);
}

void
signal_event(event_t e)
{
    nt_set_event(e);
}

void
reset_event(event_t e)
{
    /* should be used only for manual events (NotificationEvent) */
    nt_clear_event(e);
}

bool
wait_for_event(event_t e, int timeout_ms)
{
    return os_wait_event(e, timeout_ms _IF_CLIENT_INTERFACE(false)
                         _IF_CLIENT_INTERFACE(NULL) _IF_CLIENT_INTERFACE(NULL));
}

timestamp_t
get_timer_frequency()
{
    LARGE_INTEGER ignore_tsc;
    LARGE_INTEGER freq;
    timestamp_t processor_speed;

    nt_query_performance_counter(&ignore_tsc, /* not optional */
                                 &freq);
    DOLOG(2, LOG_ALL, {
        timestamp_t tsc;
        RDTSC_LL(tsc);

        LOG(GLOBAL, LOG_ALL, 2,
            "Starting RDTSC: "UINT64_FORMAT_STRING
            " nt_query_performance_counter: "UINT64_FORMAT_STRING
            " freq:" UINT64_FORMAT_STRING "\n",
            tsc, ignore_tsc.QuadPart, freq.QuadPart);
    });
    processor_speed = freq.QuadPart / 1000; /* convert to KHz */
    /* case 2937 - Windows sometimes uses the RTC */
    if (processor_speed < 500*1000 /* 500 MHz too low for a modern machine */) {
        processor_speed = 2937*1000;
        LOG(GLOBAL, LOG_ALL, 1,
            "get_timer_frequency: OS is using RTC!  Reported speed is bogus.\n");
    }
    return processor_speed;
}

uint
os_random_seed()
{
    LARGE_INTEGER tsc_or_rtc;
    uint seed = (uint) get_thread_id();
    seed ^= (uint) query_time_millis();

    /* safer to use than RDTSC, since it defaults to real time clock
     * if TSC is not available, either one is good enough for randomness
     */
    nt_query_performance_counter(&tsc_or_rtc, NULL);
    seed ^= tsc_or_rtc.LowPart;
    seed ^= tsc_or_rtc.HighPart;

    LOG(GLOBAL, LOG_ALL, 1, "os_random_seed: %d\n", seed);
    return seed;
}

void
early_inject_init()
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    module_handle_t mod;
    bool under_dr_save;
    where_am_i_t whereami_save;
    wchar_t buf[MAX_PATH];
    int os_version = get_os_version();
    GET_NTDLL(LdrLoadDll, (IN PCWSTR PathToFile OPTIONAL,
                           IN PULONG Flags OPTIONAL,
                           IN PUNICODE_STRING ModuleFileName,
                           OUT PHANDLE ModuleHandle));

    ASSERT(dcontext != NULL);

    early_inject_location = DYNAMO_OPTION(early_inject_location);

    /* check for option override of the address */
    if (DYNAMO_OPTION(early_inject_location) == INJECT_LOCATION_LdrCustom) {
        early_inject_address = (app_pc)DYNAMO_OPTION(early_inject_address);
        ASSERT(early_inject_address != NULL);
        LOG(GLOBAL, LOG_TOP, 1,
            "early_inject using option provided address "PFX" at location %d\n",
            early_inject_address, early_inject_location);
        return;
    }

    /* We only need to figure out the address for Ldr* locations */
    if (!INJECT_LOCATION_IS_LDR(early_inject_location)) {
        LOG(GLOBAL, LOG_TOP, 1,
            "early_inject is using location %d, no need to find address\n",
            early_inject_location);
        return;
    }

    /* Figure out which location we're using, keep in synch with
     * LdrpLoadImportModule check in options.c */
    if (DYNAMO_OPTION(early_inject_location) == INJECT_LOCATION_LdrDefault) {
        LOG(GLOBAL, LOG_TOP, 2,
            "early_inject using default ldr location for this os_ver\n");
        switch (os_version) {
        case WINDOWS_VERSION_NT:
            /* LdrpLoadImportModule is best but we can't find that address
             * automatically since one of the stack frames we need to walk
             * for it doesn't use frame ptrs (we can get LdrpLoadDll though).
             * LdrpLoadDll seems to work fairly well, but won't get us in til
             * after some of the static dlls are loaded. */
            /* if someone provided a location for us go ahead and use that on
             * the presumption they're providing LdrpLoadImportModule for us. */
            if (DYNAMO_OPTION(early_inject_address) != 0) {
                early_inject_address = (app_pc)DYNAMO_OPTION(early_inject_address);
                LOG(GLOBAL, LOG_TOP, 1,
                    "early_inject using option provided address "PFX" at location %d\n",
                    early_inject_address, early_inject_location);
                return;
            }
            /* Case 7806, on some NT machines LdrpLoadDll causes problems
             * while on others it doesn't.  Just turn off early injection
             * on NT for now (LdrpLoadDll wasn't giving very good aslr support
             * anyways and isn't a desktop target).  FIXME - we could just
             * hardcode a table of LdrpLoadImportModule addresses for NT since
             * we don't expect Microsoft to release any more patches for it.
             */
            options_make_writable();
            dynamo_options.early_inject = false;
            options_restore_readonly();
            return;
        case WINDOWS_VERSION_2000:
            /* LdrpLoadImportModule is best, LdrpLoadDll kind of works but won't
             * get us in til after most of the static dlls are loaded */
            early_inject_location = INJECT_LOCATION_LdrpLoadImportModule;
            break;
        case WINDOWS_VERSION_XP:
            /* LdrpLoadDll is best, LdrpLoadImportModule also works but it
             * misses the load of kernel32 */
            early_inject_location = INJECT_LOCATION_LdrpLoadDll;
            break;
        case WINDOWS_VERSION_2003:
        case WINDOWS_VERSION_VISTA:
        case WINDOWS_VERSION_7:
        case WINDOWS_VERSION_8:
        case WINDOWS_VERSION_8_1:
        case WINDOWS_VERSION_10:
        case WINDOWS_VERSION_10_1511:
        case WINDOWS_VERSION_10_1607:
        case WINDOWS_VERSION_10_1703:
        case WINDOWS_VERSION_10_1709:
            /* LdrLoadDll is best but LdrpLoadDll seems to work just as well
             * (XXX: would it be better just to use that so matches XP?),
             * LdrpLoadImportModule also works but it misses the load of
             * kernel32. */
            early_inject_location = INJECT_LOCATION_LdrLoadDll;
            break;
        default:
            /* is probably a newer Windows version so the 2003 location is the
             * most likely to work */
            early_inject_location = INJECT_LOCATION_LdrLoadDll;
            ASSERT(os_version > WINDOWS_VERSION_10);
        }
    }
    ASSERT(early_inject_location != INJECT_LOCATION_LdrDefault);
    LOG(GLOBAL, LOG_TOP, 1,
        "early_inject is using location %d, finding address\n",
        early_inject_location);

    /* check if we already have the right address */
    if (dr_early_injected &&
        INJECT_LOCATION_IS_LDR_NON_DEFAULT(early_inject_location) &&
        early_inject_location == dr_early_injected_location
        /* don't use parent's address if stress option set */
        && !(INTERNAL_OPTION(early_inject_stress_helpers) &&
             early_inject_location == INJECT_LOCATION_LdrpLoadImportModule)) {
        /* We've got the right address to use already (from parent) */
        early_inject_address = parent_early_inject_address;
        ASSERT(early_inject_address != NULL);
        ASSERT(early_inject_location != INJECT_LOCATION_LdrLoadDll ||
               early_inject_address == (app_pc)LdrLoadDll);
        LOG(GLOBAL, LOG_TOP, 1,
            "early_inject using parent supplied address "PFX"\n",
            early_inject_address);
        return;
    }

    switch (early_inject_location) {
    case INJECT_LOCATION_LdrLoadDll:
        early_inject_address = (app_pc)LdrLoadDll;
        break;
    case INJECT_LOCATION_LdrpLoadDll:
        /* If we were early injected have to have already gotten this address
         * from parent as our DllMain stack walk will have gotten the wrong
         * locations (during process init the Ldr delays calling DllMains
         * until all static dlls are loaded unless GetProcAddress is called
         * on the dll first, in that case its DllMain is called from there
         * not LdrpLoadDll as we expect). */
        /* FIXME - we could use a helper dll to get this, but it won't work
         * when early_injected for the same reason dr's DllMain walk doesn't.
         * Maybe there's some flag we can pass to the Ldr to tell it to call
         * the DllMain right away (could then use it when trampoline loads
         * dr dll).  Other option is we could wait and use the helper dll
         * once the Ldr is in a state where it will do what we expect
         * (the image entry point would qualify, though we could prob. find
         * somewhere earlier than that, say when we see the execution of the
         * DllMain of one of the non ntdll system dlls or something).  That
         * said in the product I expect any given platform (let alone machine)
         * to always use the same inject location. */
        ASSERT_NOT_IMPLEMENTED(!dr_early_injected && "process early injected "
                               "at non LdrpLoadDll location is configured to "
                               "use LdrpLoadDll location which is NYI");
        if (os_version == WINDOWS_VERSION_NT)
            early_inject_address = ldrpLoadDll_address_NT;
        else
            early_inject_address = ldrpLoadDll_address_not_NT;
        break;
    case INJECT_LOCATION_LdrpLoadImportModule:
        /* We use helper dlls to determine this address at runtime.  We pretend
         * to be a native_exec thread and load drearlyhelper1.dll which
         * statically links to drearlyhelper2.dll.  We watch for the
         * NtMapViewOfSection call that loads drearlyhelper2.dll in
         * syscall_while_native.  At that point we expect the stack to look
         * like this:
         *   (in NtMapViewOfSection)
         *   ntdll!LdrpMapDll
         *   ntdll!LdrpLoadImportModule (what we want)
         * After that we don't really care (it's one of the
         * Ldrp*ImportDescriptor* routines).  So we walk the stack back and
         * get the desired address. */
        ASSERT(DYNAMO_OPTION(native_exec_syscalls));
        LOG(GLOBAL, LOG_ALL, 1,
            "early_inject using helper dlls to find LdrpLoadImportModule\n");
        /* Pretend to be native, so Ki & Ldr hooks don't bother us.  NOTE that
         * since we're still pre dynamo_initialized no other threads can be
         * running in dr code (so we're ok with the synch routines which could
         * otherwise be a problem since we're still on the appstack at this
         * point so could pass at_safe_spot while we were native).  Hotpatch
         * nudge dll loading does the same trick.  This does assume that,
         * like hotpatch nudge, we aren't running on the dstack as that
         * will be clobbered.  Alt. we could remove the KSTATS issue and
         * the stack restriction by special casing this thread in
         * syscall_while_native (just let all system calls run natively except
         * MapViewOfSection which we do there so we can check the result). */
        ASSERT(!is_currently_on_dstack(dcontext));
        under_dr_save = dcontext->thread_record->under_dynamo_control;
        dcontext->thread_record->under_dynamo_control = false;
        whereami_save = dcontext->whereami;
        /* FIXME - this is an ugly hack to get the kstack in a form compatible
         * with dispatch for processing the native exec syscalls we'll hit
         * while loading the helper dll (hotpatch has a similar issue but
         * lucks out with having a compatible stack).  Shouldn't mess things
         * up too much though.  We do have to use non-matching stops so not
         * sure how accurate these times will be (should be tiny anyways);
         * should poke around dispatch sometime and figure out some way to
         * do this nicer. */
        KSTART(dispatch_num_exits);
        KSTART(dispatch_num_exits);
        string_option_read_lock();
        snwprintf(buf, BUFFER_SIZE_ELEMENTS(buf), L"%hs",
                  DYNAMO_OPTION(early_inject_helper_dll));
        NULL_TERMINATE_BUFFER(buf);
        string_option_read_unlock();
        /* load the helper library, post syscall hook will fill in
         * ldrpLoadImportModule_address for us */
        early_inject_load_helper_dcontext = dcontext;
        /* FIXME : if we are early_injected and the load fails because either
         * of the helper dlls don't exist/can't be found the Ldr treats
         * that as a process init failure and aborts the process.  Wonder if
         * there's a flag we can pass to the Ldr to tell it not to do that.
         * Anyways, in normal usage we expect to use the parent's address when
         * early_injected (would only fail to do so if the parent was using
         * a different inject_location which would be unexpected in a product
         * configuration). */
        EXITING_DR();
        /* FIXME - we are making the assumption (currently true) that our
         * load_library() & free_library() routines themselves don't write to
         * any self protected regions; if that changes we may need special
         * versions here.
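         * The load/free pair below is only for its side effect: while the
         * helper dll maps in, the post-syscall hook records
         * ldrpLoadImportModule_address for us.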
*/ mod = load_library(buf); if (mod != NULL) { free_library(mod); } ENTERING_DR(); /* clean up & restore state */ dcontext->whereami = whereami_save; early_inject_load_helper_dcontext = NULL; dcontext->thread_record->under_dynamo_control = under_dr_save; /* Undo the kstack hack (see comment above) */ KSTOP_NOT_MATCHING_NOT_PROPAGATED(dispatch_num_exits); KSTOP_NOT_PROPAGATED(dispatch_num_exits); ASSERT(mod != NULL && ldrpLoadImportModule_address != NULL && "check that drearlyhelp*.dlls are installed"); /* FIXME - should we do anything if the address isn't found for some * reason (most likely would be the helper dlls didn't exist/couldn't * be found)? Could choose to fall back to another os version * appropriate location. As is, in release build we'd just fail to * follow children when we couldn't find the address (see FIXME in * inject_into_process()). I expect QA is going to run into this * occasionally (esp. till nodemgr etc. handle the helper dlls), so * can we do anything to make things easier/more apparent for them? */ early_inject_address = ldrpLoadImportModule_address; break; default: ASSERT_NOT_REACHED(); } /* FIXME - if failed to get address for any reason and we were early * injected, we could fall back to parent's address. */ ASSERT(early_inject_address != NULL); /* Since we are using a non-overridden Ldr* location can assert that * early_inject_address is in ntdll */ ASSERT(get_allocation_base(early_inject_address) == get_ntdll_base()); LOG(GLOBAL, LOG_TOP, 1, "early_inject found address "PFX" to use\n", early_inject_address); } /* Called with DR library mapped in but without its imports processed. * DR is not initialized at all so be careful what you call here. */ bool earliest_inject_init(byte *arg_ptr) { earliest_args_t *args = (earliest_args_t *) arg_ptr; /* Set up imports w/o making any library calls */ if (!privload_bootstrap_dynamorio_imports(args->dr_base, args->ntdll_base)) { /* XXX: how handle failure? too early to ASSERT. how bail? * should we just silently go native? */ } else { /* Restore +rx to hook location before DR init scans it */ uint old_prot; if (!bootstrap_protect_virtual_memory(args->hook_location, EARLY_INJECT_HOOK_SIZE, PAGE_EXECUTE_READ, &old_prot)) { /* XXX: again, how handle failure? */ } } /* We can't walk Ldr list to get this so set it from parent args */ set_ntdll_base(args->ntdll_base); /* We can't get DR path from Ldr list b/c DR won't be in there even once * it's initialized so we pass it in from parent. * Imports are set up so we can call strncpy now. */ strncpy(dynamorio_library_path, args->dynamorio_lib_path, BUFFER_SIZE_ELEMENTS(dynamorio_library_path)); NULL_TERMINATE_BUFFER(dynamorio_library_path); /* XXX i#627: handle extra early threads * "for apc early hook, need special handling in callback.c to replace * the early hook and then touch up the hook code to handle any queued * up threads (and be finally early remote thread safe)." * which implies the hook should have 1st thread invoke DR and the others * spin in some fashion: for now not handling super-early threads */ return !args->late_injection; } /* For cleanup we can't do before DR syscalls are set up */ void earliest_inject_cleanup(byte *arg_ptr) { earliest_args_t *args = (earliest_args_t *) arg_ptr; byte *tofree = args->tofree_base; NTSTATUS res; /* Free tofree (which contains args). * We could free this in earliest_inject_init() via adding * bootstrap_free_virtual_memory() but in case we need to add * more cleanup later, going this route. 
 */
    LOG(GLOBAL, LOG_ALL, 1, "freeing early inject args @"PFX"\n", tofree);
    res = nt_remote_free_virtual_memory(NT_CURRENT_PROCESS, tofree);
    ASSERT(NT_SUCCESS(res));
}

#define SECURITY_MAX_SID_STRING_SIZE \
    (2 + MAX_DWORD_STRING_LENGTH + 1 + MAX_DWORD_STRING_LENGTH \
     + (MAX_DWORD_STRING_LENGTH * SID_MAX_SUB_AUTHORITIES) + 1)
    /* S-SID_REVISION- + IdentifierAuthority- + subauthorities- + NULL */

static const char*
get_process_SID_string()
{
    static char process_SID[SECURITY_MAX_SID_STRING_SIZE];
    if (!process_SID[0]) {
        wchar_t sid_string[SECURITY_MAX_SID_STRING_SIZE];
        /* FIXME: we only need to query NtOpenProcessToken, but we'll
         * assume that this function is called early enough before any
         * impersonation could have taken place and NtOpenThreadToken
         */
        get_current_user_SID(sid_string, sizeof(sid_string));

        snprintf(process_SID, BUFFER_SIZE_ELEMENTS(process_SID), "%ls",
                 sid_string);
        NULL_TERMINATE_BUFFER(process_SID);
    }
    return process_SID;
}

static const PSID
get_Everyone_SID()
{
    static PSID everyone_SID = NULL;
    static UCHAR everyone_buf[LengthRequiredSID(1)];

    if (everyone_SID == NULL) {
        SID_IDENTIFIER_AUTHORITY world = SECURITY_WORLD_SID_AUTHORITY;
        everyone_SID = (PSID)everyone_buf;
        initialize_known_SID(&world, SECURITY_WORLD_RID, everyone_SID);
    }
    return everyone_SID;
}

/* default owner SID for created objects */
static const PSID
get_process_owner_SID()
{
    static PSID owner_SID = NULL;
    /* owner SID will be self-referenced in TOKEN_OWNER */
    static UCHAR owner_buf[SECURITY_MAX_SID_SIZE + sizeof(TOKEN_OWNER)];

    if (owner_SID == NULL) {
        PTOKEN_OWNER powner = (PTOKEN_OWNER)owner_buf;
        NTSTATUS res;
        ASSERT(!dynamo_initialized); /* .data still writable */
        /* initialization expected with os_user_directory() */
        res = get_primary_owner_token(powner, sizeof(owner_buf));
        ASSERT(NT_SUCCESS(res));

        if (!NT_SUCCESS(res)) {
            /* while we don't expect to fail even once, we better fail
             * all the time, otherwise we'll crash later when writing
             * to owner_buf */
            return NULL;
        }
        owner_SID = powner->Owner;
    }
    /* static buffer, no need to deallocate */
    return owner_SID;
}

static bool
os_validate_owner_equals(HANDLE file_or_directory_handle, PSID expected_owner)
{
    /* see comments in os_current_user_directory() on when this scheme
     * would work.
     *
     * Note that we only allow files used by the initial process, so we
     * must memoize the initial SID. */
    /* Note on Unix this scheme doesn't work - anyone can chown(2)
     * a directory or file to pretend to be created by the victim
     * - we can only ask a trusted component to create a directory
     * writable only by the corresponding user.  On Windows,
     * however, chown() requires restore or TCB privileges;
     * therefore it doesn't present a privilege escalation route. */

    /* FIXME: If we do allow anyone to create their own directory,
     * then we'd have to verify it wasn't created by somebody else -
     * after we open a file we should validate that we are its
     * rightful owner (and we'll assume we have maintained the correct
     * ACLs) to maintain that nobody else could have had write access
     * to the file. */

    /* Note that we assume that TokenUser == TokenOwner, so all
     * created files' owner will be the current user (in addition to
     * being readable by the current user).  We also assume that the
     * cache\ directory is on the local system.
* FIXME: case 10884 we can't assume that, we have to create our files explicitly * * (FIXME: unclear whether Machine account will be available for * us on the network for services) */ /* FIXME: having a open handle to the directory instead of * concatenating strings would allow us to do the check only on * the directory, and not on the files. We only need to make sure * there are no TOCTOU races: no symbolic links allowed, and * that directories cannot be renamed or deleted. */ /* just owner */ UCHAR sd_buf[SECURITY_MAX_SID_SIZE + sizeof(SECURITY_DESCRIPTOR)]; PSECURITY_DESCRIPTOR sd = (PSECURITY_DESCRIPTOR)sd_buf; /* it is really SECURITY_DESCRIPTOR_RELATIVE */ PSID owner; DWORD actual_sd_length; NTSTATUS res; /* This buffer must be aligned on a 4-byte boundary. */ ASSERT(ALIGNED(sd, sizeof(DWORD))); /* FIXME: unlike SIDs which we can bound, there is no good bound * for a complete SD. We need to ensure that only one SID would * be returned to us here. */ /* We need READ_CONTROL access to the file_or_directory_handle */ res = nt_query_security_object(file_or_directory_handle, OWNER_SECURITY_INFORMATION, sd, sizeof(sd_buf), &actual_sd_length); if (!NT_SUCCESS(res)) { if (res == STATUS_ACCESS_DENIED) { ASSERT_CURIOSITY(false && "verify handle allows READ_CONTROL"); } return false; } ASSERT(actual_sd_length < sizeof(sd_buf)); if (get_owner_sd(sd, &owner)) { /* FIXME: on Vista services using restricted SIDs may require * obtaining the SID that we can use for creating files */ if (!equal_sid(owner, expected_owner)) { /* !sid poi(owner) */ LOG(GLOBAL, LOG_TOP, 1, "os_validate_owner_equals: owner not matching expected_owner\n"); return false; } return true; } ASSERT_NOT_REACHED(); return false; } /* Recommended that callers check ownership of a file that is * guaranteed to not be writable. */ bool os_filesystem_supports_ownership(HANDLE file_or_directory_handle) { /* Can we verify we are on FAT32 in a documented way to be certain? */ /* Currently done by checking if cache\ directory is Owned by * Everyone - which certainly should only happen on FAT32. */ /* FIXME: Alternatively we can test for support for file * ID/reference, since creation by file reference is only * supported on NTFS */ /* either FAT32 or we have a proper owner */ if (os_validate_owner_equals(file_or_directory_handle, get_Everyone_SID())) { /* On FAT32 : * 0:000> !sid poi(owner) * SID is: S-1-1-0 Everyone * * We assume that a malicious user cannot set the SID to * Everyone. Although Everyone is not the same as Anonymous * Logon S-1-5-7, just in case malware can run as Everyone and * creates a file we cannot decide we're on FAT32 just based * on this for files that. 
*/ SYSLOG_INTERNAL_WARNING_ONCE("cache root directory is on FAT32, no security\n"); return false; } else { /* we have a real owner - presumably NTFS */ return true; } return false; } /* opens the cache\ directory that should be modifed only by trusted users */ /* and is used by both ASLR and persistent cache trusted producers */ HANDLE open_trusted_cache_root_directory(void) { char base_directory[MAXIMUM_PATH]; wchar_t wbuf[MAXIMUM_PATH]; HANDLE directory_handle; bool param_ok = false; if (DYNAMO_OPTION(aslr) != 0 || DYNAMO_OPTION(aslr_cache) != 0) { /* only use cache config var */ int retval = get_parameter(PARAM_STR(DYNAMORIO_VAR_CACHE_ROOT), base_directory, BUFFER_SIZE_ELEMENTS(base_directory)); param_ok = !IS_GET_PARAMETER_FAILURE(retval); } else { /* no aslr so this is just for pcache */ ASSERT(strcmp(DYNAMORIO_VAR_CACHE_ROOT, DYNAMORIO_VAR_PERSCACHE_ROOT) == 0); param_ok = perscache_dirname(base_directory, BUFFER_SIZE_ELEMENTS(base_directory)); } if (!param_ok || double_strchr(base_directory, DIRSEP, ALT_DIRSEP) == NULL) { #ifndef STATIC_LIBRARY SYSLOG_INTERNAL_WARNING("%s not correctly set!", DYNAMORIO_VAR_CACHE_ROOT); #endif return INVALID_HANDLE_VALUE; } NULL_TERMINATE_BUFFER(base_directory); if (!convert_to_NT_file_path(wbuf, base_directory, BUFFER_SIZE_ELEMENTS(wbuf))) return INVALID_HANDLE_VALUE; /* the cache root directory is supposed to be created by nodemgr * and owned by Administrators, and the directory ACL should not * allow changes. We should not create one if it doesn't exist, * even if we did we wouldn't have the correct ACLs for its * children. */ directory_handle = create_file(wbuf, true /* is_dir */, READ_CONTROL /* generic rights */, FILE_SHARE_READ /* case 10255: allow persisted cache files * in same directory */ | FILE_SHARE_WRITE, FILE_OPEN, true); if (directory_handle == INVALID_HANDLE_VALUE) { SYSLOG_INTERNAL_ERROR("%s=%s is invalid!", DYNAMORIO_VAR_CACHE_ROOT, base_directory); } return directory_handle; } bool os_user_directory_supports_ownership() { /* should evaluate early so no need for .data unprotection */ static int user_directory_has_ownership = -1; /* not evaluated yet */ /* note using explicit int, to not rely on bool true values */ if (user_directory_has_ownership < 0) { if (DYNAMO_OPTION(validate_owner_dir) || DYNAMO_OPTION(validate_owner_file)) { HANDLE root_handle = open_trusted_cache_root_directory(); /* Note that if root_handle is INVALID_HANDLE_VALUE we * don't care about user_directory_has_ownership, it is * undefined. Since all users that verify ownership * construct paths based on this directory, they should * all fail and we don't really care. We assume that this * directory is created with correct privileges, so if * anyone controls the registry key or can create the * directory we have lost already. (Interestingly, * nt_query_security_object() returns current user for * owner of -1, and so os_filesystem_supports_ownership() * does return true instead.) */ if (os_filesystem_supports_ownership(root_handle)) user_directory_has_ownership = 1; else user_directory_has_ownership = 0; close_handle(root_handle); } else { user_directory_has_ownership = 0; /* nobody cares whether it supports */ } } return (user_directory_has_ownership == 1); } /* validate we are the rightful owner */ /* Note. we assume all calls to os_validate_owner_equals are on the same volume as * DYNAMORIO_VAR_CACHE_ROOT. * Handle needs to have READ_CONTROL access (FILE_GENERIC_READ provides that). 
*/ bool os_validate_user_owned(HANDLE file_or_directory_handle) { /* note that Creator and Owner don't have to match, but we expect * that we'll be creating new files with current token as owner */ PSID process_SID = get_process_primary_SID(); /* Note we only trust the primary token! If we are impersonating, * we also need ACLs allowing us to open other files created by * the primary token */ if (os_validate_owner_equals(file_or_directory_handle, process_SID)) { return true; } if (!os_user_directory_supports_ownership()) { /* Although on FAT32 there is no owner (or any other ACLs), we * get as owner Everyone. Since file ACLs are unsupported by * file system on the system drive (where we install), we can * assume that privilege escalation is irrelevant for this * host. */ /* nobody really cares about this owner validation on FAT32 */ ASSERT(os_validate_owner_equals(file_or_directory_handle, get_Everyone_SID())); return true; } ASSERT_CURIOSITY(false && "unauthorized user tried to forge our files"); return false; } /* append per-user directory name to provided directory_prefix, * and optionally create a new one if possible * * Note 'current' is actually the primary process token: we currently * allow only read-only access for impersonated threads. */ bool os_current_user_directory(char *directory_prefix /* INOUT */, uint directory_len, bool create) { char *directory = directory_prefix; char *dirend = directory_prefix + strlen(directory_prefix); snprintf(dirend, directory_len - (dirend - directory_prefix), "%c%s", DIRSEP, get_process_SID_string()); directory_prefix[directory_len - 1] = '\0'; directory = directory_prefix; LOG(GLOBAL, LOG_CACHE, 2, "\tper-user dir is %s\n", directory); DODEBUG({ if (!equal_sid(get_process_owner_SID(), get_process_primary_SID())) { LOG(GLOBAL, LOG_CACHE, 1, "Default owner is not current user, we must be an Administrator?\n"); /* FIXME: we could try to really check */ } }); /* Note that if an application impersonates threads, data for a * single application will be spread across different users secure * storage locations. This may be a vulnerability - if a secure * server loads a DLL while impersonated we may be erroneously * using (without validation) a DLL controlled by lower privilege. * Delay-loaded DLLs may provide such unexpected DLL loads. * * ACLs: We may want to leave files readable by Everyone - allows * any impersonated threads to read files in the directory of the * original process token. (Note that Anonymous token belongs to * Everyone). World readable files also allow us to share files * produced by TCB services. Yet, for stronger security against * local privilege exploits, there is some value in not allowing * anyone else to read our persistent files - the layout may be * useful to attackers; and general need to know principle: * normally other process don't need to read these. */ /* FIXME: Of course, at beginning we want be dealing with * impersonation at all, but we should try to detect it here if we * fail to open a directory due to impersonated thread. */ /* create directory if it doesn't exist */ /* check for existence first so we can require new during creation */ if (!os_file_exists(directory, true/*is dir*/) && create) { /* CREATE_DIR_FORCE_OWNER case 10884 - NoDefaultAdminOwner - * the default owner doesn't have to be the current user, if * member of Administrators. 
Therefore we specify our own * SecurityDescriptor.Owner when creating a file so that we * don't use SE_OWNER_DEFAULTED, but we still want a default * DACL and we don't care about group. */ /* FIXME: we should ensure we do not follow symlinks! */ if (!os_create_dir(directory, CREATE_DIR_REQUIRE_NEW | CREATE_DIR_FORCE_OWNER)) { LOG(GLOBAL, LOG_CACHE, 2, "\terror creating per-user dir %s\n", directory); /* FIXME: currently this is expected for the 4.2 ACLs */ /* Note SYSLOG can be just a Warning since we will still * run correctly without persistence. */ SYSLOG_INTERNAL_ERROR_ONCE("Persistent cache per-user needed.\n" "mkdir \"%s\"\n" "cacls \"%s\" /E /G username:F", /* Note cacls needs a real user * name, while subinacl does take SIDs */ directory, directory); return false; } else { LOG(GLOBAL, LOG_CACHE, 2, "\tcreated per-user dir %s\n", directory); } } /* FIXME: case 8812 if the cache\ directory inheritable ACLs are * setup accordingly we should be able to automatically create a * our own per-user folder, without dealing with forging ACLs * here, and without asking a trusted components to create it for * us. * currently each user MUST call os_validate_user_owned() * before trusting a file, or if a directory handle is * guaranteed to be open at all times such that renaming is * disallowed, then only the directory needs to be validated */ return true; } /* checks for compatibility OS specific options, returns true if * modified the value of any options to make them compatible */ bool os_check_option_compatibility(void) { bool changed_options = false; bool os_has_aslr = get_os_version() >= (int)INTERNAL_OPTION(os_aslr_version); /* ASLR introduced in Vista Beta2, but we support only RTM+ so * WINDOWS_VERSION_VISTA */ if (!os_has_aslr) return false; if (TEST(OS_ASLR_DISABLE_PCACHE_ALL, DYNAMO_OPTION(os_aslr))) { /* completely disable pcache */ /* enabled by -desktop, but can be enabled independently as well */ if (DYNAMO_OPTION(coarse_enable_freeze)) { dynamo_options.coarse_enable_freeze = false; changed_options = true; } if (DYNAMO_OPTION(coarse_freeze_at_unload)) { dynamo_options.coarse_freeze_at_unload = false; changed_options = true; } if (DYNAMO_OPTION(use_persisted)) { dynamo_options.use_persisted = false; changed_options = true; } if (changed_options) SYSLOG_INTERNAL_WARNING_ONCE("pcache completely disabled, Vista+"); } /* note dynamorio.dll is not marked as ASLR friendly so we keep * using our own -aslr_dr */ if (TEST(OS_ASLR_DISABLE_PCACHE_ALL, DYNAMO_OPTION(os_aslr))) { /* completely disable ASLR */ /* enabled by -client, but can be enabled independently as well */ if (DYNAMO_OPTION(aslr) != 0) { dynamo_options.aslr = 0; changed_options = true; SYSLOG_INTERNAL_WARNING_ONCE("ASLR completely disabled, Vista+"); } if (DYNAMO_OPTION(aslr_cache) != 0) { dynamo_options.aslr_cache = 0; changed_options = true; } } ASSERT(os_has_aslr); return changed_options; } #endif /* !NOT_DYNAMORIO_CORE_PROPER: around most of file, to exclude preload */ size_t os_page_size(void) { /* FIXME i#1680: Determine page size using system call. */ return 4096; }
1
12,676
I think something like `size_res` would be more descriptive.
DynamoRIO-dynamorio
c
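The oldf above ends with os_page_size() hardcoding 4096 next to a FIXME (i#1680) about determining the page size with a system call. As a hedged illustration only — DynamoRIO itself would more likely issue a native NT query than go through the Win32 layer — the documented way to read the page size on Windows is GetSystemInfo():

#include <windows.h>
#include <stddef.h>

/* Hypothetical replacement for the hardcoded 4096: GetSystemInfo()
 * fills SYSTEM_INFO.dwPageSize with the VM page size. The value is
 * cached since it cannot change while the process runs (the race on
 * first use is benign: every thread computes the same answer). */
static size_t queried_page_size(void)
{
    static size_t page_size;
    if (page_size == 0) {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        page_size = (size_t)si.dwPageSize;
    }
    return page_size;
}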
@@ -96,9 +96,9 @@ module Bolt @target = target @value = {} @action = 'apply' - value['report'] = report if report - value['_error'] = error if error - value['_output'] = metrics_message if metrics_message + @value['report'] = report if report + @value['_error'] = error if error + @value['_output'] = metrics_message if metrics_message end def event_metrics
1
# frozen_string_literal: true require 'json' require 'bolt/error' require 'bolt/result' module Bolt class ApplyResult < Result def self.puppet_missing_error(result) error_hash = result.error_hash exit_code = error_hash['details']['exit_code'] if error_hash && error_hash['details'] # If we get exit code 126 or 127 back, it means the shebang command wasn't found; Puppet isn't present if [126, 127].include?(exit_code) { 'msg' => "Puppet is not installed on the target, please install it to enable 'apply'", 'kind' => 'bolt/apply-error' } elsif exit_code == 1 && (error_hash['msg'] =~ /Could not find executable 'ruby.exe'/ || error_hash['msg'] =~ /The term 'ruby.exe' is not recognized as the name of a cmdlet/) # Windows does not have Ruby present { 'msg' => "Puppet was not found on the target or in $env:ProgramFiles, please install it to enable 'apply'", 'kind' => 'bolt/apply-error' } elsif exit_code == 1 && error_hash['msg'] =~ /cannot load such file -- puppet \(LoadError\)/ # Windows uses a Ruby that doesn't have Puppet installed # TODO: fix so we don't find other Rubies, or point to a known issues URL for more info { 'msg' => 'Found a Ruby without Puppet present, please install Puppet ' \ "or remove Ruby from $env:Path to enable 'apply'", 'kind' => 'bolt/apply-error' } end end def self.resource_error(result) if result.value['status'] == 'failed' resources = result.value['resource_statuses'] failed = resources.select { |_, r| r['failed'] }.flat_map do |key, resource| resource['events'].select { |e| e['status'] == 'failure' }.map do |event| "\n #{key}: #{event['message']}" end end { 'msg' => "Resources failed to apply for #{result.target.name}#{failed.join}", 'kind' => 'bolt/resource-failure' } end end def self.invalid_report_error(result) # These are the keys ApplyResult methods rely on. expected_report_keys = %w[metrics resource_statuses status] missing_keys = expected_report_keys.reject { |k| result.value.include?(k) } unless missing_keys.empty? if result['_output'] # rubocop:disable Layout/LineLength msg = "Report result contains an '_output' key. Catalog application may have printed extraneous output to stdout: #{result['_output']}" # rubocop:enable Layout/LineLength else msg = "Report did not contain all expected keys missing: #{missing_keys.join(', ')}" end { 'msg' => msg, 'kind' => 'bolt/invalid-report' } end end def self.from_task_result(result) if (puppet_missing = puppet_missing_error(result)) new(result.target, error: puppet_missing, report: result.value.reject { |k| k == '_error' }) elsif !result.ok? 
new(result.target, error: result.error_hash) elsif (invalid_report = invalid_report_error(result)) new(result.target, error: invalid_report, report: result.value.reject { |k| %w[_error _output].include?(k) }) elsif (resource_error = resource_error(result)) new(result.target, error: resource_error, report: result.value.reject { |k| k == '_error' }) else new(result.target, report: result.value) end end # Other pcore methods are inherited from Result def _pcore_init_hash { 'target' => @target, 'error' => value['_error'], 'report' => value['report'] } end def initialize(target, error: nil, report: nil) @target = target @value = {} @action = 'apply' value['report'] = report if report value['_error'] = error if error value['_output'] = metrics_message if metrics_message end def event_metrics if (events = value.dig('report', 'metrics', 'resources', 'values')) events.each_with_object({}) { |ev, h| h[ev[0]] = ev[2] } end end def logs value.dig('report', 'logs') || [] end # Return only log messages associated with resources def resource_logs logs.reject { |log| log['source'] == 'Puppet' } end def metrics_message if (metrics = event_metrics) changed = metrics['changed'] failed = metrics['failed'] skipped = metrics['skipped'] unchanged = metrics['total'] - changed - failed - skipped noop = metrics['out_of_sync'] - changed - failed "changed: #{changed}, failed: #{failed}, unchanged: #{unchanged} skipped: #{skipped}, noop: #{noop}" end end def report @value['report'] end def generic_value {} end end end
1
17,145
Oh lol so this was...really more of a bug?
puppetlabs-bolt
rb
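The diff above prefixes value[...] with @, and the reviewer wonders whether the original was a bug. Assuming the parent Bolt::Result exposes a `value` reader, as the other methods in this file suggest, it was not quite a bug: `value['report'] = report` calls the inherited reader and mutates the hash it returns, which is @value; only a bare assignment like `value = {}` would silently create a local variable. A minimal sketch of the distinction, using hypothetical classes rather than Bolt's real ones:

class Base
  attr_reader :value
end

class Demo < Base
  def initialize
    @value = {}
    value['a'] = 1   # calls the inherited reader, then Hash#[]= on @value
    @value['b'] = 2  # equivalent, but does not depend on a reader existing
    # value = {}     # would create a *local* variable, leaving @value untouched
  end
end

p Demo.new.value # => {"a"=>1, "b"=>2}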
@@ -7,11 +7,11 @@ using System.Threading.Tasks; namespace Microsoft.Rest { - public class PlatformTaskEx + [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Naming", + "CA1711:IdentifiersShouldNotHaveIncorrectSuffix", + Justification="We think with Ex is better than using 2")] + public static class PlatformTaskEx { - //Per FxCop performance rule to prevent the compiler from generating a default constructor. - private PlatformTaskEx() { } - public static Task FromResult(object result) { #if NET45
1
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. using System; using System.Threading; using System.Threading.Tasks; namespace Microsoft.Rest { public class PlatformTaskEx { //Per FxCop performance rule to prevent the compiler from generating a default constructor. private PlatformTaskEx() { } public static Task FromResult(object result) { #if NET45 return Task.FromResult<object>(result); #else return TaskEx.FromResult<object>(result); #endif } public static Task Delay(TimeSpan delay) { #if NET45 return Task.Delay(delay); #else return TaskEx.Delay(delay); #endif } public static Task Delay(int millisecondsDelay, CancellationToken cancellationToken) { #if NET45 return Task.Delay(millisecondsDelay, cancellationToken); #else return TaskEx.Delay(millisecondsDelay, cancellationToken); #endif } } }
1
20,611
Well, it is providing a platform-neutral way of calling Task or TaskEx. We could just call it PlatformTask.
Azure-autorest
java
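The diff above replaces the FxCop-appeasing private constructor with a static class plus an explicit suppression of the CA1711 naming rule. A static class cannot be instantiated and gets no compiler-generated default constructor, so the manual private one becomes redundant. A hypothetical minimal illustration of the idiom (not Azure SDK code):

using System;

// 'static' both prevents instantiation and stops the compiler from
// emitting a default constructor, replacing the private-ctor trick.
public static class Helpers
{
    public static int Twice(int x) => x * 2;
}

public class Program
{
    public static void Main() => Console.WriteLine(Helpers.Twice(21)); // prints 42
}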
@@ -89,7 +89,7 @@ struct wlr_xwayland *wlr_xwayland_create(struct wl_display *wl_display, }; xwayland->server = wlr_xwayland_server_create(wl_display, &options); if (xwayland->server == NULL) { - free(xwayland->server); + free(xwayland); return NULL; }
1
#define _POSIX_C_SOURCE 200809L #include <errno.h> #include <fcntl.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <stdlib.h> #include <sys/socket.h> #include <sys/types.h> #include <sys/wait.h> #include <time.h> #include <unistd.h> #include <wayland-server-core.h> #include <wlr/util/log.h> #include <wlr/xwayland.h> #include "sockets.h" #include "util/signal.h" #include "xwayland/xwm.h" struct wlr_xwayland_cursor { uint8_t *pixels; uint32_t stride; uint32_t width; uint32_t height; int32_t hotspot_x; int32_t hotspot_y; }; static void handle_server_destroy(struct wl_listener *listener, void *data) { struct wlr_xwayland *xwayland = wl_container_of(listener, xwayland, server_destroy); wlr_xwayland_destroy(xwayland); } static void handle_server_ready(struct wl_listener *listener, void *data) { struct wlr_xwayland *xwayland = wl_container_of(listener, xwayland, server_ready); struct wlr_xwayland_server_ready_event *event = data; xwayland->xwm = xwm_create(xwayland, event->wm_fd); if (!xwayland->xwm) { return; } if (xwayland->seat) { xwm_set_seat(xwayland->xwm, xwayland->seat); } if (xwayland->cursor != NULL) { struct wlr_xwayland_cursor *cur = xwayland->cursor; xwm_set_cursor(xwayland->xwm, cur->pixels, cur->stride, cur->width, cur->height, cur->hotspot_x, cur->hotspot_y); free(cur); xwayland->cursor = NULL; } wlr_signal_emit_safe(&xwayland->events.ready, NULL); } void wlr_xwayland_destroy(struct wlr_xwayland *xwayland) { if (!xwayland) { return; } wl_list_remove(&xwayland->server_destroy.link); wl_list_remove(&xwayland->server_ready.link); wlr_xwayland_set_seat(xwayland, NULL); wlr_xwayland_server_destroy(xwayland->server); free(xwayland); } struct wlr_xwayland *wlr_xwayland_create(struct wl_display *wl_display, struct wlr_compositor *compositor, bool lazy) { struct wlr_xwayland *xwayland = calloc(1, sizeof(struct wlr_xwayland)); if (!xwayland) { return NULL; } xwayland->wl_display = wl_display; xwayland->compositor = compositor; wl_signal_init(&xwayland->events.new_surface); wl_signal_init(&xwayland->events.ready); struct wlr_xwayland_server_options options = { .lazy = lazy, .enable_wm = true, }; xwayland->server = wlr_xwayland_server_create(wl_display, &options); if (xwayland->server == NULL) { free(xwayland->server); return NULL; } xwayland->display_name = xwayland->server->display_name; xwayland->server_destroy.notify = handle_server_destroy; wl_signal_add(&xwayland->server->events.destroy, &xwayland->server_destroy); xwayland->server_ready.notify = handle_server_ready; wl_signal_add(&xwayland->server->events.ready, &xwayland->server_ready); return xwayland; } void wlr_xwayland_set_cursor(struct wlr_xwayland *xwayland, uint8_t *pixels, uint32_t stride, uint32_t width, uint32_t height, int32_t hotspot_x, int32_t hotspot_y) { if (xwayland->xwm != NULL) { xwm_set_cursor(xwayland->xwm, pixels, stride, width, height, hotspot_x, hotspot_y); return; } free(xwayland->cursor); xwayland->cursor = calloc(1, sizeof(struct wlr_xwayland_cursor)); if (xwayland->cursor == NULL) { return; } xwayland->cursor->pixels = pixels; xwayland->cursor->stride = stride; xwayland->cursor->width = width; xwayland->cursor->height = height; xwayland->cursor->hotspot_x = hotspot_x; xwayland->cursor->hotspot_y = hotspot_y; } static void xwayland_handle_seat_destroy(struct wl_listener *listener, void *data) { struct wlr_xwayland *xwayland = wl_container_of(listener, xwayland, seat_destroy); wlr_xwayland_set_seat(xwayland, NULL); } void wlr_xwayland_set_seat(struct wlr_xwayland *xwayland, struct 
wlr_seat *seat) { if (xwayland->seat) { wl_list_remove(&xwayland->seat_destroy.link); } xwayland->seat = seat; if (xwayland->xwm) { xwm_set_seat(xwayland->xwm, seat); } if (seat == NULL) { return; } xwayland->seat_destroy.notify = xwayland_handle_seat_destroy; wl_signal_add(&seat->events.destroy, &xwayland->seat_destroy); }
1
15,476
Typo: this should free `xwayland` instead
swaywm-wlroots
c
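The one-line fix above is the bug the review comment points at: when wlr_xwayland_server_create() fails it returns NULL, so free(xwayland->server) frees a NULL pointer (a no-op) while the freshly calloc'd xwayland leaks. A minimal standalone sketch of the corrected error-path pattern — the names here are illustrative, not wlroots API:

#include <stdlib.h>

struct child { int dummy; };
struct parent { struct child *child; };

/* Stand-in for a constructor that can fail; it always fails here. */
static struct child *child_create(void) { return NULL; }

static struct parent *parent_create(void) {
    struct parent *p = calloc(1, sizeof(*p));
    if (p == NULL) {
        return NULL;
    }
    p->child = child_create();
    if (p->child == NULL) {
        /* Free the object we allocated; p->child is NULL here, so freeing
         * it (as the pre-fix code did) would release nothing. */
        free(p);
        return NULL;
    }
    return p;
}

int main(void) {
    return parent_create() == NULL ? 0 : 1;
}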
@@ -19,9 +19,9 @@ package pingpong import ( "bytes" - "crypto/rand" "encoding/hex" "math" + "math/rand" "strings" "sync" "sync/atomic"
1
/* * Copyright (C) 2019 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package pingpong import ( "bytes" "crypto/rand" "encoding/hex" "math" "strings" "sync" "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" "github.com/mysteriumnetwork/node/eventbus" "github.com/mysteriumnetwork/node/identity" "github.com/mysteriumnetwork/node/identity/registry" "github.com/mysteriumnetwork/node/services/openvpn/discovery/dto" "github.com/mysteriumnetwork/payments/crypto" "github.com/pkg/errors" "github.com/rs/zerolog/log" ) // ErrConsumerPromiseValidationFailed represents an error where consumer tries to cheat us with incorrect promises. var ErrConsumerPromiseValidationFailed = errors.New("consumer failed to issue promise for the correct amount") // ErrAccountantFeeTooLarge indicates that we do not allow accountants with such high fees var ErrAccountantFeeTooLarge = errors.New("accountants fee exceeds") // PeerInvoiceSender allows to send invoices. type PeerInvoiceSender interface { Send(crypto.Invoice) error } type feeProvider interface { FetchSettleFees() (registry.FeesResponse, error) } type bcHelper interface { GetAccountantFee(accountantAddress common.Address) (uint16, error) IsRegistered(registryAddress, addressToCheck common.Address) (bool, error) } type providerInvoiceStorage interface { Get(providerIdentity, consumerIdentity identity.Identity) (crypto.Invoice, error) Store(providerIdentity, consumerIdentity identity.Identity, invoice crypto.Invoice) error GetNewAgreementID(providerIdentity identity.Identity) (uint64, error) StoreR(providerIdentity identity.Identity, agreementID uint64, r string) error GetR(providerID identity.Identity, agreementID uint64) (string, error) } type accountantPromiseStorage interface { Store(providerID, accountantID identity.Identity, promise AccountantPromise) error Get(providerID, accountantID identity.Identity) (AccountantPromise, error) } type accountantCaller interface { RequestPromise(em crypto.ExchangeMessage) (crypto.Promise, error) RevealR(r string, provider string, agreementID uint64) error } // ErrExchangeWaitTimeout indicates that we did not get an exchange message in time. var ErrExchangeWaitTimeout = errors.New("did not get a new exchange message") // ErrMissmatchingHashlock represents an error where a consumer sends a hashlock we're not waiting for var ErrMissmatchingHashlock = errors.New("hashlock missmatch") // ErrExchangeValidationFailed indicates that there was an error with the exchange signature. var ErrExchangeValidationFailed = errors.New("exchange validation failed") // ErrConsumerNotRegistered represents the error that the consumer is not registered var ErrConsumerNotRegistered = errors.New("consumer not registered") const chargePeriodLeeway = time.Hour * 2 type lastInvoice struct { invoice crypto.Invoice r []byte } // InvoiceTracker keeps tab of invoices and sends them to the consumer. 
type InvoiceTracker struct { peer identity.Identity stop chan struct{} peerInvoiceSender PeerInvoiceSender exchangeMessageChan chan crypto.ExchangeMessage chargePeriod time.Duration exchangeMessageWaitTimeout time.Duration accountantFailureCount uint64 notReceivedExchangeMessageCount uint64 maxNotReceivedExchangeMessages uint64 once sync.Once invoiceStorage providerInvoiceStorage accountantPromiseStorage accountantPromiseStorage timeTracker timeTracker paymentInfo dto.PaymentRate providerID identity.Identity accountantID identity.Identity lastInvoice lastInvoice lastExchangeMessage crypto.ExchangeMessage accountantCaller accountantCaller registryAddress string maxAccountantFailureCount uint64 maxAllowedAccountantFee uint16 bcHelper bcHelper publisher eventbus.Publisher feeProvider feeProvider transactorFee uint64 maxRRecoveryLength uint64 channelAddressCalculator channelAddressCalculator } // InvoiceTrackerDeps contains all the deps needed for invoice tracker. type InvoiceTrackerDeps struct { Peer identity.Identity PeerInvoiceSender PeerInvoiceSender InvoiceStorage providerInvoiceStorage TimeTracker timeTracker ChargePeriod time.Duration ExchangeMessageChan chan crypto.ExchangeMessage ExchangeMessageWaitTimeout time.Duration PaymentInfo dto.PaymentRate ProviderID identity.Identity AccountantID identity.Identity AccountantCaller accountantCaller AccountantPromiseStorage accountantPromiseStorage Registry string MaxAccountantFailureCount uint64 MaxRRecoveryLength uint64 MaxAllowedAccountantFee uint16 BlockchainHelper bcHelper Publisher eventbus.Publisher FeeProvider feeProvider ChannelAddressCalculator channelAddressCalculator } // NewInvoiceTracker creates a new instance of invoice tracker. func NewInvoiceTracker( itd InvoiceTrackerDeps) *InvoiceTracker { return &InvoiceTracker{ peer: itd.Peer, stop: make(chan struct{}), peerInvoiceSender: itd.PeerInvoiceSender, exchangeMessageChan: itd.ExchangeMessageChan, exchangeMessageWaitTimeout: itd.ExchangeMessageWaitTimeout, chargePeriod: itd.ChargePeriod, invoiceStorage: itd.InvoiceStorage, timeTracker: itd.TimeTracker, paymentInfo: itd.PaymentInfo, providerID: itd.ProviderID, accountantCaller: itd.AccountantCaller, accountantPromiseStorage: itd.AccountantPromiseStorage, accountantID: itd.AccountantID, maxNotReceivedExchangeMessages: calculateMaxNotReceivedExchangeMessageCount(chargePeriodLeeway, itd.ChargePeriod), maxAccountantFailureCount: itd.MaxAccountantFailureCount, maxAllowedAccountantFee: itd.MaxAllowedAccountantFee, bcHelper: itd.BlockchainHelper, publisher: itd.Publisher, registryAddress: itd.Registry, feeProvider: itd.FeeProvider, channelAddressCalculator: itd.ChannelAddressCalculator, maxRRecoveryLength: itd.MaxRRecoveryLength, } } func calculateMaxNotReceivedExchangeMessageCount(chargeLeeway, chargePeriod time.Duration) uint64 { return uint64(math.Round(float64(chargeLeeway) / float64(chargePeriod))) } func (it *InvoiceTracker) generateInitialInvoice() error { agreementID, err := it.invoiceStorage.GetNewAgreementID(it.providerID) if err != nil { return errors.Wrap(err, "could not get new agreement id") } r := it.generateR() invoice := crypto.CreateInvoice(agreementID, it.paymentInfo.GetPrice().Amount, 0, r) invoice.Provider = it.providerID.Address it.lastInvoice = lastInvoice{ invoice: invoice, r: r, } return nil } // Start stars the invoice tracker func (it *InvoiceTracker) Start() error { log.Debug().Msg("Starting...") it.timeTracker.StartTracking() isConsumerRegistered, err := 
it.bcHelper.IsRegistered(common.HexToAddress(it.registryAddress), it.peer.ToCommonAddress()) if err != nil { return errors.Wrap(err, "could not check customer identity registration status") } if !isConsumerRegistered { return ErrConsumerNotRegistered } fees, err := it.feeProvider.FetchSettleFees() if err != nil { return errors.Wrap(err, "could not fetch fees") } it.transactorFee = fees.Fee fee, err := it.bcHelper.GetAccountantFee(common.HexToAddress(it.accountantID.Address)) if err != nil { return errors.Wrap(err, "could not get accountants fee") } if fee > it.maxAllowedAccountantFee { log.Error().Msgf("Accountant fee too large, asking for %v where %v is the limit", fee, it.maxAllowedAccountantFee) return ErrAccountantFeeTooLarge } err = it.generateInitialInvoice() if err != nil { return errors.Wrap(err, "could not generate initial invoice") } // give the consumer a second to start up his payments before sending the first request firstSend := time.After(time.Second) for { select { case <-firstSend: err := it.sendInvoiceExpectExchangeMessage() if err != nil { return err } case <-it.stop: return nil case <-time.After(it.chargePeriod): err := it.sendInvoiceExpectExchangeMessage() if err != nil { return err } } } } func (it *InvoiceTracker) markExchangeMessageNotReceived() { atomic.AddUint64(&it.notReceivedExchangeMessageCount, 1) } func (it *InvoiceTracker) resetNotReceivedExchangeMessageCount() { atomic.SwapUint64(&it.notReceivedExchangeMessageCount, 0) } func (it *InvoiceTracker) getNotReceivedExchangeMessageCount() uint64 { return atomic.LoadUint64(&it.notReceivedExchangeMessageCount) } func (it *InvoiceTracker) generateR() []byte { r := make([]byte, 32) rand.Read(r) return r } func (it *InvoiceTracker) sendInvoiceExpectExchangeMessage() error { // TODO: this should be calculated according to the passed in payment period shouldBe := uint64(math.Trunc(it.timeTracker.Elapsed().Minutes() * float64(it.paymentInfo.GetPrice().Amount))) // In case we're sending a first invoice, there might be a big missmatch percentage wise on the consumer side. // This is due to the fact that both payment providers start at different times. // To compensate for this, be a bit more lenient on the first invoice - ask for a reduced amount. // Over the long run, this becomes redundant as the difference should become miniscule. 
if it.lastExchangeMessage.AgreementTotal == 0 { shouldBe = uint64(math.Trunc(float64(shouldBe) * 0.8)) log.Debug().Msgf("Being lenient for the first payment, asking for %v", shouldBe) } r := it.generateR() invoice := crypto.CreateInvoice(it.lastInvoice.invoice.AgreementID, shouldBe, it.transactorFee, r) invoice.Provider = it.providerID.Address err := it.peerInvoiceSender.Send(invoice) if err != nil { return err } it.lastInvoice = lastInvoice{ invoice: invoice, r: r, } err = it.invoiceStorage.Store(it.providerID, it.peer, invoice) if err != nil { return errors.Wrap(err, "could not store invoice") } err = it.receiveExchangeMessageOrTimeout() if err != nil { handlerErr := it.handleExchangeMessageReceiveError(err) if handlerErr != nil { return err } } else { it.resetNotReceivedExchangeMessageCount() } return nil } func (it *InvoiceTracker) handleExchangeMessageReceiveError(err error) error { // if it's a timeout, we'll want to ignore it if we're not exceeding maxNotReceivedexchangeMessages if err == ErrExchangeWaitTimeout || err == ErrMissmatchingHashlock { it.markExchangeMessageNotReceived() if it.getNotReceivedExchangeMessageCount() >= it.maxNotReceivedExchangeMessages { return err } log.Warn().Err(err).Msg("Failed to receive exchangeMessage") return nil } return err } func (it *InvoiceTracker) incrementAccountantFailureCount() { atomic.AddUint64(&it.accountantFailureCount, 1) } func (it *InvoiceTracker) resetAccountantFailureCount() { atomic.SwapUint64(&it.accountantFailureCount, 0) } func (it *InvoiceTracker) getAccountantFailureCount() uint64 { return atomic.LoadUint64(&it.accountantFailureCount) } func (it *InvoiceTracker) validateExchangeMessage(em crypto.ExchangeMessage) error { peerAddr := common.HexToAddress(it.peer.Address) if res := em.IsMessageValid(peerAddr); !res { return ErrExchangeValidationFailed } signer, err := em.Promise.RecoverSigner() if err != nil { return errors.Wrap(err, "could not recover promise signature") } if signer.Hex() != peerAddr.Hex() { return errors.New("identity missmatch") } if em.Promise.Amount < it.lastExchangeMessage.Promise.Amount { log.Warn().Msgf("Consumer sent an invalid amount. Expected < %v, got %v", it.lastExchangeMessage.Promise.Amount, em.Promise.Amount) return errors.Wrap(ErrConsumerPromiseValidationFailed, "invalid amount") } addr, err := it.channelAddressCalculator.GetChannelAddress(it.peer) if err != nil { return errors.Wrap(err, "could not generate channel address") } expectedChannel, err := hex.DecodeString(strings.TrimPrefix(addr.Hex(), "0x")) if err != nil { return errors.Wrap(err, "could not decode expected chanel") } if !bytes.Equal(expectedChannel, em.Promise.ChannelID) { log.Warn().Msgf("Consumer sent an invalid channel address. Expected %q, got %q", addr, hex.EncodeToString(em.Promise.ChannelID)) return errors.Wrap(ErrConsumerPromiseValidationFailed, "invalid channel address") } return nil } func (it *InvoiceTracker) checkForCorrectHashlock(em crypto.ExchangeMessage) error { hashlock, err := hex.DecodeString(strings.TrimPrefix(it.lastInvoice.invoice.Hashlock, "0x")) if err != nil { return errors.Wrap(err, "could not decode hashlock") } if !bytes.Equal(hashlock, em.Promise.Hashlock) { log.Warn().Msgf("Consumer sent an invalid hashlock. Expected %q, got %q. 
Skipping", it.lastInvoice.invoice.Hashlock, hex.EncodeToString(em.Promise.Hashlock)) return ErrMissmatchingHashlock } return nil } func (it *InvoiceTracker) receiveExchangeMessageOrTimeout() error { select { case pm := <-it.exchangeMessageChan: err := it.checkForCorrectHashlock(pm) if err != nil { return err } err = it.validateExchangeMessage(pm) if err != nil { return err } it.lastExchangeMessage = pm needsRevealing := false accountantPromise, err := it.accountantPromiseStorage.Get(it.providerID, it.accountantID) switch err { case nil: needsRevealing = !accountantPromise.Revealed break case ErrNotFound: needsRevealing = false break default: return errors.Wrap(err, "could not get accountant promise") } if needsRevealing { err = it.accountantCaller.RevealR(accountantPromise.R, it.providerID.Address, accountantPromise.AgreementID) if err != nil { log.Error().Err(err).Msg("Could not reveal R") it.incrementAccountantFailureCount() if it.getAccountantFailureCount() > it.maxAccountantFailureCount { return errors.Wrap(err, "could not call accountant") } log.Warn().Msg("Ignoring accountant error, we haven't reached the error threshold yet") return nil } it.resetAccountantFailureCount() accountantPromise.Revealed = true err = it.accountantPromiseStorage.Store(it.providerID, it.accountantID, accountantPromise) if err != nil { return errors.Wrap(err, "could not store accountant promise") } log.Debug().Msg("Accountant promise stored") } err = it.invoiceStorage.StoreR(it.providerID, it.lastInvoice.invoice.AgreementID, hex.EncodeToString(it.lastInvoice.r)) if err != nil { return errors.Wrap(err, "could not store r") } promise, err := it.accountantCaller.RequestPromise(pm) if err != nil { log.Warn().Err(err).Msg("Could not call accountant") // TODO: handle this better if strings.Contains(err.Error(), "400 Bad Request") { recoveryError := it.initiateRRecovery() if recoveryError != nil { return errors.Wrap(err, "could not recover R") } } it.incrementAccountantFailureCount() if it.getAccountantFailureCount() > it.maxAccountantFailureCount { return errors.Wrap(err, "could not call accountant") } log.Warn().Msg("Ignoring accountant error, we haven't reached the error threshold yet") return nil } it.resetAccountantFailureCount() ap := AccountantPromise{ Promise: promise, R: hex.EncodeToString(it.lastInvoice.r), Revealed: false, AgreementID: it.lastInvoice.invoice.AgreementID, } err = it.accountantPromiseStorage.Store(it.providerID, it.accountantID, ap) if err != nil { return errors.Wrap(err, "could not store accountant promise") } log.Debug().Msg("Accountant promise stored") promise.R = it.lastInvoice.r it.publisher.Publish(AccountantPromiseTopic, AccountantPromiseEventPayload{ Promise: promise, AccountantID: it.accountantID, ProviderID: it.providerID, }) it.resetAccountantFailureCount() case <-time.After(it.exchangeMessageWaitTimeout): return ErrExchangeWaitTimeout case <-it.stop: return nil } return nil } func (it *InvoiceTracker) initiateRRecovery() error { currentAgreement := it.lastInvoice.invoice.AgreementID var minBound uint64 = 1 if currentAgreement > it.maxRRecoveryLength { minBound = currentAgreement - it.maxRRecoveryLength } for i := currentAgreement; i >= minBound; i-- { r, err := it.invoiceStorage.GetR(it.providerID, i) if err != nil { return errors.Wrap(err, "could not get R") } err = it.accountantCaller.RevealR(r, it.providerID.Address, i) if err != nil { log.Warn().Err(err).Msgf("revealing %v", i) } else { log.Info().Msg("r recovered") return nil } } return errors.New("R recovery failed") } // 
Stop stops the invoice tracker. func (it *InvoiceTracker) Stop() { it.once.Do(func() { log.Debug().Msg("Stopping...") close(it.stop) }) }
1
15,361
Is it ok to use "math/rand" instead of "crypto/rand" here? Do we need to protect this ID somehow?
mysteriumnetwork-node
go
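The diff above swaps the crypto/rand import for math/rand, and the reviewer asks whether the R value needs protecting. In this file generateR() fills the 32-byte preimage of a payment hashlock (and ignores the error from rand.Read), so unpredictability plausibly matters; math/rand also exposes a top-level Read, so the code keeps compiling but becomes deterministic once the seed is known. A minimal standalone sketch of the more defensive form — an illustration, not the project's actual fix:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// generateR returns a 32-byte secret suitable for use as a hashlock
// preimage. crypto/rand is used because the value must be unpredictable
// to the counterparty; math/rand yields reproducible sequences.
func generateR() ([]byte, error) {
	r := make([]byte, 32)
	if _, err := rand.Read(r); err != nil {
		// Unlike the generateR above, surface the failure rather than
		// returning a partially filled (or all-zero) buffer.
		return nil, fmt.Errorf("could not generate R: %w", err)
	}
	return r, nil
}

func main() {
	r, err := generateR()
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(r))
}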
@@ -2,7 +2,7 @@ package nats_dialog import ( "fmt" - "github.com/mgutz/logxi/v1" + log "github.com/cihub/seelog" "github.com/mysterium/node/communication" "github.com/mysterium/node/communication/nats" "github.com/mysterium/node/communication/nats_discovery"
1
package nats_dialog import ( "fmt" "github.com/mgutz/logxi/v1" "github.com/mysterium/node/communication" "github.com/mysterium/node/communication/nats" "github.com/mysterium/node/communication/nats_discovery" "github.com/mysterium/node/identity" dto_discovery "github.com/mysterium/node/service_discovery/dto" ) func NewDialogEstablisher(identity identity.Identity) *dialogEstablisher { return &dialogEstablisher{ myIdentity: identity, contactAddressFactory: func(contact dto_discovery.Contact) (*nats_discovery.NatsAddress, error) { address, err := nats_discovery.NewAddressForContact(contact) if err == nil { err = address.Connect() } return address, err }, } } const establisherLogPrefix = "[NATS.DialogEstablisher] " type dialogEstablisher struct { myIdentity identity.Identity contactAddressFactory func(contact dto_discovery.Contact) (*nats_discovery.NatsAddress, error) } func (establisher *dialogEstablisher) CreateDialog(contact dto_discovery.Contact) (communication.Dialog, error) { log.Info(establisherLogPrefix, fmt.Sprintf("Connecting to: %#v", contact)) contactAddress, err := establisher.contactAddressFactory(contact) if err != nil { return nil, fmt.Errorf("Failed to connect to: %#v. %s", contact, err) } contactSender := nats.NewSender(contactAddress.GetConnection(), contactAddress.GetTopic()) response, err := contactSender.Request(&dialogCreateProducer{ &dialogCreateRequest{ IdentityId: establisher.myIdentity.Address, }, }) if err != nil { return nil, fmt.Errorf("Dialog creation error. %s", err) } if response.(*dialogCreateResponse).Reason != 200 { return nil, fmt.Errorf("Dialog creation rejected. %#v", response) } dialog := establisher.newDialogToContact(contactAddress) log.Info(establisherLogPrefix, fmt.Sprintf("Dialog established with: %#v", contact)) return dialog, err } func (establisher *dialogEstablisher) newDialogToContact(contactAddress *nats_discovery.NatsAddress) *dialog { subTopic := contactAddress.GetTopic() + "." + establisher.myIdentity.Address return &dialog{ Sender: nats.NewSender(contactAddress.GetConnection(), subTopic), Receiver: nats.NewReceiver(contactAddress.GetConnection(), subTopic), } }
1
9,993
Why did we have `logxi` in the first place? Maybe it makes sense to remove it from `glide.lock`? It will save us in the future from using a logger which is not working.
mysteriumnetwork-node
go
@@ -269,9 +269,17 @@ public class DataCitation { out.write(title); out.write("},\r\n"); } + if(getUNF() != null){ + out.write("UNF = {"); + out.write(UNF); + out.write("},\r\n"); + } out.write("year = {"); out.write(year); out.write("},\r\n"); + out.write("version = {"); + out.write(version); + out.write("},\r\n"); out.write("doi = {"); out.write(persistentId.getAuthority()); out.write("/");
1
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package edu.harvard.iq.dataverse; import edu.harvard.iq.dataverse.harvest.client.HarvestingClient; import java.io.BufferedWriter; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Writer; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.logging.Level; import java.util.logging.Logger; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; import javax.ejb.EJBException; import javax.xml.stream.XMLOutputFactory; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamWriter; import edu.harvard.iq.dataverse.util.BundleUtil; import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.lang.StringUtils; /** * * @author gdurand, qqmyers */ public class DataCitation { private static final Logger logger = Logger.getLogger(DataCitation.class.getCanonicalName()); private List<String> authors = new ArrayList<String>(); private List<String> producers = new ArrayList<String>(); private String title; private String fileTitle = null; private String year; private Date date; private GlobalId persistentId; private String version; private String UNF = null; private String publisher; private boolean direct; private List<String> funders; private String seriesTitle; private String description; private List<String> datesOfCollection; private List<String> keywords; private List<String> kindsOfData; private List<String> languages; private List<String> spatialCoverages; private List<DatasetField> optionalValues = new ArrayList<>(); private int optionalURLcount = 0; public DataCitation(DatasetVersion dsv) { this(dsv, false); } public DataCitation(DatasetVersion dsv, boolean direct) { this.direct = direct; getCommonValuesFrom(dsv); // The Global Identifier: // It is always part of the citation for the local datasets; // And for *some* harvested datasets. 
persistentId = getPIDFrom(dsv, dsv.getDataset()); // UNF UNF = dsv.getUNF(); // optional values for (DatasetFieldType dsfType : dsv.getDataset().getOwner().getCitationDatasetFieldTypes()) { DatasetField dsf = dsv.getDatasetField(dsfType); if (dsf != null) { optionalValues.add(dsf); if (dsf.getDatasetFieldType().getFieldType().equals(DatasetFieldType.FieldType.URL)) { optionalURLcount++; } } } } public DataCitation(FileMetadata fm) { this(fm, false); } public DataCitation(FileMetadata fm, boolean direct) { this.direct = direct; DatasetVersion dsv = fm.getDatasetVersion(); getCommonValuesFrom(dsv); // file Title for direct File citation fileTitle = fm.getLabel(); DataFile df = fm.getDataFile(); // File description description = fm.getDescription(); // The Global Identifier of the Datafile (if published and isDirect==true) or Dataset as appropriate persistentId = getPIDFrom(dsv, df); // UNF if (df.isTabularData() && df.getUnf() != null && !df.getUnf().isEmpty()) { UNF = df.getUnf(); } } private void getCommonValuesFrom(DatasetVersion dsv) { getAuthorsAndProducersFrom(dsv); funders = dsv.getUniqueGrantAgencyValues(); kindsOfData = dsv.getKindOfData(); // publication year date = getDateFrom(dsv); year = new SimpleDateFormat("yyyy").format(date); datesOfCollection = dsv.getDatesOfCollection(); title = dsv.getTitle(); seriesTitle = dsv.getSeriesTitle(); keywords = dsv.getKeywords(); languages = dsv.getLanguages(); spatialCoverages = dsv.getSpatialCoverages(); publisher = getPublisherFrom(dsv); version = getVersionFrom(dsv); } public String getAuthorsString() { return String.join("; ", authors); } public String getTitle() { return title; } public String getFileTitle() { return fileTitle; } public boolean isDirect() { return direct; } public String getYear() { return year; } public GlobalId getPersistentId() { return persistentId; } public String getVersion() { return version; } public String getUNF() { return UNF; } public String getPublisher() { return publisher; } @Override public String toString() { return toString(false); } public String toString(boolean html) { // first add comma separated parts String separator = ", "; List<String> citationList = new ArrayList<>(); citationList.add(formatString(getAuthorsString(), html)); citationList.add(year); if ((fileTitle != null) && isDirect()) { citationList.add(formatString(fileTitle, html, "\"")); citationList.add(formatString(title, html, "<i>", "</i>")); } else { citationList.add(formatString(title, html, "\"")); } if (persistentId != null) { // always show url format citationList.add(formatURL(persistentId.toURL().toString(), persistentId.toURL().toString(), html)); } citationList.add(formatString(publisher, html)); citationList.add(version); StringBuilder citation = new StringBuilder(citationList.stream().filter(value -> !StringUtils.isEmpty(value)) .collect(Collectors.joining(separator))); if ((fileTitle != null) && !isDirect()) { citation.append("; " + formatString(fileTitle, html, "") + " [fileName]"); } // append UNF if (!StringUtils.isEmpty(UNF)) { citation.append(separator).append(UNF).append(" [fileUNF]"); } for (DatasetField dsf : optionalValues) { String displayName = dsf.getDatasetFieldType().getDisplayName(); String displayValue; if (dsf.getDatasetFieldType().getFieldType().equals(DatasetFieldType.FieldType.URL)) { displayValue = formatURL(dsf.getDisplayValue(), dsf.getDisplayValue(), html); if (optionalURLcount == 1) { displayName = "URL"; } } else { displayValue = formatString(dsf.getDisplayValue(), html); } citation.append(" 
[").append(displayName).append(": ").append(displayValue).append("]"); } return citation.toString(); } public String toBibtexString() { ByteArrayOutputStream buffer = new ByteArrayOutputStream(); try { writeAsBibtexCitation(buffer); } catch (IOException e) { e.printStackTrace(); } //Use UTF-8? return buffer.toString(); } public void writeAsBibtexCitation(OutputStream os) throws IOException { // Use UTF-8 Writer out = new BufferedWriter(new OutputStreamWriter(os, "utf-8")); if(getFileTitle() !=null && isDirect()) { out.write("@incollection{"); } else { out.write("@data{"); } out.write(persistentId.getIdentifier() + "_" + year + "," + "\r\n"); out.write("author = {"); out.write(String.join(" and ", authors)); out.write("},\r\n"); out.write("publisher = {"); out.write(publisher); out.write("},\r\n"); if(getFileTitle() !=null && isDirect()) { out.write("title = {"); out.write(fileTitle); out.write("},\r\n"); out.write("booktitle = {"); out.write(title); out.write("},\r\n"); } else { out.write("title = {"); out.write(title); out.write("},\r\n"); } out.write("year = {"); out.write(year); out.write("},\r\n"); out.write("doi = {"); out.write(persistentId.getAuthority()); out.write("/"); out.write(persistentId.getIdentifier()); out.write("},\r\n"); out.write("url = {"); out.write(persistentId.toURL().toString()); out.write("}\r\n"); out.write("}\r\n"); out.flush(); } public String toRISString() { ByteArrayOutputStream buffer = new ByteArrayOutputStream(); try { writeAsRISCitation(buffer); } catch (IOException e) { e.printStackTrace(); } //Use UTF-8? return buffer.toString(); } public void writeAsRISCitation(OutputStream os) throws IOException { // Use UTF-8 Writer out = new BufferedWriter(new OutputStreamWriter(os, "utf-8")); out.write("Provider: " + publisher + "\r\n"); out.write("Content: text/plain; charset=\"utf-8\"" + "\r\n"); // Using type "DATA" - see https://github.com/IQSS/dataverse/issues/4816 if ((getFileTitle()!=null)&&isDirect()) { out.write("TY - DATA" + "\r\n"); out.write("T1 - " + getFileTitle() + "\r\n"); out.write("T2 - " + getTitle() + "\r\n"); } else { out.write("TY - DATA" + "\r\n"); out.write("T1 - " + getTitle() + "\r\n"); } if (seriesTitle != null) { out.write("T3 - " + seriesTitle + "\r\n"); } if(description!=null) { out.write("AB - " + flattenHtml(description) + "\r\n"); } for (String author : authors) { out.write("AU - " + author + "\r\n"); } if (!producers.isEmpty()) { for (String author : producers) { out.write("A2 - " + author + "\r\n"); } } if (!funders.isEmpty()) { for (String author : funders) { out.write("A4 - " + author + "\r\n"); } } if (!kindsOfData.isEmpty()) { for (String kod : kindsOfData) { out.write("C3 - " + kod + "\r\n"); } } if (!datesOfCollection.isEmpty()) { for (String dateRange : datesOfCollection) { out.write("DA - " + dateRange + "\r\n"); } } if (persistentId != null) { out.write("DO - " + persistentId.toString() + "\r\n"); } out.write("ET - " + version + "\r\n"); if (!keywords.isEmpty()) { for (String keyword : keywords) { out.write("KW - " + keyword + "\r\n"); } } if (!languages.isEmpty()) { for (String lang : languages) { out.write("LA - " + lang + "\r\n"); } } out.write("PY - " + year + "\r\n"); if (!spatialCoverages.isEmpty()) { for (String coverage : spatialCoverages) { out.write("RI - " + coverage + "\r\n"); } } out.write("SE - " + date + "\r\n"); out.write("UR - " + persistentId.toURL().toString() + "\r\n"); out.write("PB - " + publisher + "\r\n"); // a DataFile citation also includes filename und UNF, if applicable: if (getFileTitle() != 
null) { if(!isDirect()) { out.write("C1 - " + getFileTitle() + "\r\n"); } if (getUNF() != null) { out.write("C2 - " + getUNF() + "\r\n"); } } // closing element: out.write("ER - \r\n"); out.flush(); } private XMLOutputFactory xmlOutputFactory = null; public String toEndNoteString() { ByteArrayOutputStream outStream = new ByteArrayOutputStream(); writeAsEndNoteCitation(outStream); String xml = outStream.toString(); return xml; } public void writeAsEndNoteCitation(OutputStream os) { xmlOutputFactory = javax.xml.stream.XMLOutputFactory.newInstance(); XMLStreamWriter xmlw = null; try { xmlw = xmlOutputFactory.createXMLStreamWriter(os); xmlw.writeStartDocument(); createEndNoteXML(xmlw); xmlw.writeEndDocument(); } catch (XMLStreamException ex) { Logger.getLogger("global").log(Level.SEVERE, null, ex); throw new EJBException("ERROR occurred during creating endnote xml.", ex); } finally { try { if (xmlw != null) { xmlw.close(); } } catch (XMLStreamException ex) { } } } private void createEndNoteXML(XMLStreamWriter xmlw) throws XMLStreamException { xmlw.writeStartElement("xml"); xmlw.writeStartElement("records"); xmlw.writeStartElement("record"); // "Ref-type" indicates which of the (numerous!) available EndNote // schemas this record will be interpreted as. // This is relatively important. Certain fields with generic // names like "custom1" and "custom2" become very specific things // in specific schemas; for example, custom1 shows as "legal notice" // in "Journal Article" (ref-type 84), or as "year published" in // "Government Document". // We don't want the UNF to show as a "legal notice"! // We have found a ref-type that works ok for our purposes - // "Dataset" (type 59). In this one, the fields Custom1 // and Custom2 are not translated and just show as is. // And "Custom1" still beats "legal notice". // -- L.A. 
12.12.2014 beta 10 // and see https://github.com/IQSS/dataverse/issues/4816 xmlw.writeStartElement("ref-type"); xmlw.writeAttribute("name", "Dataset"); xmlw.writeCharacters("59"); xmlw.writeEndElement(); // ref-type xmlw.writeStartElement("contributors"); if (!authors.isEmpty()) { xmlw.writeStartElement("authors"); for (String author : authors) { xmlw.writeStartElement("author"); xmlw.writeCharacters(author); xmlw.writeEndElement(); // author } xmlw.writeEndElement(); // authors } if (!producers.isEmpty()) { xmlw.writeStartElement("secondary-authors"); for (String producer : producers) { xmlw.writeStartElement("author"); xmlw.writeCharacters(producer); xmlw.writeEndElement(); // author } xmlw.writeEndElement(); // secondary-authors } if (!funders.isEmpty()) { xmlw.writeStartElement("subsidiary-authors"); for (String funder : funders) { xmlw.writeStartElement("author"); xmlw.writeCharacters(funder); xmlw.writeEndElement(); // author } xmlw.writeEndElement(); // subsidiary-authors } xmlw.writeEndElement(); // contributors xmlw.writeStartElement("titles"); if ((fileTitle != null) && isDirect()) { xmlw.writeStartElement("title"); xmlw.writeCharacters(fileTitle); xmlw.writeEndElement(); // title xmlw.writeStartElement("secondary-title"); xmlw.writeCharacters(title); xmlw.writeEndElement(); // secondary-title } else { xmlw.writeStartElement("title"); xmlw.writeCharacters(title); xmlw.writeEndElement(); // title } if (seriesTitle != null) { xmlw.writeStartElement("tertiary-title"); xmlw.writeCharacters(seriesTitle); xmlw.writeEndElement(); // tertiary-title } xmlw.writeEndElement(); // titles xmlw.writeStartElement("section"); String sectionString; sectionString = new SimpleDateFormat("yyyy-MM-dd").format(date); xmlw.writeCharacters(sectionString); xmlw.writeEndElement(); // section xmlw.writeStartElement("abstract"); if(description!=null) { xmlw.writeCharacters(flattenHtml(description)); } xmlw.writeEndElement(); // abstract xmlw.writeStartElement("dates"); xmlw.writeStartElement("year"); xmlw.writeCharacters(year); xmlw.writeEndElement(); // year if (!datesOfCollection.isEmpty()) { xmlw.writeStartElement("pub-dates"); for (String dateRange : datesOfCollection) { xmlw.writeStartElement("date"); xmlw.writeCharacters(dateRange); xmlw.writeEndElement(); // date } xmlw.writeEndElement(); // pub-dates } xmlw.writeEndElement(); // dates xmlw.writeStartElement("edition"); xmlw.writeCharacters(version); xmlw.writeEndElement(); // edition if (!keywords.isEmpty()) { xmlw.writeStartElement("keywords"); for (String keyword : keywords) { xmlw.writeStartElement("keyword"); xmlw.writeCharacters(keyword); xmlw.writeEndElement(); // keyword } xmlw.writeEndElement(); // keywords } if (!kindsOfData.isEmpty()) { for (String kod : kindsOfData) { xmlw.writeStartElement("custom3"); xmlw.writeCharacters(kod); xmlw.writeEndElement(); // custom3 } } if (!languages.isEmpty()) { for (String lang : languages) { xmlw.writeStartElement("language"); xmlw.writeCharacters(lang); xmlw.writeEndElement(); // language } } xmlw.writeStartElement("publisher"); xmlw.writeCharacters(publisher); xmlw.writeEndElement(); // publisher if (!spatialCoverages.isEmpty()) { for (String coverage : spatialCoverages) { xmlw.writeStartElement("reviewed-item"); xmlw.writeCharacters(coverage); xmlw.writeEndElement(); // reviewed-item } } xmlw.writeStartElement("urls"); xmlw.writeStartElement("related-urls"); xmlw.writeStartElement("url"); xmlw.writeCharacters(getPersistentId().toURL().toString()); xmlw.writeEndElement(); // url xmlw.writeEndElement(); 
// related-urls xmlw.writeEndElement(); // urls // a DataFile citation also includes the filename and (for Tabular // files) the UNF signature, that we put into the custom1 and custom2 // fields respectively: if (getFileTitle() != null) { xmlw.writeStartElement("custom1"); xmlw.writeCharacters(fileTitle); xmlw.writeEndElement(); // custom1 if (getUNF() != null) { xmlw.writeStartElement("custom2"); xmlw.writeCharacters(getUNF()); xmlw.writeEndElement(); // custom2 } } if (persistentId != null) { xmlw.writeStartElement("electronic-resource-num"); String electResourceNum = persistentId.getProtocol() + "/" + persistentId.getAuthority() + "/" + persistentId.getIdentifier(); xmlw.writeCharacters(electResourceNum); xmlw.writeEndElement(); } //<electronic-resource-num>10.3886/ICPSR03259.v1</electronic-resource-num> xmlw.writeEndElement(); // record xmlw.writeEndElement(); // records xmlw.writeEndElement(); // xml } public Map<String, String> getDataCiteMetadata() { Map<String, String> metadata = new HashMap<>(); String authorString = getAuthorsString(); if (authorString.isEmpty()) { authorString = ":unav"; } String producerString = getPublisher(); if (producerString.isEmpty()) { producerString = ":unav"; } metadata.put("datacite.creator", authorString); metadata.put("datacite.title", getTitle()); metadata.put("datacite.publisher", producerString); metadata.put("datacite.publicationyear", getYear()); return metadata; } // helper methods private String formatString(String value, boolean escapeHtml) { return formatString(value, escapeHtml, ""); } private String formatString(String value, boolean escapeHtml, String wrapperFront) { return formatString(value, escapeHtml, wrapperFront, wrapperFront); } private String formatString(String value, boolean escapeHtml, String wrapperStart, String wrapperEnd) { if (!StringUtils.isEmpty(value)) { return new StringBuilder(wrapperStart).append(escapeHtml ? StringEscapeUtils.escapeHtml(value) : value) .append(wrapperEnd).toString(); } return null; } private String formatURL(String text, String url, boolean html) { if (text == null) { return null; } if (html && url != null) { return "<a href=\"" + url + "\" target=\"_blank\">" + StringEscapeUtils.escapeHtml(text) + "</a>"; } else { return text; } } /** This method flattens html for the textual export formats. * It removes <b> and <i> tags, replaces <br>, <p> and headers <hX> with * line breaks, converts lists to form where items start with an indented '* ', * and converts links to simple text showing the label and, if different, * the url in parenthesis after it. Since these operations may create * multiple line breaks, a final step limits the changes and compacts multiple * line breaks into one. 
* * @param html input string * @return the flattened text output */ private String flattenHtml(String html) { html = html.replaceAll("<[pP]>", "\r\n"); html = html.replaceAll("<\\/[pP]>", "\r\n"); html = html.replaceAll("<[hH]\\d>", "\r\n"); html = html.replaceAll("<\\/[hH]\\d>", "\r\n"); html = html.replaceAll("<[\\/]?[bB]>", ""); html = html.replaceAll("<[\\/]?[iI]>", "\r\n"); html = html.replaceAll("<[bB][rR][\\/]?>", "\r\n"); html = html.replaceAll("<[uU][lL]>", "\r\n"); html = html.replaceAll("<\\/[uU][lL]>", "\r\n"); html = html.replaceAll("<[lL][iI]>", "\t* "); html = html.replaceAll("<\\/[lL][iI]>", "\r\n"); Pattern p = Pattern.compile("<a\\W+href=\\\"(.*?)\\\".*?>(.*?)<\\/a>"); Matcher m = p.matcher(html); String url = null; String label = null; while(m.find()) { url = m.group(1); // this variable should contain the link URL label = m.group(2); // this variable should contain the label //display either the label or label(url) if(!url.equals(label)) { label = label + "(" + url +")"; } html = html.replaceFirst("<a\\W+href=\\\"(.*?)\\\".*?>(.*?)<\\/a>", label); } //Note, this does not affect single '\n' chars originally in the text html=html.replaceAll("(\\r\\n?)+", "\r\n"); return html; } private Date getDateFrom(DatasetVersion dsv) { Date citationDate = null; SimpleDateFormat sdf = new SimpleDateFormat("yyyy"); if (!dsv.getDataset().isHarvested()) { citationDate = dsv.getCitationDate(); if (citationDate == null) { if (dsv.getDataset().getPublicationDate() != null) { citationDate = dsv.getDataset().getPublicationDate(); } else { // for drafts citationDate = dsv.getLastUpdateTime(); } } } else { try { citationDate= sdf.parse(dsv.getDistributionDate()); } catch (ParseException ex) { // ignore } catch (Exception ex) { // ignore } } if (citationDate == null) { //As a last resort, pick the current date logger.warning("Unable to find citation date for datasetversion: " + dsv.getId()); citationDate = new Date(); } return citationDate; } private void getAuthorsAndProducersFrom(DatasetVersion dsv) { dsv.getDatasetAuthors().stream().forEach((author) -> { if (!author.isEmpty()) { String an = author.getName().getDisplayValue().trim(); authors.add(an); } }); producers = dsv.getDatasetProducerNames(); } private String getPublisherFrom(DatasetVersion dsv) { if (!dsv.getDataset().isHarvested()) { return dsv.getRootDataverseNameforCitation(); } else { return dsv.getDistributorName(); // remove += [distributor] SEK 8-18-2016 } } private String getVersionFrom(DatasetVersion dsv) { String version = ""; if (!dsv.getDataset().isHarvested()) { if (dsv.isDraft()) { version = BundleUtil.getStringFromBundle("draftversion"); } else if (dsv.getVersionNumber() != null) { version = "V" + dsv.getVersionNumber(); if (dsv.isDeaccessioned()) { version += ", "+ BundleUtil.getStringFromBundle("deaccessionedversion"); } } } return version; } private GlobalId getPIDFrom(DatasetVersion dsv, DvObject dv) { if (!dsv.getDataset().isHarvested() || HarvestingClient.HARVEST_STYLE_VDC.equals(dsv.getDataset().getHarvestedFrom().getHarvestStyle()) || HarvestingClient.HARVEST_STYLE_ICPSR.equals(dsv.getDataset().getHarvestedFrom().getHarvestStyle()) || HarvestingClient.HARVEST_STYLE_DATAVERSE .equals(dsv.getDataset().getHarvestedFrom().getHarvestStyle())) { // creating a global id like this: // persistentId = new GlobalId(dv.getGlobalId()); // you end up doing new GlobalId((New GlobalId(dv)).toString()) // - doing an extra formatting-and-parsing-again // This achieves the same thing: if(!isDirect()) { if 
(!StringUtils.isEmpty(dsv.getDataset().getIdentifier())) { return new GlobalId(dsv.getDataset()); } } else { if (!StringUtils.isEmpty(dv.getIdentifier())) { return new GlobalId(dv); } } } return null; } }
1
39,153
Is there a reason to use `getUNF()` and `UNF` in the same block?
IQSS-dataverse
java
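A minimal sketch of the consistency the comment above is asking about: pick one accessor style, here the getters, for both the null check and the value that gets written, so the two can never drift apart. The scaffolding class below is hypothetical; only the method names come from the snippet, and this is not the actual Dataverse fix.

import java.io.StringWriter;
import javax.xml.stream.XMLOutputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamWriter;

public class CitationSketch {
    private final String fileTitle;
    private final String unf;

    CitationSketch(String fileTitle, String unf) {
        this.fileTitle = fileTitle;
        this.unf = unf;
    }

    String getFileTitle() { return fileTitle; }
    String getUNF() { return unf; }

    // Writes the custom1/custom2 fragment using the getters throughout.
    String writeCustomFields() throws XMLStreamException {
        StringWriter out = new StringWriter();
        XMLStreamWriter xmlw = XMLOutputFactory.newInstance().createXMLStreamWriter(out);
        xmlw.writeStartElement("record");
        if (getFileTitle() != null) {
            xmlw.writeStartElement("custom1");
            xmlw.writeCharacters(getFileTitle()); // getter, matching the null check
            xmlw.writeEndElement(); // custom1
            if (getUNF() != null) {
                xmlw.writeStartElement("custom2");
                xmlw.writeCharacters(getUNF()); // already consistent in the original
                xmlw.writeEndElement(); // custom2
            }
        }
        xmlw.writeEndElement(); // record
        xmlw.flush();
        return out.toString();
    }

    public static void main(String[] args) throws XMLStreamException {
        System.out.println(new CitationSketch("data.tab", "UNF:6:abc==").writeCustomFields());
    }
}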
@@ -198,7 +198,7 @@ func TestParse(t *testing.T) { }, { name: "regex match operators", - raw: `"a" =~ /.*/ and "b" !~ /c/`, + raw: `"a" =~ /.*/ and "b" !~ /c$/`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{
1
package parser_test import ( "regexp" "testing" "github.com/google/go-cmp/cmp" "github.com/influxdata/flux/ast" "github.com/influxdata/flux/ast/asttest" "github.com/influxdata/flux/parser" ) func TestParse(t *testing.T) { tests := []struct { name string raw string want *ast.Program wantErr bool }{ { name: "optional query metadata", raw: `option task = { name: "foo", every: 1h, delay: 10m, cron: "0 2 * * *", retry: 5, }`, want: &ast.Program{ Body: []ast.Statement{ &ast.OptionStatement{ Declaration: &ast.VariableDeclarator{ ID: &ast.Identifier{Name: "task"}, Init: &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "name"}, Value: &ast.StringLiteral{Value: "foo"}, }, { Key: &ast.Identifier{Name: "every"}, Value: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 1, Unit: "h", }, }, }, }, { Key: &ast.Identifier{Name: "delay"}, Value: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 10, Unit: "m", }, }, }, }, { Key: &ast.Identifier{Name: "cron"}, Value: &ast.StringLiteral{Value: "0 2 * * *"}, }, { Key: &ast.Identifier{Name: "retry"}, Value: &ast.IntegerLiteral{Value: 5}, }, }, }, }, }, }, }, }, { name: "optional query metadata preceding query text", raw: `option task = { name: "foo", // Name of task every: 1h, // Execution frequency of task } // Task will execute the following query from() |> count()`, want: &ast.Program{ Body: []ast.Statement{ &ast.OptionStatement{ Declaration: &ast.VariableDeclarator{ ID: &ast.Identifier{Name: "task"}, Init: &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "name"}, Value: &ast.StringLiteral{Value: "foo"}, }, { Key: &ast.Identifier{Name: "every"}, Value: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 1, Unit: "h", }, }, }, }, }, }, }, }, &ast.ExpressionStatement{ Expression: &ast.PipeExpression{ Argument: &ast.CallExpression{ Callee: &ast.Identifier{Name: "from"}, Arguments: nil, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "count"}, Arguments: nil, }, }, }, }, }, }, { name: "from", raw: `from()`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.CallExpression{ Callee: &ast.Identifier{ Name: "from", }, }, }, }, }, }, { name: "comment", raw: `// Comment from()`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.CallExpression{ Callee: &ast.Identifier{ Name: "from", }, }, }, }, }, }, { name: "identifier with number", raw: `tan2()`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.CallExpression{ Callee: &ast.Identifier{ Name: "tan2", }, }, }, }, }, }, { name: "regex literal", raw: `/.*/`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.RegexpLiteral{ Value: regexp.MustCompile(".*"), }, }, }, }, }, { name: "regex literal with escape sequence", raw: `/a\/b\\c\d/`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.RegexpLiteral{ Value: regexp.MustCompile(`a/b\\c\d`), }, }, }, }, }, { name: "regex match operators", raw: `"a" =~ /.*/ and "b" !~ /c/`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.LogicalExpression{ Operator: ast.AndOperator, Left: &ast.BinaryExpression{ Operator: ast.RegexpMatchOperator, Left: &ast.StringLiteral{Value: "a"}, Right: &ast.RegexpLiteral{Value: regexp.MustCompile(".*")}, }, Right: &ast.BinaryExpression{ Operator: ast.NotRegexpMatchOperator, Left: &ast.StringLiteral{Value: "b"}, Right: &ast.RegexpLiteral{Value: 
regexp.MustCompile("c")}, }, }, }, }, }, }, { name: "declare variable as an int", raw: `howdy = 1`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{Name: "howdy"}, Init: &ast.IntegerLiteral{Value: 1}, }}, }, }, }, }, { name: "declare variable as a float", raw: `howdy = 1.1`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{Name: "howdy"}, Init: &ast.FloatLiteral{Value: 1.1}, }}, }, }, }, }, { name: "declare variable as an array", raw: `howdy = [1, 2, 3, 4]`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{Name: "howdy"}, Init: &ast.ArrayExpression{ Elements: []ast.Expression{ &ast.IntegerLiteral{Value: 1}, &ast.IntegerLiteral{Value: 2}, &ast.IntegerLiteral{Value: 3}, &ast.IntegerLiteral{Value: 4}, }, }, }}, }, }, }, }, { name: "use variable to declare something", raw: `howdy = 1 from()`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{Name: "howdy"}, Init: &ast.IntegerLiteral{Value: 1}, }}, }, &ast.ExpressionStatement{ Expression: &ast.CallExpression{ Callee: &ast.Identifier{ Name: "from", }, }, }, }, }, }, { name: "variable is from statement", raw: `howdy = from() howdy.count()`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "howdy", }, Init: &ast.CallExpression{ Callee: &ast.Identifier{ Name: "from", }, }, }}, }, &ast.ExpressionStatement{ Expression: &ast.CallExpression{ Callee: &ast.MemberExpression{ Object: &ast.Identifier{ Name: "howdy", }, Property: &ast.Identifier{ Name: "count", }, }, }, }, }, }, }, { name: "pipe expression", raw: `from() |> count()`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.PipeExpression{ Argument: &ast.CallExpression{ Callee: &ast.Identifier{Name: "from"}, Arguments: nil, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "count"}, Arguments: nil, }, }, }, }, }, }, { name: "literal pipe expression", raw: `5 |> pow2()`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.PipeExpression{ Argument: &ast.IntegerLiteral{Value: 5}, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "pow2"}, Arguments: nil, }, }, }, }, }, }, { name: "member expression pipe expression", raw: `foo.bar |> baz()`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.PipeExpression{ Argument: &ast.MemberExpression{ Object: &ast.Identifier{Name: "foo"}, Property: &ast.Identifier{Name: "bar"}, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "baz"}, Arguments: nil, }, }, }, }, }, }, { name: "multiple pipe expressions", raw: `from() |> range() |> filter() |> count()`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.PipeExpression{ Argument: &ast.PipeExpression{ Argument: &ast.PipeExpression{ Argument: &ast.CallExpression{ Callee: &ast.Identifier{Name: "from"}, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "range"}, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "filter"}, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "count"}, }, }, }, }, }, }, { name: "two variables for two froms", raw: `howdy = from() doody = from() howdy|>count() doody|>sum()`, want: 
&ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "howdy", }, Init: &ast.CallExpression{ Callee: &ast.Identifier{ Name: "from", }, }, }}, }, &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "doody", }, Init: &ast.CallExpression{ Callee: &ast.Identifier{ Name: "from", }, }, }}, }, &ast.ExpressionStatement{ Expression: &ast.PipeExpression{ Argument: &ast.Identifier{Name: "howdy"}, Call: &ast.CallExpression{ Callee: &ast.Identifier{ Name: "count", }, }, }, }, &ast.ExpressionStatement{ Expression: &ast.PipeExpression{ Argument: &ast.Identifier{Name: "doody"}, Call: &ast.CallExpression{ Callee: &ast.Identifier{ Name: "sum", }, }, }, }, }, }, }, { name: "from with database", raw: `from(bucket:"telegraf/autogen")`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.CallExpression{ Callee: &ast.Identifier{ Name: "from", }, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{ Name: "bucket", }, Value: &ast.StringLiteral{ Value: "telegraf/autogen", }, }, }, }, }, }, }, }, }, }, { name: "map member expressions", raw: `m = {key1: 1, key2:"value2"} m.key1 m["key2"] `, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "m", }, Init: &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "key1"}, Value: &ast.IntegerLiteral{Value: 1}, }, { Key: &ast.Identifier{Name: "key2"}, Value: &ast.StringLiteral{Value: "value2"}, }, }, }, }}, }, &ast.ExpressionStatement{ Expression: &ast.MemberExpression{ Object: &ast.Identifier{Name: "m"}, Property: &ast.Identifier{Name: "key1"}, }, }, &ast.ExpressionStatement{ Expression: &ast.MemberExpression{ Object: &ast.Identifier{Name: "m"}, Property: &ast.StringLiteral{Value: "key2"}, }, }, }, }, }, { name: "var as binary expression of other vars", raw: `a = 1 b = 2 c = a + b d = a`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "a", }, Init: &ast.IntegerLiteral{Value: 1}, }}, }, &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "b", }, Init: &ast.IntegerLiteral{Value: 2}, }}, }, &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "c", }, Init: &ast.BinaryExpression{ Operator: ast.AdditionOperator, Left: &ast.Identifier{Name: "a"}, Right: &ast.Identifier{Name: "b"}, }, }}, }, &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "d", }, Init: &ast.Identifier{Name: "a"}, }}, }, }, }, }, { name: "var as unary expression of other vars", raw: `a = 5 c = -a`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "a", }, Init: &ast.IntegerLiteral{Value: 5}, }}, }, &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "c", }, Init: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.Identifier{Name: "a"}, }, }}, }, }, }, }, { name: "var as both binary and unary expressions", raw: `a = 5 c = 10 * -a`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "a", }, Init: &ast.IntegerLiteral{Value: 5}, }}, }, 
&ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "c", }, Init: &ast.BinaryExpression{ Operator: ast.MultiplicationOperator, Left: &ast.IntegerLiteral{Value: 10}, Right: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.Identifier{Name: "a"}, }, }, }}, }, }, }, }, { name: "unary expressions within logical expression", raw: `a = 5.0 10.0 * -a == -0.5 or a == 6.0`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "a", }, Init: &ast.FloatLiteral{Value: 5}, }}, }, &ast.ExpressionStatement{ Expression: &ast.LogicalExpression{ Operator: ast.OrOperator, Left: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.BinaryExpression{ Operator: ast.MultiplicationOperator, Left: &ast.FloatLiteral{Value: 10}, Right: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.Identifier{Name: "a"}, }, }, Right: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.FloatLiteral{Value: 0.5}, }, }, Right: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.Identifier{Name: "a"}, Right: &ast.FloatLiteral{Value: 6}, }, }, }, }, }, }, { name: "unary expressions with too many comments", raw: `// define a a = 5.0 // eval this 10.0 * -a == -0.5 // or this or a == 6.0`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "a", }, Init: &ast.FloatLiteral{Value: 5}, }}, }, &ast.ExpressionStatement{ Expression: &ast.LogicalExpression{ Operator: ast.OrOperator, Left: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.BinaryExpression{ Operator: ast.MultiplicationOperator, Left: &ast.FloatLiteral{Value: 10}, Right: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.Identifier{Name: "a"}, }, }, Right: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.FloatLiteral{Value: 0.5}, }, }, Right: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.Identifier{Name: "a"}, Right: &ast.FloatLiteral{Value: 6}, }, }, }, }, }, }, { name: "expressions with function calls", raw: `a = foo() == 10`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "a", }, Init: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.CallExpression{ Callee: &ast.Identifier{Name: "foo"}, }, Right: &ast.IntegerLiteral{Value: 10}, }, }}, }, }, }, }, { name: "mix unary logical and binary expressions", raw: ` not (f() == 6.0 * x) or fail()`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.LogicalExpression{ Operator: ast.OrOperator, Left: &ast.UnaryExpression{ Operator: ast.NotOperator, Argument: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.CallExpression{ Callee: &ast.Identifier{Name: "f"}, }, Right: &ast.BinaryExpression{ Operator: ast.MultiplicationOperator, Left: &ast.FloatLiteral{Value: 6}, Right: &ast.Identifier{Name: "x"}, }, }, }, Right: &ast.CallExpression{ Callee: &ast.Identifier{Name: "fail"}, }, }, }, }, }, }, { name: "mix unary logical and binary expressions with extra parens", raw: ` (not (f() == 6.0 * x) or fail())`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.LogicalExpression{ Operator: ast.OrOperator, Left: &ast.UnaryExpression{ Operator: ast.NotOperator, Argument: &ast.BinaryExpression{ Operator: 
ast.EqualOperator, Left: &ast.CallExpression{ Callee: &ast.Identifier{Name: "f"}, }, Right: &ast.BinaryExpression{ Operator: ast.MultiplicationOperator, Left: &ast.FloatLiteral{Value: 6}, Right: &ast.Identifier{Name: "x"}, }, }, }, Right: &ast.CallExpression{ Callee: &ast.Identifier{Name: "fail"}, }, }, }, }, }, }, { name: "arrow function called", raw: `plusOne = (r) => r + 1 plusOne(r:5) `, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "plusOne", }, Init: &ast.ArrowFunctionExpression{ Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, Body: &ast.BinaryExpression{ Operator: ast.AdditionOperator, Left: &ast.Identifier{Name: "r"}, Right: &ast.IntegerLiteral{Value: 1}, }, }, }}, }, &ast.ExpressionStatement{ Expression: &ast.CallExpression{ Callee: &ast.Identifier{Name: "plusOne"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{ Name: "r", }, Value: &ast.IntegerLiteral{ Value: 5, }, }, }, }, }, }, }, }, }, }, { name: "arrow function return map", raw: `toMap = (r) =>({r:r})`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "toMap", }, Init: &ast.ArrowFunctionExpression{ Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, Body: &ast.ObjectExpression{ Properties: []*ast.Property{{ Key: &ast.Identifier{Name: "r"}, Value: &ast.Identifier{Name: "r"}, }}, }, }, }}, }, }, }, }, { name: "arrow function with default arg", raw: `addN = (r, n=5) => r + n`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "addN", }, Init: &ast.ArrowFunctionExpression{ Params: []*ast.Property{ {Key: &ast.Identifier{Name: "r"}}, {Key: &ast.Identifier{Name: "n"}, Value: &ast.IntegerLiteral{Value: 5}}, }, Body: &ast.BinaryExpression{ Operator: ast.AdditionOperator, Left: &ast.Identifier{Name: "r"}, Right: &ast.Identifier{Name: "n"}, }, }, }}, }, }, }, }, { name: "arrow function called in binary expression", raw: ` plusOne = (r) => r + 1 plusOne(r:5) == 6 or die() `, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "plusOne", }, Init: &ast.ArrowFunctionExpression{ Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, Body: &ast.BinaryExpression{ Operator: ast.AdditionOperator, Left: &ast.Identifier{Name: "r"}, Right: &ast.IntegerLiteral{Value: 1}, }, }, }}, }, &ast.ExpressionStatement{ Expression: &ast.LogicalExpression{ Operator: ast.OrOperator, Left: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.CallExpression{ Callee: &ast.Identifier{Name: "plusOne"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{ Name: "r", }, Value: &ast.IntegerLiteral{ Value: 5, }, }, }, }, }, }, Right: &ast.IntegerLiteral{Value: 6}, }, Right: &ast.CallExpression{ Callee: &ast.Identifier{Name: "die"}, }, }, }, }, }, }, { name: "arrow function as single expression", raw: `f = (r) => r["_measurement"] == "cpu"`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "f", }, Init: &ast.ArrowFunctionExpression{ Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, Body: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.MemberExpression{ Object: 
&ast.Identifier{Name: "r"}, Property: &ast.StringLiteral{Value: "_measurement"}, }, Right: &ast.StringLiteral{Value: "cpu"}, }, }, }}, }, }, }, }, { name: "arrow function as block", raw: `f = (r) => { m = r["_measurement"] return m == "cpu" }`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "f", }, Init: &ast.ArrowFunctionExpression{ Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, Body: &ast.BlockStatement{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "m", }, Init: &ast.MemberExpression{ Object: &ast.Identifier{Name: "r"}, Property: &ast.StringLiteral{Value: "_measurement"}, }, }}, }, &ast.ReturnStatement{ Argument: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.Identifier{Name: "m"}, Right: &ast.StringLiteral{Value: "cpu"}, }, }, }, }, }, }}, }, }, }, }, { name: "from with filter with no parens", raw: `from(bucket:"telegraf/autogen").filter(fn: (r) => r["other"]=="mem" and r["this"]=="that" or r["these"]!="those")`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.CallExpression{ Callee: &ast.MemberExpression{ Property: &ast.Identifier{Name: "filter"}, Object: &ast.CallExpression{ Callee: &ast.Identifier{ Name: "from", }, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "bucket"}, Value: &ast.StringLiteral{Value: "telegraf/autogen"}, }, }, }, }, }, }, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "fn"}, Value: &ast.ArrowFunctionExpression{ Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, Body: &ast.LogicalExpression{ Operator: ast.OrOperator, Left: &ast.LogicalExpression{ Operator: ast.AndOperator, Left: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.MemberExpression{ Object: &ast.Identifier{Name: "r"}, Property: &ast.StringLiteral{Value: "other"}, }, Right: &ast.StringLiteral{Value: "mem"}, }, Right: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.MemberExpression{ Object: &ast.Identifier{Name: "r"}, Property: &ast.StringLiteral{Value: "this"}, }, Right: &ast.StringLiteral{Value: "that"}, }, }, Right: &ast.BinaryExpression{ Operator: ast.NotEqualOperator, Left: &ast.MemberExpression{ Object: &ast.Identifier{Name: "r"}, Property: &ast.StringLiteral{Value: "these"}, }, Right: &ast.StringLiteral{Value: "those"}, }, }, }, }, }, }, }, }, }, }, }, }, { name: "from with range", raw: `from(bucket:"telegraf/autogen")|>range(start:-1h, end:10m)`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.PipeExpression{ Argument: &ast.CallExpression{ Callee: &ast.Identifier{Name: "from"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "bucket"}, Value: &ast.StringLiteral{Value: "telegraf/autogen"}, }, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "range"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "start"}, Value: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 1, Unit: "h", }, }, }, }, }, { Key: &ast.Identifier{Name: "end"}, Value: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 10, Unit: "m", }, }, }, }, }, }, }, }, }, }, }, }, }, { name: 
"from with limit", raw: `from(bucket:"telegraf/autogen")|>limit(limit:100, offset:10)`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.PipeExpression{ Argument: &ast.CallExpression{ Callee: &ast.Identifier{Name: "from"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "bucket"}, Value: &ast.StringLiteral{Value: "telegraf/autogen"}, }, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "limit"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "limit"}, Value: &ast.IntegerLiteral{Value: 100}, }, { Key: &ast.Identifier{Name: "offset"}, Value: &ast.IntegerLiteral{Value: 10}, }, }, }, }, }, }, }, }, }, }, { name: "from with range and count", raw: `from(bucket:"mydb/autogen") |> range(start:-4h, stop:-2h) |> count()`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.PipeExpression{ Argument: &ast.PipeExpression{ Argument: &ast.CallExpression{ Callee: &ast.Identifier{Name: "from"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "bucket"}, Value: &ast.StringLiteral{Value: "mydb/autogen"}, }, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "range"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "start"}, Value: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 4, Unit: "h", }, }, }, }, }, { Key: &ast.Identifier{Name: "stop"}, Value: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 2, Unit: "h", }, }, }, }, }, }, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "count"}, }, }, }, }, }, }, { name: "from with range, limit and count", raw: `from(bucket:"mydb/autogen") |> range(start:-4h, stop:-2h) |> limit(n:10) |> count()`, want: &ast.Program{ Body: []ast.Statement{ &ast.ExpressionStatement{ Expression: &ast.PipeExpression{ Argument: &ast.PipeExpression{ Argument: &ast.PipeExpression{ Argument: &ast.CallExpression{ Callee: &ast.Identifier{Name: "from"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "bucket"}, Value: &ast.StringLiteral{Value: "mydb/autogen"}, }, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "range"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "start"}, Value: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 4, Unit: "h", }, }, }, }, }, { Key: &ast.Identifier{Name: "stop"}, Value: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 2, Unit: "h", }, }, }, }, }, }, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "limit"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{{ Key: &ast.Identifier{Name: "n"}, Value: &ast.IntegerLiteral{Value: 10}, }}, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "count"}, }, }, }, }, }, }, { name: "from with join", raw: ` a = from(bucket:"dbA/autogen") |> range(start:-1h) b = from(bucket:"dbB/autogen") |> range(start:-1h) join(tables:[a,b], on:["host"], fn: 
(a,b) => a["_field"] + b["_field"])`, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "a", }, Init: &ast.PipeExpression{ Argument: &ast.CallExpression{ Callee: &ast.Identifier{Name: "from"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "bucket"}, Value: &ast.StringLiteral{Value: "dbA/autogen"}, }, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "range"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "start"}, Value: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 1, Unit: "h", }, }, }, }, }, }, }, }, }, }, }}, }, &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "b", }, Init: &ast.PipeExpression{ Argument: &ast.CallExpression{ Callee: &ast.Identifier{Name: "from"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "bucket"}, Value: &ast.StringLiteral{Value: "dbB/autogen"}, }, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "range"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "start"}, Value: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 1, Unit: "h", }, }, }, }, }, }, }, }, }, }, }}, }, &ast.ExpressionStatement{ Expression: &ast.CallExpression{ Callee: &ast.Identifier{Name: "join"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "tables"}, Value: &ast.ArrayExpression{ Elements: []ast.Expression{ &ast.Identifier{Name: "a"}, &ast.Identifier{Name: "b"}, }, }, }, { Key: &ast.Identifier{Name: "on"}, Value: &ast.ArrayExpression{ Elements: []ast.Expression{&ast.StringLiteral{Value: "host"}}, }, }, { Key: &ast.Identifier{Name: "fn"}, Value: &ast.ArrowFunctionExpression{ Params: []*ast.Property{ {Key: &ast.Identifier{Name: "a"}}, {Key: &ast.Identifier{Name: "b"}}, }, Body: &ast.BinaryExpression{ Operator: ast.AdditionOperator, Left: &ast.MemberExpression{ Object: &ast.Identifier{Name: "a"}, Property: &ast.StringLiteral{Value: "_field"}, }, Right: &ast.MemberExpression{ Object: &ast.Identifier{Name: "b"}, Property: &ast.StringLiteral{Value: "_field"}, }, }, }, }, }, }, }, }, }, }, }, }, { name: "from with join with complex expression", raw: ` a = from(bucket:"Flux/autogen") |> filter(fn: (r) => r["_measurement"] == "a") |> range(start:-1h) b = from(bucket:"Flux/autogen") |> filter(fn: (r) => r["_measurement"] == "b") |> range(start:-1h) join(tables:[a,b], on:["t1"], fn: (a,b) => (a["_field"] - b["_field"]) / b["_field"]) `, want: &ast.Program{ Body: []ast.Statement{ &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "a", }, Init: &ast.PipeExpression{ Argument: &ast.PipeExpression{ Argument: &ast.CallExpression{ Callee: &ast.Identifier{Name: "from"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "bucket"}, Value: &ast.StringLiteral{Value: "Flux/autogen"}, }, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "filter"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "fn"}, 
Value: &ast.ArrowFunctionExpression{ Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, Body: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.MemberExpression{ Object: &ast.Identifier{Name: "r"}, Property: &ast.StringLiteral{Value: "_measurement"}, }, Right: &ast.StringLiteral{Value: "a"}, }, }, }, }, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "range"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "start"}, Value: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 1, Unit: "h", }, }, }, }, }, }, }, }, }, }, }}, }, &ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{ Name: "b", }, Init: &ast.PipeExpression{ Argument: &ast.PipeExpression{ Argument: &ast.CallExpression{ Callee: &ast.Identifier{Name: "from"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "bucket"}, Value: &ast.StringLiteral{Value: "Flux/autogen"}, }, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "filter"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "fn"}, Value: &ast.ArrowFunctionExpression{ Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, Body: &ast.BinaryExpression{ Operator: ast.EqualOperator, Left: &ast.MemberExpression{ Object: &ast.Identifier{Name: "r"}, Property: &ast.StringLiteral{Value: "_measurement"}, }, Right: &ast.StringLiteral{Value: "b"}, }, }, }, }, }, }, }, }, Call: &ast.CallExpression{ Callee: &ast.Identifier{Name: "range"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "start"}, Value: &ast.UnaryExpression{ Operator: ast.SubtractionOperator, Argument: &ast.DurationLiteral{ Values: []ast.Duration{ { Magnitude: 1, Unit: "h", }, }, }, }, }, }, }, }, }, }, }}, }, &ast.ExpressionStatement{ Expression: &ast.CallExpression{ Callee: &ast.Identifier{Name: "join"}, Arguments: []ast.Expression{ &ast.ObjectExpression{ Properties: []*ast.Property{ { Key: &ast.Identifier{Name: "tables"}, Value: &ast.ArrayExpression{ Elements: []ast.Expression{ &ast.Identifier{Name: "a"}, &ast.Identifier{Name: "b"}, }, }, }, { Key: &ast.Identifier{Name: "on"}, Value: &ast.ArrayExpression{ Elements: []ast.Expression{ &ast.StringLiteral{ Value: "t1", }, }, }, }, { Key: &ast.Identifier{Name: "fn"}, Value: &ast.ArrowFunctionExpression{ Params: []*ast.Property{ {Key: &ast.Identifier{Name: "a"}}, {Key: &ast.Identifier{Name: "b"}}, }, Body: &ast.BinaryExpression{ Operator: ast.DivisionOperator, Left: &ast.BinaryExpression{ Operator: ast.SubtractionOperator, Left: &ast.MemberExpression{ Object: &ast.Identifier{Name: "a"}, Property: &ast.StringLiteral{Value: "_field"}, }, Right: &ast.MemberExpression{ Object: &ast.Identifier{Name: "b"}, Property: &ast.StringLiteral{Value: "_field"}, }, }, Right: &ast.MemberExpression{ Object: &ast.Identifier{Name: "b"}, Property: &ast.StringLiteral{Value: "_field"}, }, }, }, }, }, }, }, }, }, }, }, }, { name: "duration literal, all units", raw: `dur = 1y3mo2w1d4h1m30s1ms2µs70ns`, want: &ast.Program{ Body: []ast.Statement{&ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{Name: "dur"}, Init: &ast.DurationLiteral{ Values: []ast.Duration{ {Magnitude: 1, Unit: "y"}, {Magnitude: 3, Unit: "mo"}, {Magnitude: 2, Unit: "w"}, {Magnitude: 1, 
Unit: "d"}, {Magnitude: 4, Unit: "h"}, {Magnitude: 1, Unit: "m"}, {Magnitude: 30, Unit: "s"}, {Magnitude: 1, Unit: "ms"}, {Magnitude: 2, Unit: "us"}, {Magnitude: 70, Unit: "ns"}, }, }, }}, }}, }, }, { name: "duration literal, months", raw: `dur = 6mo`, want: &ast.Program{ Body: []ast.Statement{&ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{Name: "dur"}, Init: &ast.DurationLiteral{ Values: []ast.Duration{ {Magnitude: 6, Unit: "mo"}, }, }, }}, }}, }, }, { name: "duration literal, milliseconds", raw: `dur = 500ms`, want: &ast.Program{ Body: []ast.Statement{&ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{Name: "dur"}, Init: &ast.DurationLiteral{ Values: []ast.Duration{ {Magnitude: 500, Unit: "ms"}, }, }, }}, }}, }, }, { name: "duration literal, months, minutes, milliseconds", raw: `dur = 6mo30m500ms`, want: &ast.Program{ Body: []ast.Statement{&ast.VariableDeclaration{ Declarations: []*ast.VariableDeclarator{{ ID: &ast.Identifier{Name: "dur"}, Init: &ast.DurationLiteral{ Values: []ast.Duration{ {Magnitude: 6, Unit: "mo"}, {Magnitude: 30, Unit: "m"}, {Magnitude: 500, Unit: "ms"}, }, }, }}, }}, }, }, { name: "parse error extra gibberish", raw: `from(bucket:"Flux/autogen") &^*&H#IUJBN`, wantErr: true, }, { name: "parse error extra gibberish and valid content", raw: `from(bucket:"Flux/autogen") &^*&H#IUJBN from(bucket:"other/autogen")`, wantErr: true, }, { name: "parse error from duration literal with repeated units", raw: `from(bucket:"my_bucket") |> range(start: -1d3h2h1m)`, wantErr: true, }, { name: "parser error from duration literal with smaller unit before larger one", raw: `from(bucket:"my_bucket") |> range(start: -1s5m)`, wantErr: true, }, { name: "parser error from duration literal with invalid unit", raw: `from(bucket:"my_bucket") |> range(start: -1s5v)`, wantErr: true, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() // Set the env var`GO_TAGS=parser_debug` in order // to turn on parser debugging as it is turned off by default. got, err := parser.NewAST(tt.raw) if (err != nil) != tt.wantErr { t.Errorf("parser.NewAST() error = %v, wantErr %v", err, tt.wantErr) return } if tt.wantErr { return } if !cmp.Equal(tt.want, got, asttest.CompareOptions...) { t.Errorf("parser.NewAST() = -want/+got %s", cmp.Diff(tt.want, got, asttest.CompareOptions...)) } }) } } var benchmarkQuery = []byte(` start = -10s do = (cpu) => from(bucket:"telegraf/autogen") .filter(fn: (r) => r["_measurement"] == "cpu" and r["cpu"] == cpu) .range(start:start) cpu0 = do(cpu:"cpu0") cpu1 = do(cpu:"cpu1") join( tables:[cpu0, cpu1], on:["_measurement","_field","host"], fn: (a,b) => a["_value"] - b["_value"], ) `) var benchmarkProgram interface{} func BenchmarkParse(b *testing.B) { b.ReportAllocs() var err error for n := 0; n < b.N; n++ { benchmarkProgram, err = parser.Parse("", benchmarkQuery) if err != nil { b.Fatal(err) } } }
1
8,793
Again, it seems odd that megacheck cares about this.
influxdata-flux
go
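A plausible reading of the comment above (the thread does not name the exact megacheck check, so this is an assumption): a pattern that is a bare literal, like `c`, is equivalent to a plain substring test, the sort of regexp static checkers like to flag as unnecessary, while the anchored `c$` genuinely needs the regexp engine because position matters. A runnable illustration:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// A bare literal pattern behaves exactly like a substring check.
	fmt.Println(regexp.MustCompile("c").MatchString("abc")) // true
	fmt.Println(strings.Contains("abc", "c"))               // true, equivalent
	// An anchored pattern is a "real" regexp: position matters.
	fmt.Println(regexp.MustCompile("c$").MatchString("abc")) // true
	fmt.Println(regexp.MustCompile("c$").MatchString("cab")) // false
}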
@@ -49,6 +49,9 @@ module Bolt @sudo_password = transport_conf[:sudo_password] @run_as = transport_conf[:run_as] @tmpdir = transport_conf[:tmpdir] + @service_url = transport_conf[:service_url] + @token_file = transport_conf[:token_file] + @environment = transport_conf[:environment] @logger = init_logger(config[:log_destination], config[:log_level]) @transport_logger = init_logger(config[:log_destination], Logger::WARN)
1
require 'logger' require 'bolt/node_uri' require 'bolt/formatter' require 'bolt/result' require 'bolt/config' module Bolt class Node STDIN_METHODS = %w[both stdin].freeze ENVIRONMENT_METHODS = %w[both environment].freeze def self.from_uri(uri_string, **kwargs) uri = NodeURI.new(uri_string, kwargs[:config][:transport]) klass = case uri.scheme when 'winrm' Bolt::WinRM when 'pcp' Bolt::Orch else Bolt::SSH end klass.new(uri.hostname, uri.port, uri.user, uri.password, uri: uri_string, **kwargs) end def self.initialize_transport(_logger); end attr_reader :logger, :host, :port, :uri, :user, :password, :connect_timeout def initialize(host, port = nil, user = nil, password = nil, uri: nil, config: Bolt::Config.new) @host = host @port = port @uri = uri transport_conf = config[:transports][protocol.to_sym] @user = user || transport_conf[:user] @password = password || transport_conf[:password] @key = transport_conf[:key] @ca_cert = transport_conf[:ca_cert] @tty = transport_conf[:tty] @insecure = transport_conf[:insecure] @connect_timeout = transport_conf[:connect_timeout] @sudo = transport_conf[:sudo] @sudo_password = transport_conf[:sudo_password] @run_as = transport_conf[:run_as] @tmpdir = transport_conf[:tmpdir] @logger = init_logger(config[:log_destination], config[:log_level]) @transport_logger = init_logger(config[:log_destination], Logger::WARN) end def init_logger(destination, level) logger = Logger.new(destination) logger.progname = @host logger.level = level logger.formatter = Bolt::Formatter.new logger end def upload(source, destination) @logger.debug { "Uploading #{source} to #{destination}" } result = _upload(source, destination) if result.success? Bolt::Result.new(nil, "Uploaded '#{source}' to '#{host}:#{destination}'") else result end end def run_command(command) @logger.info { "Running command: #{command}" } _run_command(command) end def run_script(script, arguments) @logger.info { "Running script: #{script}" } _run_script(script, arguments) end def run_task(task, input_method, arguments) _run_task(task, input_method, arguments) end end end require 'bolt/node/errors' require 'bolt/node/ssh' require 'bolt/node/winrm' require 'bolt/node/orch'
1
7,224
I assume this will resolve to nil if the key isn't in the config?
puppetlabs-bolt
rb
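To the question above: yes, assuming `transport_conf` is a plain Hash with no default value or default_proc, `transport_conf[:service_url]` and the other new lookups simply return nil when the key is absent. A quick sketch:

# Plain Hash lookup returns nil for a missing key.
transport_conf = { user: 'bolt' } # :service_url is not configured
puts transport_conf[:service_url].inspect # => nil

# Only a Hash constructed with a default behaves differently:
strict = Hash.new { |_h, key| raise KeyError, "missing #{key}" }
# strict[:service_url] would raise KeyError here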
@@ -17,6 +17,18 @@ namespace Datadog.Trace.Configuration /// <param name="data">A string containing key-value pairs which are comma-separated, and for which the key and value are colon-separated.</param> /// <returns><see cref="IDictionary{TKey, TValue}"/> of key value pairs.</returns> public static IDictionary<string, string> ParseCustomKeyValues(string data) + { + return ParseCustomKeyValues(data, allowOptionalMappings: false); + } + + /// <summary> + /// Returns a <see cref="IDictionary{TKey, TValue}"/> from parsing + /// <paramref name="data"/>. + /// </summary> + /// <param name="data">A string containing key-value pairs which are comma-separated, and for which the key and value are colon-separated.</param> + /// <param name="allowOptionalMappings">Determines whether to create dictionary entries when the input has no value mapping</param> + /// <returns><see cref="IDictionary{TKey, TValue}"/> of key value pairs.</returns> + public static IDictionary<string, string> ParseCustomKeyValues(string data, bool allowOptionalMappings = false) { var dictionary = new ConcurrentDictionary<string, string>();
1
using System.Collections.Concurrent; using System.Collections.Generic; using Datadog.Trace.ExtensionMethods; namespace Datadog.Trace.Configuration { /// <summary> /// A base <see cref="IConfigurationSource"/> implementation /// for string-only configuration sources. /// </summary> public abstract class StringConfigurationSource : IConfigurationSource { /// <summary> /// Returns a <see cref="IDictionary{TKey, TValue}"/> from parsing /// <paramref name="data"/>. /// </summary> /// <param name="data">A string containing key-value pairs which are comma-separated, and for which the key and value are colon-separated.</param> /// <returns><see cref="IDictionary{TKey, TValue}"/> of key value pairs.</returns> public static IDictionary<string, string> ParseCustomKeyValues(string data) { var dictionary = new ConcurrentDictionary<string, string>(); // A null return value means the key was not present, // and CompositeConfigurationSource depends on this behavior // (it returns the first non-null value it finds). if (data == null) { return null; } if (string.IsNullOrWhiteSpace(data)) { return dictionary; } var entries = data.Split(','); foreach (var e in entries) { var kv = e.Split(':'); if (kv.Length != 2) { continue; } var key = kv[0]; var value = kv[1]; dictionary[key] = value; } return dictionary; } /// <inheritdoc /> public abstract string GetString(string key); /// <inheritdoc /> public virtual int? GetInt32(string key) { string value = GetString(key); return int.TryParse(value, out int result) ? result : (int?)null; } /// <inheritdoc /> public double? GetDouble(string key) { string value = GetString(key); return double.TryParse(value, out double result) ? result : (double?)null; } /// <inheritdoc /> public virtual bool? GetBool(string key) { var value = GetString(key); return value?.ToBoolean(); } /// <summary> /// Gets a <see cref="ConcurrentDictionary{TKey, TValue}"/> from parsing /// </summary> /// <param name="key">The key</param> /// <returns><see cref="ConcurrentDictionary{TKey, TValue}"/> containing all of the key-value pairs.</returns> public IDictionary<string, string> GetDictionary(string key) { return ParseCustomKeyValues(GetString(key)); } } }
1
19,521
It looks like we can make `allowOptionalMappings` non-optional, since we have another overload without it. (I thought this generated a compiler warning, but maybe it's just R#.)
DataDog-dd-trace-dotnet
.cs
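On the overload point in the comment above: when both methods exist, `ParseCustomKeyValues(data)` binds to the single-parameter overload, because C# overload resolution prefers a candidate that needs no optional parameter filled in, so the `= false` default on the two-parameter version never takes effect and can indeed be dropped. A standalone sketch with a hypothetical `Parse` pair, not the actual tracer code:

using System;

static class OverloadSketch
{
    public static string Parse(string data) =>
        Parse(data, allowOptionalMappings: false);

    // Non-optional second parameter: the one-argument overload already
    // covers the "no flag" call sites, so no default value is needed here.
    public static string Parse(string data, bool allowOptionalMappings) =>
        $"{data} (allowOptionalMappings: {allowOptionalMappings})";

    static void Main()
    {
        Console.WriteLine(Parse("key:value"));       // resolves to the 1-arg overload
        Console.WriteLine(Parse("key:value", true)); // explicit flag
    }
}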
@@ -76,12 +76,13 @@ class YOLOV3Neck(BaseModule): Args: num_scales (int): The number of scales / stages. - in_channels (int): The number of input channels. - out_channels (int): The number of output channels. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True) - act_cfg (dict): Config dict for activation layer. + in_channels (List[int]): The number of input channels per scale. + out_channels (List[int]): The number of output channels per scale. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None. + norm_cfg (dict, optional): Dictionary to construct and config norm + layer. Default: dict(type='BN', requires_grad=True) + act_cfg (dict, optional): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). init_cfg (dict or list[dict], optional): Initialization config dict. Default: None
1
# Copyright (c) 2019 Western Digital Corporation or its affiliates. import torch import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from ..builder import NECKS class DetectionBlock(BaseModule): """Detection block in YOLO neck. Let out_channels = n, the DetectionBlock contains: Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer. The first 6 ConvLayers are formed the following way: 1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n. The Conv2D layer is 1x1x255. Some block will have branch after the fifth ConvLayer. The input channel is arbitrary (in_channels) Args: in_channels (int): The number of input channels. out_channels (int): The number of output channels. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=None): super(DetectionBlock, self).__init__(init_cfg) double_out_channels = out_channels * 2 # shortcut cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg) self.conv2 = ConvModule( out_channels, double_out_channels, 3, padding=1, **cfg) self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg) self.conv4 = ConvModule( out_channels, double_out_channels, 3, padding=1, **cfg) self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg) def forward(self, x): tmp = self.conv1(x) tmp = self.conv2(tmp) tmp = self.conv3(tmp) tmp = self.conv4(tmp) out = self.conv5(tmp) return out @NECKS.register_module() class YOLOV3Neck(BaseModule): """The neck of YOLOV3. It can be treated as a simplified version of FPN. It will take the result from Darknet backbone and do some upsampling and concatenation. It will finally output the detection result. Note: The input feats should be from top to bottom. i.e., from high-lvl to low-lvl But YOLOV3Neck will process them in reversed order. i.e., from bottom (high-lvl) to top (low-lvl) Args: num_scales (int): The number of scales / stages. in_channels (int): The number of input channels. out_channels (int): The number of output channels. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_scales, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=None): super(YOLOV3Neck, self).__init__(init_cfg) assert (num_scales == len(in_channels) == len(out_channels)) self.num_scales = num_scales self.in_channels = in_channels self.out_channels = out_channels # shortcut cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) # To support arbitrary scales, the code looks awful, but it works. # Better solution is welcomed. 
self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg) for i in range(1, self.num_scales): in_c, out_c = self.in_channels[i], self.out_channels[i] self.add_module(f'conv{i}', ConvModule(in_c, out_c, 1, **cfg)) # in_c + out_c : High-lvl feats will be cat with low-lvl feats self.add_module(f'detect{i+1}', DetectionBlock(in_c + out_c, out_c, **cfg)) def forward(self, feats): assert len(feats) == self.num_scales # processed from bottom (high-lvl) to top (low-lvl) outs = [] out = self.detect1(feats[-1]) outs.append(out) for i, x in enumerate(reversed(feats[:-1])): conv = getattr(self, f'conv{i+1}') tmp = conv(out) # Cat with low-lvl feats tmp = F.interpolate(tmp, scale_factor=2) tmp = torch.cat((tmp, x), 1) detect = getattr(self, f'detect{i+2}') out = detect(tmp) outs.append(out) return tuple(outs)
1
23,656
` per scale. ` -> ` per scale.`: need to clean up the unnecessary trailing blank space
open-mmlab-mmdetection
py
@@ -79,7 +79,8 @@ public class class194 { descriptor = "(IS)Ljava/lang/String;", garbageValue = "4096" ) - static final String method3712(int var0) { + @Export("inventoryQuantityFormat") + static final String inventoryQuantityFormat(int var0) { if (var0 < 100000) { // L: 421 return "<col=ffff00>" + var0 + "</col>"; } else {
1
import net.runelite.mapping.ObfuscatedName; import net.runelite.mapping.ObfuscatedSignature; @ObfuscatedName("gd") public class class194 { @ObfuscatedName("z") @ObfuscatedSignature( descriptor = "Lgd;" ) public static final class194 field2371; @ObfuscatedName("k") @ObfuscatedSignature( descriptor = "Lgd;" ) public static final class194 field2362; @ObfuscatedName("s") @ObfuscatedSignature( descriptor = "Lgd;" ) public static final class194 field2363; @ObfuscatedName("t") @ObfuscatedSignature( descriptor = "Lgd;" ) public static final class194 field2370; @ObfuscatedName("i") @ObfuscatedSignature( descriptor = "Lgd;" ) public static final class194 field2365; @ObfuscatedName("o") @ObfuscatedSignature( descriptor = "Lgd;" ) public static final class194 field2366; @ObfuscatedName("x") @ObfuscatedSignature( descriptor = "Lgd;" ) public static final class194 field2364; @ObfuscatedName("w") @ObfuscatedSignature( descriptor = "Lgd;" ) public static final class194 field2368; @ObfuscatedName("g") @ObfuscatedSignature( descriptor = "Lgd;" ) public static final class194 field2361; @ObfuscatedName("m") @ObfuscatedSignature( descriptor = "Lgd;" ) public static final class194 field2367; static { field2371 = new class194(5); // L: 5 field2362 = new class194(4); // L: 6 field2363 = new class194(3); // L: 7 field2370 = new class194(5); // L: 8 field2365 = new class194(4); // L: 9 field2366 = new class194(14); // L: 10 field2364 = new class194(15); // L: 11 field2368 = new class194(6); // L: 12 field2361 = new class194(7); // L: 13 field2367 = new class194(2); // L: 14 } @ObfuscatedSignature( descriptor = "(I)V", garbageValue = "5" ) class194(int var1) { } // L: 20 @ObfuscatedName("h") @ObfuscatedSignature( descriptor = "(IS)Ljava/lang/String;", garbageValue = "4096" ) static final String method3712(int var0) { if (var0 < 100000) { // L: 421 return "<col=ffff00>" + var0 + "</col>"; } else { return var0 < 10000000 ? "<col=ffffff>" + var0 / 1000 + "K" + "</col>" : "<col=00ff80>" + var0 / 1000000 + "M" + "</col>"; // L: 422 423 } } }
1
16,496
there's no import for `Export`
open-osrs-runelite
java
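The fix the comment above asks for is a one-line addition next to the existing mapping imports. `Export` presumably lives in the same `net.runelite.mapping` package as the neighbouring annotations; the diff itself does not confirm the package, so treat this as the obvious guess:

import net.runelite.mapping.Export; // the missing import
import net.runelite.mapping.ObfuscatedName;
import net.runelite.mapping.ObfuscatedSignature;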
@@ -51,7 +51,7 @@ func (e *endpoints) ListenAndServe(ctx context.Context) error { tcpServer := e.createTCPServer(ctx) udsServer := e.createUDSServer(ctx) - e.registerNodeAPI(tcpServer) + e.registerNodeAPI(tcpServer, udsServer) e.registerRegistrationAPI(tcpServer, udsServer) err := util.RunTasks(ctx,
1
package endpoints import ( "crypto/ecdsa" "crypto/tls" "crypto/x509" "errors" "fmt" "net" "os" "sync" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "github.com/spiffe/spire/pkg/common/auth" "github.com/spiffe/spire/pkg/common/util" "github.com/spiffe/spire/pkg/server/endpoints/node" "github.com/spiffe/spire/pkg/server/endpoints/registration" "github.com/spiffe/spire/pkg/server/svid" node_pb "github.com/spiffe/spire/proto/api/node" registration_pb "github.com/spiffe/spire/proto/api/registration" datastore_pb "github.com/spiffe/spire/proto/server/datastore" ) // Server manages gRPC and HTTP endpoint lifecycle type Server interface { // ListenAndServe starts all endpoints, and blocks for as long as the // underlying servers are still running. Returns an error if any of the // endpoints encounter one. ListenAndServe will return an ListenAndServe(ctx context.Context) error } type endpoints struct { c *Config mtx *sync.RWMutex svid []*x509.Certificate svidKey *ecdsa.PrivateKey } // ListenAndServe starts all maintenance routines and endpoints, then blocks // until the context is cancelled or there is an error encountered listening // on one of the servers. func (e *endpoints) ListenAndServe(ctx context.Context) error { // Certs must be ready before anything else e.updateSVID() e.c.Log.Debug("Initializing API endpoints") tcpServer := e.createTCPServer(ctx) udsServer := e.createUDSServer(ctx) e.registerNodeAPI(tcpServer) e.registerRegistrationAPI(tcpServer, udsServer) err := util.RunTasks(ctx, func(ctx context.Context) error { return e.runTCPServer(ctx, tcpServer) }, func(ctx context.Context) error { return e.runUDSServer(ctx, udsServer) }, e.runSVIDObserver, ) if err == context.Canceled { err = nil } return err } func (e *endpoints) createTCPServer(ctx context.Context) *grpc.Server { tlsConfig := &tls.Config{ GetConfigForClient: e.getTLSConfig(ctx), } return grpc.NewServer( grpc.UnaryInterceptor(auth.UnaryAuthorizeCall), grpc.StreamInterceptor(auth.StreamAuthorizeCall), grpc.Creds(credentials.NewTLS(tlsConfig))) } func (e *endpoints) createUDSServer(ctx context.Context) *grpc.Server { return grpc.NewServer( grpc.UnaryInterceptor(auth.UnaryAuthorizeCall), grpc.StreamInterceptor(auth.StreamAuthorizeCall), grpc.Creds(auth.NewCredentials())) } // registerNodeAPI creates a Node API handler and registers it against // the provided gRPC server. func (e *endpoints) registerNodeAPI(tcpServer *grpc.Server) { n := node.NewHandler(node.HandlerConfig{ Log: e.c.Log.WithField("subsystem_name", "node_api"), Metrics: e.c.Metrics, Catalog: e.c.Catalog, TrustDomain: e.c.TrustDomain, ServerCA: e.c.ServerCA, }) node_pb.RegisterNodeServer(tcpServer, n) } // registerRegistrationAPI creates a Registration API handler and registers // it against the provided gRPC. func (e *endpoints) registerRegistrationAPI(tcpServer, udpServer *grpc.Server) { r := &registration.Handler{ Log: e.c.Log.WithField("subsystem_name", "registration_api"), Metrics: e.c.Metrics, Catalog: e.c.Catalog, TrustDomain: e.c.TrustDomain, } registration_pb.RegisterRegistrationServer(tcpServer, r) registration_pb.RegisterRegistrationServer(udpServer, r) } // runTCPServer will start the server and block until it exits or we are dying. 
func (e *endpoints) runTCPServer(ctx context.Context, server *grpc.Server) error { l, err := net.Listen(e.c.TCPAddr.Network(), e.c.TCPAddr.String()) if err != nil { return err } defer l.Close() if e.c.GRPCHook != nil { err := e.c.GRPCHook(server) if err != nil { return fmt.Errorf("call grpc hook: %v", err) } } // Skip use of tomb here so we don't pollute a clean shutdown with errors e.c.Log.Infof("Starting TCP server on %s", l.Addr()) errChan := make(chan error) go func() { errChan <- server.Serve(l) }() select { case err = <-errChan: return err case <-ctx.Done(): e.c.Log.Info("Stopping TCP server") l.Close() server.Stop() <-errChan e.c.Log.Info("TCP server has stopped.") return nil } } // runUDSServer will start the server and block until it exits or we are dying. func (e *endpoints) runUDSServer(ctx context.Context, server *grpc.Server) error { os.Remove(e.c.UDSAddr.String()) l, err := net.Listen(e.c.UDSAddr.Network(), e.c.UDSAddr.String()) if err != nil { return err } defer l.Close() // Restrict access to the UDS to processes running as the same user or // group as the server. if err := os.Chmod(e.c.UDSAddr.String(), 0770); err != nil { return err } // Skip use of tomb here so we don't pollute a clean shutdown with errors e.c.Log.Infof("Starting UDS server %s", l.Addr()) errChan := make(chan error) go func() { errChan <- server.Serve(l) }() select { case err := <-errChan: return err case <-ctx.Done(): e.c.Log.Info("Stopping UDS server") l.Close() server.Stop() <-errChan e.c.Log.Info("UDS server has stopped.") return nil } return nil } func (e *endpoints) runSVIDObserver(ctx context.Context) error { for { select { case <-ctx.Done(): return nil case <-e.c.SVIDStream.Changes(): e.c.SVIDStream.Next() e.updateSVID() } } } // getTLSConfig returns a TLS Config hook for the gRPC server func (e *endpoints) getTLSConfig(ctx context.Context) func(*tls.ClientHelloInfo) (*tls.Config, error) { return func(hello *tls.ClientHelloInfo) (*tls.Config, error) { certs, roots, err := e.getCerts(ctx) if err != nil { e.c.Log.Errorf("Could not generate TLS config for gRPC client %v: %v", hello.Conn.RemoteAddr(), err) return nil, err } c := &tls.Config{ // When bootstrapping, the agent does not yet have // an SVID. In order to include the bootstrap endpoint // in the same server as the rest of the Node API, // request but don't require a client certificate ClientAuth: tls.VerifyClientCertIfGiven, Certificates: certs, ClientCAs: roots, } return c, nil } } // getCerts queries the datastore and returns a TLS serving certificate(s) plus // the current CA root bundle. func (e *endpoints) getCerts(ctx context.Context) ([]tls.Certificate, *x509.CertPool, error) { ds := e.c.Catalog.DataStores()[0] resp, err := ds.FetchBundle(ctx, &datastore_pb.FetchBundleRequest{ TrustDomainId: e.c.TrustDomain.String(), }) if err != nil { return nil, nil, fmt.Errorf("get bundle from datastore: %v", err) } if resp.Bundle == nil { return nil, nil, errors.New("bundle not found") } var caCerts []*x509.Certificate for _, rootCA := range resp.Bundle.RootCas { rootCACerts, err := x509.ParseCertificates(rootCA.DerBytes) if err != nil { return nil, nil, fmt.Errorf("parse bundle: %v", err) } caCerts = append(caCerts, rootCACerts...) 
} caPool := x509.NewCertPool() for _, c := range caCerts { caPool.AddCert(c) } e.mtx.RLock() defer e.mtx.RUnlock() certChain := [][]byte{} for i, cert := range e.svid { certChain = append(certChain, cert.Raw) // add the intermediates into the root CA pool since we need to // validate old agents that don't present intermediates with the // certificate request. // TODO: remove this hack in 0.8 if i > 0 { caPool.AddCert(cert) } } tlsCert := tls.Certificate{ Certificate: certChain, PrivateKey: e.svidKey, } return []tls.Certificate{tlsCert}, caPool, nil } func (e *endpoints) updateSVID() { e.mtx.Lock() defer e.mtx.Unlock() state := e.c.SVIDStream.Value().(svid.State) e.svid = state.SVID e.svidKey = state.Key } func (e *endpoints) getSVIDState() svid.State { e.mtx.RLock() defer e.mtx.RUnlock() return svid.State{ SVID: e.svid, Key: e.svidKey, } }
1
10,349
I don't think we want or need to make the Node API available over UDS (the TCP-only registration is sketched below).
spiffe-spire
go
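A minimal sketch of what the reviewer is asking to keep, reusing the handler wiring already present in the file above: the Node API registered against the TCP server only, while the Registration API remains available on both listeners.

func (e *endpoints) registerNodeAPI(tcpServer *grpc.Server) {
	n := node.NewHandler(node.HandlerConfig{
		Log:         e.c.Log.WithField("subsystem_name", "node_api"),
		Metrics:     e.c.Metrics,
		Catalog:     e.c.Catalog,
		TrustDomain: e.c.TrustDomain,
		ServerCA:    e.c.ServerCA,
	})
	// Register against the TCP server only; the Node API serves remote
	// agents over mTLS, so the local UDS endpoint does not need it.
	node_pb.RegisterNodeServer(tcpServer, n)
}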
@@ -113,7 +113,7 @@ class LegalConditionsFacade $articleId = $this->setting->getForDomain($settingKey, $domainId); if ($articleId !== null) { - return $this->articleFacade->getById($articleId); + return $this->articleFacade->findById($articleId); } return null;
1
<?php namespace Shopsys\FrameworkBundle\Model\LegalConditions; use Shopsys\FrameworkBundle\Component\Domain\Domain; use Shopsys\FrameworkBundle\Component\Setting\Setting; use Shopsys\FrameworkBundle\Model\Article\Article; use Shopsys\FrameworkBundle\Model\Article\ArticleFacade; class LegalConditionsFacade { /** * @var \Shopsys\FrameworkBundle\Model\Article\ArticleFacade */ protected $articleFacade; /** * @var \Shopsys\FrameworkBundle\Component\Setting\Setting */ protected $setting; /** * @var \Shopsys\FrameworkBundle\Component\Domain\Domain */ protected $domain; /** * @param \Shopsys\FrameworkBundle\Model\Article\ArticleFacade $articleFacade * @param \Shopsys\FrameworkBundle\Component\Setting\Setting $setting * @param \Shopsys\FrameworkBundle\Component\Domain\Domain $domain */ public function __construct( ArticleFacade $articleFacade, Setting $setting, Domain $domain ) { $this->articleFacade = $articleFacade; $this->setting = $setting; $this->domain = $domain; } /** * @param int $domainId * @return \Shopsys\FrameworkBundle\Model\Article\Article|null */ public function findTermsAndConditions($domainId) { return $this->findArticle(Setting::TERMS_AND_CONDITIONS_ARTICLE_ID, $domainId); } /** * @param \Shopsys\FrameworkBundle\Model\Article\Article|null $termsAndConditions * @param int $domainId */ public function setTermsAndConditions(Article $termsAndConditions = null, $domainId) { $this->setArticle(Setting::TERMS_AND_CONDITIONS_ARTICLE_ID, $termsAndConditions, $domainId); } /** * @return string */ public function getTermsAndConditionsDownloadFilename() { return t('Terms-and-conditions.html'); } /** * @param int $domainId * @return \Shopsys\FrameworkBundle\Model\Article\Article|null */ public function findPrivacyPolicy($domainId) { return $this->findArticle(Setting::PRIVACY_POLICY_ARTICLE_ID, $domainId); } /** * @param \Shopsys\FrameworkBundle\Model\Article\Article|null $privacyPolicy * @param int $domainId */ public function setPrivacyPolicy(Article $privacyPolicy = null, $domainId) { $this->setArticle(Setting::PRIVACY_POLICY_ARTICLE_ID, $privacyPolicy, $domainId); } /** * @param \Shopsys\FrameworkBundle\Model\Article\Article $article * @return bool */ public function isArticleUsedAsLegalConditions(Article $article) { foreach ($this->domain->getAllIds() as $domainId) { $legalConditionsArticles = [ $this->findTermsAndConditions($domainId), $this->findPrivacyPolicy($domainId), ]; if (in_array($article, $legalConditionsArticles, true)) { return true; } } return false; } /** * @param string $settingKey * @param int $domainId * @return \Shopsys\FrameworkBundle\Model\Article\Article|null */ protected function findArticle($settingKey, $domainId) { $articleId = $this->setting->getForDomain($settingKey, $domainId); if ($articleId !== null) { return $this->articleFacade->getById($articleId); } return null; } /** * @param string $settingKey * @param \Shopsys\FrameworkBundle\Model\Article\Article|null $privacyPolicy * @param int $domainId */ protected function setArticle($settingKey, Article $privacyPolicy = null, $domainId) { $articleId = null; if ($privacyPolicy !== null) { $articleId = $privacyPolicy->getId(); } $this->setting->setForDomain($settingKey, $articleId, $domainId); } }
1
18,088
How about skipping the `if` and the `return null`, and returning the result of `ArticleFacade::findById()` directly? (Sketched below.)
shopsys-shopsys
php
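A sketch of the suggested simplification, assuming `ArticleFacade::findById()` returns null both for unknown IDs and when passed a null ID:

/**
 * @param string $settingKey
 * @param int $domainId
 * @return \Shopsys\FrameworkBundle\Model\Article\Article|null
 */
protected function findArticle($settingKey, $domainId)
{
    $articleId = $this->setting->getForDomain($settingKey, $domainId);

    // findById() is assumed to return null for a missing or null ID, so
    // the explicit null check and early return are no longer needed.
    return $this->articleFacade->findById($articleId);
}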
@@ -180,6 +180,11 @@ class CoreDocumentArray extends CoreMongooseArray { return arr; } + map() { + const arr = [].concat(Array.prototype.map.apply(this,arguments)); + return arr; + } + /** * Wraps [`Array#push`](https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Array/push) with proper change tracking. *
1
'use strict'; /*! * Module dependencies. */ const CoreMongooseArray = require('./core_array'); const Document = require('../document'); const ObjectId = require('./objectid'); const castObjectId = require('../cast/objectid'); const getDiscriminatorByValue = require('../queryhelpers').getDiscriminatorByValue; const internalToObjectOptions = require('../options').internalToObjectOptions; const util = require('util'); const utils = require('../utils'); const arrayAtomicsSymbol = require('../helpers/symbols').arrayAtomicsSymbol; const arrayParentSymbol = require('../helpers/symbols').arrayParentSymbol; const arrayPathSymbol = require('../helpers/symbols').arrayPathSymbol; const arraySchemaSymbol = require('../helpers/symbols').arraySchemaSymbol; const documentArrayParent = require('../helpers/symbols').documentArrayParent; const _basePush = Array.prototype.push; class CoreDocumentArray extends CoreMongooseArray { get isMongooseDocumentArray() { return true; } /*! * ignore */ toBSON() { return this.toObject(internalToObjectOptions); } /** * Overrides MongooseArray#cast * * @method _cast * @api private * @receiver MongooseDocumentArray */ _cast(value, index) { let Constructor = this[arraySchemaSymbol].casterConstructor; const isInstance = Constructor.$isMongooseDocumentArray ? value && value.isMongooseDocumentArray : value instanceof Constructor; if (isInstance || // Hack re: #5001, see #5005 (value && value.constructor && value.constructor.baseCasterConstructor === Constructor)) { if (!(value[documentArrayParent] && value.__parentArray)) { // value may have been created using array.create() value[documentArrayParent] = this[arrayParentSymbol]; value.__parentArray = this; } value.$setIndex(index); return value; } if (value === undefined || value === null) { return null; } // handle cast('string') or cast(ObjectId) etc. // only objects are permitted so we can safely assume that // non-objects are to be interpreted as _id if (Buffer.isBuffer(value) || value instanceof ObjectId || !utils.isObject(value)) { value = {_id: value}; } if (value && Constructor.discriminators && Constructor.schema && Constructor.schema.options && Constructor.schema.options.discriminatorKey) { if (typeof value[Constructor.schema.options.discriminatorKey] === 'string' && Constructor.discriminators[value[Constructor.schema.options.discriminatorKey]]) { Constructor = Constructor.discriminators[value[Constructor.schema.options.discriminatorKey]]; } else { const constructorByValue = getDiscriminatorByValue(Constructor, value[Constructor.schema.options.discriminatorKey]); if (constructorByValue) { Constructor = constructorByValue; } } } if (Constructor.$isMongooseDocumentArray) { return Constructor.cast(value, this, undefined, undefined, index); } return new Constructor(value, this, undefined, undefined, index); } /** * Searches array items for the first document with a matching _id. * * ####Example: * * var embeddedDoc = m.array.id(some_id); * * @return {EmbeddedDocument|null} the subdocument or null if not found. 
* @param {ObjectId|String|Number|Buffer} id * @TODO cast to the _id based on schema for proper comparison * @method id * @api public * @receiver MongooseDocumentArray */ id(id) { let casted; let sid; let _id; try { casted = castObjectId(id).toString(); } catch (e) { casted = null; } for (let i = 0, l = this.length; i < l; i++) { if (!this[i]) { continue; } _id = this[i].get('_id'); if (_id === null || typeof _id === 'undefined') { continue; } else if (_id instanceof Document) { sid || (sid = String(id)); if (sid == _id._id) { return this[i]; } } else if (!(id instanceof ObjectId) && !(_id instanceof ObjectId)) { if (utils.deepEqual(id, _id)) { return this[i]; } } else if (casted == _id) { return this[i]; } } return null; } /** * Returns a native js Array of plain js objects * * ####NOTE: * * _Each sub-document is converted to a plain object by calling its `#toObject` method._ * * @param {Object} [options] optional options to pass to each documents `toObject` method call during conversion * @return {Array} * @method toObject * @api public * @receiver MongooseDocumentArray */ toObject(options) { // `[].concat` coerces the return value into a vanilla JS array, rather // than a Mongoose array. return [].concat(this.map(function(doc) { if (doc == null) { return null; } if (typeof doc.toObject !== 'function') { return doc; } return doc.toObject(options); })); } slice() { const arr = super.slice.apply(this, arguments); arr[arrayParentSymbol] = this[arrayParentSymbol]; arr[arrayPathSymbol] = this[arrayPathSymbol]; return arr; } /** * Wraps [`Array#push`](https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Array/push) with proper change tracking. * * @param {Object} [args...] * @api public * @method push * @memberOf MongooseDocumentArray */ push() { const ret = super.push.apply(this, arguments); _updateParentPopulated(this); return ret; } /** * Pulls items from the array atomically. * * @param {Object} [args...] * @api public * @method pull * @memberOf MongooseDocumentArray */ pull() { const ret = super.pull.apply(this, arguments); _updateParentPopulated(this); return ret; } /** * Wraps [`Array#shift`](https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Array/unshift) with proper change tracking. */ shift() { const ret = super.shift.apply(this, arguments); _updateParentPopulated(this); return ret; } /** * Wraps [`Array#splice`](https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Array/splice) with proper change tracking and casting. */ splice() { const ret = super.splice.apply(this, arguments); _updateParentPopulated(this); return ret; } /** * Helper for console.log * * @method inspect * @api public * @receiver MongooseDocumentArray */ inspect() { return this.toObject(); } /** * Creates a subdocument casted to this schema. * * This is the same subdocument constructor used for casting. 
* * @param {Object} obj the value to cast to this arrays SubDocument schema * @method create * @api public * @receiver MongooseDocumentArray */ create(obj) { let Constructor = this[arraySchemaSymbol].casterConstructor; if (obj && Constructor.discriminators && Constructor.schema && Constructor.schema.options && Constructor.schema.options.discriminatorKey) { if (typeof obj[Constructor.schema.options.discriminatorKey] === 'string' && Constructor.discriminators[obj[Constructor.schema.options.discriminatorKey]]) { Constructor = Constructor.discriminators[obj[Constructor.schema.options.discriminatorKey]]; } else { const constructorByValue = getDiscriminatorByValue(Constructor, obj[Constructor.schema.options.discriminatorKey]); if (constructorByValue) { Constructor = constructorByValue; } } } return new Constructor(obj, this); } /*! * ignore */ notify(event) { const _this = this; return function notify(val, _arr) { _arr = _arr || _this; let i = _arr.length; while (i--) { if (_arr[i] == null) { continue; } switch (event) { // only swap for save event for now, we may change this to all event types later case 'save': val = _this[i]; break; default: // NO-OP break; } if (_arr[i].isMongooseArray) { notify(val, _arr[i]); } else if (_arr[i]) { _arr[i].emit(event, val); } } }; } } if (util.inspect.custom) { CoreDocumentArray.prototype[util.inspect.custom] = CoreDocumentArray.prototype.inspect; } /*! * If this is a document array, each element may contain single * populated paths, so we need to modify the top-level document's * populated cache. See gh-8247, gh-8265. */ function _updateParentPopulated(arr) { const parent = arr[arrayParentSymbol]; if (parent.$__.populated != null) { const populatedPaths = Object.keys(parent.$__.populated). filter(p => p.startsWith(arr[arrayPathSymbol] + '.')); for (const path of populatedPaths) { const remnant = path.slice((arr[arrayPathSymbol] + '.').length); if (!Array.isArray(parent.$__.populated[path].value)) { continue; } parent.$__.populated[path].value = arr.map(val => val.populated(remnant)); } } } /** * DocumentArray constructor * * @param {Array} values * @param {String} path the path to this array * @param {Document} doc parent document * @api private * @return {MongooseDocumentArray} * @inherits MongooseArray * @see http://bit.ly/f6CnZU */ function MongooseDocumentArray(values, path, doc) { // TODO: replace this with `new CoreDocumentArray().concat()` when we remove // support for node 4.x and 5.x, see https://i.imgur.com/UAAHk4S.png const arr = new CoreDocumentArray(); arr[arrayAtomicsSymbol] = {}; arr[arraySchemaSymbol] = void 0; if (Array.isArray(values)) { if (values instanceof CoreDocumentArray && values[arrayPathSymbol] === path && values[arrayParentSymbol] === doc) { arr[arrayAtomicsSymbol] = Object.assign({}, values[arrayAtomicsSymbol]); } values.forEach(v => { _basePush.call(arr, v); }); } arr[arrayPathSymbol] = path; // Because doc comes from the context of another function, doc === global // can happen if there was a null somewhere up the chain (see #3020 && #3034) // RB Jun 17, 2015 updated to check for presence of expected paths instead // to make more proof against unusual node environments if (doc && doc instanceof Document) { arr[arrayParentSymbol] = doc; arr[arraySchemaSymbol] = doc.schema.path(path); // `schema.path()` doesn't drill into nested arrays properly yet, see // gh-6398, gh-6602. This is a workaround because nested arrays are // always plain non-document arrays, so once you get to a document array // nesting is done. Matryoshka code. 
while (arr != null && arr[arraySchemaSymbol] != null && arr[arraySchemaSymbol].$isMongooseArray && !arr[arraySchemaSymbol].$isMongooseDocumentArray) { arr[arraySchemaSymbol] = arr[arraySchemaSymbol].casterConstructor; } // Tricky but this may be a document array embedded in a normal array, // in which case `path` would point to the embedded array. See #6405, #6398 if (arr[arraySchemaSymbol] && !arr[arraySchemaSymbol].$isMongooseDocumentArray) { arr[arraySchemaSymbol] = arr[arraySchemaSymbol].casterConstructor; } } return arr; } /*! * Module exports. */ module.exports = MongooseDocumentArray;
1
14,096
I think this change is currently unnecessary. I like the idea of `map()`, `filter()`, etc. returning vanilla JavaScript arrays rather than mongoose arrays, but I think that's a bit too heavy a change for a patch release. I think all we need for this release is the `if (!parent || parent.$__.populated == null) return;` change (sketched below).
Automattic-mongoose
js
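A sketch of the guard the reviewer prefers for the patch release, applied to the module's existing `_updateParentPopulated` helper rather than overriding `map()`:

function _updateParentPopulated(arr) {
  const parent = arr[arrayParentSymbol];
  // Suggested guard: bail out when there is no parent document or when
  // nothing has been populated on it.
  if (!parent || parent.$__.populated == null) return;
  const populatedPaths = Object.keys(parent.$__.populated).
    filter(p => p.startsWith(arr[arrayPathSymbol] + '.'));
  for (const path of populatedPaths) {
    const remnant = path.slice((arr[arrayPathSymbol] + '.').length);
    if (!Array.isArray(parent.$__.populated[path].value)) {
      continue;
    }
    parent.$__.populated[path].value = arr.map(val => val.populated(remnant));
  }
}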
@@ -10,6 +10,11 @@ module Travis BUILD_DIR = File.join(HOME_DIR, 'build') class << self + def version + @version ||= `git rev-parse HEAD 2>/dev/null || \\ + echo "${SOURCE_VERSION:-fafafaf}"`.strip + end + def self.register(key) Travis.services.add(key, self) end
1
require 'core_ext/hash/deep_symbolize_keys' require 'travis/shell' require 'travis/build/data' require 'travis/build/env' require 'travis/build/script' module Travis module Build HOME_DIR = '$HOME' BUILD_DIR = File.join(HOME_DIR, 'build') class << self def self.register(key) Travis.services.add(key, self) end def script(data) data = data.deep_symbolize_keys lang = (Array(data[:config][:language]).first || 'ruby').to_s.downcase.strip const = by_lang(lang) const.new(data) end def by_lang(lang) case lang when /^java/i then Script::PureJava when "c++", "cpp", "cplusplus" then Script::Cpp when 'objective-c', 'swift' then Script::ObjectiveC when 'bash', 'sh', 'shell' then Script::Generic else name = lang.split('_').map { |w| w.capitalize }.join Script.const_get(name, false) rescue Script::Ruby end end end end end
1
14,467
How about "unknown" instead of "fafafaf"? (Sketched below.)
travis-ci-travis-build
rb
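The suggested fallback is a one-word change to the proposed `version` method:

def version
  @version ||= `git rev-parse HEAD 2>/dev/null || \\
    echo "${SOURCE_VERSION:-unknown}"`.strip
end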
@@ -0,0 +1,14 @@ +package api2 + +import ( + "context" + + "github.com/filecoin-project/go-filecoin/address" + "github.com/filecoin-project/go-filecoin/exec" +) + +// Actor is the actor-related Filecoin plumbing interface. +type Actor interface { + // ActorGetSignature returns the signature of the given actor's given method. + ActorGetSignature(ctx context.Context, actorAddr address.Address, method string) (_ *exec.FunctionSignature, err error) +}
1
1
15,673
nit: do we gain anything from naming the output error `err` in this definition? (The unnamed variant is sketched below.)
filecoin-project-venus
go
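For comparison, the same definition with the named result dropped — nothing in the interface refers to `err`, so the name carries no information:

package api2

import (
	"context"

	"github.com/filecoin-project/go-filecoin/address"
	"github.com/filecoin-project/go-filecoin/exec"
)

// Actor is the actor-related Filecoin plumbing interface.
type Actor interface {
	// ActorGetSignature returns the signature of the given actor's given method.
	ActorGetSignature(ctx context.Context, actorAddr address.Address, method string) (*exec.FunctionSignature, error)
}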
@@ -188,6 +188,11 @@ define(['dialogHelper', 'inputManager', 'connectionManager', 'layoutManager', 'f stopInterval(); }); + // Blur foreign element to prevent starting of "nested" slideshow + if (document.activeElement && !dlg.contains(document.activeElement)) { + document.activeElement.blur(); + } + inputManager.on(window, onInputCommand); document.addEventListener((window.PointerEvent ? 'pointermove' : 'mousemove'), onPointerMove);
1
define(['dialogHelper', 'inputManager', 'connectionManager', 'layoutManager', 'focusManager', 'browser', 'apphost', 'loading', 'css!./style', 'material-icons', 'paper-icon-button-light'], function (dialogHelper, inputManager, connectionManager, layoutManager, focusManager, browser, appHost, loading) { 'use strict'; function getImageUrl(item, options, apiClient) { options = options || {}; options.type = options.type || "Primary"; if (typeof (item) === 'string') { return apiClient.getScaledImageUrl(item, options); } if (item.ImageTags && item.ImageTags[options.type]) { options.tag = item.ImageTags[options.type]; return apiClient.getScaledImageUrl(item.Id, options); } if (options.type === 'Primary') { if (item.AlbumId && item.AlbumPrimaryImageTag) { options.tag = item.AlbumPrimaryImageTag; return apiClient.getScaledImageUrl(item.AlbumId, options); } } return null; } function getBackdropImageUrl(item, options, apiClient) { options = options || {}; options.type = options.type || "Backdrop"; // If not resizing, get the original image if (!options.maxWidth && !options.width && !options.maxHeight && !options.height) { options.quality = 100; } if (item.BackdropImageTags && item.BackdropImageTags.length) { options.tag = item.BackdropImageTags[0]; return apiClient.getScaledImageUrl(item.Id, options); } return null; } function getImgUrl(item, original) { var apiClient = connectionManager.getApiClient(item.ServerId); var imageOptions = {}; if (!original) { imageOptions.maxWidth = screen.availWidth; } if (item.BackdropImageTags && item.BackdropImageTags.length) { return getBackdropImageUrl(item, imageOptions, apiClient); } else { if (item.MediaType === 'Photo' && original) { return apiClient.getItemDownloadUrl(item.Id); } imageOptions.type = "Primary"; return getImageUrl(item, imageOptions, apiClient); } } function getIcon(icon, cssClass, canFocus, autoFocus) { var tabIndex = canFocus ? '' : ' tabindex="-1"'; autoFocus = autoFocus ? ' autofocus' : ''; return '<button is="paper-icon-button-light" class="autoSize ' + cssClass + '"' + tabIndex + autoFocus + '><i class="material-icons slideshowButtonIcon">' + icon + '</i></button>'; } function setUserScalable(scalable) { try { appHost.setUserScalable(scalable); } catch (err) { console.error('error in appHost.setUserScalable: ' + err); } } return function (options) { var self = this; var swiperInstance; var dlg; var currentTimeout; var currentIntervalMs; var currentOptions; var currentIndex; // small hack since this is not possible anyway if (browser.chromecast) { options.interactive = false; } function createElements(options) { dlg = dialogHelper.createDialog({ exitAnimationDuration: options.interactive ? 
400 : 800, size: 'fullscreen', autoFocus: false, scrollY: false, exitAnimation: 'fadeout', removeOnClose: true }); dlg.classList.add('slideshowDialog'); var html = ''; if (options.interactive) { var actionButtonsOnTop = layoutManager.mobile; html += '<div>'; html += '<div class="slideshowSwiperContainer"><div class="swiper-wrapper"></div></div>'; html += getIcon('keyboard_arrow_left', 'btnSlideshowPrevious slideshowButton hide-mouse-idle-tv', false); html += getIcon('keyboard_arrow_right', 'btnSlideshowNext slideshowButton hide-mouse-idle-tv', false); html += '<div class="topActionButtons">'; if (actionButtonsOnTop) { if (appHost.supports('filedownload')) { html += getIcon('file_download', 'btnDownload slideshowButton', true); } if (appHost.supports('sharing')) { html += getIcon('share', 'btnShare slideshowButton', true); } } html += getIcon('close', 'slideshowButton btnSlideshowExit hide-mouse-idle-tv', false); html += '</div>'; if (!actionButtonsOnTop) { html += '<div class="slideshowBottomBar hide">'; html += getIcon('pause', 'btnSlideshowPause slideshowButton', true, true); if (appHost.supports('filedownload')) { html += getIcon('file_download', 'btnDownload slideshowButton', true); } if (appHost.supports('sharing')) { html += getIcon('share', 'btnShare slideshowButton', true); } html += '</div>'; } html += '</div>'; } else { html += '<div class="slideshowImage"></div><h1 class="slideshowImageText"></h1>'; } dlg.innerHTML = html; if (options.interactive) { dlg.querySelector('.btnSlideshowExit').addEventListener('click', function (e) { dialogHelper.close(dlg); }); dlg.querySelector('.btnSlideshowNext').addEventListener('click', nextImage); dlg.querySelector('.btnSlideshowPrevious').addEventListener('click', previousImage); var btnPause = dlg.querySelector('.btnSlideshowPause'); if (btnPause) { btnPause.addEventListener('click', playPause); } var btnDownload = dlg.querySelector('.btnDownload'); if (btnDownload) { btnDownload.addEventListener('click', download); } var btnShare = dlg.querySelector('.btnShare'); if (btnShare) { btnShare.addEventListener('click', share); } } setUserScalable(true); dialogHelper.open(dlg).then(function () { setUserScalable(false); stopInterval(); }); inputManager.on(window, onInputCommand); document.addEventListener((window.PointerEvent ? 
'pointermove' : 'mousemove'), onPointerMove); dlg.addEventListener('close', onDialogClosed); if (options.interactive) { loadSwiper(dlg); } } function onAutoplayStart() { var btnSlideshowPause = dlg.querySelector('.btnSlideshowPause i'); if (btnSlideshowPause) { btnSlideshowPause.innerHTML = "pause"; } } function onAutoplayStop() { var btnSlideshowPause = dlg.querySelector('.btnSlideshowPause i'); if (btnSlideshowPause) { btnSlideshowPause.innerHTML = "&#xE037;"; } } function loadSwiper(dlg) { if (currentOptions.slides) { dlg.querySelector('.swiper-wrapper').innerHTML = currentOptions.slides.map(getSwiperSlideHtmlFromSlide).join(''); } else { dlg.querySelector('.swiper-wrapper').innerHTML = currentOptions.items.map(getSwiperSlideHtmlFromItem).join(''); } require(['swiper'], function (Swiper) { swiperInstance = new Swiper(dlg.querySelector('.slideshowSwiperContainer'), { // Optional parameters direction: 'horizontal', loop: options.loop !== false, autoplay: { delay: options.interval || 8000 }, // Disable preloading of all images preloadImages: false, // Enable lazy loading lazy: true, loadPrevNext: true, disableOnInteraction: false, initialSlide: options.startIndex || 0, speed: 240 }); swiperInstance.on('autoplayStart', onAutoplayStart); swiperInstance.on('autoplayStop', onAutoplayStop); if (layoutManager.mobile) { pause(); } else { play(); } }); } function getSwiperSlideHtmlFromItem(item) { return getSwiperSlideHtmlFromSlide({ imageUrl: getImgUrl(item), originalImage: getImgUrl(item, true), //title: item.Name, //description: item.Overview Id: item.Id, ServerId: item.ServerId }); } function getSwiperSlideHtmlFromSlide(item) { var html = ''; html += '<div class="swiper-slide" data-imageurl="' + item.imageUrl + '" data-original="' + item.originalImage + '" data-itemid="' + item.Id + '" data-serverid="' + item.ServerId + '">'; html += '<img data-src="' + item.imageUrl + '" class="swiper-lazy swiper-slide-img">'; if (item.title || item.subtitle) { html += '<div class="slideText">'; html += '<div class="slideTextInner">'; if (item.title) { html += '<h1 class="slideTitle">'; html += item.title; html += '</h1>'; } if (item.description) { html += '<div class="slideSubtitle">'; html += item.description; html += '</div>'; } html += '</div>'; html += '</div>'; } html += '</div>'; return html; } function previousImage() { if (swiperInstance) { swiperInstance.slidePrev(); } else { stopInterval(); showNextImage(currentIndex - 1); } } function nextImage() { if (swiperInstance) { if (options.loop === false) { if (swiperInstance.activeIndex >= swiperInstance.slides.length - 1) { dialogHelper.close(dlg); return; } } swiperInstance.slideNext(); } else { stopInterval(); showNextImage(currentIndex + 1); } } function getCurrentImageInfo() { if (swiperInstance) { var slide = document.querySelector('.swiper-slide-active'); if (slide) { return { url: slide.getAttribute('data-original'), shareUrl: slide.getAttribute('data-imageurl'), itemId: slide.getAttribute('data-itemid'), serverId: slide.getAttribute('data-serverid') }; } return null; } else { return null; } } function download() { var imageInfo = getCurrentImageInfo(); require(['fileDownloader'], function (fileDownloader) { fileDownloader.download([imageInfo]); }); } function share() { var imageInfo = getCurrentImageInfo(); navigator.share({ url: imageInfo.shareUrl }); } function play() { if (swiperInstance.autoplay) { swiperInstance.autoplay.start(); } } function pause() { if (swiperInstance.autoplay) { swiperInstance.autoplay.stop(); } } function playPause() { 
var paused = dlg.querySelector('.btnSlideshowPause i').innerHTML !== "pause"; if (paused) { play(); } else { pause(); } } function onDialogClosed() { var swiper = swiperInstance; if (swiper) { swiper.destroy(true, true); swiperInstance = null; } inputManager.off(window, onInputCommand); document.removeEventListener((window.PointerEvent ? 'pointermove' : 'mousemove'), onPointerMove); } function startInterval(options) { currentOptions = options; stopInterval(); createElements(options); if (!options.interactive) { currentIntervalMs = options.interval || 11000; showNextImage(options.startIndex || 0, true); } } var _osdOpen = false; function isOsdOpen() { return _osdOpen; } function getOsdBottom() { return dlg.querySelector('.slideshowBottomBar'); } function showOsd() { var bottom = getOsdBottom(); if (bottom) { slideUpToShow(bottom); startHideTimer(); } } function hideOsd() { var bottom = getOsdBottom(); if (bottom) { slideDownToHide(bottom); } } var hideTimeout; function startHideTimer() { stopHideTimer(); hideTimeout = setTimeout(hideOsd, 4000); } function stopHideTimer() { if (hideTimeout) { clearTimeout(hideTimeout); hideTimeout = null; } } function slideUpToShow(elem) { if (!elem.classList.contains('hide')) { return; } _osdOpen = true; elem.classList.remove('hide'); var onFinish = function () { focusManager.focus(elem.querySelector('.btnSlideshowPause')); }; if (!elem.animate) { onFinish(); return; } requestAnimationFrame(function () { var keyframes = [ { transform: 'translate3d(0,' + elem.offsetHeight + 'px,0)', opacity: '.3', offset: 0 }, { transform: 'translate3d(0,0,0)', opacity: '1', offset: 1 } ]; var timing = { duration: 300, iterations: 1, easing: 'ease-out' }; elem.animate(keyframes, timing).onfinish = onFinish; }); } function slideDownToHide(elem) { if (elem.classList.contains('hide')) { return; } var onFinish = function () { elem.classList.add('hide'); _osdOpen = false; }; if (!elem.animate) { onFinish(); return; } requestAnimationFrame(function () { var keyframes = [ { transform: 'translate3d(0,0,0)', opacity: '1', offset: 0 }, { transform: 'translate3d(0,' + elem.offsetHeight + 'px,0)', opacity: '.3', offset: 1 } ]; var timing = { duration: 300, iterations: 1, easing: 'ease-out' }; elem.animate(keyframes, timing).onfinish = onFinish; }); } var lastMouseMoveData; function onPointerMove(e) { var pointerType = e.pointerType || (layoutManager.mobile ? 
'touch' : 'mouse'); if (pointerType === 'mouse') { var eventX = e.screenX || 0; var eventY = e.screenY || 0; var obj = lastMouseMoveData; if (!obj) { lastMouseMoveData = { x: eventX, y: eventY }; return; } // if coord are same, it didn't move if (Math.abs(eventX - obj.x) < 10 && Math.abs(eventY - obj.y) < 10) { return; } obj.x = eventX; obj.y = eventY; showOsd(); } } function onInputCommand(e) { switch (e.detail.command) { case 'left': if (!isOsdOpen()) { e.preventDefault(); e.stopPropagation(); previousImage(); } break; case 'right': if (!isOsdOpen()) { e.preventDefault(); e.stopPropagation(); nextImage(); } break; case 'up': case 'down': case 'select': case 'menu': case 'info': case 'play': case 'playpause': case 'pause': showOsd(); break; default: break; } } function showNextImage(index, skipPreload) { index = Math.max(0, index); if (index >= currentOptions.items.length) { index = 0; } currentIndex = index; var options = currentOptions; var items = options.items; var item = items[index]; var imgUrl = getImgUrl(item); var onSrcLoaded = function () { var cardImageContainer = dlg.querySelector('.slideshowImage'); var newCardImageContainer = document.createElement('div'); newCardImageContainer.className = cardImageContainer.className; if (options.cover) { newCardImageContainer.classList.add('slideshowImage-cover'); } newCardImageContainer.style.backgroundImage = "url('" + imgUrl + "')"; newCardImageContainer.classList.add('hide'); cardImageContainer.parentNode.appendChild(newCardImageContainer); if (options.showTitle) { dlg.querySelector('.slideshowImageText').innerHTML = item.Name; } else { dlg.querySelector('.slideshowImageText').innerHTML = ''; } newCardImageContainer.classList.remove('hide'); var onAnimationFinished = function () { var parentNode = cardImageContainer.parentNode; if (parentNode) { parentNode.removeChild(cardImageContainer); } }; if (newCardImageContainer.animate) { var keyframes = [ { opacity: '0', offset: 0 }, { opacity: '1', offset: 1 } ]; var timing = { duration: 1200, iterations: 1 }; newCardImageContainer.animate(keyframes, timing).onfinish = onAnimationFinished; } else { onAnimationFinished(); } stopInterval(); currentTimeout = setTimeout(function () { showNextImage(index + 1, true); }, currentIntervalMs); }; if (!skipPreload) { var img = new Image(); img.onload = onSrcLoaded; img.src = imgUrl; } else { onSrcLoaded(); } } function stopInterval() { if (currentTimeout) { clearTimeout(currentTimeout); currentTimeout = null; } } self.show = function () { startInterval(options); }; self.hide = function () { var dialog = dlg; if (dialog) { dialogHelper.close(dialog); } }; }; });
1
13,639
This can happen with any dialog. Maybe the best place to add this change would be the dialog component instead (see the sketch below).
jellyfin-jellyfin-web
js
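A hypothetical sketch of moving the blur into the shared dialog component; the `open` entry point shown here is an assumption about dialogHelper's internals, not its actual API:

function open(dlg) {
    // Blur any element focused outside the dialog before it opens, so a
    // stray focus can't trigger actions such as a "nested" slideshow.
    if (document.activeElement && !dlg.contains(document.activeElement)) {
        document.activeElement.blur();
    }
    // ... the existing dialogHelper open logic would follow here ...
}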
@@ -46,6 +46,10 @@ func NewTestEnvironment(ctx context.Context, t *testing.T, fastenvOpts fast.Envi env, err := fast.NewEnvironmentMemoryGenesis(big.NewInt(1000000), dir, types.TestProofsMode) require.NoError(err) + defer func() { + dumpEnvOutputOnFail(t, env.Processes()) + }() + // Setup options for nodes. options := make(map[string]string) options[localplugin.AttrLogJSON] = "1" // Enable JSON logs
1
package fastesting import ( "context" "io/ioutil" "math/big" "strings" "testing" "time" "github.com/ipfs/go-ipfs-files" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-filecoin/testhelpers" "github.com/filecoin-project/go-filecoin/tools/fast" "github.com/filecoin-project/go-filecoin/tools/fast/series" localplugin "github.com/filecoin-project/go-filecoin/tools/iptb-plugins/filecoin/local" "github.com/filecoin-project/go-filecoin/types" ) // TestEnvironment provides common setup for writing tests using FAST type TestEnvironment struct { fast.Environment t *testing.T ctx context.Context pluginName string pluginOpts map[string]string fastenvOpts fast.EnvironmentOpts GenesisMiner *fast.Filecoin } // NewTestEnvironment creates a TestEnvironment with a basic setup for writing tests using the FAST library. func NewTestEnvironment(ctx context.Context, t *testing.T, fastenvOpts fast.EnvironmentOpts) (context.Context, *TestEnvironment) { require := require.New(t) // Create a directory for the test using the test name (mostly for FAST) // Replace the forward slash as tempdir can't handle them dir, err := ioutil.TempDir("", strings.Replace(t.Name(), "/", ".", -1)) require.NoError(err) // Create an environment that includes a genesis block with 1MM FIL env, err := fast.NewEnvironmentMemoryGenesis(big.NewInt(1000000), dir, types.TestProofsMode) require.NoError(err) // Setup options for nodes. options := make(map[string]string) options[localplugin.AttrLogJSON] = "1" // Enable JSON logs options[localplugin.AttrLogLevel] = "5" // Set log level to Debug options[localplugin.AttrFilecoinBinary] = testhelpers.MustGetFilecoinBinary() // Get the filecoin binary genesisURI := env.GenesisCar() genesisMiner, err := env.GenesisMiner() require.NoError(err) fastenvOpts = fast.EnvironmentOpts{ InitOpts: append([]fast.ProcessInitOption{fast.POGenesisFile(genesisURI)}, fastenvOpts.InitOpts...), DaemonOpts: append([]fast.ProcessDaemonOption{fast.POBlockTime(time.Millisecond)}, fastenvOpts.DaemonOpts...), } // Setup the first node which is used to help coordinate the other nodes by providing // funds, mining for the network, etc genesis, err := env.NewProcess(ctx, localplugin.PluginName, options, fastenvOpts) require.NoError(err) err = series.SetupGenesisNode(ctx, genesis, genesisMiner.Address, files.NewReaderFile(genesisMiner.Owner)) require.NoError(err) // Define a MiningOnce function which will bet set on the context to provide // a way to mine blocks in the series used during testing var MiningOnce series.MiningOnceFunc = func() { _, err := genesis.MiningOnce(ctx) require.NoError(err) } ctx = series.SetCtxMiningOnce(ctx, MiningOnce) ctx = series.SetCtxSleepDelay(ctx, time.Second) return ctx, &TestEnvironment{ Environment: env, t: t, ctx: ctx, pluginName: localplugin.PluginName, pluginOpts: options, fastenvOpts: fastenvOpts, GenesisMiner: genesis, } } // RequireNewNode builds a new node for the environment func (env *TestEnvironment) RequireNewNode() *fast.Filecoin { require := require.New(env.t) p, err := env.NewProcess(env.ctx, env.pluginName, env.pluginOpts, env.fastenvOpts) require.NoError(err) return p } // RequireNewNodeStarted builds a new node using RequireNewNode, then initializes // and starts it func (env *TestEnvironment) RequireNewNodeStarted() *fast.Filecoin { require := require.New(env.t) p := env.RequireNewNode() err := series.InitAndStart(env.ctx, p) require.NoError(err) return p } // RequireNewNodeConnected builds a new node using RequireNewNodeStarted, then // connect it to the 
environment GenesisMiner node func (env *TestEnvironment) RequireNewNodeConnected() *fast.Filecoin { require := require.New(env.t) p := env.RequireNewNodeStarted() err := series.Connect(env.ctx, env.GenesisMiner, p) require.NoError(err) return p } // RequireNewNodeWithFunds builds a new node using RequireNewNodeConnected, then // sends it funds from the environment GenesisMiner node func (env *TestEnvironment) RequireNewNodeWithFunds(funds int) *fast.Filecoin { require := require.New(env.t) p := env.RequireNewNodeConnected() err := series.SendFilecoinDefaults(env.ctx, env.GenesisMiner, p, funds) require.NoError(err) return p }
1
18,701
Should this be `TearDown`, since it now calls this method? Something I would like to see: on test failure, don't tear down completely; instead, leave the FAST repo in place with the stderr and stdout files and direct the user to that location. What do you think? (A rough sketch follows below.)
filecoin-project-venus
go
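A hypothetical `TearDown` along the lines the reviewer describes. The `Teardown` cleanup call on the embedded `fast.Environment` is an assumption; `dumpEnvOutputOnFail` is the helper referenced in the diff above.

func (env *TestEnvironment) TearDown(ctx context.Context) error {
	if env.t.Failed() {
		// On failure, dump process output and keep the FAST repo on disk
		// (with the stdout/stderr files) so the user can inspect it.
		dumpEnvOutputOnFail(env.t, env.Processes())
		env.t.Logf("test failed; FAST repo left in place for inspection")
		return nil
	}
	// On success, clean up as usual (assumed cleanup entry point).
	return env.Environment.Teardown(ctx)
}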
@@ -190,6 +190,8 @@ def getModule(metricSpec): return MetricMAPE(metricSpec) elif metricName == 'multi': return MetricMulti(metricSpec) + elif metricName == 'negLL': + return MetricNegLogLikelihood(metricSpec) else: raise Exception("Unsupported metric type: %s" % metricName)
1
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- from abc import ABCMeta, abstractmethod import numbers import copy import random import numpy as np from nupic.data.fieldmeta import FieldMetaType import nupic.math.roc_utils as roc from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA from nupic.frameworks.opf.opfutils import InferenceType from nupic.utils import MovingAverage from collections import deque from operator import itemgetter from safe_interpreter import SafeInterpreter from io import BytesIO, StringIO from functools import partial ############################################################################### # Public Metric specification class ############################################################################### class MetricSpec(object): """ This class represents a single Metrics specification in the TaskControl block """ _LABEL_SEPARATOR = ":" def __init__(self, metric, inferenceElement, field=None, params=None): """ metric: A metric type name that identifies which metrics module is to be constructed by the metrics factory method opf.metrics.getModule(); e.g., "rmse" inferenceElement: Some inference types (such as classification), can output more than one type of inference (i.e. the predicted class AND the predicted next step). This field specifies which of these inferences to compute the metrics on field: Field name on which this metric is to be collected params: Custom parameters dict for the metrics module's constructor """ self.metric = metric self.inferenceElement = inferenceElement self.field = field self.params = params return def __repr__(self): return "%s(metric=%r, inferenceElement=%r, field=%r, params=%r)" \ % (self.__class__.__name__, self.metric, self.inferenceElement, self.field, self.params) def getLabel(self, inferenceType=None): """ Helper method that generates a unique label for a MetricSpec / InferenceType pair. 
The label is formatted as follows: <predictionKind>:<metric type>:(paramName=value)*:field=<fieldname> For example: classification:aae:paramA=10.2:paramB=20:window=100:field=pounds """ result = [] if inferenceType is not None: result.append(InferenceType.getLabel(inferenceType)) result.append(self.inferenceElement) result.append(self.metric) params = self.params if params is not None: sortedParams= params.keys() sortedParams.sort() for param in sortedParams: # Don't include the customFuncSource - it is too long an unwieldy if param in ('customFuncSource', 'customFuncDef', 'customExpr'): continue value = params[param] if isinstance(value, str): result.extend(["%s='%s'"% (param, value)]) else: result.extend(["%s=%s"% (param, value)]) if self.field: result.append("field=%s"% (self.field) ) return self._LABEL_SEPARATOR.join(result) @classmethod def getInferenceTypeFromLabel(cls, label): """ Extracts the PredicitonKind (temporal vs. nontemporal) from the given metric label Parameters: ----------------------------------------------------------------------- label: A label (string) for a metric spec generated by getMetricLabel (above) Returns: An InferenceType value """ infType, _, _= label.partition(cls._LABEL_SEPARATOR) if not InferenceType.validate(infType): return None return infType def getModule(metricSpec): """ factory method to return an appropriate MetricsIface-based module args: metricSpec - an instance of MetricSpec. metricSpec.metric must be one of: rmse (root-mean-square error) aae (average absolute error) acc (accuracy, for enumerated types) return: an appropriate Metric module """ metricName = metricSpec.metric if metricName == 'rmse': return MetricRMSE(metricSpec) if metricName == 'nrmse': return MetricNRMSE(metricSpec) elif metricName == 'aae': return MetricAAE(metricSpec) elif metricName == 'acc': return MetricAccuracy(metricSpec) elif metricName == 'avg_err': return MetricAveError(metricSpec) elif metricName == 'trivial': return MetricTrivial(metricSpec) elif metricName == 'two_gram': return MetricTwoGram(metricSpec) elif metricName == 'moving_mean': return MetricMovingMean(metricSpec) elif metricName == 'moving_mode': return MetricMovingMode(metricSpec) elif metricName == 'neg_auc': return MetricNegAUC(metricSpec) elif metricName == 'custom_error_metric': return CustomErrorMetric(metricSpec) elif metricName == 'multiStep': return MetricMultiStep(metricSpec) elif metricName == 'multiStepProbability': return MetricMultiStepProbability(metricSpec) elif metricName == 'ms_aae': return MetricMultiStepAAE(metricSpec) elif metricName == 'ms_avg_err': return MetricMultiStepAveError(metricSpec) elif metricName == 'passThruPrediction': return MetricPassThruPrediction(metricSpec) elif metricName == 'altMAPE': return MetricAltMAPE(metricSpec) elif metricName == 'MAPE': return MetricMAPE(metricSpec) elif metricName == 'multi': return MetricMulti(metricSpec) else: raise Exception("Unsupported metric type: %s" % metricName) ################################################################################ # Helper Methods and Classes # ################################################################################ class _MovingMode(object): """ Helper class for computing windowed moving mode of arbitrary values """ def __init__(self, windowSize = None): """ Parameters: ----------------------------------------------------------------------- windowSize: The number of values that are used to compute the moving average """ self._windowSize = windowSize self._countDict = dict() self._history = 
deque([]) def __call__(self, value): if len(self._countDict) == 0: pred = "" else: pred = max(self._countDict.items(), key = itemgetter(1))[0] # Update count dict and history buffer self._history.appendleft(value) if not value in self._countDict: self._countDict[value] = 0 self._countDict[value] += 1 if len(self._history) > self._windowSize: removeElem = self._history.pop() self._countDict[removeElem] -= 1 assert(self._countDict[removeElem] > -1) return pred def _isNumber(value): return isinstance(value, (numbers.Number, np.number)) class MetricsIface(object): """ A Metrics module compares a prediction Y to corresponding ground truth X and returns a single measure representing the "goodness" of the prediction. It is up to the implementation to determine how this comparison is made. """ __metaclass__ = ABCMeta @abstractmethod def __init__(self, metricSpec): """ instantiate a MetricsIface-based module. args: metricSpec is an instance of MetricSpec """ @abstractmethod def addInstance(self, groundTruth, prediction, record = None): """ add one instance consisting of ground truth and a prediction. Parameters: ----------------------------------------------------------------------- groundTruth: The actual measured value at the current timestep prediction: The value predicted by the network at the current timestep groundTruthEncoding: The binary encoding of the groundTruth value (as a numpy array). Right now this is only used by CLA networks predictionEncoding: The binary encoding of the prediction value (as a numpy array). Right now this is only used by CLA networks return: The average error as computed over the metric's window size """ @abstractmethod def getMetric(self): """ return: {value : <current measurement>, "stats" : {<stat> : <value> ...}} metric name is defined by the MetricIface implementation. stats is expected to contain further information relevant to the given metric, for example the number of timesteps represented in the current measurement. all stats are implementation defined, and "stats" can be None """ class AggregateMetric(MetricsIface): """ Partial implementation of Metrics Interface for metrics that accumulate an error and compute an aggregate score, potentially over some window of previous data. This is a convenience class that can serve as the base class for a wide variety of metrics """ ___metaclass__ = ABCMeta #FIXME @abstractmethod - this should be marked abstract method and required to be implemented def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer): """ Updates the accumulated error given the prediction and the ground truth. groundTruth: Actual value that is observed for the current timestep prediction: Value predicted by the network for the given timestep accumulatedError: The total accumulated score from the previous predictions (possibly over some finite window) historyBuffer: A buffer of the last <self.window> ground truth values that have been observed. If historyBuffer = None, it means that no history is being kept. retval: The new accumulated error. That is: self.accumulatedError = self.accumulate(groundTruth, predictions, accumulatedError) historyBuffer should also be updated in this method. self.spec.params["window"] indicates the maximum size of the window """ #FIXME @abstractmethod - this should be marked abstract method and required to be implemented def aggregate(self, accumulatedError, historyBuffer, steps): """ Updates the final aggregated score error given the prediction and the ground truth. 
accumulatedError: The total accumulated score from the previous predictions (possibly over some finite window) historyBuffer: A buffer of the last <self.window> ground truth values that have been observed. If historyBuffer = None, it means that no history is being kept. steps: The total number of (groundTruth, prediction) pairs that have been passed to the metric. This does not include pairs where the groundTruth = SENTINEL_VALUE_FOR_MISSING_DATA retval: The new aggregate (final) error measure. """ def __init__(self, metricSpec): """ Initialize this metric If the params contains the key 'errorMetric', then that is the name of another metric to which we will pass a modified groundTruth and prediction to from our addInstance() method. For example, we may compute a moving mean on the groundTruth and then pass that to the AbsoluteAveError metric """ # Init default member variables self.id = None self.verbosity = 0 self.window = -1 self.history = None self.accumulatedError = 0 self.aggregateError = None self.steps = 0 self.spec = metricSpec self.disabled = False # Number of steps ahead we are trying to predict. This is a list of # prediction steps are processing self._predictionSteps = [0] # Where we store the ground truth history self._groundTruthHistory = deque([]) # The instances of another metric to which we will pass a possibly modified # groundTruth and prediction to from addInstance(). There is one instance # for each step present in self._predictionSteps self._subErrorMetrics = None # The maximum number of records to process. After this many records have # been processed, the metric value never changes. This can be used # as the optimization metric for swarming, while having another metric without # the maxRecords limit to get an idea as to how well a production model # would do on the remaining data self._maxRecords = None # Parse the metric's parameters if metricSpec is not None and metricSpec.params is not None: self.id = metricSpec.params.get('id', None) self._predictionSteps = metricSpec.params.get('steps', [0]) # Make sure _predictionSteps is a list if not hasattr(self._predictionSteps, '__iter__'): self._predictionSteps = [self._predictionSteps] self.verbosity = metricSpec.params.get('verbosity', 0) self._maxRecords = metricSpec.params.get('maxRecords', None) # Get the metric window size if 'window' in metricSpec.params: assert metricSpec.params['window'] >= 1 self.history = deque([]) self.window = metricSpec.params['window'] # Get the name of the sub-metric to chain to from addInstance() if 'errorMetric' in metricSpec.params: self._subErrorMetrics = [] for step in self._predictionSteps: subSpec = copy.deepcopy(metricSpec) # Do all ground truth shifting before we pass onto the sub-metric subSpec.params.pop('steps', None) subSpec.params.pop('errorMetric') subSpec.metric = metricSpec.params['errorMetric'] self._subErrorMetrics.append(getModule(subSpec)) def _getShiftedGroundTruth(self, groundTruth): """ Utility function that saves the passed in groundTruth into a local history buffer, and returns the groundTruth from self._predictionSteps ago, where self._predictionSteps is defined by the 'steps' parameter. This can be called from the beginning of a derived class's addInstance() before it passes groundTruth and prediction onto accumulate(). 
""" # Save this ground truth into our input history self._groundTruthHistory.append(groundTruth) # This is only supported when _predictionSteps has one item in it assert (len(self._predictionSteps) == 1) # Return the one from N steps ago if len(self._groundTruthHistory) > self._predictionSteps[0]: return self._groundTruthHistory.popleft() else: if hasattr(groundTruth, '__iter__'): return [None] * len(groundTruth) else: return None def addInstance(self, groundTruth, prediction, record = None): # This base class does not support time shifting the ground truth or a # subErrorMetric. assert (len(self._predictionSteps) == 1) assert self._predictionSteps[0] == 0 assert self._subErrorMetrics is None # If missing data, if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None: return self.aggregateError if self.verbosity > 0: print "groundTruth:\n%s\nPredictions:\n%s\n%s\n" % (groundTruth, prediction, self.getMetric()) # Ignore if we've reached maxRecords if self._maxRecords is not None and self.steps >= self._maxRecords: return self.aggregateError # If there is a sub-metric, chain into it's addInstance # Accumulate the error self.accumulatedError = self.accumulate(groundTruth, prediction, self.accumulatedError, self.history) self.steps += 1 return self._compute() def getMetric(self): return {'value': self.aggregateError, "stats" : {"steps" : self.steps}} def _compute(self): self.aggregateError = self.aggregate(self.accumulatedError, self.history, self.steps) return self.aggregateError class MetricRMSE(AggregateMetric): """ computes root-mean-square error """ def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer): error = (groundTruth - prediction)**2 accumulatedError += error if historyBuffer is not None: historyBuffer.append(error) if len(historyBuffer) > self.spec.params["window"] : accumulatedError -= historyBuffer.popleft() return accumulatedError def aggregate(self, accumulatedError, historyBuffer, steps): n = steps if historyBuffer is not None: n = len(historyBuffer) return np.sqrt(accumulatedError / float(n)) class MetricNRMSE(MetricRMSE): """computes normalized root-mean-square error""" def __init__(self, *args, **kwargs): super(MetricNRMSE, self).__init__(*args, **kwargs) self.groundTruths = [] def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer): self.groundTruths.append(groundTruth) return super(MetricNRMSE, self).accumulate(groundTruth, prediction, accumulatedError, historyBuffer) def aggregate(self, accumulatedError, historyBuffer, steps): rmse = super(MetricNRMSE, self).aggregate(accumulatedError, historyBuffer, steps) denominator = np.std(self.groundTruths) return rmse / denominator if denominator > 0 else float("inf") class MetricAAE(AggregateMetric): """ computes average absolute error """ def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer): error = abs(groundTruth - prediction) accumulatedError += error if historyBuffer is not None: historyBuffer.append(error) if len(historyBuffer) > self.spec.params["window"] : accumulatedError -= historyBuffer.popleft() return accumulatedError def aggregate(self, accumulatedError, historyBuffer, steps): n = steps if historyBuffer is not None: n = len(historyBuffer) return accumulatedError/ float(n) class MetricAltMAPE(AggregateMetric): """ computes the "Alternative" Mean Absolute Percent Error. A generic MAPE computes the percent error for each sample, and then gets an average. 
This can suffer from samples where the actual value is very small or zero - this one sample can drastically alter the mean. This metric on the other hand first computes the average of the actual values and the averages of the errors before dividing. This washes out the effects of a small number of samples with very small actual values. """ def __init__(self, metricSpec): super(MetricAltMAPE, self).__init__(metricSpec) self._accumulatedGroundTruth = 0 self._accumulatedError = 0 def addInstance(self, groundTruth, prediction, record = None): # If missing data, if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None: return self.aggregateError # Compute absolute error error = abs(groundTruth - prediction) if self.verbosity > 0: print "MetricAltMAPE:\n groundTruth: %s\n Prediction: " \ "%s\n Error: %s" % (groundTruth, prediction, error) # Update the accumulated groundTruth and aggregate error if self.history is not None: self.history.append((groundTruth, error)) if len(self.history) > self.spec.params["window"] : (oldGT, oldErr) = self.history.popleft() self._accumulatedGroundTruth -= oldGT self._accumulatedError -= oldErr self._accumulatedGroundTruth += abs(groundTruth) self._accumulatedError += error # Compute aggregate pct error if self._accumulatedGroundTruth > 0: self.aggregateError = 100.0 * self._accumulatedError / \ self._accumulatedGroundTruth else: self.aggregateError = 0 if self.verbosity >= 1: print " accumGT:", self._accumulatedGroundTruth print " accumError:", self._accumulatedError print " aggregateError:", self.aggregateError self.steps += 1 return self.aggregateError class MetricMAPE(AggregateMetric): """ computes the "Classic" Mean Absolute Percent Error. This computes the percent error for each sample, and then gets an average. Note that this can suffer from samples where the actual value is very small or zero - this one sample can drastically alter the mean. To avoid this potential issue, use 'altMAPE' instead. This metric is provided mainly as a convenience when comparing results against other investigations that have also used MAPE. """ def __init__(self, metricSpec): super(MetricMAPE, self).__init__(metricSpec) self._accumulatedPctError = 0 def addInstance(self, groundTruth, prediction, record = None): # If missing data, if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None: return self.aggregateError # Compute absolute error if groundTruth != 0: pctError = float(abs(groundTruth - prediction))/groundTruth else: # Ignore this sample if self.verbosity > 0: print "Ignoring sample with groundTruth of 0" self.steps += 1 return self.aggregateError if self.verbosity > 0: print "MetricMAPE:\n groundTruth: %s\n Prediction: " \ "%s\n Error: %s" % (groundTruth, prediction, pctError) # Update the accumulated groundTruth and aggregate error if self.history is not None: self.history.append(pctError) if len(self.history) > self.spec.params["window"] : (oldPctErr) = self.history.popleft() self._accumulatedPctError -= oldPctErr self._accumulatedPctError += pctError # Compute aggregate pct error self.aggregateError = 100.0 * self._accumulatedPctError / len(self.history) if self.verbosity >= 1: print " accumPctError:", self._accumulatedPctError print " aggregateError:", self.aggregateError self.steps += 1 return self.aggregateError class MetricPassThruPrediction(MetricsIface): """ This is not a metric, but rather a facility for passing the predictions generated by a baseline metric through to the prediction output cache produced by a model. 
For example, if you wanted to see the predictions generated for the TwoGram metric, you would specify 'PassThruPredictions' as the 'errorMetric' parameter. This metric class simply takes the prediction and outputs that as the aggregateMetric value. """ def __init__(self, metricSpec): self.spec = metricSpec self.window = metricSpec.params.get("window", 1) self.avg = MovingAverage(self.window) self.value = None def addInstance(self, groundTruth, prediction, record = None): """Compute and store metric value""" self.value = self.avg(prediction) def getMetric(self): """Return the metric value """ return {"value": self.value} #def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer): # # Simply return the prediction as the accumulated error # return prediction # #def aggregate(self, accumulatedError, historyBuffer, steps): # # Simply return the prediction as the aggregateError # return accumulatedError class MetricMovingMean(AggregateMetric): """ computes error metric based on moving mean prediction """ def __init__(self, metricSpec): # This metric assumes a default 'steps' of 1 if not 'steps' in metricSpec.params: metricSpec.params['steps'] = 1 super(MetricMovingMean, self).__init__(metricSpec) # Only supports 1 item in _predictionSteps assert (len(self._predictionSteps) == 1) self.mean_window = 10 if metricSpec.params.has_key('mean_window'): assert metricSpec.params['mean_window'] >= 1 self.mean_window = metricSpec.params['mean_window'] # Construct moving average instance self._movingAverage = MovingAverage(self.mean_window) def getMetric(self): return self._subErrorMetrics[0].getMetric() def addInstance(self, groundTruth, prediction, record = None): # If missing data, if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA: return self._subErrorMetrics[0].aggregateError if self.verbosity > 0: print "groundTruth:\n%s\nPredictions:\n%s\n%s\n" % (groundTruth, prediction, self.getMetric()) # Use ground truth from 'steps' steps ago as our most recent ground truth lastGT = self._getShiftedGroundTruth(groundTruth) if lastGT is None: return self._subErrorMetrics[0].aggregateError mean = self._movingAverage(lastGT) return self._subErrorMetrics[0].addInstance(groundTruth, mean, record) def evalCustomErrorMetric(expr, prediction, groundTruth, tools): sandbox = SafeInterpreter(writer=StringIO()) if isinstance(prediction, dict): sandbox.symtable['prediction'] = tools.mostLikely(prediction) sandbox.symtable['EXP'] = tools.expValue(prediction) sandbox.symtable['probabilityDistribution'] = prediction else: sandbox.symtable['prediction'] = prediction sandbox.symtable['groundTruth'] = groundTruth sandbox.symtable['tools'] = tools error = sandbox(expr) return error class CustomErrorMetric(MetricsIface): """ Custom Error Metric class that handles user defined error metrics """ class CircularBuffer(): """ implementation of a fixed size constant random access circular buffer """ def __init__(self,length): #Create an array to back the buffer #If the length<0 create a zero length array self.data = [None for i in range(max(length,0))] self.elements = 0 self.index = 0 self.dataLength = length def getItem(self,n): #Get item from n steps back if n >= self.elements or (n >= self.dataLength and not self.dataLength < 0): assert False,"Trying to access data not in the stored window" return None if self.dataLength>=0: getInd = (self.index-n-1)%min(self.elements,self.dataLength) else: getInd = (self.index-n-1)%self.elements return self.data[getInd] def pushToEnd(self,obj): ret = None #If storing everything 
simply append right to the list if(self.dataLength < 0 ): self.data.append(obj) self.index+=1 self.elements+=1 return None if(self.elements==self.dataLength): #pop last added element ret = self.data[self.index % self.dataLength] else: #else push new element and increment the element counter self.elements += 1 self.data[self.index % self.dataLength] = obj self.index += 1 return ret def __len__(self): return self.elements def __init__(self,metricSpec): self.metricSpec = metricSpec self.steps = 0 self.error = 0 self.averageError = None self.errorMatrix = None self.evalError = self.evalAbsErr self.errorWindow = 1 self.storeWindow=-1 self.userDataStore = dict() if "errorWindow" in metricSpec.params: self.errorWindow = metricSpec.params["errorWindow"] assert self.errorWindow != 0 , "Window Size cannot be zero" if "storeWindow" in metricSpec.params: self.storeWindow = metricSpec.params["storeWindow"] assert self.storeWindow != 0 , "Window Size cannot be zero" self.errorStore = self.CircularBuffer(self.errorWindow) self.recordStore = self.CircularBuffer(self.storeWindow) if "customExpr" in metricSpec.params: assert not "customFuncDef" in metricSpec.params assert not "customFuncSource" in metricSpec.params self.evalError = partial(evalCustomErrorMetric, metricSpec.params["customExpr"]) elif "customFuncSource" in metricSpec.params: assert not "customFuncDef" in metricSpec.params assert not "customExpr" in metricSpec.params exec(metricSpec.params["customFuncSource"]) #pull out defined function from locals self.evalError = locals()["getError"] elif "customFuncDef" in metricSpec.params: assert not "customFuncSource" in metricSpec.params assert not "customExpr" in metricSpec.params self.evalError = metricSpec.params["customFuncDef"] def getPrediction(self,n): #Get prediction from n steps ago return self.recordStore.getItem(n)["prediction"] def getFieldValue(self,n,field): #Get field value from record n steps ago record = self.recordStore.getItem(n)["record"] value = record[field] return value def getGroundTruth(self,n): #Get the groundTruth from n steps ago return self.recordStore.getItem(n)["groundTruth"] def getBufferLen(self): return len(self.recordStore) def storeData(self,name,obj): #Store custom user data self.userDataStore[name] = obj def getData(self,name): #Retrieve user data if name in self.userDataStore: return self.userDataStore[name] return None def mostLikely(self, pred): """ Helper function to return a scalar value representing the most likely outcome given a probability distribution """ if len(pred) == 1: return pred.keys()[0] mostLikelyOutcome = None maxProbability = 0 for prediction, probability in pred.items(): if probability > maxProbability: mostLikelyOutcome = prediction maxProbability = probability return mostLikelyOutcome def expValue(self, pred): """ Helper function to return a scalar value representing the expected value of a probability distribution """ if len(pred) == 1: return pred.keys()[0] return sum([x*p for x,p in pred.items()]) def evalAbsErr(self,pred,ground): return abs(pred-ground) def getMetric(self): return {'value': self.averageError, "stats" : {"steps" : self.steps}} def addInstance(self, groundTruth, prediction, record = None): #If missing data, if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA or prediction is None: return self.averageError self.recordStore.pushToEnd({"groundTruth":groundTruth, "prediction":prediction,"record":record}) if isinstance(prediction, dict): assert not any(True for p in prediction if p is None), \ "Invalid prediction of `None` in call
to %s.addInstance()" % \ self.__class__.__name__ error = self.evalError(prediction,groundTruth,self) popped = self.errorStore.pushToEnd({"error":error}) if not popped is None: #Subtract error that dropped out of the buffer self.error -= popped["error"] self.error+= error self.averageError = float(self.error)/self.errorStore.elements self.steps+=1 return self.averageError class MetricMovingMode(AggregateMetric): """ computes error metric based on moving mode prediction """ def __init__(self, metricSpec): super(MetricMovingMode, self).__init__(metricSpec) self.mode_window = 100 if metricSpec.params.has_key('mode_window'): assert metricSpec.params['mode_window'] >= 1 self.mode_window = metricSpec.params['mode_window'] # Only supports one stepsize assert len(self._predictionSteps) == 1 # Construct moving average instance self._movingMode = _MovingMode(self.mode_window) def getMetric(self): return self._subErrorMetrics[0].getMetric() def addInstance(self, groundTruth, prediction, record = None): # If missing data, if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA: return self._subErrorMetrics[0].aggregateError if self.verbosity > 0: print "groundTruth:\n%s\nPredictions:\n%s\n%s\n" % (groundTruth, prediction, self.getMetric()) # Use ground truth from 'steps' steps ago as our most recent ground truth lastGT = self._getShiftedGroundTruth(groundTruth) if lastGT is None: return self._subErrorMetrics[0].aggregateError mode = self._movingMode(lastGT) result = self._subErrorMetrics[0].addInstance(groundTruth, mode, record) return result class MetricTrivial(AggregateMetric): """ computes a metric against the ground truth N steps ago. The metric to compute is designated by the 'errorMetric' entry in the metric params. """ def __init__(self, metricSpec): # This metric assumes a default 'steps' of 1 if not 'steps' in metricSpec.params: metricSpec.params['steps'] = 1 super(MetricTrivial, self).__init__(metricSpec) # Only supports one stepsize assert len(self._predictionSteps) == 1 # Must have a suberror metric assert self._subErrorMetrics is not None, "This metric requires that you" \ + " specify the name of another base metric via the 'errorMetric' " \ + " parameter." def getMetric(self): return self._subErrorMetrics[0].getMetric() def addInstance(self, groundTruth, prediction, record = None): # Use ground truth from 'steps' steps ago as our "prediction" prediction = self._getShiftedGroundTruth(groundTruth) if self.verbosity > 0: print "groundTruth:\n%s\nPredictions:\n%s\n%s\n" % (groundTruth, prediction, self.getMetric()) # If missing data, if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA: return self._subErrorMetrics[0].aggregateError # Our "prediction" is simply what happened 'steps' steps ago return self._subErrorMetrics[0].addInstance(groundTruth, prediction, record) class MetricTwoGram(AggregateMetric): """ computes error metric based on one-grams. The groundTruth passed into this metric is the encoded output of the field (an array of 1's and 0's). 
""" def __init__(self, metricSpec): # This metric assumes a default 'steps' of 1 if not 'steps' in metricSpec.params: metricSpec.params['steps'] = 1 super(MetricTwoGram, self).__init__(metricSpec) # Only supports 1 stepsize assert len(self._predictionSteps) == 1 # Must supply the predictionField assert(metricSpec.params.has_key('predictionField')) self.predictionField = metricSpec.params['predictionField'] self.twoGramDict = dict() def getMetric(self): return self._subErrorMetrics[0].getMetric() def addInstance(self, groundTruth, prediction, record = None): # If missing data return previous error (assuming one gram will always # receive an instance of ndarray) if groundTruth.any() == False: return self._subErrorMetrics[0].aggregateError # Get actual ground Truth value from record. For this metric, the # "groundTruth" parameter is the encoder output and we use actualGroundTruth # to hold the input to the encoder (either a scalar or a category string). # # We will use 'groundTruthKey' (the stringified encoded value of # groundTruth) as the key for our one-gram dict and the 'actualGroundTruth' # as the values in our dict, which are used to compute our prediction. actualGroundTruth = record[self.predictionField] # convert binary array to a string groundTruthKey = str(groundTruth) # Get the ground truth key from N steps ago, that is what we will base # our prediction on. Note that our "prediction" is the prediction for the # current time step, to be compared to actualGroundTruth prevGTKey = self._getShiftedGroundTruth(groundTruthKey) # ------------------------------------------------------------------------- # Get the prediction based on the previously known ground truth # If no previous, just default to "" or 0, depending on the groundTruth # data type. if prevGTKey == None: if isinstance(actualGroundTruth,str): pred = "" else: pred = 0 # If the previous was never seen before, create a new dict for it. elif not prevGTKey in self.twoGramDict: if isinstance(actualGroundTruth,str): pred = "" else: pred = 0 # Create a new dict for it self.twoGramDict[prevGTKey] = {actualGroundTruth:1} # If it was seen before, compute the prediction from the past history else: # Find most often occurring 1-gram if isinstance(actualGroundTruth,str): # Get the most frequent category that followed the previous timestep twoGramMax = max(self.twoGramDict[prevGTKey].items(), key=itemgetter(1)) pred = twoGramMax[0] else: # Get average of all possible values that followed the previous # timestep pred = sum(self.twoGramDict[prevGTKey].iterkeys()) pred /= len(self.twoGramDict[prevGTKey]) # Add current ground truth to dict if actualGroundTruth in self.twoGramDict[prevGTKey]: self.twoGramDict[prevGTKey][actualGroundTruth] += 1 else: self.twoGramDict[prevGTKey][actualGroundTruth] = 1 if self.verbosity > 0: print "\nencoding:%s\nactual:%s\nprevEncoding:%s\nprediction:%s\nmetric:%s" % \ (groundTruth, actualGroundTruth, prevGTKey, pred, self.getMetric()) return self._subErrorMetrics[0].addInstance(actualGroundTruth, pred, record) class MetricAccuracy(AggregateMetric): """ computes simple accuracy for an enumerated type. all inputs are treated as discrete members of a set, therefore for example 0.5 is only a correct response if the ground truth is exactly 0.5. 
Inputs can be strings, integers, or reals """ def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer): # This is really an accuracy measure rather than an "error" measure error = 1.0 if groundTruth == prediction else 0.0 accumulatedError += error if historyBuffer is not None: historyBuffer.append(error) if len(historyBuffer) > self.spec.params["window"] : accumulatedError -= historyBuffer.popleft() return accumulatedError def aggregate(self, accumulatedError, historyBuffer, steps): n = steps if historyBuffer is not None: n = len(historyBuffer) return accumulatedError/ float(n) class MetricAveError(AggregateMetric): """Simply the inverse of the Accuracy metric. More consistent with scalar metrics because they all report an error to be minimized""" def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer): error = 1.0 if groundTruth != prediction else 0.0 accumulatedError += error if historyBuffer is not None: historyBuffer.append(error) if len(historyBuffer) > self.spec.params["window"] : accumulatedError -= historyBuffer.popleft() return accumulatedError def aggregate(self, accumulatedError, historyBuffer, steps): n = steps if historyBuffer is not None: n = len(historyBuffer) return accumulatedError/ float(n) class MetricNegAUC(AggregateMetric): """ Computes -1 * AUC (Area Under the Curve) of the ROC (Receiver Operator Characteristics) curve. We compute -1 * AUC because metrics are optimized to be LOWER when running hypersearch. For this, we assume that category 1 is the "positive" category and we are generating an ROC curve with the TPR (True Positive Rate) of category 1 on the y-axis and the FPR (False Positive Rate) on the x-axis. """ def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer): """ Accumulate history of groundTruth and "prediction" values. For this metric, groundTruth is the actual category and "prediction" is a dict containing one top-level item with a key of 0 (meaning this is the 0-step classification) and a value which is another dict, which contains the probability for each category as output from the classifier. For example, this is what "prediction" would be if the classifier said that category 0 had a 0.6 probability and category 1 had a 0.4 probability: {0:0.6, 1: 0.4} """ # We disable it within aggregate() if we find that the classifier classes # are not compatible with AUC calculations. if self.disabled: return 0 # Just store the groundTruth, probability into our history buffer. We will # wait until aggregate gets called to actually compute AUC. if historyBuffer is not None: historyBuffer.append((groundTruth, prediction[0])) if len(historyBuffer) > self.spec.params["window"] : historyBuffer.popleft() # accumulatedError not used in this metric return 0 def aggregate(self, accumulatedError, historyBuffer, steps): # If disabled, do nothing.
if self.disabled: return 0.0 if historyBuffer is not None: n = len(historyBuffer) else: return 0.0 # For performance reasons, only re-compute this every 'computeEvery' steps frequency = self.spec.params.get('computeEvery', 1) if ((steps+1) % frequency) != 0: return self.aggregateError # Compute the ROC curve and the area underneath it actuals = [gt for (gt, probs) in historyBuffer] classes = np.unique(actuals) # We can only compute ROC when we have at least 1 sample of each category if len(classes) < 2: return -1 * 0.5 # Print warning the first time this metric is asked to be computed on a # problem with more than 2 classes if sorted(classes) != [0,1]: print "WARNING: AUC only implemented for binary classifications where " \ "the categories are category 0 and 1. In this network, the " \ "categories are: %s" % (classes) print "WARNING: Computation of this metric is disabled for the remainder of " \ "this experiment." self.disabled = True return 0.0 # Compute the ROC and AUC. Note that because we are online, there's a # chance that some of the earlier classification probabilities don't # have the True class (category 1) yet because it hasn't been seen yet. # Therefore, we use probs.get() with a default value of 0. scores = [probs.get(1, 0) for (gt, probs) in historyBuffer] (fpr, tpr, thresholds) = roc.ROCCurve(actuals, scores) auc = roc.AreaUnderCurve(fpr, tpr) # Debug? if False: print print "AUC metric debug info (%d steps):" % (steps) print " actuals:", actuals print " probabilities:", ["%.2f" % x for x in scores] print " fpr:", fpr print " tpr:", tpr print " thresholds:", thresholds print " AUC:", auc return -1 * auc class MetricMultiStep(AggregateMetric): """ This is an "uber" metric which is used to apply one of the other basic metrics to a specific step in a multi-step prediction. The specParams are expected to contain: 'errorMetric': name of basic metric to apply 'steps': compare prediction['steps'] to the current ground truth. Note that the metrics manager has already performed the time shifting for us - it passes us the prediction element from 'steps' steps ago and asks us to compare that to the current ground truth. When multiple steps of prediction are requested, we average the results of the underlying metric for each step. """ def __init__(self, metricSpec): super(MetricMultiStep, self).__init__(metricSpec) assert self._subErrorMetrics is not None def getMetric(self): return {'value': self.aggregateError, "stats" : {"steps" : self.steps}} def addInstance(self, groundTruth, prediction, record = None): # If missing data, if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA: return self.aggregateError # Get the prediction for this time step aggErrSum = 0 try: for step, subErrorMetric in \ zip(self._predictionSteps, self._subErrorMetrics): stepPrediction = prediction[step] # Unless this is a custom_error_metric, when we have a dict of # probabilities, get the most probable one. For custom error metrics, # we pass the probabilities in so that it can decide how best to deal with # them. 
if isinstance(stepPrediction, dict) \ and not isinstance(subErrorMetric, CustomErrorMetric): predictions = [(prob,value) for (value, prob) in \ stepPrediction.iteritems()] predictions.sort() stepPrediction = predictions[-1][1] # Get sum of the errors aggErr = subErrorMetric.addInstance(groundTruth, stepPrediction, record) if self.verbosity >= 2: print "MetricMultiStep %s: aggErr for stepSize %d: %s" % \ (self._predictionSteps, step, aggErr) aggErrSum += aggErr except: pass # Return average aggregate error across all step sizes self.aggregateError = aggErrSum / len(self._subErrorMetrics) if self.verbosity >= 2: print "MetricMultiStep %s: aggErrAvg: %s" % (self._predictionSteps, self.aggregateError) self.steps += 1 if self.verbosity >= 1: print "\nMetricMultiStep %s: \n groundTruth: %s\n Predictions: %s" \ "\n Metric: %s" % (self._predictionSteps, groundTruth, prediction, self.getMetric()) return self.aggregateError class MetricMultiStepProbability(AggregateMetric): """ This is an "uber" metric which is used to apply one of the other basic metrics to a specific step in a multi-step prediction. The specParams are expected to contain: 'errorMetric': name of basic metric to apply 'steps': compare prediction['steps'] to the current ground truth. Note that the metrics manager has already performed the time shifting for us - it passes us the prediction element from 'steps' steps ago and asks us to compare that to the current ground truth. """ def __init__(self, metricSpec): # Default window should be 1 if not 'window' in metricSpec.params: metricSpec.params['window'] = 1 super(MetricMultiStepProbability, self).__init__(metricSpec) # Must have a suberror metric assert self._subErrorMetrics is not None, "This metric requires that you" \ + " specify the name of another base metric via the 'errorMetric' " \ + " parameter." # Force all subErrorMetric windows to 1. This is necessary because by # default they each do their own history averaging assuming that their # addInstance() gets called once per iteration.
But, in this metric # we actually call into each subErrorMetric multiple times per iteration for subErrorMetric in self._subErrorMetrics: subErrorMetric.window = 1 subErrorMetric.spec.params['window'] = 1 self._movingAverage = MovingAverage(self.window) def getMetric(self): return {'value': self.aggregateError, "stats" : {"steps" : self.steps}} def addInstance(self, groundTruth, prediction, record = None): # If missing data, if groundTruth == SENTINEL_VALUE_FOR_MISSING_DATA: return self.aggregateError if self.verbosity >= 1: print "\nMetricMultiStepProbability %s: \n groundTruth: %s\n " \ "Predictions: %s" % (self._predictionSteps, groundTruth, prediction) # Get the aggregateErrors for all requested step sizes and average them aggErrSum = 0 for step, subErrorMetric in \ zip(self._predictionSteps, self._subErrorMetrics): stepPrediction = prediction[step] # If it's a dict of probabilities, get the expected value error = 0 if isinstance(stepPrediction, dict): expectedValue = 0 # For every possible prediction multiply its error by its probability for (pred, prob) in stepPrediction.iteritems(): error += subErrorMetric.addInstance(groundTruth, pred, record) \ * prob else: error += subErrorMetric.addInstance(groundTruth, stepPrediction, record) if self.verbosity >= 2: print ("MetricMultiStepProbability %s: aggErr for stepSize %d: %s" % (self._predictionSteps, step, error)) aggErrSum += error # Return aggregate error avgAggErr = aggErrSum / len(self._subErrorMetrics) self.aggregateError = self._movingAverage(avgAggErr) if self.verbosity >= 2: print ("MetricMultiStepProbability %s: aggErr over all steps, this " "iteration (%d): %s" % (self._predictionSteps, self.steps, avgAggErr)) print ("MetricMultiStepProbability %s: aggErr moving avg: %s" % (self._predictionSteps, self.aggregateError)) self.steps += 1 if self.verbosity >= 1: print "MetricMultiStepProbability %s: \n Error: %s\n Metric: %s" % \ (self._predictionSteps, avgAggErr, self.getMetric()) return self.aggregateError class MetricMulti(MetricsIface): """Multi metric can combine multiple other (sub)metrics and weight them to provide a combined score.""" def __init__(self, metricSpec): """MetricMulti constructor using metricSpec is not allowed.""" raise ValueError("MetricMulti cannot be constructed from metricSpec string!
" "Use MetricMulti(weights,metrics) constructor instead.") def __init__(self, weights, metrics, window=None): """MetricMulti @param weights - [list of floats] used as weights @param metrics - [list of submetrics] @param window - (opt) window size for moving average, or None when disabled """ if (weights is None or not isinstance(weights, list) or not len(weights) > 0 or not isinstance(weights[0], float)): raise ValueError("MetricMulti requires 'weights' parameter as a [list of floats]") self.weights = weights if (metrics is None or not isinstance(metrics, list) or not len(metrics) > 0 or not isinstance(metrics[0], MetricsIface)): raise ValueError("MetricMulti requires 'metrics' parameter as a [list of Metrics]") self.metrics = metrics if window is not None: self.movingAvg = MovingAverage(windowSize=window) else: self.movingAvg = None def addInstance(self, groundTruth, prediction, record = None): err = 0.0 subResults = [m.addInstance(groundTruth, prediction, record) for m in self.metrics] for i in xrange(len(self.weights)): if subResults[i] is not None: err += subResults[i]*self.weights[i] else: # submetric returned None, propagate self.err = None return None if self.verbosity > 2: print "IN=",groundTruth," pred=",prediction,": w=",self.weights[i]," metric=",self.metrics[i]," value=",m," err=",err if self.movingAvg is not None: err=self.movingAvg(err) self.err = err return err def __repr__(self): return "MetricMulti(weights=%s, metrics=%s)" % (self.weights, self.metrics) def getMetric(self): return {'value': self.err, "stats" : {"weights" : self.weights}}
1
19,401
There's no particular reason to use a short name. Can we use a more descriptive name for the metric? Perhaps "negativeLogLikelihood"?
numenta-nupic
py
@@ -745,7 +745,10 @@ static void skipArgumentList (tokenInfo *const token, boolean include_newlines, while (nest_level > 0 && ! isType (token, TOKEN_EOF)) { readTokenFull (token, FALSE, repr); - if (isType (token, TOKEN_OPEN_PAREN)) + if (isType (token, TOKEN_KEYWORD) && token->keyword == KEYWORD_function && + repr == NULL) + parseFunction (token); + else if (isType (token, TOKEN_OPEN_PAREN)) nest_level++; else if (isType (token, TOKEN_CLOSE_PAREN)) nest_level--;
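The patch above makes skipArgumentList() hand a `function` keyword encountered inside an argument list to parseFunction() (only when repr is NULL, i.e. when no signature is being captured), so that a named callback such as `setTimeout(function tick() {}, 100)` is parsed and tagged instead of being discarded along with the other argument tokens. A minimal Python sketch of that control flow on a token stream (hypothetical names, not the ctags C implementation):

def skip_argument_list(tokens, tags):
    # The caller has already consumed the opening '('.
    depth = 1
    for tok in tokens:
        if tok == 'function':
            # The case the patch adds: descend into the nested function
            # (reduced here to recording its name) instead of skipping it.
            tags.append(next(tokens))
        elif tok == '(':
            depth += 1
        elif tok == ')':
            depth -= 1
            if depth == 0:
                return

tags = []
tokens = iter(['function', 'tick', '(', ')', '{', '}', ',', '100', ')'])
skip_argument_list(tokens, tags)
print(tags)  # -> ['tick']

Without the `function` branch, the whole argument list is consumed purely by paren counting and `tick` never produces a tag.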
1
/* * Copyright (c) 2003, Darren Hiebert * * This source code is released for free distribution under the terms of the * GNU General Public License version 2 or (at your option) any later version. * * This module contains functions for generating tags for JavaScript language * files. * * This is a good reference for different forms of the function statement: * http://www.permadi.com/tutorial/jsFunc/ * Another good reference: * http://developer.mozilla.org/en/docs/Core_JavaScript_1.5_Guide */ /* * INCLUDE FILES */ #include "general.h" /* must always come first */ #include <ctype.h> /* to define isalpha () */ #ifdef DEBUG #include <stdio.h> #endif #include <string.h> #include "debug.h" #include "entry.h" #include "keyword.h" #include "parse.h" #include "read.h" #include "routines.h" #include "vstring.h" /* * MACROS */ #define isType(token,t) (boolean) ((token)->type == (t)) #define isKeyword(token,k) (boolean) ((token)->keyword == (k)) /* * DATA DECLARATIONS */ /* * Tracks class and function names already created */ static stringList *ClassNames; static stringList *FunctionNames; /* Used to specify type of keyword. */ typedef enum eKeywordId { KEYWORD_NONE = -1, KEYWORD_function, KEYWORD_capital_function, KEYWORD_capital_object, KEYWORD_prototype, KEYWORD_var, KEYWORD_let, KEYWORD_const, KEYWORD_new, KEYWORD_this, KEYWORD_for, KEYWORD_while, KEYWORD_do, KEYWORD_if, KEYWORD_else, KEYWORD_switch, KEYWORD_try, KEYWORD_catch, KEYWORD_finally, KEYWORD_sap, KEYWORD_return } keywordId; typedef enum eTokenType { TOKEN_UNDEFINED, TOKEN_EOF, TOKEN_CHARACTER, TOKEN_CLOSE_PAREN, TOKEN_SEMICOLON, TOKEN_COLON, TOKEN_COMMA, TOKEN_KEYWORD, TOKEN_OPEN_PAREN, TOKEN_IDENTIFIER, TOKEN_STRING, TOKEN_TEMPLATE_STRING, TOKEN_PERIOD, TOKEN_OPEN_CURLY, TOKEN_CLOSE_CURLY, TOKEN_EQUAL_SIGN, TOKEN_OPEN_SQUARE, TOKEN_CLOSE_SQUARE, TOKEN_REGEXP, TOKEN_POSTFIX_OPERATOR, TOKEN_BINARY_OPERATOR } tokenType; typedef struct sTokenInfo { tokenType type; keywordId keyword; vString * string; vString * scope; unsigned long lineNumber; MIOPos filePosition; int nestLevel; boolean ignoreTag; } tokenInfo; /* * DATA DEFINITIONS */ static tokenType LastTokenType; static tokenInfo *NextToken; static langType Lang_js; typedef enum { JSTAG_FUNCTION, JSTAG_CLASS, JSTAG_METHOD, JSTAG_PROPERTY, JSTAG_CONSTANT, JSTAG_VARIABLE, JSTAG_COUNT } jsKind; static kindOption JsKinds [] = { { TRUE, 'f', "function", "functions" }, { TRUE, 'c', "class", "classes" }, { TRUE, 'm', "method", "methods" }, { TRUE, 'p', "property", "properties" }, { TRUE, 'C', "constant", "constants" }, { TRUE, 'v', "variable", "global variables" } }; static const keywordTable JsKeywordTable [] = { /* keyword keyword ID */ { "function", KEYWORD_function }, { "Function", KEYWORD_capital_function }, { "Object", KEYWORD_capital_object }, { "prototype", KEYWORD_prototype }, { "var", KEYWORD_var }, { "let", KEYWORD_let }, { "const", KEYWORD_const }, { "new", KEYWORD_new }, { "this", KEYWORD_this }, { "for", KEYWORD_for }, { "while", KEYWORD_while }, { "do", KEYWORD_do }, { "if", KEYWORD_if }, { "else", KEYWORD_else }, { "switch", KEYWORD_switch }, { "try", KEYWORD_try }, { "catch", KEYWORD_catch }, { "finally", KEYWORD_finally }, { "sap", KEYWORD_sap }, { "return", KEYWORD_return } }; /* * FUNCTION DEFINITIONS */ /* Recursive functions */ static void readTokenFull (tokenInfo *const token, boolean include_newlines, vString *const repr); static void parseFunction (tokenInfo *const token); static boolean parseBlock (tokenInfo *const token, tokenInfo *const orig_parent); static boolean 
parseLine (tokenInfo *const token, tokenInfo *const parent, boolean is_inside_class); static void parseUI5 (tokenInfo *const token); static boolean isIdentChar (const int c) { return (boolean) (isalpha (c) || isdigit (c) || c == '$' || c == '@' || c == '_' || c == '#'); } static tokenInfo *newToken (void) { tokenInfo *const token = xMalloc (1, tokenInfo); token->type = TOKEN_UNDEFINED; token->keyword = KEYWORD_NONE; token->string = vStringNew (); token->scope = vStringNew (); token->nestLevel = 0; token->ignoreTag = FALSE; token->lineNumber = getInputLineNumber (); token->filePosition = getInputFilePosition (); return token; } static void deleteToken (tokenInfo *const token) { vStringDelete (token->string); vStringDelete (token->scope); eFree (token); } static void copyToken (tokenInfo *const dest, const tokenInfo *const src, boolean const include_non_read_info) { dest->lineNumber = src->lineNumber; dest->filePosition = src->filePosition; dest->type = src->type; dest->keyword = src->keyword; vStringCopy(dest->string, src->string); if (include_non_read_info) { dest->nestLevel = src->nestLevel; vStringCopy(dest->scope, src->scope); } } /* * Tag generation functions */ static void makeJsTag (tokenInfo *const token, const jsKind kind, vString *const signature) { if (JsKinds [kind].enabled && ! token->ignoreTag ) { const char *name = vStringValue (token->string); vString *fullscope = vStringNewCopy (token->scope); const char *p; tagEntryInfo e; if (kind != JSTAG_PROPERTY && (p = strrchr (name, '.')) != NULL ) { if (vStringLength (fullscope) > 0) vStringPut (fullscope, '.'); vStringNCatS (fullscope, name, (size_t) (p - name)); name = p + 1; } initTagEntry (&e, name, &(JsKinds [kind])); e.lineNumber = token->lineNumber; e.filePosition = token->filePosition; if ( vStringLength(fullscope) > 0 ) { jsKind parent_kind = JSTAG_CLASS; /* * If we're creating a function (and not a method), * guess we're inside another function */ if (kind == JSTAG_FUNCTION) parent_kind = JSTAG_FUNCTION; e.extensionFields.scopeKind = &(JsKinds [parent_kind]); e.extensionFields.scopeName = vStringValue (fullscope); } if (signature && vStringLength(signature)) { size_t i; /* sanitize signature by replacing all control characters with a * space (because it's simple). * there should never be any junk in a valid signature, but who * knows what the user wrote and CTags doesn't cope well with weird * characters. */ for (i = 0; i < signature->length; i++) { unsigned char c = (unsigned char) signature->buffer[i]; if (c < 0x20 /* below space */ || c == 0x7F /* DEL */) signature->buffer[i] = ' '; } e.extensionFields.signature = vStringValue(signature); } makeTagEntry (&e); vStringDelete (fullscope); } } static void makeClassTag (tokenInfo *const token, vString *const signature) { vString * fulltag; if ( ! token->ignoreTag ) { fulltag = vStringNew (); if (vStringLength (token->scope) > 0) { vStringCopy(fulltag, token->scope); vStringCatS (fulltag, "."); vStringCatS (fulltag, vStringValue(token->string)); } else { vStringCopy(fulltag, token->string); } vStringTerminate(fulltag); if ( ! stringListHas(ClassNames, vStringValue (fulltag)) ) { stringListAdd (ClassNames, vStringNewCopy (fulltag)); makeJsTag (token, JSTAG_CLASS, signature); } vStringDelete (fulltag); } } static void makeFunctionTag (tokenInfo *const token, vString *const signature) { vString * fulltag; if ( ! 
token->ignoreTag ) { fulltag = vStringNew (); if (vStringLength (token->scope) > 0) { vStringCopy(fulltag, token->scope); vStringCatS (fulltag, "."); vStringCatS (fulltag, vStringValue(token->string)); } else { vStringCopy(fulltag, token->string); } vStringTerminate(fulltag); if ( ! stringListHas(FunctionNames, vStringValue (fulltag)) ) { stringListAdd (FunctionNames, vStringNewCopy (fulltag)); makeJsTag (token, JSTAG_FUNCTION, signature); } vStringDelete (fulltag); } } /* * Parsing functions */ static void parseString (vString *const string, const int delimiter) { boolean end = FALSE; while (! end) { int c = getcFromInputFile (); if (c == EOF) end = TRUE; else if (c == '\\') { /* Eat the escape sequence (\", \', etc). We properly handle * <LineContinuation> by eating a whole \<CR><LF> not to see <LF> * as an unescaped character, which is invalid and handled below. * Also, handle the fact that <LineContinuation> produces an empty * sequence. * See ECMA-262 7.8.4 */ c = getcFromInputFile (); if (c != '\r' && c != '\n') vStringPut(string, c); else if (c == '\r') { c = getcFromInputFile(); if (c != '\n') ungetcToInputFile (c); } } else if (c == delimiter) end = TRUE; else if (c == '\r' || c == '\n') { /* those are invalid when not escaped */ end = TRUE; /* we don't want to eat the newline itself to let the automatic * semicolon insertion code kick in */ ungetcToInputFile (c); } else vStringPut (string, c); } vStringTerminate (string); } static void parseRegExp (void) { int c; boolean in_range = FALSE; do { c = getcFromInputFile (); if (! in_range && c == '/') { do /* skip flags */ { c = getcFromInputFile (); } while (isalpha (c)); ungetcToInputFile (c); break; } else if (c == '\\') c = getcFromInputFile (); /* skip next character */ else if (c == '[') in_range = TRUE; else if (c == ']') in_range = FALSE; } while (c != EOF); } /* Read a C identifier beginning with "firstChar" and places it into * "name". */ static void parseIdentifier (vString *const string, const int firstChar) { int c = firstChar; Assert (isIdentChar (c)); do { vStringPut (string, c); c = getcFromInputFile (); } while (isIdentChar (c)); vStringTerminate (string); ungetcToInputFile (c); /* unget non-identifier character */ } static void parseTemplateString (vString *const string) { int c; do { c = getcFromInputFile (); if (c == '`') break; vStringPut (string, c); if (c == '\\') { c = getcFromInputFile(); vStringPut(string, c); } else if (c == '$') { c = getcFromInputFile (); if (c != '{') ungetcToInputFile (c); else { int depth = 1; /* we need to use the real token machinery to handle strings, * comments, regexes and whatnot */ tokenInfo *token = newToken (); LastTokenType = TOKEN_UNDEFINED; vStringPut(string, c); do { readTokenFull (token, FALSE, string); if (isType (token, TOKEN_OPEN_CURLY)) depth++; else if (isType (token, TOKEN_CLOSE_CURLY)) depth--; } while (! 
isType (token, TOKEN_EOF) && depth > 0); deleteToken (token); } } } while (c != EOF); vStringTerminate (string); } static void readTokenFull (tokenInfo *const token, boolean include_newlines, vString *const repr) { int c; int i; boolean newline_encountered = FALSE; /* if we've got a token held back, emit it */ if (NextToken) { copyToken (token, NextToken, FALSE); deleteToken (NextToken); NextToken = NULL; return; } token->type = TOKEN_UNDEFINED; token->keyword = KEYWORD_NONE; vStringClear (token->string); getNextChar: i = 0; do { c = getcFromInputFile (); if (include_newlines && (c == '\r' || c == '\n')) newline_encountered = TRUE; i++; } while (c == '\t' || c == ' ' || c == '\r' || c == '\n'); token->lineNumber = getInputLineNumber (); token->filePosition = getInputFilePosition (); if (repr) { if (i > 1) vStringPut (repr, ' '); vStringPut (repr, c); } switch (c) { case EOF: token->type = TOKEN_EOF; break; case '(': token->type = TOKEN_OPEN_PAREN; break; case ')': token->type = TOKEN_CLOSE_PAREN; break; case ';': token->type = TOKEN_SEMICOLON; break; case ',': token->type = TOKEN_COMMA; break; case '.': token->type = TOKEN_PERIOD; break; case ':': token->type = TOKEN_COLON; break; case '{': token->type = TOKEN_OPEN_CURLY; break; case '}': token->type = TOKEN_CLOSE_CURLY; break; case '=': token->type = TOKEN_EQUAL_SIGN; break; case '[': token->type = TOKEN_OPEN_SQUARE; break; case ']': token->type = TOKEN_CLOSE_SQUARE; break; case '+': case '-': { int d = getcFromInputFile (); if (d == c) /* ++ or -- */ token->type = TOKEN_POSTFIX_OPERATOR; else { ungetcToInputFile (d); token->type = TOKEN_BINARY_OPERATOR; } break; } case '*': case '%': case '?': case '>': case '<': case '^': case '|': case '&': token->type = TOKEN_BINARY_OPERATOR; break; case '\'': case '"': token->type = TOKEN_STRING; parseString (token->string, c); token->lineNumber = getInputLineNumber (); token->filePosition = getInputFilePosition (); if (repr) { vStringCat (repr, token->string); vStringPut (repr, c); } break; case '`': token->type = TOKEN_TEMPLATE_STRING; parseTemplateString (token->string); token->lineNumber = getInputLineNumber (); token->filePosition = getInputFilePosition (); if (repr) { vStringCat (repr, token->string); vStringPut (repr, c); } break; case '\\': c = getcFromInputFile (); if (c != '\\' && c != '"' && !isspace (c)) ungetcToInputFile (c); token->type = TOKEN_CHARACTER; token->lineNumber = getInputLineNumber (); token->filePosition = getInputFilePosition (); break; case '/': { int d = getcFromInputFile (); if ( (d != '*') && /* is this the start of a comment? */ (d != '/') ) /* is a one line comment? */ { ungetcToInputFile (d); switch (LastTokenType) { case TOKEN_CHARACTER: case TOKEN_IDENTIFIER: case TOKEN_STRING: case TOKEN_TEMPLATE_STRING: case TOKEN_CLOSE_CURLY: case TOKEN_CLOSE_PAREN: case TOKEN_CLOSE_SQUARE: token->type = TOKEN_BINARY_OPERATOR; break; default: token->type = TOKEN_REGEXP; parseRegExp (); token->lineNumber = getInputLineNumber (); token->filePosition = getInputFilePosition (); break; } } else { if (repr) /* remove the / we added */ repr->buffer[--repr->length] = 0; if (d == '*') { do { skipToCharacterInInputFile ('*'); c = getcFromInputFile (); if (c == '/') break; else ungetcToInputFile (c); } while (c != EOF && c != '\0'); goto getNextChar; } else if (d == '/') /* is this the start of a comment? 
*/ { skipToCharacterInInputFile ('\n'); /* if we care about newlines, put it back so it is seen */ if (include_newlines) ungetcToInputFile ('\n'); goto getNextChar; } } break; } case '#': /* skip shebang in case of e.g. Node.js scripts */ if (token->lineNumber > 1) token->type = TOKEN_UNDEFINED; else if ((c = getcFromInputFile ()) != '!') { ungetcToInputFile (c); token->type = TOKEN_UNDEFINED; } else { skipToCharacterInInputFile ('\n'); goto getNextChar; } break; default: if (! isIdentChar (c)) token->type = TOKEN_UNDEFINED; else { parseIdentifier (token->string, c); token->lineNumber = getInputLineNumber (); token->filePosition = getInputFilePosition (); token->keyword = analyzeToken (token->string, Lang_js); if (isKeyword (token, KEYWORD_NONE)) token->type = TOKEN_IDENTIFIER; else token->type = TOKEN_KEYWORD; if (repr && vStringLength (token->string) > 1) vStringCatS (repr, vStringValue (token->string) + 1); } break; } if (include_newlines && newline_encountered) { /* This isn't strictly correct per the standard, but following the * real rules means understanding all statements, and that's not * what the parser currently does. What we do here is a guess, by * avoiding inserting semicolons that would make the statement on * the left or right obviously invalid. Hopefully this should not * have false negatives (e.g. should not miss insertion of a semicolon) * but might have false positives (e.g. it will wrongfully emit a * semicolon sometimes, i.e. for the newline in "foo\n(bar)"). * This should however be mostly harmless as we only deal with * newlines in specific situations where we know a false positive * wouldn't hurt too bad. */ /* these already end a statement, so no need to duplicate it */ #define IS_STMT_SEPARATOR(t) ((t) == TOKEN_SEMICOLON || \ (t) == TOKEN_EOF || \ (t) == TOKEN_COMMA || \ (t) == TOKEN_CLOSE_CURLY || \ (t) == TOKEN_OPEN_CURLY) /* these cannot be the start or end of a statement */ #define IS_BINARY_OPERATOR(t) ((t) == TOKEN_EQUAL_SIGN || \ (t) == TOKEN_COLON || \ (t) == TOKEN_PERIOD || \ (t) == TOKEN_BINARY_OPERATOR) if (! IS_STMT_SEPARATOR(LastTokenType) && ! IS_STMT_SEPARATOR(token->type) && ! IS_BINARY_OPERATOR(LastTokenType) && ! IS_BINARY_OPERATOR(token->type) && /* these cannot be followed by a semicolon */ ! (LastTokenType == TOKEN_OPEN_PAREN || LastTokenType == TOKEN_OPEN_SQUARE)) { /* hold the token... */ Assert (NextToken == NULL); NextToken = newToken (); copyToken (NextToken, token, FALSE); /* ...and emit a semicolon instead */ token->type = TOKEN_SEMICOLON; token->keyword = KEYWORD_NONE; vStringClear (token->string); if (repr) vStringPut (token->string, '\n'); } #undef IS_STMT_SEPARATOR #undef IS_BINARY_OPERATOR } LastTokenType = token->type; } static void readToken (tokenInfo *const token) { readTokenFull (token, FALSE, NULL); } /* * Token parsing functions */ static void skipArgumentList (tokenInfo *const token, boolean include_newlines, vString *const repr) { int nest_level = 0; if (isType (token, TOKEN_OPEN_PAREN)) /* arguments? */ { nest_level++; if (repr) vStringPut (repr, '('); while (nest_level > 0 && ! 
isType (token, TOKEN_EOF)) { readTokenFull (token, FALSE, repr); if (isType (token, TOKEN_OPEN_PAREN)) nest_level++; else if (isType (token, TOKEN_CLOSE_PAREN)) nest_level--; } readTokenFull (token, include_newlines, NULL); } } static void skipArrayList (tokenInfo *const token, boolean include_newlines) { int nest_level = 0; /* * Handle square brackets * var name[1] * So we must check for nested open and closing square brackets */ if (isType (token, TOKEN_OPEN_SQUARE)) /* arguments? */ { nest_level++; while (nest_level > 0 && ! isType (token, TOKEN_EOF)) { readToken (token); if (isType (token, TOKEN_OPEN_SQUARE)) nest_level++; else if (isType (token, TOKEN_CLOSE_SQUARE)) nest_level--; } readTokenFull (token, include_newlines, NULL); } } static void addContext (tokenInfo* const parent, const tokenInfo* const child) { if (vStringLength (parent->string) > 0) { vStringCatS (parent->string, "."); } vStringCatS (parent->string, vStringValue(child->string)); vStringTerminate(parent->string); } static void addToScope (tokenInfo* const token, vString* const extra) { if (vStringLength (token->scope) > 0) { vStringCatS (token->scope, "."); } vStringCatS (token->scope, vStringValue(extra)); vStringTerminate(token->scope); } /* * Scanning functions */ static boolean findCmdTerm (tokenInfo *const token, boolean include_newlines, boolean include_commas) { /* * Read until we find either a semicolon or closing brace. * Any nested braces will be handled within. */ while (! isType (token, TOKEN_SEMICOLON) && ! isType (token, TOKEN_CLOSE_CURLY) && ! (include_commas && isType (token, TOKEN_COMMA)) && ! isType (token, TOKEN_EOF)) { /* Handle nested blocks */ if ( isType (token, TOKEN_OPEN_CURLY)) { parseBlock (token, token); readTokenFull (token, include_newlines, NULL); } else if ( isType (token, TOKEN_OPEN_PAREN) ) { skipArgumentList(token, include_newlines, NULL); } else if ( isType (token, TOKEN_OPEN_SQUARE) ) { skipArrayList(token, include_newlines); } else { readTokenFull (token, include_newlines, NULL); } } return isType (token, TOKEN_SEMICOLON); } static void parseSwitch (tokenInfo *const token) { /* * switch (expression) { * case value1: * statement; * break; * case value2: * statement; * break; * default : statement; * } */ readToken (token); if (isType (token, TOKEN_OPEN_PAREN)) { /* * Handle nameless functions, these will only * be considered methods. */ skipArgumentList(token, FALSE, NULL); } if (isType (token, TOKEN_OPEN_CURLY)) { parseBlock (token, token); } } static boolean parseLoop (tokenInfo *const token, tokenInfo *const parent) { /* * Handles these statements * for (x=0; x<3; x++) * document.write("This text is repeated three times<br>"); * * for (x=0; x<3; x++) * { * document.write("This text is repeated three times<br>"); * } * * while (number<5){ * document.write(number+"<br>"); * number++; * } * * do{ * document.write(number+"<br>"); * number++; * } * while (number<5); */ boolean is_terminated = TRUE; if (isKeyword (token, KEYWORD_for) || isKeyword (token, KEYWORD_while)) { readToken(token); if (isType (token, TOKEN_OPEN_PAREN)) { /* * Handle nameless functions, these will only * be considered methods. */ skipArgumentList(token, FALSE, NULL); } if (isType (token, TOKEN_OPEN_CURLY)) { /* * This will be either a function or a class. * We can only determine this by checking the body * of the function. If we find a "this." we know * it is a class, otherwise it is a function. 
*/ parseBlock (token, parent); } else { is_terminated = parseLine(token, parent, FALSE); } } else if (isKeyword (token, KEYWORD_do)) { readToken(token); if (isType (token, TOKEN_OPEN_CURLY)) { /* * This will be either a function or a class. * We can only determine this by checking the body * of the function. If we find a "this." we know * it is a class, otherwise it is a function. */ parseBlock (token, parent); } else { is_terminated = parseLine(token, parent, FALSE); } if (is_terminated) readToken(token); if (isKeyword (token, KEYWORD_while)) { readToken(token); if (isType (token, TOKEN_OPEN_PAREN)) { /* * Handle nameless functions, these will only * be considered methods. */ skipArgumentList(token, TRUE, NULL); } if (! isType (token, TOKEN_SEMICOLON)) is_terminated = FALSE; } } return is_terminated; } static boolean parseIf (tokenInfo *const token, tokenInfo *const parent) { boolean read_next_token = TRUE; /* * If statements have two forms * if ( ... ) * one line; * * if ( ... ) * statement; * else * statement * * if ( ... ) { * multiple; * statements; * } * * * if ( ... ) { * return elem * } * * This example is correctly written, but the * else contains only 1 statement without a terminator * since the function finishes with the closing brace. * * function a(flag){ * if(flag) * test(1); * else * test(2) * } * * TODO: Deal with statements that can optionally end * without a semi-colon. Currently this messes up * the parsing of blocks. * Need to somehow detect this has happened, and either * backup a token, or skip reading the next token if * that is possible from all code locations. * */ readToken (token); if (isKeyword (token, KEYWORD_if)) { /* * Check for an "else if" and consume the "if" */ readToken (token); } if (isType (token, TOKEN_OPEN_PAREN)) { /* * Handle nameless functions, these will only * be considered methods. */ skipArgumentList(token, FALSE, NULL); } if (isType (token, TOKEN_OPEN_CURLY)) { /* * This will be either a function or a class. * We can only determine this by checking the body * of the function. If we find a "this." we know * it is a class, otherwise it is a function.
*/ parseBlock (token, parent); } else { /* The next token should only be read if this statement had its own * terminator */ read_next_token = findCmdTerm (token, TRUE, FALSE); } return read_next_token; } static void parseFunction (tokenInfo *const token) { tokenInfo *const name = newToken (); vString *const signature = vStringNew (); boolean is_class = FALSE; /* * This deals with these formats * function validFunctionTwo(a,b) {} */ readToken (name); if (!isType (name, TOKEN_IDENTIFIER)) goto cleanUp; /* Add scope in case this is an INNER function */ addToScope(name, token->scope); readToken (token); while (isType (token, TOKEN_PERIOD)) { readToken (token); if ( isKeyword(token, KEYWORD_NONE) ) { addContext (name, token); readToken (token); } } if ( isType (token, TOKEN_OPEN_PAREN) ) skipArgumentList(token, FALSE, signature); if ( isType (token, TOKEN_OPEN_CURLY) ) { is_class = parseBlock (token, name); if ( is_class ) makeClassTag (name, signature); else makeFunctionTag (name, signature); } findCmdTerm (token, FALSE, FALSE); cleanUp: vStringDelete (signature); deleteToken (name); } static boolean parseBlock (tokenInfo *const token, tokenInfo *const orig_parent) { boolean is_class = FALSE; boolean read_next_token = TRUE; vString * saveScope = vStringNew (); tokenInfo *const parent = newToken (); /* backup the parent token to allow calls like parseBlock(token, token) */ copyToken (parent, orig_parent, TRUE); token->nestLevel++; /* * Make this routine a bit more forgiving. * If called on an open_curly advance it */ if ( isType (token, TOKEN_OPEN_CURLY) && isKeyword(token, KEYWORD_NONE) ) readToken(token); if (! isType (token, TOKEN_CLOSE_CURLY)) { /* * Read until we find the closing brace, * any nested braces will be handled within */ do { read_next_token = TRUE; if (isKeyword (token, KEYWORD_this)) { /* * Means we are inside a class and have found * a class, not a function */ is_class = TRUE; vStringCopy(saveScope, token->scope); addToScope (token, parent->string); /* * Ignore the remainder of the line * findCmdTerm(token); */ read_next_token = parseLine (token, parent, is_class); vStringCopy(token->scope, saveScope); } else if (isKeyword (token, KEYWORD_var) || isKeyword (token, KEYWORD_let) || isKeyword (token, KEYWORD_const)) { /* * Potentially we have found an inner function. * Set something to indicate the scope */ vStringCopy(saveScope, token->scope); addToScope (token, parent->string); read_next_token = parseLine (token, parent, is_class); vStringCopy(token->scope, saveScope); } else if (isKeyword (token, KEYWORD_function)) { vStringCopy(saveScope, token->scope); addToScope (token, parent->string); parseFunction (token); vStringCopy(token->scope, saveScope); } else if (isType (token, TOKEN_OPEN_CURLY)) { /* Handle nested blocks */ parseBlock (token, parent); } else { /* * It is possible for a line to have no terminator * if the following line is a closing brace. * parseLine will detect this case and indicate * whether we should read an additional token. */ read_next_token = parseLine (token, parent, is_class); } /* * Always read a new token unless we find a statement without * an ending terminator */ if( read_next_token ) readToken(token); /* * If we find a statement without a terminator consider the * block finished, otherwise the stack will be off by one. */ } while (! isType (token, TOKEN_EOF) && !
isType (token, TOKEN_CLOSE_CURLY) && read_next_token); } deleteToken (parent); vStringDelete(saveScope); token->nestLevel--; return is_class; } static boolean parseMethods (tokenInfo *const token, tokenInfo *const class) { tokenInfo *const name = newToken (); boolean has_methods = FALSE; /* * This deals with these formats * validProperty : 2, * validMethod : function(a,b) {} * 'validMethod2' : function(a,b) {} * container.dirtyTab = {'url': false, 'title':false, 'snapshot':false, '*': false} */ do { readToken (token); if (isType (token, TOKEN_CLOSE_CURLY)) { goto cleanUp; } if (isType (token, TOKEN_STRING) || isKeyword(token, KEYWORD_NONE)) { copyToken(name, token, TRUE); readToken (token); if ( isType (token, TOKEN_COLON) ) { readToken (token); if ( isKeyword (token, KEYWORD_function) ) { vString *const signature = vStringNew (); readToken (token); if ( isType (token, TOKEN_OPEN_PAREN) ) { skipArgumentList(token, FALSE, signature); } if (isType (token, TOKEN_OPEN_CURLY)) { has_methods = TRUE; addToScope (name, class->string); makeJsTag (name, JSTAG_METHOD, signature); parseBlock (token, name); /* * Read to the closing curly, check next * token, if a comma, we must loop again */ readToken (token); } vStringDelete (signature); } else { vString * saveScope = vStringNew (); boolean has_child_methods = FALSE; /* skip whatever is the value */ while (! isType (token, TOKEN_COMMA) && ! isType (token, TOKEN_CLOSE_CURLY) && ! isType (token, TOKEN_EOF)) { if (isType (token, TOKEN_OPEN_CURLY)) { /* Recurse to find child properties/methods */ vStringCopy (saveScope, token->scope); addToScope (token, class->string); has_child_methods = parseMethods (token, name); vStringCopy (token->scope, saveScope); readToken (token); } else if (isType (token, TOKEN_OPEN_PAREN)) { skipArgumentList (token, FALSE, NULL); } else if (isType (token, TOKEN_OPEN_SQUARE)) { skipArrayList (token, FALSE); } else { readToken (token); } } vStringDelete (saveScope); has_methods = TRUE; addToScope (name, class->string); if (has_child_methods) makeJsTag (name, JSTAG_CLASS, NULL); else makeJsTag (name, JSTAG_PROPERTY, NULL); } } } } while ( isType(token, TOKEN_COMMA) ); findCmdTerm (token, FALSE, FALSE); cleanUp: deleteToken (name); return has_methods; } static boolean parseStatement (tokenInfo *const token, tokenInfo *const parent, boolean is_inside_class) { tokenInfo *const name = newToken (); tokenInfo *const secondary_name = newToken (); tokenInfo *const method_body_token = newToken (); vString * saveScope = vStringNew (); boolean is_class = FALSE; boolean is_var = FALSE; boolean is_const = FALSE; boolean is_terminated = TRUE; boolean is_global = FALSE; boolean has_methods = FALSE; vString * fulltag; vStringClear(saveScope); /* * Functions can be named or unnamed. 
* This deals with these formats: * Function * validFunctionOne = function(a,b) {} * testlib.validFunctionFive = function(a,b) {} * var innerThree = function(a,b) {} * var innerFour = (a,b) {} * var D2 = secondary_fcn_name(a,b) {} * var D3 = new Function("a", "b", "return a+b;"); * Class * testlib.extras.ValidClassOne = function(a,b) { * this.a = a; * } * Class Methods * testlib.extras.ValidClassOne.prototype = { * 'validMethodOne' : function(a,b) {}, * 'validMethodTwo' : function(a,b) {} * } * ValidClassTwo = function () * { * this.validMethodThree = function() {} * // unnamed method * this.validMethodFour = () {} * } * Database.prototype.validMethodThree = Database_getTodaysDate; */ if ( is_inside_class ) is_class = TRUE; /* * var can precede an inner function */ if ( isKeyword(token, KEYWORD_var) || isKeyword(token, KEYWORD_let) || isKeyword(token, KEYWORD_const) ) { is_const = isKeyword(token, KEYWORD_const); /* * Only create variables for global scope */ if ( token->nestLevel == 0 ) { is_global = TRUE; } readToken(token); } nextVar: if ( isKeyword(token, KEYWORD_this) ) { readToken(token); if (isType (token, TOKEN_PERIOD)) { readToken(token); } } copyToken(name, token, TRUE); while (! isType (token, TOKEN_CLOSE_CURLY) && ! isType (token, TOKEN_SEMICOLON) && ! isType (token, TOKEN_EQUAL_SIGN) && ! isType (token, TOKEN_COMMA) && ! isType (token, TOKEN_EOF)) { if (isType (token, TOKEN_OPEN_CURLY)) parseBlock (token, parent); /* Potentially the name of the function */ readToken (token); if (isType (token, TOKEN_PERIOD)) { /* * Cannot be a global variable if it has dot references in the name */ is_global = FALSE; do { readToken (token); if ( isKeyword(token, KEYWORD_NONE) ) { if ( is_class ) { addToScope(token, name->string); } else addContext (name, token); readToken (token); } else if ( isKeyword(token, KEYWORD_prototype) ) { /* * When we reach the "prototype" tag, we infer: * "BindAgent" is a class * "build" is a method * * function BindAgent( repeatableIdName, newParentIdName ) { * } * * CASE 1 * Specified function name: "build" * BindAgent.prototype.build = function( mode ) { * maybe parse nested functions * } * * CASE 2 * Prototype listing * ValidClassOne.prototype = { * 'validMethodOne' : function(a,b) {}, * 'validMethodTwo' : function(a,b) {} * } * */ if (! ( isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING) ) ) /* * Unexpected input. Try to reset the parsing. * * TOKEN_STRING is acceptable. e.g.: * ----------------------------------- * "a".prototype = function( mode ) {} */ goto cleanUp; makeClassTag (name, NULL); is_class = TRUE; /* * There should be a ".function_name" next. */ readToken (token); if (isType (token, TOKEN_PERIOD)) { /* * Handle CASE 1 */ readToken (token); if ( isKeyword(token, KEYWORD_NONE) ) { vString *const signature = vStringNew (); vStringCopy(saveScope, token->scope); addToScope(token, name->string); readToken (method_body_token); vStringCopy (method_body_token->scope, token->scope); while (! isType (method_body_token, TOKEN_SEMICOLON) && ! isType (method_body_token, TOKEN_CLOSE_CURLY) && ! isType (method_body_token, TOKEN_OPEN_CURLY) && ! isType (method_body_token, TOKEN_EOF)) { if ( isType (method_body_token, TOKEN_OPEN_PAREN) ) skipArgumentList(method_body_token, FALSE, vStringLength (signature) == 0 ?
signature : NULL); else readToken (method_body_token); } makeJsTag (token, JSTAG_METHOD, signature); vStringDelete (signature); if ( isType (method_body_token, TOKEN_OPEN_CURLY)) { parseBlock (method_body_token, token); is_terminated = TRUE; } else is_terminated = isType (method_body_token, TOKEN_SEMICOLON); goto cleanUp; } } else if (isType (token, TOKEN_EQUAL_SIGN)) { readToken (token); if (isType (token, TOKEN_OPEN_CURLY)) { /* * Handle CASE 2 * * Creates tags for each of these class methods * ValidClassOne.prototype = { * 'validMethodOne' : function(a,b) {}, * 'validMethodTwo' : function(a,b) {} * } */ parseMethods(token, name); /* * Find to the end of the statement */ findCmdTerm (token, FALSE, FALSE); token->ignoreTag = FALSE; is_terminated = TRUE; goto cleanUp; } } } else readToken (token); } while (isType (token, TOKEN_PERIOD)); } if ( isType (token, TOKEN_OPEN_PAREN) ) skipArgumentList(token, FALSE, NULL); if ( isType (token, TOKEN_OPEN_SQUARE) ) skipArrayList(token, FALSE); /* if ( isType (token, TOKEN_OPEN_CURLY) ) { is_class = parseBlock (token, name); } */ } if ( isType (token, TOKEN_CLOSE_CURLY) ) { /* * Reaching this section without having * processed an open curly brace indicates * the statement is most likely not terminated. */ is_terminated = FALSE; goto cleanUp; } if ( isType (token, TOKEN_SEMICOLON) || isType (token, TOKEN_EOF) || isType (token, TOKEN_COMMA) ) { /* * Only create variables for global scope */ if ( token->nestLevel == 0 && is_global ) { /* * Handles this syntax: * var g_var2; */ makeJsTag (name, is_const ? JSTAG_CONSTANT : JSTAG_VARIABLE, NULL); } /* * Statement has ended. * This deals with calls to functions, like: * alert(..); */ if (isType (token, TOKEN_COMMA)) { readToken (token); goto nextVar; } goto cleanUp; } if ( isType (token, TOKEN_EQUAL_SIGN) ) { int parenDepth = 0; readToken (token); /* rvalue might be surrounded with parentheses */ while (isType (token, TOKEN_OPEN_PAREN)) { parenDepth++; readToken (token); } if ( isKeyword (token, KEYWORD_function) ) { vString *const signature = vStringNew (); readToken (token); if ( isKeyword (token, KEYWORD_NONE) && ! isType (token, TOKEN_OPEN_PAREN) ) { /* * Functions of this format: * var D2A = function theAdd(a, b) * { * return a+b; * } * Are really two separate defined functions and * can be referenced in two ways: * alert( D2A(1,2) ); // produces 3 * alert( theAdd(1,2) ); // also produces 3 * So it must have two tags: * D2A * theAdd * Save the reference to the name for later use, once * we have established this is a valid function we will * create the secondary reference to it. */ copyToken(secondary_name, token, TRUE); readToken (token); } if ( isType (token, TOKEN_OPEN_PAREN) ) skipArgumentList(token, FALSE, signature); if (isType (token, TOKEN_OPEN_CURLY)) { /* * This will be either a function or a class. * We can only determine this by checking the body * of the function. If we find a "this." we know * it is a class, otherwise it is a function. */ if ( is_inside_class ) { makeJsTag (name, JSTAG_METHOD, signature); if ( vStringLength(secondary_name->string) > 0 ) makeFunctionTag (secondary_name, signature); parseBlock (token, name); } else { if (! ( isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING) ) ) { /* Unexpected input. Try to reset the parsing. 
*/ vStringDelete (signature); goto cleanUp; } is_class = parseBlock (token, name); if ( is_class ) makeClassTag (name, signature); else makeFunctionTag (name, signature); if ( vStringLength(secondary_name->string) > 0 ) makeFunctionTag (secondary_name, signature); } } vStringDelete (signature); } else if (isType (token, TOKEN_OPEN_CURLY)) { /* * Creates tags for each of these class methods * ValidClassOne.prototype = { * 'validMethodOne' : function(a,b) {}, * 'validMethodTwo' : function(a,b) {} * } * Or checks if this is a hash variable. * var z = {}; */ has_methods = parseMethods(token, name); if (has_methods) makeJsTag (name, JSTAG_CLASS, NULL); else { /* * Only create variables for global scope */ if ( token->nestLevel == 0 && is_global ) { /* * A pointer can be created to the function. * If we recognize the function/class name ignore the variable. * This format looks identical to a variable definition. * A variable defined outside of a block is considered * a global variable: * var g_var1 = 1; * var g_var2; * This is not a global variable: * var g_var = function; * This is a global variable: * var g_var = different_var_name; */ fulltag = vStringNew (); if (vStringLength (token->scope) > 0) { vStringCopy(fulltag, token->scope); vStringCatS (fulltag, "."); vStringCatS (fulltag, vStringValue(token->string)); } else { vStringCopy(fulltag, token->string); } vStringTerminate(fulltag); if ( ! stringListHas(FunctionNames, vStringValue (fulltag)) && ! stringListHas(ClassNames, vStringValue (fulltag)) ) { makeJsTag (name, is_const ? JSTAG_CONSTANT : JSTAG_VARIABLE, NULL); } vStringDelete (fulltag); } } if (isType (token, TOKEN_CLOSE_CURLY)) { /* * Assume the closing parentheses terminates * this statements. */ is_terminated = TRUE; } } else if (isKeyword (token, KEYWORD_new)) { readToken (token); is_var = isType (token, TOKEN_IDENTIFIER); if ( isKeyword (token, KEYWORD_function) || isKeyword (token, KEYWORD_capital_function) || isKeyword (token, KEYWORD_capital_object) || is_var ) { if ( isKeyword (token, KEYWORD_capital_object) ) is_class = TRUE; readToken (token); if ( isType (token, TOKEN_OPEN_PAREN) ) skipArgumentList(token, TRUE, NULL); if (isType (token, TOKEN_SEMICOLON)) { if ( token->nestLevel == 0 ) { if ( is_var ) { makeJsTag (name, is_const ? JSTAG_CONSTANT : JSTAG_VARIABLE, NULL); } else { if ( is_class ) { makeClassTag (name, NULL); } else { /* FIXME: we cannot really get a meaningful * signature from a `new Function()` call, * so for now just don't set any */ makeFunctionTag (name, NULL); } } } } else if (isType (token, TOKEN_CLOSE_CURLY)) is_terminated = FALSE; } } else if (isKeyword (token, KEYWORD_NONE)) { /* * Only create variables for global scope */ if ( token->nestLevel == 0 && is_global ) { /* * A pointer can be created to the function. * If we recognize the function/class name ignore the variable. * This format looks identical to a variable definition. * A variable defined outside of a block is considered * a global variable: * var g_var1 = 1; * var g_var2; * This is not a global variable: * var g_var = function; * This is a global variable: * var g_var = different_var_name; */ fulltag = vStringNew (); if (vStringLength (token->scope) > 0) { vStringCopy(fulltag, token->scope); vStringCatS (fulltag, "."); vStringCatS (fulltag, vStringValue(token->string)); } else { vStringCopy(fulltag, token->string); } vStringTerminate(fulltag); if ( ! stringListHas(FunctionNames, vStringValue (fulltag)) && ! stringListHas(ClassNames, vStringValue (fulltag)) ) { makeJsTag (name, is_const ? 
JSTAG_CONSTANT : JSTAG_VARIABLE, NULL); } vStringDelete (fulltag); } } if (parenDepth > 0) { while (parenDepth > 0 && ! isType (token, TOKEN_EOF)) { if (isType (token, TOKEN_OPEN_PAREN)) parenDepth++; else if (isType (token, TOKEN_CLOSE_PAREN)) parenDepth--; readTokenFull (token, TRUE, NULL); } if (isType (token, TOKEN_CLOSE_CURLY)) is_terminated = FALSE; } } /* if we aren't already at the cmd end, advance to it and check whether * the statement was terminated */ if (! isType (token, TOKEN_CLOSE_CURLY) && ! isType (token, TOKEN_SEMICOLON)) { /* * Statements can be optionally terminated in the case of * statement prior to a close curly brace as in the * document.write line below: * * function checkForUpdate() { * if( 1==1 ) { * document.write("hello from checkForUpdate<br>") * } * return 1; * } */ is_terminated = findCmdTerm (token, TRUE, TRUE); /* if we're at a comma, try and read a second var */ if (isType (token, TOKEN_COMMA)) { readToken (token); goto nextVar; } } cleanUp: vStringCopy(token->scope, saveScope); deleteToken (name); deleteToken (secondary_name); deleteToken (method_body_token); vStringDelete(saveScope); return is_terminated; } static void parseUI5 (tokenInfo *const token) { tokenInfo *const name = newToken (); /* * SAPUI5 is built on top of jQuery. * It follows a standard format: * sap.ui.controller("id.of.controller", { * method_name : function... { * }, * * method_name : function ... { * } * } * * Handle the parsing of the initial controller (and the * same for "view") and then allow the methods to be * parsed as usual. */ readToken (token); if (isType (token, TOKEN_PERIOD)) { readToken (token); while (! isType (token, TOKEN_OPEN_PAREN) && ! isType (token, TOKEN_EOF)) { readToken (token); } readToken (token); if (isType (token, TOKEN_STRING)) { copyToken(name, token, TRUE); readToken (token); } if (isType (token, TOKEN_COMMA)) readToken (token); do { parseMethods (token, name); } while (! isType (token, TOKEN_CLOSE_CURLY) && ! isType (token, TOKEN_EOF)); } deleteToken (name); } static boolean parseLine (tokenInfo *const token, tokenInfo *const parent, boolean is_inside_class) { boolean is_terminated = TRUE; /* * Detect the common statements, if, while, for, do, ... * This is necessary since the last statement within a block "{}" * can be optionally terminated. * * If the statement is not terminated, we need to tell * the calling routine to prevent reading an additional token * looking for the end of the statement. */ if (isType(token, TOKEN_KEYWORD)) { switch (token->keyword) { case KEYWORD_for: case KEYWORD_while: case KEYWORD_do: is_terminated = parseLoop (token, parent); break; case KEYWORD_if: case KEYWORD_else: case KEYWORD_try: case KEYWORD_catch: case KEYWORD_finally: /* Common semantics */ is_terminated = parseIf (token, parent); break; case KEYWORD_switch: parseSwitch (token); break; case KEYWORD_return: is_terminated = findCmdTerm (token, TRUE, FALSE); break; default: is_terminated = parseStatement (token, parent, is_inside_class); break; } } else { /* * Special case where single line statements may not be * SEMICOLON terminated. parseBlock needs to know this * so that it does not read the next token. 
*/ is_terminated = parseStatement (token, parent, is_inside_class); } return is_terminated; } static void parseJsFile (tokenInfo *const token) { do { readToken (token); if (isType (token, TOKEN_KEYWORD) && token->keyword == KEYWORD_function) parseFunction (token); else if (isType (token, TOKEN_KEYWORD) && token->keyword == KEYWORD_sap) parseUI5 (token); else parseLine (token, token, FALSE); } while (! isType (token, TOKEN_EOF)); } static void initialize (const langType language) { Assert (ARRAY_SIZE (JsKinds) == JSTAG_COUNT); Lang_js = language; } static void findJsTags (void) { tokenInfo *const token = newToken (); NextToken = NULL; ClassNames = stringListNew (); FunctionNames = stringListNew (); LastTokenType = TOKEN_UNDEFINED; parseJsFile (token); stringListDelete (ClassNames); stringListDelete (FunctionNames); ClassNames = NULL; FunctionNames = NULL; deleteToken (token); Assert (NextToken == NULL); } /* Create parser definition structure */ extern parserDefinition* JavaScriptParser (void) { static const char *const extensions [] = { "js", NULL }; static const char *const aliases [] = { "js", "node", "nodejs", "seed", "gjs", NULL }; parserDefinition *const def = parserNew ("JavaScript"); def->extensions = extensions; def->aliases = aliases; /* * New definitions for parsing instead of regex */ def->kinds = JsKinds; def->kindCount = ARRAY_SIZE (JsKinds); def->parser = findJsTags; def->initialize = initialize; def->keywordTable = JsKeywordTable; def->keywordCount = ARRAY_SIZE (JsKeywordTable); return def; } /* vi:set tabstop=4 shiftwidth=4 noexpandtab: */
1
13,945
Not handling the `repr` case makes the behavior probably a little too unpredictable. Is there a reason not to handle it, apart from missing stuff in the `repr`? Does it lead to some problem?
universal-ctags-ctags
c
@@ -93,12 +93,13 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http
                             BadHttpRequestException.Throw(RequestRejectionReason.UnexpectedEndOfRequestContent);
                         }

-                        awaitable = _context.Input.ReadAsync();
                     }
                     finally
                     {
                         _context.Input.AdvanceTo(consumed, examined);
                     }
+
+                    awaitable = _context.Input.ReadAsync();
                 }
             }
             catch (Exception ex)
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Buffers; using System.Collections; using System.IO; using System.IO.Pipelines; using System.Threading.Tasks; using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.Protocols.Abstractions; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure; namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http { public abstract class Http1MessageBody : MessageBody { private readonly Http1Connection _context; private volatile bool _canceled; private Task _pumpTask; protected Http1MessageBody(Http1Connection context) : base(context) { _context = context; } private async Task PumpAsync() { Exception error = null; try { var awaitable = _context.Input.ReadAsync(); if (!awaitable.IsCompleted) { TryProduceContinue(); } TryStartTimingReads(); while (true) { var result = await awaitable; if (_context.RequestTimedOut) { BadHttpRequestException.Throw(RequestRejectionReason.RequestBodyTimeout); } var readableBuffer = result.Buffer; var consumed = readableBuffer.Start; var examined = readableBuffer.End; try { if (_canceled) { break; } if (!readableBuffer.IsEmpty) { bool done; done = Read(readableBuffer, _context.RequestBodyPipe.Writer, out consumed, out examined); var writeAwaitable = _context.RequestBodyPipe.Writer.FlushAsync(); var backpressure = false; if (!writeAwaitable.IsCompleted) { // Backpressure, stop controlling incoming data rate until data is read. backpressure = true; TryPauseTimingReads(); } await writeAwaitable; if (backpressure) { TryResumeTimingReads(); } if (done) { break; } } else if (result.IsCompleted) { BadHttpRequestException.Throw(RequestRejectionReason.UnexpectedEndOfRequestContent); } awaitable = _context.Input.ReadAsync(); } finally { _context.Input.AdvanceTo(consumed, examined); } } } catch (Exception ex) { error = ex; } finally { _context.RequestBodyPipe.Writer.Complete(error); TryStopTimingReads(); } } public override Task StopAsync() { if (!_context.HasStartedConsumingRequestBody) { return Task.CompletedTask; } _canceled = true; _context.Input.CancelPendingRead(); return _pumpTask; } protected override async Task OnConsumeAsync() { _context.TimeoutControl.SetTimeout(Constants.RequestBodyDrainTimeout.Ticks, TimeoutAction.SendTimeoutResponse); try { ReadResult result; do { result = await _context.RequestBodyPipe.Reader.ReadAsync(); _context.RequestBodyPipe.Reader.AdvanceTo(result.Buffer.End); } while (!result.IsCompleted); } finally { _context.TimeoutControl.CancelTimeout(); } } protected void Copy(ReadOnlySequence<byte> readableBuffer, PipeWriter writableBuffer) { _context.TimeoutControl.BytesRead(readableBuffer.Length); if (readableBuffer.IsSingleSegment) { writableBuffer.Write(readableBuffer.First.Span); } else { foreach (var memory in readableBuffer) { writableBuffer.Write(memory.Span); } } } protected override void OnReadStarted() { _pumpTask = PumpAsync(); } protected virtual bool Read(ReadOnlySequence<byte> readableBuffer, PipeWriter writableBuffer, out SequencePosition consumed, out SequencePosition examined) { throw new NotImplementedException(); } private void TryStartTimingReads() { if (!RequestUpgrade) { Log.RequestBodyStart(_context.ConnectionIdFeature, _context.TraceIdentifier); _context.TimeoutControl.StartTimingReads(); } } private void TryPauseTimingReads() { if (!RequestUpgrade) { _context.TimeoutControl.PauseTimingReads(); } } private void 
TryResumeTimingReads() { if (!RequestUpgrade) { _context.TimeoutControl.ResumeTimingReads(); } } private void TryStopTimingReads() { if (!RequestUpgrade) { Log.RequestBodyDone(_context.ConnectionIdFeature, _context.TraceIdentifier); _context.TimeoutControl.StopTimingReads(); } } public static MessageBody For( HttpVersion httpVersion, HttpRequestHeaders headers, Http1Connection context) { // see also http://tools.ietf.org/html/rfc2616#section-4.4 var keepAlive = httpVersion != HttpVersion.Http10; var connection = headers.HeaderConnection; var upgrade = false; if (connection.Count > 0) { var connectionOptions = HttpHeaders.ParseConnection(connection); upgrade = (connectionOptions & ConnectionOptions.Upgrade) == ConnectionOptions.Upgrade; keepAlive = (connectionOptions & ConnectionOptions.KeepAlive) == ConnectionOptions.KeepAlive; } if (upgrade) { if (headers.HeaderTransferEncoding.Count > 0 || (headers.ContentLength.HasValue && headers.ContentLength.Value != 0)) { BadHttpRequestException.Throw(RequestRejectionReason.UpgradeRequestCannotHavePayload); } return new ForUpgrade(context); } var transferEncoding = headers.HeaderTransferEncoding; if (transferEncoding.Count > 0) { var transferCoding = HttpHeaders.GetFinalTransferCoding(headers.HeaderTransferEncoding); // https://tools.ietf.org/html/rfc7230#section-3.3.3 // If a Transfer-Encoding header field // is present in a request and the chunked transfer coding is not // the final encoding, the message body length cannot be determined // reliably; the server MUST respond with the 400 (Bad Request) // status code and then close the connection. if (transferCoding != TransferCoding.Chunked) { BadHttpRequestException.Throw(RequestRejectionReason.FinalTransferCodingNotChunked, in transferEncoding); } return new ForChunkedEncoding(keepAlive, context); } if (headers.ContentLength.HasValue) { var contentLength = headers.ContentLength.Value; if (contentLength == 0) { return keepAlive ? MessageBody.ZeroContentLengthKeepAlive : MessageBody.ZeroContentLengthClose; } return new ForContentLength(keepAlive, contentLength, context); } // Avoid slowing down most common case if (!object.ReferenceEquals(context.Method, HttpMethods.Get)) { // If we got here, request contains no Content-Length or Transfer-Encoding header. // Reject with 411 Length Required. if (context.Method == HttpMethod.Post || context.Method == HttpMethod.Put) { var requestRejectionReason = httpVersion == HttpVersion.Http11 ? RequestRejectionReason.LengthRequired : RequestRejectionReason.LengthRequiredHttp10; BadHttpRequestException.Throw(requestRejectionReason, context.Method); } } return keepAlive ? 
MessageBody.ZeroContentLengthKeepAlive : MessageBody.ZeroContentLengthClose; } private class ForUpgrade : Http1MessageBody { public ForUpgrade(Http1Connection context) : base(context) { RequestUpgrade = true; } public override bool IsEmpty => true; protected override bool Read(ReadOnlySequence<byte> readableBuffer, PipeWriter writableBuffer, out SequencePosition consumed, out SequencePosition examined) { Copy(readableBuffer, writableBuffer); consumed = readableBuffer.End; examined = readableBuffer.End; return false; } } private class ForContentLength : Http1MessageBody { private readonly long _contentLength; private long _inputLength; public ForContentLength(bool keepAlive, long contentLength, Http1Connection context) : base(context) { RequestKeepAlive = keepAlive; _contentLength = contentLength; _inputLength = _contentLength; } protected override bool Read(ReadOnlySequence<byte> readableBuffer, PipeWriter writableBuffer, out SequencePosition consumed, out SequencePosition examined) { if (_inputLength == 0) { throw new InvalidOperationException("Attempted to read from completed Content-Length request body."); } var actual = (int)Math.Min(readableBuffer.Length, _inputLength); _inputLength -= actual; consumed = readableBuffer.GetPosition(readableBuffer.Start, actual); examined = consumed; Copy(readableBuffer.Slice(0, actual), writableBuffer); return _inputLength == 0; } protected override void OnReadStarting() { if (_contentLength > _context.MaxRequestBodySize) { BadHttpRequestException.Throw(RequestRejectionReason.RequestBodyTooLarge); } } } /// <summary> /// http://tools.ietf.org/html/rfc2616#section-3.6.1 /// </summary> private class ForChunkedEncoding : Http1MessageBody { // byte consts don't have a data type annotation so we pre-cast it private const byte ByteCR = (byte)'\r'; // "7FFFFFFF\r\n" is the largest chunk size that could be returned as an int. private const int MaxChunkPrefixBytes = 10; private long _inputLength; private long _consumedBytes; private Mode _mode = Mode.Prefix; public ForChunkedEncoding(bool keepAlive, Http1Connection context) : base(context) { RequestKeepAlive = keepAlive; } protected override bool Read(ReadOnlySequence<byte> readableBuffer, PipeWriter writableBuffer, out SequencePosition consumed, out SequencePosition examined) { consumed = default(SequencePosition); examined = default(SequencePosition); while (_mode < Mode.Trailer) { if (_mode == Mode.Prefix) { ParseChunkedPrefix(readableBuffer, out consumed, out examined); if (_mode == Mode.Prefix) { return false; } readableBuffer = readableBuffer.Slice(consumed); } if (_mode == Mode.Extension) { ParseExtension(readableBuffer, out consumed, out examined); if (_mode == Mode.Extension) { return false; } readableBuffer = readableBuffer.Slice(consumed); } if (_mode == Mode.Data) { ReadChunkedData(readableBuffer, writableBuffer, out consumed, out examined); if (_mode == Mode.Data) { return false; } readableBuffer = readableBuffer.Slice(consumed); } if (_mode == Mode.Suffix) { ParseChunkedSuffix(readableBuffer, out consumed, out examined); if (_mode == Mode.Suffix) { return false; } readableBuffer = readableBuffer.Slice(consumed); } } // Chunks finished, parse trailers if (_mode == Mode.Trailer) { ParseChunkedTrailer(readableBuffer, out consumed, out examined); if (_mode == Mode.Trailer) { return false; } readableBuffer = readableBuffer.Slice(consumed); } // _consumedBytes aren't tracked for trailer headers, since headers have seperate limits. 
if (_mode == Mode.TrailerHeaders) { if (_context.TakeMessageHeaders(readableBuffer, out consumed, out examined)) { _mode = Mode.Complete; } } return _mode == Mode.Complete; } private void AddAndCheckConsumedBytes(long consumedBytes) { _consumedBytes += consumedBytes; if (_consumedBytes > _context.MaxRequestBodySize) { BadHttpRequestException.Throw(RequestRejectionReason.RequestBodyTooLarge); } } private void ParseChunkedPrefix(ReadOnlySequence<byte> buffer, out SequencePosition consumed, out SequencePosition examined) { consumed = buffer.Start; examined = buffer.Start; var reader = new BufferReader(buffer); var ch1 = reader.Read(); var ch2 = reader.Read(); if (ch1 == -1 || ch2 == -1) { examined = reader.Position; return; } var chunkSize = CalculateChunkSize(ch1, 0); ch1 = ch2; while (reader.ConsumedBytes < MaxChunkPrefixBytes) { if (ch1 == ';') { consumed = reader.Position; examined = reader.Position; AddAndCheckConsumedBytes(reader.ConsumedBytes); _inputLength = chunkSize; _mode = Mode.Extension; return; } ch2 = reader.Read(); if (ch2 == -1) { examined = reader.Position; return; } if (ch1 == '\r' && ch2 == '\n') { consumed = reader.Position; examined = reader.Position; AddAndCheckConsumedBytes(reader.ConsumedBytes); _inputLength = chunkSize; _mode = chunkSize > 0 ? Mode.Data : Mode.Trailer; return; } chunkSize = CalculateChunkSize(ch1, chunkSize); ch1 = ch2; } // At this point, 10 bytes have been consumed which is enough to parse the max value "7FFFFFFF\r\n". BadHttpRequestException.Throw(RequestRejectionReason.BadChunkSizeData); } private void ParseExtension(ReadOnlySequence<byte> buffer, out SequencePosition consumed, out SequencePosition examined) { // Chunk-extensions not currently parsed // Just drain the data consumed = buffer.Start; examined = buffer.Start; do { SequencePosition? extensionCursorPosition = buffer.PositionOf(ByteCR); if (extensionCursorPosition == null) { // End marker not found yet consumed = buffer.End; examined = buffer.End; AddAndCheckConsumedBytes(buffer.Length); return; }; var extensionCursor = extensionCursorPosition.Value; var charsToByteCRExclusive = buffer.Slice(0, extensionCursor).Length; var sufixBuffer = buffer.Slice(extensionCursor); if (sufixBuffer.Length < 2) { consumed = extensionCursor; examined = buffer.End; AddAndCheckConsumedBytes(charsToByteCRExclusive); return; } sufixBuffer = sufixBuffer.Slice(0, 2); var sufixSpan = sufixBuffer.ToSpan(); if (sufixSpan[1] == '\n') { // We consumed the \r\n at the end of the extension, so switch modes. _mode = _inputLength > 0 ? Mode.Data : Mode.Trailer; consumed = sufixBuffer.End; examined = sufixBuffer.End; AddAndCheckConsumedBytes(charsToByteCRExclusive + 2); } else { // Don't consume suffixSpan[1] in case it is also a \r. 
buffer = buffer.Slice(charsToByteCRExclusive + 1); consumed = extensionCursor; AddAndCheckConsumedBytes(charsToByteCRExclusive + 1); } } while (_mode == Mode.Extension); } private void ReadChunkedData(ReadOnlySequence<byte> buffer, PipeWriter writableBuffer, out SequencePosition consumed, out SequencePosition examined) { var actual = Math.Min(buffer.Length, _inputLength); consumed = buffer.GetPosition(buffer.Start, actual); examined = consumed; Copy(buffer.Slice(0, actual), writableBuffer); _inputLength -= actual; AddAndCheckConsumedBytes(actual); if (_inputLength == 0) { _mode = Mode.Suffix; } } private void ParseChunkedSuffix(ReadOnlySequence<byte> buffer, out SequencePosition consumed, out SequencePosition examined) { consumed = buffer.Start; examined = buffer.Start; if (buffer.Length < 2) { examined = buffer.End; return; } var suffixBuffer = buffer.Slice(0, 2); var suffixSpan = suffixBuffer.ToSpan(); if (suffixSpan[0] == '\r' && suffixSpan[1] == '\n') { consumed = suffixBuffer.End; examined = suffixBuffer.End; AddAndCheckConsumedBytes(2); _mode = Mode.Prefix; } else { BadHttpRequestException.Throw(RequestRejectionReason.BadChunkSuffix); } } private void ParseChunkedTrailer(ReadOnlySequence<byte> buffer, out SequencePosition consumed, out SequencePosition examined) { consumed = buffer.Start; examined = buffer.Start; if (buffer.Length < 2) { examined = buffer.End; return; } var trailerBuffer = buffer.Slice(0, 2); var trailerSpan = trailerBuffer.ToSpan(); if (trailerSpan[0] == '\r' && trailerSpan[1] == '\n') { consumed = trailerBuffer.End; examined = trailerBuffer.End; AddAndCheckConsumedBytes(2); _mode = Mode.Complete; } else { _mode = Mode.TrailerHeaders; } } private int CalculateChunkSize(int extraHexDigit, int currentParsedSize) { try { checked { if (extraHexDigit >= '0' && extraHexDigit <= '9') { return currentParsedSize * 0x10 + (extraHexDigit - '0'); } else if (extraHexDigit >= 'A' && extraHexDigit <= 'F') { return currentParsedSize * 0x10 + (extraHexDigit - ('A' - 10)); } else if (extraHexDigit >= 'a' && extraHexDigit <= 'f') { return currentParsedSize * 0x10 + (extraHexDigit - ('a' - 10)); } } } catch (OverflowException ex) { throw new IOException(CoreStrings.BadRequest_BadChunkSizeData, ex); } BadHttpRequestException.Throw(RequestRejectionReason.BadChunkSizeData); return -1; // can't happen, but compiler complains } private enum Mode { Prefix, Extension, Data, Suffix, Trailer, TrailerHeaders, Complete }; } } }
1
15,082
Remove the finally?
aspnet-KestrelHttpServer
.cs
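The Kestrel hunk in this row moves the next `Input.ReadAsync()` call out of the `try` block so that it runs only after `AdvanceTo` has executed in the `finally`. Below is a minimal sketch of that ordering, assuming the standard System.IO.Pipelines contract (each `ReadAsync` must be balanced by an `AdvanceTo` before the next read is issued); this is an illustration, not Kestrel's actual pump loop:

using System.IO.Pipelines;
using System.Threading.Tasks;

static class ReadLoopSketch
{
    public static async Task DrainAsync(PipeReader reader)
    {
        while (true)
        {
            // The PipeReader contract: each ReadAsync must be balanced by an
            // AdvanceTo before the next ReadAsync is issued, so the read sits
            // at the top of the loop, after the finally below has run.
            ReadResult result = await reader.ReadAsync();
            var buffer = result.Buffer;
            try
            {
                // Consume 'buffer' here; this can throw, which is why the
                // release below lives in a finally.
                if (result.IsCompleted)
                {
                    break;
                }
            }
            finally
            {
                // Hand the buffer back even on exceptions; only after this
                // call may ReadAsync legally be issued again.
                reader.AdvanceTo(buffer.End);
            }
        }
        reader.Complete();
    }
}

Keeping the `AdvanceTo` in a `finally` still matters for the exception path, which is presumably why the patch moves only the `ReadAsync` rather than dropping the `finally` outright, as the review question contemplates.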
@@ -378,7 +378,7 @@ reboot_system()
  * and then checking error codes; but the problem there is that C:\\
  * returns PATH_NOT_FOUND regardless. */
 bool
-file_exists(const TCHAR *fn)
+file_exists(const WCHAR *fn)
 {
 #ifdef WINDOWS
     WIN32_FIND_DATA fd;
1
/* ********************************************************** * Copyright (c) 2011-2017 Google, Inc. All rights reserved. * Copyright (c) 2005-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ #include "share.h" #include <stdio.h> #include <string.h> #include <ctype.h> #ifdef WINDOWS # include "config.h" # include "elm.h" # include "events.h" /* for canary */ # include "processes.h" /* for canary */ # include "options.h" /* for option checking */ # include "ntdll_types.h" /* for NT_SUCCESS */ # include <io.h> /* for canary */ # include <Fcntl.h> /* for canary */ # include <aclapi.h> #else # include <sys/stat.h> #endif #ifndef UNIT_TEST #ifdef WINDOWS # ifdef DEBUG int debuglevel = DL_FATAL; int abortlevel = DL_FATAL; void set_debuglevel(int level) { debuglevel = level; } void set_abortlevel(int level) { abortlevel = level; } #define CONFIG_MAX 8192 #define HEADER_SNIPPET(defsfile) \ "POLICY_VERSION=30000\n" \ "BEGIN_BLOCK\n" \ "GLOBAL\n" \ "DYNAMORIO_OPTIONS=\n" \ "DYNAMORIO_RUNUNDER=1\n" \ "DYNAMORIO_AUTOINJECT=\\lib\\dynamorio.dll\n" \ "DYNAMORIO_HOT_PATCH_POLICIES=" defsfile "\n" \ "DYNAMORIO_UNSUPPORTED=\n" \ "END_BLOCK\n" DWORD load_test_config(const char *snippet, BOOL use_hotpatch_defs) { char buf[CONFIG_MAX]; _snprintf(buf, CONFIG_MAX, "%s%s", use_hotpatch_defs ? 
HEADER_SNIPPET("\\conf") : HEADER_SNIPPET(""), snippet); NULL_TERMINATE_BUFFER(buf); DO_ASSERT(strlen(buf) < CONFIG_MAX - 2); DO_DEBUG(DL_VERB, printf("importing %s\n", buf); ); CHECKED_OPERATION(policy_import(buf, FALSE, NULL, NULL)); return ERROR_SUCCESS; } void get_testdir(WCHAR *buf, UINT maxchars) { WCHAR *filePart; WCHAR tmp[MAX_PATH]; DWORD len; len = GetEnvironmentVariable(L"DYNAMORIO_WINDIR", tmp, maxchars); DO_ASSERT(len < maxchars); if (len == 0) { len = GetEnvironmentVariable(L_DYNAMORIO_VAR_HOME, tmp, maxchars); DO_ASSERT(len < maxchars); /* check for cygwin paths on windows */ if (!file_exists(buf)) len = 0; DO_DEBUG(DL_INFO, printf("ignoring invalid-looking DYNAMORIO_HOME=%S\n", tmp); ); } if (len == 0) { wcsncpy(tmp, L"..", MAX_PATH); } len = GetFullPathName(tmp, maxchars, buf, &filePart); DO_DEBUG(DL_INFO, printf("using drhome: %S\n", buf); ); DO_ASSERT(len != 0); return; } void error_cb(unsigned int errcode, WCHAR *message) { if (errcode || !errcode || message) { DO_ASSERT(0); } } extern BOOL do_once; typedef struct evthelp__ { DWORD type; WCHAR *exename; ULONG pid; WCHAR *s3; WCHAR *s4; UINT maxchars; BOOL found; } evthelp; evthelp *cb_eh = NULL; int last_record = -1; void check_event_cb(EVENTLOGRECORD *record) { const WCHAR *strings; if (cb_eh->found) return; last_record = record->RecordNumber; if (record->EventID == cb_eh->type) { if (cb_eh->exename != NULL && 0 != wcscmp(get_event_exename(record), cb_eh->exename)) return; if (cb_eh->pid != 0 && cb_eh->pid != get_event_pid(record)) return; strings = get_message_strings(record); strings = next_message_string(strings); strings = next_message_string(strings); if (cb_eh->s3 != NULL) { cb_eh->s3[0] = L'\0'; if (strings != NULL) { wcsncpy(cb_eh->s3, strings, cb_eh->maxchars); cb_eh->s3[cb_eh->maxchars-1] = L'\0'; } } if (cb_eh->s4 != NULL) { cb_eh->s4[0] = L'\0'; strings = next_message_string(strings); if (strings != NULL) { wcsncpy(cb_eh->s4, strings, cb_eh->maxchars); cb_eh->s4[cb_eh->maxchars-1] = L'\0'; } } cb_eh->found = TRUE; } } void reset_last_event() { last_record = -1; } /* checks for events matching type, exename (if not null), and pid (if * not 0). fills in s3 and s4 with 3rd and 4th message strings of * match, if not null. * next search will start with event after matched event. 
*/ BOOL check_for_event(DWORD type, WCHAR *exename, ULONG pid, WCHAR *s3, WCHAR *s4, UINT maxchars) { evthelp eh; eh.type = type; eh.exename = exename; eh.pid = pid; eh.s3 = s3; eh.s4 = s4; eh.maxchars = maxchars; eh.found = FALSE; cb_eh = &eh; /* backdoor */ do_once = TRUE; CHECKED_OPERATION(start_eventlog_monitor(FALSE, NULL, check_event_cb, error_cb, last_record)); DO_ASSERT(WAIT_OBJECT_0 == WaitForSingleObject(get_eventlog_monitor_thread_handle(), 10000)); stop_eventlog_monitor(); return eh.found; } FILE *event_list_fp; void show_event_cb(unsigned int mID, unsigned int type, WCHAR *message, DWORD timestamp) { /* fool the compiler */ DO_ASSERT(type == 0 || type != 0 || timestamp == 0); fprintf(event_list_fp, " Event %d: %S\n", mID, message); } void show_all_events(FILE *fp) { DO_ASSERT (fp != NULL); event_list_fp = fp; /* backdoor */ do_once = TRUE; CHECKED_OPERATION(start_eventlog_monitor(TRUE, show_event_cb, NULL, error_cb, (DWORD) -1)); DO_ASSERT(WAIT_OBJECT_0 == WaitForSingleObject(get_eventlog_monitor_thread_handle(), 10000)); stop_eventlog_monitor(); return; } # endif /* _DEBUG */ void wcstolower(WCHAR *str) { UINT i; for (i=0; i < wcslen(str); i++) str[i] = towlower(str[i]); } WCHAR * get_exename_from_path(const WCHAR *path) { WCHAR *name = wcsrchr(path, L'\\'); if (name == NULL) name = (WCHAR *) path; else name += 1; return name; } DWORD acquire_shutdown_privilege() { HANDLE hToken = NULL; TOKEN_PRIVILEGES Priv; // get current thread token if (!OpenThreadToken(GetCurrentThread(), TOKEN_QUERY|TOKEN_ADJUST_PRIVILEGES, FALSE, &hToken)) { // can't get thread token, try process token instead if(!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY|TOKEN_ADJUST_PRIVILEGES, &hToken)) { return GetLastError(); } } Priv.PrivilegeCount = 1; Priv.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; LookupPrivilegeValue(NULL, SE_SHUTDOWN_NAME, &Priv.Privileges[0].Luid); // try to enable the privilege if(!AdjustTokenPrivileges(hToken, FALSE, &Priv, sizeof(Priv), NULL, 0)) return GetLastError(); return ERROR_SUCCESS; } /* * FIXME: shutdown reason. we should probably use this, BUT * InitiateSystemShutdownEx is not included in VS6.0, so we'll have * to dynamically link it in. * * from msdn: * SHTDN_REASON_FLAG_PLANNED: The shutdown was planned. On Windows .NET * Server, the system generates a state snapshot. For more information, * see the help for Shutdown Event Tracker. * * various combinations of major/minor flags are "recognized by the * system"; the APPLICATION|RECONFIG is NOT one of these. However, * "You can also define your own shutdown reasons and add them to the * the registry." we should probably do this at installation, once * we're happy we've got a good reason code. */ DWORD reboot_system() { DWORD res; res = acquire_shutdown_privilege(); if (res != ERROR_SUCCESS) return res; /* do we need to harden this at all? * "If the the system is not ready to handle the request, the last * error code is ERROR_NOT_READY. The application should wait a * short while and retry the call." * also ERROR_MACHINE_LOCKED, ERROR_SHUTDOWN_IN_PROGRESS, etc. */ res = InitiateSystemShutdown(NULL, L"A System Restart was requested.", 30, TRUE, TRUE); // SHTDN_REASON_MAJOR_APPLICATION | // SHTDN_REASON_MINOR_RECONFIG); return res; } #define LAST_WCHAR(wstr) wstr[wcslen(wstr) - 1] #endif /* WINDOWS */ /* this sucks. * i can't believe this is best way to implement this in Win32... * but i can't seem to find a better way. 
* msdn suggests using CreateFile() with CREATE_NEW or OPEN_EXISTING, * and then checking error codes; but the problem there is that C:\\ * returns PATH_NOT_FOUND regardless. */ bool file_exists(const TCHAR *fn) { #ifdef WINDOWS WIN32_FIND_DATA fd; HANDLE search; DO_ASSERT(fn != NULL); search = FindFirstFile(fn, &fd); if (search == INVALID_HANDLE_VALUE) { /* special handling for e.g. C:\\ */ if (LAST_WCHAR(fn) == L'\\' || LAST_WCHAR(fn) == L':') { WCHAR buf[MAX_PATH]; _snwprintf(buf, MAX_PATH, L"%S%S*", fn, LAST_WCHAR(fn) == L'\\' ? L"" : L"\\"); NULL_TERMINATE_BUFFER(buf); search = FindFirstFile(buf, &fd); if (search != INVALID_HANDLE_VALUE) { FindClose(search); return TRUE; } else { DO_DEBUG(DL_VERB, printf("%S: even though we tried hard, %d\n", buf, GetLastError()); ); } } DO_DEBUG(DL_VERB, printf("%S doesn't exist because of: %d\n", fn, GetLastError()); ); return FALSE; } else { FindClose(search); return TRUE; } #else struct stat st; return stat(fn, &st) == 0; #endif } #ifdef WINDOWS #define MAX_COUNTER 999999 /* grokked from the core. * FIXME: shareme! * if NULL is passed for directory, then it is ignored and no directory * check is done, and filename_base is assumed to be absolute. * TODO: make this a proactive check: make sure the file can be * opened, eg, do a create/delete on the filename to be returned. */ BOOL get_unique_filename(const WCHAR* directory, const WCHAR* filename_base, const WCHAR* file_type, WCHAR* filename_buffer, UINT maxlen) { UINT counter = 0; if (directory != NULL && !file_exists(directory)) return FALSE; do { if (directory == NULL) _snwprintf(filename_buffer, maxlen, L"%s.%.8d%s", filename_base, counter, file_type); else _snwprintf(filename_buffer, maxlen, L"%s\\%s.%.8d%s", directory, filename_base, counter, file_type); filename_buffer[maxlen-1] = L'\0'; } while (file_exists(filename_buffer) && (++counter < MAX_COUNTER)); return (counter < MAX_COUNTER); } DWORD delete_file_on_boot(WCHAR *filename) { DWORD res; BOOL success = MoveFileEx(filename, NULL, MOVEFILE_DELAY_UNTIL_REBOOT); /* reboot removal adds an entry to * HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\PendingFileRenameOperations * and smss.exe will delete the file on next boot */ if (success) res = ERROR_SUCCESS; else res = GetLastError(); return res; } DWORD delete_file_rename_in_use(WCHAR *filename) { DWORD res; BOOL success = DeleteFile(filename); if (success) return ERROR_SUCCESS; /* xref case 4512: if we leave a dll in a process after we're done * using it, we won't be able to delete it; however, hopefully * we can rename it so there won't be issues replacing it later. */ res = GetLastError(); if (res != ERROR_SUCCESS) { WCHAR tempname[MAX_PATH]; if (get_unique_filename(NULL, filename, L".tmp", tempname, MAX_PATH)) { success = MoveFile(filename, tempname); if (success) { res = ERROR_SUCCESS; /* as best effort, we also schedule cleanup of the * temporary file on next boot */ delete_file_on_boot(tempname); } else res = GetLastError(); } } return res; } #ifndef PROTECTED_DACL_SECURITY_INFORMATION # define PROTECTED_DACL_SECURITY_INFORMATION (0x80000000L) #endif /* * quick permissions xfer workaround for updating permissions * on upgrade. 
*/ DWORD copy_file_permissions(WCHAR *filedst, WCHAR *filesrc) { DWORD res = ERROR_SUCCESS; SECURITY_DESCRIPTOR *sd = NULL; ACL *dacl = NULL; res = GetNamedSecurityInfo(filesrc, SE_FILE_OBJECT, DACL_SECURITY_INFORMATION, NULL, NULL, &dacl, NULL, &sd); if (res != ERROR_SUCCESS) return res; res = SetNamedSecurityInfo(filedst, SE_FILE_OBJECT, DACL_SECURITY_INFORMATION | PROTECTED_DACL_SECURITY_INFORMATION, NULL, NULL, dacl, NULL); LocalFree(sd); return res; } /* NOTE: for now we only consider the major/minor versions and * platform id. * * the osinfo.szCSDVersion string contains service pack information, * which could be used to distinguish e.g. XPSP2, 2K3SP1, if * necessary. */ DWORD get_platform(DWORD *platform) { /* determine the OS version information */ OSVERSIONINFOW osinfo; /* i#1418: GetVersionEx is just plain broken on win8.1+ so we use the Rtl version */ typedef NTSTATUS (NTAPI *RtlGetVersion_t)(OSVERSIONINFOW *info); RtlGetVersion_t RtlGetVersion; NTSTATUS res = -1; HANDLE ntdll_handle = GetModuleHandle(_T("ntdll.dll")); /* i#1598: on any error or on unknown ver, best to assume it's a new ver * and will look most like the most recent known ver. We'll still return error * return val below, but many callers don't check that (!). */ *platform = PLATFORM_WIN_10; if (ntdll_handle == NULL) return GetLastError(); RtlGetVersion = (RtlGetVersion_t) GetProcAddress((HMODULE)ntdll_handle, "RtlGetVersion"); if (RtlGetVersion == NULL) return GetLastError(); osinfo.dwOSVersionInfoSize = sizeof(osinfo); res = RtlGetVersion(&osinfo); if (NT_SUCCESS(res)) { DO_DEBUG(DL_VERB, WCHAR verbuf[MAX_PATH]; _snwprintf(verbuf, MAX_PATH, L"Major=%d, Minor=%d, Build=%d, SPinfo=%s", osinfo.dwMajorVersion, osinfo.dwMinorVersion, osinfo.dwBuildNumber, osinfo.szCSDVersion); NULL_TERMINATE_BUFFER(verbuf); printf("%S\n", verbuf); ); if (osinfo.dwPlatformId != VER_PLATFORM_WIN32_NT) return ERROR_UNSUPPORTED_OS; if (osinfo.dwMajorVersion == 4) { if (osinfo.dwMinorVersion == 0) { *platform = PLATFORM_WIN_NT_4; return ERROR_SUCCESS; } } else if (osinfo.dwMajorVersion == 5) { if (osinfo.dwMinorVersion == 0) { *platform = PLATFORM_WIN_2000; return ERROR_SUCCESS; } else if (osinfo.dwMinorVersion == 1) { *platform = PLATFORM_WIN_XP; return ERROR_SUCCESS; } else if (osinfo.dwMinorVersion == 2) { *platform = PLATFORM_WIN_2003; return ERROR_SUCCESS; } } else if (osinfo.dwMajorVersion == 6) { if (osinfo.dwMinorVersion == 0) { *platform = PLATFORM_VISTA; return ERROR_SUCCESS; } else if (osinfo.dwMinorVersion == 1) { *platform = PLATFORM_WIN_7; return ERROR_SUCCESS; } else if (osinfo.dwMinorVersion == 2) { *platform = PLATFORM_WIN_8; return ERROR_SUCCESS; } else if (osinfo.dwMinorVersion == 3) { *platform = PLATFORM_WIN_8_1; return ERROR_SUCCESS; } } else if (osinfo.dwMajorVersion == 10) { if (osinfo.dwMinorVersion == 0) { if (GetProcAddress((HMODULE)ntdll_handle, "NtCallEnclave") != NULL) *platform = PLATFORM_WIN_10_1709; else if (GetProcAddress((HMODULE)ntdll_handle, "NtLoadHotPatch") != NULL) *platform = PLATFORM_WIN_10_1703; else if (GetProcAddress((HMODULE)ntdll_handle, "NtCreateRegistryTransaction") != NULL) *platform = PLATFORM_WIN_10_1607; else if (GetProcAddress((HMODULE)ntdll_handle, "NtCreateEnclave") != NULL) *platform = PLATFORM_WIN_10_1511; else *platform = PLATFORM_WIN_10; return ERROR_SUCCESS; } } return ERROR_UNSUPPORTED_OS; } else { return res; } } BOOL is_wow64(HANDLE hProcess) { /* IsWow64Pocess is only available on XP+ */ typedef DWORD (WINAPI *IsWow64Process_Type)(HANDLE hProcess, PBOOL isWow64Process); 
static HANDLE kernel32_handle; static IsWow64Process_Type IsWow64Process; if (kernel32_handle == NULL) kernel32_handle = GetModuleHandle(L"kernel32.dll"); if (IsWow64Process == NULL && kernel32_handle != NULL) { IsWow64Process = (IsWow64Process_Type) GetProcAddress(kernel32_handle, "IsWow64Process"); } if (IsWow64Process == NULL) { /* should be NT or 2K */ DO_DEBUG(DL_INFO, { DWORD platform = 0; get_platform(&platform); DO_ASSERT(platform == PLATFORM_WIN_NT_4 || platform == PLATFORM_WIN_2000); }); return FALSE; } else { BOOL res; if (!IsWow64Process(hProcess, &res)) return FALSE; return res; } } static const TCHAR * get_dynamorio_home_helper(BOOL reset) { static TCHAR dynamorio_home[MAXIMUM_PATH] = { 0 }; int res; if (reset) dynamorio_home[0] = L'\0'; if (dynamorio_home[0] != L'\0') return dynamorio_home; res = get_config_parameter(L_PRODUCT_NAME, FALSE, L_DYNAMORIO_VAR_HOME, dynamorio_home, MAXIMUM_PATH); if (res == ERROR_SUCCESS && dynamorio_home[0] != L'\0') return dynamorio_home; else return NULL; } const TCHAR * get_dynamorio_home() { return get_dynamorio_home_helper(FALSE); } static const TCHAR * get_dynamorio_logdir_helper(BOOL reset) { static TCHAR dynamorio_logdir[MAXIMUM_PATH] = { 0 }; DWORD res; if (reset) dynamorio_logdir[0] = L'\0'; if (dynamorio_logdir[0] != L'\0') return dynamorio_logdir; res = get_config_parameter(L_PRODUCT_NAME, FALSE, L_DYNAMORIO_VAR_LOGDIR, dynamorio_logdir, MAXIMUM_PATH); if (res == ERROR_SUCCESS && dynamorio_logdir[0] != L'\0') return dynamorio_logdir; else return NULL; } const TCHAR * get_dynamorio_logdir() { return get_dynamorio_logdir_helper(FALSE); } /* If a path is passed in, it is checked for 8.3 compatibility; else, * the default path is checked. This routine does not check the * actual 8.3 reg key. */ BOOL using_system32_for_preinject(const WCHAR *preinject) { DWORD platform = 0; get_platform(&platform); if (platform == PLATFORM_WIN_NT_4) { return TRUE; } else { /* case 7586: we need to check if the system has disabled * 8.3 names; if so, we need to use the system32 for * preinject (since spaces are not allowed in AppInitDLLs) */ WCHAR short_path[MAX_PATH]; WCHAR long_path[MAX_PATH]; if (preinject == NULL) { /* note: with force_local_path == TRUE, we don't have * to worry about get_preinject_path() calling this * method back, and it will always return success. */ get_preinject_path(short_path, MAX_PATH, TRUE, TRUE); wcsncat(short_path, L"\\" L_EXPAND_LEVEL(INJECT_DLL_8_3_NAME), MAX_PATH - wcslen(short_path)); NULL_TERMINATE_BUFFER(short_path); get_preinject_path(long_path, MAX_PATH, TRUE, FALSE); wcsncat(long_path, L"\\" L_EXPAND_LEVEL(INJECT_DLL_8_3_NAME), MAX_PATH - wcslen(long_path)); NULL_TERMINATE_BUFFER(long_path); } else { /* Check the passed-in file */ GetShortPathName(preinject, short_path, BUFFER_SIZE_ELEMENTS(short_path)); NULL_TERMINATE_BUFFER(short_path); wcsncpy(long_path, preinject, BUFFER_SIZE_ELEMENTS(long_path)); } /* if 8.3 names are disabled, file_exists will return FALSE on * the GetShortPathName()'ed path. */ return (file_exists(long_path) && !file_exists(short_path)); } } /* if force_local_path, then this returns the in-installation * path regardless of using_system32_for_preinject(). * otherwise, this returns the path to the actuall DLL that * will be injected, which depends on * using_system32_for_preinject() * if short_path, calls GetShortPathName() on the path before returning it. * for a canonical preinject path, this parameter should be TRUE. 
*/ DWORD get_preinject_path(WCHAR *buf, int nchars, BOOL force_local_path, BOOL short_path) { if (!force_local_path && using_system32_for_preinject(NULL)) { UINT len; len = GetSystemDirectory(buf, MAX_PATH); if (len == 0) return GetLastError(); } else { const WCHAR *home = get_dynamorio_home(); /* using_system32_for_preinject() assumes we always succeed */ _snwprintf(buf, nchars, L"%s\\lib", home == NULL ? L"" : home); } buf[nchars - 1] = L'\0'; if (short_path) GetShortPathName(buf, buf, nchars); return ERROR_SUCCESS; } DWORD get_preinject_name(WCHAR *buf, int nchars) { DWORD res; if (using_system32_for_preinject(NULL)) { wcsncpy(buf, L_EXPAND_LEVEL(INJECT_DLL_NAME), nchars); } else { res = get_preinject_path(buf, nchars, FALSE, TRUE); if (res != ERROR_SUCCESS) return res; wcsncat(buf, L"\\" L_EXPAND_LEVEL(INJECT_DLL_8_3_NAME), nchars - wcslen(buf)); } buf[nchars - 1] = L'\0'; return ERROR_SUCCESS; } #endif /* WINDOWS */ static dr_platform_t registry_view = DR_PLATFORM_DEFAULT; void set_dr_platform(dr_platform_t platform) { registry_view = platform; } dr_platform_t get_dr_platform() { if (registry_view == DR_PLATFORM_64BIT IF_X64(|| registry_view == DR_PLATFORM_DEFAULT)) return DR_PLATFORM_64BIT; return DR_PLATFORM_32BIT; } #ifdef WINDOWS DWORD platform_key_flags() { /* PR 244206: have control over whether using WOW64 redirection or * raw 64-bit registry view. * These flags should be used for all Reg{Create,Open,Delete}KeyEx calls, * on XP+ (invalid on earlier platforms) on redirected keys * (most of HKLM\Software). * The flags don't matter on non-redirected trees like HKLM\System. * Since too many functions in libutil/ end up calling something * that reads/writes the registry, we don't pass the dr_platform_t * around and instead use a global variable. */ DWORD platform = 0; get_platform(&platform); if (platform == PLATFORM_WIN_NT_4 || platform == PLATFORM_WIN_2000) return 0; else { switch (registry_view) { case DR_PLATFORM_DEFAULT: return 0; case DR_PLATFORM_32BIT: return KEY_WOW64_32KEY; case DR_PLATFORM_64BIT: return KEY_WOW64_64KEY; default: DO_ASSERT(false); return 0; } } } /* PR 244206: use this instead of RegDeleteKey for deleting redirected keys * (most of HKLM\Software) */ DWORD delete_product_key(HKEY hkey, LPCWSTR subkey) { /* RegDeleteKeyEx is only available on XP+. We cannot delete * from 64-bit registry if we're WOW64 using RegDeleteKey, so we * dynamically look up RegDeleteKeyEx. * We could instead use NtDeleteKey and first open the subkey HKEY: * we could link with the core's ntdll.c, and also use is_wow64_process(). 
*/ typedef DWORD (WINAPI *RegDeleteKeyExW_Type)(HKEY hKey, LPCWSTR lpSubKey, REGSAM samDesired, DWORD Reserved); static HANDLE advapi32_handle; static RegDeleteKeyExW_Type RegDeleteKeyExW; if (advapi32_handle == NULL) advapi32_handle = GetModuleHandle(L"advapi32.dll"); if (RegDeleteKeyExW == NULL && advapi32_handle != NULL) { RegDeleteKeyExW = (RegDeleteKeyExW_Type) GetProcAddress(advapi32_handle, "RegDeleteKeyExW"); } if (RegDeleteKeyExW == NULL) { /* should be NT or 2K */ DO_DEBUG(DL_INFO, { DWORD platform = 0; get_platform(&platform); DO_ASSERT(platform == PLATFORM_WIN_NT_4 || platform == PLATFORM_WIN_2000); }); return RegDeleteKey(hkey, subkey); } else return RegDeleteKeyExW(hkey, subkey, platform_key_flags(), 0); } DWORD create_root_key() { int res; HKEY hkroot; res = RegCreateKeyEx(DYNAMORIO_REGISTRY_HIVE, L_DYNAMORIO_REGISTRY_KEY, 0, NULL, REG_OPTION_NON_VOLATILE, platform_key_flags()|KEY_WRITE|KEY_ENUMERATE_SUB_KEYS, NULL, &hkroot, NULL); RegCloseKey(hkroot); return res; } /* Deletes the reg key created by create_root_key/setup_installation and the parent * company key if it's empty afterwards (might not be if PE or nodemgr has config subkeys * there. */ DWORD destroy_root_key() { DWORD res; /* This deletes just the product key. */ res = recursive_delete_key(DYNAMORIO_REGISTRY_HIVE, L_DYNAMORIO_REGISTRY_KEY, NULL); /* Delete the company key (this will only work if it is empty, so no need to worry * about clobbering any config settings or doing too much damage if we screw up. */ if (res == ERROR_SUCCESS) { WCHAR company_key[MAX_PATH]; WCHAR *pop; wcsncpy(company_key, L_DYNAMORIO_REGISTRY_KEY, MAX_PATH); NULL_TERMINATE_BUFFER(company_key); pop = wcsstr(company_key, L_COMPANY_NAME); if (pop != NULL) { pop += wcslen(L_COMPANY_NAME); /* sanity check */ if (pop == wcsrchr(company_key, L'\\')) { *pop = L'\0'; delete_product_key(DYNAMORIO_REGISTRY_HIVE, company_key); } else res = ERROR_BAD_FORMAT; } else res = ERROR_BAD_FORMAT; } return res; } DWORD setup_installation(const WCHAR *path, BOOL overwrite) { WCHAR buf[MAX_PATH]; /* if there's something there, leave it */ if (!overwrite && get_dynamorio_home() != NULL) return ERROR_SUCCESS; DO_DEBUG(DL_INFO, printf("setting up installation at: %S\n", path); ); mkdir_with_parents(path); if (!file_exists(path)) return ERROR_PATH_NOT_FOUND; _snwprintf(buf, MAX_PATH, L"%s\\%s", path, L"conf"); NULL_TERMINATE_BUFFER(buf); DO_DEBUG(DL_INFO, printf("making config dir: %S\n", buf); ); mkdir_with_parents(buf); if (!file_exists(buf)) return ERROR_PATH_NOT_FOUND; _snwprintf(buf, MAX_PATH, L"%s\\%s", path, L"logs"); NULL_TERMINATE_BUFFER(buf); DO_DEBUG(DL_INFO, printf("making logdir: %S\n", buf); ); mkdir_with_parents(buf); if (!file_exists(buf)) return ERROR_PATH_NOT_FOUND; CHECKED_OPERATION(create_root_key()); CHECKED_OPERATION(set_config_parameter(L_PRODUCT_NAME, FALSE, L_DYNAMORIO_VAR_HOME, path)); CHECKED_OPERATION(set_config_parameter(L_PRODUCT_NAME, FALSE, L_DYNAMORIO_VAR_LOGDIR, buf)); /* reset the DR_HOME cache */ get_dynamorio_home_helper(TRUE); return ERROR_SUCCESS; } /* modifies permissions for 4.3 cache/User-SID directories to be * created by users themselves */ DWORD setup_cache_permissions(WCHAR *cacheRootDirectory) { DWORD result = ERROR_UNSUPPORTED_OS; #define NUM_ACES 2 /* in C const int isn't good enough */ EXPLICIT_ACCESS ea[NUM_ACES]; PSID pSIDEveryone = NULL; PSID pSIDCreatorOwner = NULL; PACL pACL = NULL; PACL pOldDACL = NULL; SID_IDENTIFIER_AUTHORITY SIDAuthWorld = SECURITY_WORLD_SID_AUTHORITY; SID_IDENTIFIER_AUTHORITY 
SIDAuthCreator = SECURITY_CREATOR_SID_AUTHORITY; DWORD dwRes; SECURITY_DESCRIPTOR *pSD = NULL; DWORD platform = 0; /* accomodating NT permissions */ get_platform(&platform); /* Note that we prefer to not create ACLs from scratch, so that we * can accommodate Administrator groups unknown to us that would * have been inherited from \Program Files\. We should always * start with a known ACL and just edit the new ACEs */ dwRes = GetNamedSecurityInfo(cacheRootDirectory, SE_FILE_OBJECT, DACL_SECURITY_INFORMATION, NULL, NULL, &pOldDACL, NULL, &pSD); if (dwRes != ERROR_SUCCESS) return dwRes; /* Note: Although we are ADDING possibly existing ACE, it seems * like this is handled well and we don't grow the ACL. For now * this doesn't matter to us, since we expect to have just copied * the flags from the lib\ directory so can't really accumulate. */ // Create a SID for the Everyone group. if (!AllocateAndInitializeSid(&SIDAuthWorld, 1, SECURITY_WORLD_RID, 0, 0, 0, 0, 0, 0, 0, &pSIDEveryone)) { DO_DEBUG(DL_VERB, printf("AllocateAndInitializeSid (Everyone).\n"); ); goto cleanup; } // Create a SID for the CREATOR OWNER group if (!AllocateAndInitializeSid(&SIDAuthCreator, 1, SECURITY_CREATOR_OWNER_RID, 0, 0, 0, 0, 0, 0, 0, &pSIDCreatorOwner)) { DO_DEBUG(DL_VERB, printf("AllocateAndInitializeSid (CreatorOwner).\n"); ); goto cleanup; } ZeroMemory(&ea, NUM_ACES * sizeof(EXPLICIT_ACCESS)); /* Grant create directory access to Everyone, which will be in * addition to existing Read/Execute permissions we are starting * with. */ ea[0].grfAccessPermissions = FILE_ADD_SUBDIRECTORY; ea[0].grfAccessMode = GRANT_ACCESS; /* not SET_ACCESS */ ea[0].grfInheritance = NO_INHERITANCE; /* ONLY in cache\ folder! */ ea[0].Trustee.TrusteeForm = TRUSTEE_IS_SID; ea[0].Trustee.TrusteeType = TRUSTEE_IS_WELL_KNOWN_GROUP; ea[0].Trustee.ptstrName = (LPTSTR) pSIDEveryone; /* Set full control for CREATOR OWNER on any subfolders */ ea[1].grfAccessPermissions = GENERIC_ALL; ea[1].grfAccessMode = SET_ACCESS; /* we SET ALL */ if (platform == PLATFORM_WIN_NT_4) { /* case 10502 INHERIT_ONLY_ACE seems to not work */ /* we are mostly interested in any subdirectory, and cache/ is * already created (and also trusted), so adding it there * doesn't affect anything. */ ea[1].grfInheritance = OBJECT_INHERIT_ACE | CONTAINER_INHERIT_ACE; } else { /* not using the same as NT, since Creator Owner may already * have this ACE (and normally does) so we'll clutter with a * new incomplete one */ ea[1].grfInheritance = INHERIT_ONLY_ACE | OBJECT_INHERIT_ACE | CONTAINER_INHERIT_ACE; } ea[1].Trustee.TrusteeForm = TRUSTEE_IS_SID; ea[1].Trustee.TrusteeType = TRUSTEE_IS_WELL_KNOWN_GROUP; ea[1].Trustee.ptstrName = (LPTSTR) pSIDCreatorOwner; /* FIXME: we may want to disable the default group maybe should * set CREATOR GROUP to no access otherwise we get the default * Domain Users group (which usually is the Primary group) added, * e.g. KRAMMER\None:R(ead) */ /* MSDN gave a false alarm that this doesn't exist on NT - It is * present at least on sp6. FIXME: may want to use GetProcAddress * if we support earlier versions, but we'll know early enough. * We don't really need to support anything other than User SYSTEM * on NT for which we don't need this to work and can return * ERROR_UNSUPPORTED_OS */ if (ERROR_SUCCESS != SetEntriesInAcl(NUM_ACES, ea, pOldDACL, /* original DACL */ &pACL)) { DO_DEBUG(DL_VERB, printf("SetEntriesInAcl 0x%x\n", GetLastError()); ); goto cleanup; } // Try to modify the object's DACL. 
result = SetNamedSecurityInfo(cacheRootDirectory, // name of the object SE_FILE_OBJECT, // type of object DACL_SECURITY_INFORMATION | PROTECTED_DACL_SECURITY_INFORMATION , // change only the object's DACL NULL, NULL, // do not change owner or group pACL, // new DACL specified NULL); // do not change SACL if (ERROR_SUCCESS == result) { DO_DEBUG(DL_VERB, printf("Successfully changed DACL\n"); ); } cleanup: if (pSIDEveryone) FreeSid(pSIDEveryone); if (pSIDCreatorOwner) FreeSid(pSIDCreatorOwner); if (pACL) LocalFree(pACL); if (pSD) LocalFree(pSD); return result; #undef NUM_ACES } /* cache_root should normally be get_dynamorio_home() */ DWORD setup_cache_shared_directories(const WCHAR *cache_root) { DWORD res; /* support for new-in-4.2 directories, update the permissions * on the cache/ to be the same as those on lib/, and the * cache/shared/ folder to be the same as those on logs/ * * note that the relative paths of the cache and shared cache * directories here should match the values set in * setup_cache_shared_registry() */ WCHAR libpath[MAX_PATH]; WCHAR cachepath[MAX_PATH]; WCHAR logspath[MAX_PATH]; WCHAR sharedcachepath[MAX_PATH]; _snwprintf(libpath, MAX_PATH, L"%s\\lib", get_dynamorio_home()); NULL_TERMINATE_BUFFER(libpath); _snwprintf(cachepath, MAX_PATH, L"%s\\cache", cache_root); NULL_TERMINATE_BUFFER(cachepath); _snwprintf(logspath, MAX_PATH, L"%s\\logs", get_dynamorio_home()); NULL_TERMINATE_BUFFER(logspath); _snwprintf(sharedcachepath, MAX_PATH, L"%s\\shared", cachepath); NULL_TERMINATE_BUFFER(sharedcachepath); mkdir_with_parents(sharedcachepath); /* FIXME: no error checking */ res = copy_file_permissions(cachepath, libpath); if (res != ERROR_SUCCESS) { return res; } res = copy_file_permissions(sharedcachepath, logspath); if (res != ERROR_SUCCESS) { return res; } /* For in 4.3 ONLY if all users (most importantly services) * validate their per-user directory (or files) for ownership */ res = setup_cache_permissions(cachepath); if (res != ERROR_SUCCESS) { return res; } return ERROR_SUCCESS; } /* cache_root should normally be get_dynamorio_home() */ DWORD setup_cache_shared_registry(const WCHAR *cache_root, ConfigGroup *policy) { /* note that nodemgr doesn't need to do call this routine, * since the registry keys are added to the node policies in * controller/servlets/PolicyUpdateResponseHandler.java in the controller. but * anyway we expect these to be forever the same, and in any * case not configurable from the controller. */ WCHAR wpathbuf [MAX_PATH]; /* set up cache\ shared\ registry keys */ _snwprintf(wpathbuf, MAX_PATH, L"%s\\cache", cache_root); NULL_TERMINATE_BUFFER(wpathbuf); set_config_group_parameter(policy, L_IF_WIN(DYNAMORIO_VAR_CACHE_ROOT), wpathbuf); /* set up cache\ shared\ registry keys */ _snwprintf(wpathbuf, MAX_PATH, L"%s\\cache\\shared", cache_root); NULL_TERMINATE_BUFFER(wpathbuf); set_config_group_parameter(policy, L_IF_WIN(DYNAMORIO_VAR_CACHE_SHARED), wpathbuf); return ERROR_SUCCESS; } /* note that this checks the opstring against the * version of core that matches this build, NOT the version * of the core that's actually installed! 
*/ BOOL check_opstring(const WCHAR *opstring) { char *cbuf; options_t ops; int res; size_t cbuf_size = wcslen(opstring) + 1; cbuf = (char *)malloc(cbuf_size); /* FIXME: if malloc fails, do something */ _snprintf(cbuf, cbuf_size, "%S", opstring); cbuf[cbuf_size-1] = '\0'; res = set_dynamo_options(&ops, cbuf); free(cbuf); return !res; } HANDLE hToken = NULL; TOKEN_PRIVILEGES Priv, OldPriv; DWORD PrivSize = sizeof(OldPriv); DWORD acquire_privileges() { DWORD error; /* if the privileges are already acquired, don't bother. this almost certainly will cause failures if multiple threads are trying to acquire privileges. */ // FIXME - this should have real synchronization!!! if (hToken != NULL) return ERROR_ALREADY_INITIALIZED; // get current thread token if (!OpenThreadToken(GetCurrentThread(), TOKEN_QUERY|TOKEN_ADJUST_PRIVILEGES, FALSE, &hToken)) { // can't get thread token, try process token instead if(!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY|TOKEN_ADJUST_PRIVILEGES, &hToken)) { return GetLastError(); } } Priv.PrivilegeCount = 1; Priv.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; LookupPrivilegeValue(NULL, SE_DEBUG_NAME, &Priv.Privileges[0].Luid); // try to enable the privilege if (!AdjustTokenPrivileges(hToken, FALSE, &Priv, sizeof(Priv), &OldPriv, &PrivSize)) { return GetLastError(); } error = GetLastError(); if (error == ERROR_NOT_ALL_ASSIGNED) { /* acquiring SeDebugPrivilege requires being admin */ return error; } return ERROR_SUCCESS; } DWORD release_privileges() { if(hToken == NULL) return ERROR_NO_SUCH_PRIVILEGE; AdjustTokenPrivileges(hToken, FALSE, &OldPriv, sizeof(OldPriv), NULL, NULL); CloseHandle(hToken); hToken = NULL; return ERROR_SUCCESS; } void wstr_replace(WCHAR *str, WCHAR orig, WCHAR new) { UINT i; for (i = 0; i < wcslen(str); i++) if (str[i] == orig) str[i] = new; return; } /* FIXME: should return error code if the directory wasn't created and * doesn't exist already */ void mkdir_with_parents(const WCHAR *dirname) { WCHAR buf[MAX_PATH], *temp_subdir; wcsncpy(buf, dirname, MAX_PATH); NULL_TERMINATE_BUFFER(buf); /* ensure proper slashes */ wstr_replace(buf, L'/', L'\\'); temp_subdir = buf; while (temp_subdir != NULL) { temp_subdir = wcschr(temp_subdir, L'\\'); if (temp_subdir != NULL) *temp_subdir = L'\0'; DO_DEBUG(DL_VERB, printf("trying to make: %S\n", buf); ); /* ok if this fails, eg the first time it will be C: */ CreateDirectory(buf, NULL); if (temp_subdir != NULL) { *temp_subdir = L'\\'; temp_subdir = temp_subdir + 1; } } return; } void ensure_directory_exists_for_file(WCHAR *filename) { WCHAR *slashptr, buf[MAX_PATH]; wcsncpy(buf, filename, MAX_PATH); NULL_TERMINATE_BUFFER(buf); slashptr = wcsrchr(buf, L'\\'); if (slashptr == NULL) return; *slashptr = L'\0'; mkdir_with_parents(buf); } /* FIXME: apparently there's a bug in MSVCRT that converts * \r\n to \r\r\n ? anyway that's what google and the evidence * seem to indicate. (see policy.c for more) * * so we may want to convert this to using Win32 API instead of * CRT. but then again we may not, just on principle. 
*/ DWORD write_file_contents(WCHAR *path, char *contents, BOOL overwrite) { FILE *fp = NULL; DWORD res = ERROR_SUCCESS; ensure_directory_exists_for_file(path); fp = _wfopen(path, L"r"); if (fp != NULL) { if (!overwrite) return ERROR_ALREADY_EXISTS; fclose(fp); } fp = _wfopen(path, L"w"); if(fp == NULL) { res = delete_file_rename_in_use(path); if (res != ERROR_SUCCESS || (fp = _wfopen(path, L"w")) == NULL) { DO_DEBUG(DL_ERROR, printf("Unable to open file: %S (%d)\n", path, errno); ); return res; } } if(strlen(contents) != fwrite(contents, 1, strlen(contents), fp)) { DO_DEBUG(DL_ERROR, printf("Write failed to file: %S (errno=%d)\n", path, errno); ); res = ERROR_WRITE_FAULT; } DO_DEBUG(DL_INFO, printf("wrote file %S\n", path); ); fclose(fp); return res; } DWORD write_file_contents_if_different(WCHAR *path, char *contents, BOOL *changed) { char *existing; DWORD res; DO_ASSERT(path != NULL); DO_ASSERT(contents != NULL); DO_ASSERT(changed != NULL); existing = (char *)malloc(strlen(contents) + 1); res = read_file_contents(path, existing, strlen(contents) + 1, NULL); if (res == ERROR_SUCCESS && 0 == strcmp(contents, existing)) { *changed = FALSE; res = ERROR_SUCCESS; } else { *changed = TRUE; res = write_file_contents(path, contents, TRUE); } free(existing); return res; } #define READ_BUF_SZ 1024 DWORD read_file_contents(WCHAR *path, char *contents, SIZE_T maxchars, SIZE_T *needed) { FILE *fp = NULL; DWORD res = ERROR_SUCCESS; SIZE_T n_read = 0; SIZE_T n_needed = 0; char buf[READ_BUF_SZ]; DO_ASSERT(path != NULL); DO_ASSERT(contents != NULL || needed != NULL); DO_ASSERT(contents == NULL || maxchars > 0); fp = _wfopen(path, L"r"); if (fp == NULL) { DO_DEBUG(DL_INFO, printf("Not found: %S\n", path); ); return ERROR_FILE_NOT_FOUND; } if (contents != NULL) { n_read = fread(contents, 1, maxchars, fp); /* NULL terminate string. */ contents[n_read == maxchars ? n_read - 1 : n_read] = '\0'; DO_DEBUG(DL_FINEST, printf("*Read %d bytes from %S (max=%d)\n", n_read, path, maxchars); ); } n_needed = n_read; while (!feof(fp)) { res = ERROR_MORE_DATA; n_read = fread(buf, 1, READ_BUF_SZ, fp); DO_DEBUG(DL_FINEST, printf(" Read an additional %d bytes\n", n_read); ); if (n_read == 0 && !feof(fp)) { res = ERROR_READ_FAULT; break; } n_needed += n_read; } /* + 1 for the NULL terminator */ n_needed += 1; if (needed != NULL) *needed = n_needed; fclose(fp); if (res == ERROR_SUCCESS || res == ERROR_MORE_DATA) { DO_DEBUG(DL_VERB, printf("file %S contents: (%d needed)\n\n%s\n", path, n_needed, contents); ); } else { DO_DEBUG(DL_ERROR, printf("read failed, error %d\n", res); ); } return res; } DWORD delete_tree(const WCHAR *path) { WIN32_FIND_DATA data; HANDLE hFind; WCHAR pathbuf[MAX_PATH], subdirbuf[MAX_PATH]; if (path == NULL) return ERROR_INVALID_PARAMETER; _snwprintf(pathbuf, MAX_PATH, L"%s\\*.*", path); NULL_TERMINATE_BUFFER(pathbuf); hFind = FindFirstFile(pathbuf, &data); if (hFind == INVALID_HANDLE_VALUE) return GetLastError(); DO_DEBUG(DL_VERB, printf("dt working on %S\n", path); ); do { if (wcscmp(data.cFileName, L".") == 0 || wcscmp(data.cFileName, L"..") == 0) continue; /* case 7407: FindNextFile on a FAT32 filesystem returns files in * the order they were written to disk, which could be different * from NTFS where the order is alphabetical (from MSDN). * Also on FAT32, FindNextFile sometimes puts us back in the loop * for the file we just renamed and we try to * delete_file_rename_in_use the file we just renamed for a very * long time (>3 hrs). 
* * FIXME: temporary hack: if filename has .tmp in its name * (first ocurrance), assume we just renamed it and skip. * * note we may want to doublecheck that the file is indeed not * deletable although we now add it to the * PendingFileRenameOperations so such unused files can't stay * around for too long. */ if (wcsstr(data.cFileName, L".tmp") != NULL) continue; DO_DEBUG(DL_VERB, printf("dt still working on %S, %d\n", data.cFileName, data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY); ); _snwprintf(subdirbuf, MAX_PATH, L"%s\\%s", path, data.cFileName); NULL_TERMINATE_BUFFER(subdirbuf); /* case 4512: use rename trick if file is in use, so that * the uninstall/reinstall case will work */ if (data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) delete_tree(subdirbuf); else delete_file_rename_in_use(subdirbuf); } while (FindNextFile(hFind, &data)); if (!FindClose(hFind)) return GetLastError(); if (!RemoveDirectory(path)) return GetLastError(); return ERROR_SUCCESS; } /* * helper function for registry permissions workaround. stopgap until * we can make a decent permissions api. * * based on example code obtained from: * http://www.codeproject.com/system/secntobj.asp */ PSID getSID(WCHAR *user) { DWORD dwSidLen = 0, dwDomainLen = 0; SID_NAME_USE SidNameUse; PSID pRet = NULL; PSID pSid = NULL; WCHAR *lpDomainName = NULL; /* The function on the first call retives the length that we need * to initialize the SID & domain name pointers */ if(!LookupAccountName(NULL, user, NULL, &dwSidLen, NULL, &dwDomainLen, &SidNameUse)) { if(ERROR_INSUFFICIENT_BUFFER == GetLastError()) { pSid = LocalAlloc(LMEM_ZEROINIT, dwSidLen); lpDomainName = LocalAlloc(LMEM_ZEROINIT, dwDomainLen*sizeof(WCHAR)); if(pSid && lpDomainName && LookupAccountName(NULL, user, pSid, &dwSidLen, lpDomainName, &dwDomainLen, &SidNameUse)) { pRet = pSid; pSid = NULL; } } } /* if successful, was set to NULL and left in pRet */ LocalFree(pSid); LocalFree(lpDomainName); return pRet; } BOOL make_acl(DWORD count, WCHAR **userArray, DWORD *maskArray, ACL **acl) { DWORD dwLoop = 0; DWORD dwAclLen = 0; PACL pRetAcl = NULL; PSID *ppStoreSid = NULL; BOOL bRes = FALSE; if(acl == NULL) goto cleanup; ppStoreSid = LocalAlloc(LMEM_ZEROINIT, count * sizeof(void*)); if (ppStoreSid == NULL) goto cleanup; for (dwLoop = 0; dwLoop < count; dwLoop++) { ppStoreSid[dwLoop] = getSID(userArray[dwLoop]); if (ppStoreSid[dwLoop] == NULL) goto cleanup; dwAclLen += GetLengthSid(ppStoreSid[dwLoop]) + sizeof(ACCESS_ALLOWED_ACE) - sizeof(DWORD); } dwAclLen += sizeof(ACL); pRetAcl = LocalAlloc(LMEM_ZEROINIT, dwAclLen); if(pRetAcl == NULL || !InitializeAcl(pRetAcl, dwAclLen, ACL_REVISION)) goto cleanup; for (dwLoop = 0; dwLoop < count; dwLoop++) { /* only adding access allowed ACE's */ if(!AddAccessAllowedAce(pRetAcl, ACL_REVISION, maskArray[dwLoop], ppStoreSid[dwLoop])) goto cleanup; } *acl = pRetAcl; pRetAcl = NULL; bRes = TRUE; cleanup: if (ppStoreSid != NULL) { for (dwLoop = 0; dwLoop < count; dwLoop++) LocalFree(ppStoreSid[dwLoop]); LocalFree(ppStoreSid); } /* if successful, was set to NULL and left in *acl */ LocalFree(pRetAcl); return bRes; } #define NUM_ACL_ENTRIES 4 DWORD set_registry_permissions_for_user(WCHAR *hklm_keyname, WCHAR *user) { SECURITY_DESCRIPTOR sd; SID *owner = NULL; ACL *acl1 = NULL; DWORD res; HKEY hkey = NULL; WCHAR *users[NUM_ACL_ENTRIES] = { L"Administrators", L"Everyone", L"SYSTEM", NULL, }; DWORD masks[NUM_ACL_ENTRIES] = { KEY_ALL_ACCESS | DELETE | READ_CONTROL | WRITE_DAC | WRITE_OWNER, KEY_READ, KEY_ALL_ACCESS | DELETE | READ_CONTROL | WRITE_DAC | 
WRITE_OWNER, KEY_ALL_ACCESS, }; users[NUM_ACL_ENTRIES - 1] = user; DO_DEBUG(DL_VERB, printf("Starting acl..\n"); ); res = RegOpenKeyEx(HKEY_LOCAL_MACHINE, hklm_keyname, 0, platform_key_flags()|KEY_ALL_ACCESS, &hkey); if (res != ERROR_SUCCESS) goto error_out; DO_DEBUG(DL_VERB, printf("Got key handle.\n"); ); if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) { res = GetLastError(); goto error_out; } owner = getSID(users[0]); if (NULL == owner) { res = ERROR_INVALID_DATA; goto error_out; } if (!SetSecurityDescriptorOwner(&sd, owner, FALSE)) { res = GetLastError(); goto error_out; } DO_DEBUG(DL_VERB, printf("Set owner.\n"); ); if (!make_acl(NUM_ACL_ENTRIES, users, masks, &acl1)) { res = ERROR_ACCESS_DENIED; goto error_out; } DO_DEBUG(DL_VERB, printf("Made ACL.\n"); ); if(!SetSecurityDescriptorDacl(&sd, TRUE, acl1, FALSE)) { res = GetLastError(); goto error_out; } if (!IsValidSecurityDescriptor(&sd)) { res = GetLastError(); goto error_out; } res = RegSetKeySecurity(hkey, DACL_SECURITY_INFORMATION | OWNER_SECURITY_INFORMATION, &sd); DO_DEBUG(DL_VERB, printf("Set sacl.\n"); ); goto cleanup; error_out: /* make sure to return an error */ if (res == ERROR_SUCCESS) res = ERROR_ACCESS_DENIED; cleanup: if (hkey != NULL) RegCloseKey(hkey); LocalFree(owner); LocalFree(acl1); return res; } /* will limit to 1 MB */ #define MAX_INSERT_SIZE (1024 * 1024) static void insert_file(FILE *file, wchar_t *file_src_name, BOOL delete) { /* 3rd arg not needed but older headers do not declare as optional */ int fd_src = _wopen(file_src_name, _O_RDONLY|_O_BINARY, 0); long length; int error; if (fd_src == -1) { fprintf(file, "Unable to open file \"%S\" for inserting\n", file_src_name); return; } length = _filelength(fd_src); if (length == -1L) { fprintf(file, "Unable to get file length for file \"%S\"\n", file_src_name); return; } if (length > MAX_INSERT_SIZE) { fprintf(file, "File size exceeds max insert length, truncating from %d to %d\n", length, MAX_INSERT_SIZE); length = MAX_INSERT_SIZE; } fprintf(file, "Inserting file: name=\"%S\" length=%d\n", file_src_name, length); /* hmm, there's prob. a better way to do this ... 
*/ #define COPY_BUF_SIZE 4096 { char buf[COPY_BUF_SIZE] = {0}; long i = 0; while (i + COPY_BUF_SIZE <= length) { _read(fd_src, buf, COPY_BUF_SIZE); fwrite(buf, 1, COPY_BUF_SIZE, file); i += COPY_BUF_SIZE; } if (i < length) { _read(fd_src, buf, length - i); fwrite(buf, 1, length - i, file); } } fprintf(file, "Finished inserting file\n"); if (_read(fd_src, &error, sizeof(error)) != 0) fprintf(file, "ERROR : file continues beyond length\n"); _close(fd_src); if (delete) { DeleteFile(file_src_name); } return; } /* see utils.h for description */ DWORD get_violation_info(EVENTLOGRECORD *pevlr, /* INOUT */ VIOLATION_INFO *info) { DO_ASSERT(pevlr != NULL && info != NULL && pevlr->EventID == MSG_SEC_FORENSICS); info->report = NULL; if (pevlr->EventID != MSG_SEC_FORENSICS) return ERROR_INVALID_PARAMETER; info->report = get_forensics_filename(pevlr); if (file_exists(info->report)) return ERROR_SUCCESS; else return ERROR_FILE_NOT_FOUND; } wchar_t *canary_process_names[] = {L"canary.exe", L"services.exe", L"iexplore.exe"}; #define num_canary_processes BUFFER_SIZE_ELEMENTS(canary_process_names) /* how long to wait for an apparently hung canary process */ #define CANARY_HANG_WAIT 20000 /* interval to wait for the canary process to do something */ #define CANARY_SLEEP_WAIT 100 #define OPTIONS_CANARY_NATIVE L" -list_modules -check_for_hooked_mods_list ntdll.dll" #define OPTIONS_CANARY_THIN_CLIENT L"" #define OPTIONS_CANARY_CLIENT L"" #define OPTIONS_CANARY_MF L"" #define OPTIONS_CANARY_INJECT L"-wait" /* FIXME - could even get ldmps ... */ /* FIXME - xref case 10322 on -syslog_mask 0, eventually should remove and verify * expected eventlog output (and get PE to ignore them). */ #define OPTIONS_THIN_CLIENT L"-thin_client -syslog_mask 0" #define OPTIONS_CLIENT L"-client -syslog_mask 0" /* FIXME - temporary hack so virus scan correctly identified by canary. Weird case * since this is considered a survivable violation by default (and so ignores kill proc). */ #define OPTIONS_MF L"-apc_policy 0 -syslog_mask 0" /* returns the appropriate canary fail code */ static int run_individual_canary_test(FILE *file, WCHAR *logbase, WCHAR *dr_options, int exe_index, ConfigGroup *policy, WCHAR *exe, WCHAR *exe_args, BOOL inject_test, char *type, BOOL early_test) { STARTUPINFO sinfo = {sizeof(sinfo), NULL, L"", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL}; PROCESS_INFORMATION pinfo; int canary_code = CANARY_SUCCESS; WCHAR logbuf[MAX_PATH] = {0}; WCHAR outfile[MAX_PATH]; WCHAR cmd_buf[5*MAX_PATH]; /* set up registry */ get_unique_filename(logbase, L"canary_logs", L"", logbuf, BUFFER_SIZE_ELEMENTS(logbuf)); CreateDirectory(logbuf, NULL); set_config_group_parameter(get_child(canary_process_names[exe_index], policy), L_DYNAMORIO_VAR_LOGDIR, logbuf); set_config_group_parameter(get_child(canary_process_names[exe_index], policy), L_DYNAMORIO_VAR_OPTIONS, dr_options); write_config_group(policy); /* set up cmd_buf */ _snwprintf(outfile, BUFFER_SIZE_ELEMENTS(outfile), L"%s\\out.rep", logbuf); NULL_TERMINATE_BUFFER(outfile); if (early_test) { /* we get the canary_process to re-launch itself to run test with early inject */ _snwprintf(cmd_buf, BUFFER_SIZE_ELEMENTS(cmd_buf), L"\"%s\" \"%s\" -launch_child %s%d \"\\\"%s\\\" %s\"", exe, outfile, inject_test ? 
L"-verify_inject " : L"", CANARY_HANG_WAIT / 2, outfile, exe_args); } else { _snwprintf(cmd_buf, BUFFER_SIZE_ELEMENTS(cmd_buf), L"\"%s\" \"%s\" %s", exe, outfile, exe_args); } NULL_TERMINATE_BUFFER(cmd_buf); fprintf(file, "Starting Canary Process \"%S\" core_ops=\"%S\" type=%s%s\n", cmd_buf, dr_options, type, inject_test ? " inject" : ""); if (CreateProcess(NULL, cmd_buf, NULL, NULL, TRUE, 0, NULL, NULL, &sinfo, &pinfo)) { if (inject_test && !early_test) { DWORD sleep_count = 0, under_dr_code, ws, build = 0; do { ws = WaitForSingleObject(pinfo.hProcess, CANARY_SLEEP_WAIT); sleep_count += CANARY_SLEEP_WAIT; under_dr_code = under_dynamorio_ex(pinfo.dwProcessId, &build); } while (ws == WAIT_TIMEOUT && sleep_count < CANARY_HANG_WAIT && (under_dr_code == DLL_UNKNOWN || under_dr_code == DLL_NONE)); if (under_dr_code == DLL_UNKNOWN || under_dr_code == DLL_NONE) { canary_code = CANARY_FAIL_APP_INIT_INJECTION; fprintf(file, "Injection Failed - verify registry settings\n"); } else { fprintf(file, "Verified Injection, build %d\n", build); } if (ws == WAIT_TIMEOUT) terminate_process(pinfo.dwProcessId); } else { DWORD ws = WaitForSingleObject(pinfo.hProcess, CANARY_HANG_WAIT); if (ws == WAIT_TIMEOUT) { if (early_test && inject_test) { canary_code = CANARY_FAIL_EARLY_INJECTION; fprintf(file, "Early Injection Failed\n"); } else { canary_code = CANARY_FAIL_HUNG; fprintf(file, "Canary Hung\n"); } terminate_process(pinfo.dwProcessId); } else { DWORD exit_code = 0; GetExitCodeProcess(pinfo.hProcess, &exit_code); /* FIXME - check return value, shouldn't ever fail though */ if (exit_code != CANARY_PROCESS_EXP_EXIT_CODE) { /* FIXME - the -1 is based on the core value for kill * proc, should export that and use it, or really just check for * violations since we'll want the forensics anyways. Doesn't * disambiguate between dr error and violation. 
*/ if (exit_code == (DWORD)-1) { canary_code = CANARY_FAIL_VIOLATION; fprintf(file, "Canary Violation or DR error\n"); } else { canary_code = CANARY_FAIL_CRASH; fprintf(file, "Canary Crashed 0x%08x\n", exit_code); } } else if (early_test && inject_test) { fprintf(file, "Verified Early Injection\n"); } } } CloseHandle(pinfo.hProcess); CloseHandle(pinfo.hThread); { HANDLE hFind; WIN32_FIND_DATA data; WCHAR file_name[MAX_PATH], pattern[MAX_PATH]; _snwprintf(pattern, BUFFER_SIZE_ELEMENTS(pattern), L"%s\\*.*", logbuf); NULL_TERMINATE_BUFFER(pattern); hFind = FindFirstFile(pattern, &data); if (hFind != INVALID_HANDLE_VALUE) { do { if (wcscmp(data.cFileName, L".") == 0 || wcscmp(data.cFileName, L"..") == 0) continue; _snwprintf(file_name, BUFFER_SIZE_ELEMENTS(file_name), L"%s\\%s", logbuf, data.cFileName); NULL_TERMINATE_BUFFER(file_name); insert_file(file, file_name, FALSE); } while (FindNextFile(hFind, &data)); FindClose(hFind); } } fprintf(file, "Canary Finished\n"); } else { fprintf(file, "Canary \"%S\" Failed to Launch\n", cmd_buf); } return canary_code; } #pragma warning( disable : 4127) //conditional expression is constant i.e while (FALSE) /* see utils.h for description */ BOOL run_canary_test_ex(FILE *file, /* INOUT */ CANARY_INFO *info, const WCHAR *scratch_folder, const WCHAR *canary_process) { ConfigGroup *policy, *save_policy; WCHAR exe_buf[num_canary_processes][MAX_PATH]; WCHAR log_folder[MAX_PATH]; DWORD i; BOOL autoinject_set = is_autoinjection_set(); info->canary_code = ERROR_SUCCESS; info->url = L"CFail"; info->msg = L"Canary Failed"; read_config_group(&save_policy, L_PRODUCT_NAME, TRUE); save_policy->should_clear = TRUE; read_config_group(&policy, L_PRODUCT_NAME, TRUE); policy->should_clear = TRUE; remove_children(policy); _snwprintf(log_folder, BUFFER_SIZE_ELEMENTS(log_folder), L"%s\\canary_logs", scratch_folder); NULL_TERMINATE_BUFFER(log_folder); CreateDirectory(log_folder, NULL); for (i = 0; i < num_canary_processes; i++) { _snwprintf(exe_buf[i], BUFFER_SIZE_ELEMENTS(exe_buf[i]), L"%s\\%s", scratch_folder, canary_process_names[i]); NULL_TERMINATE_BUFFER(exe_buf[i]); if (CopyFile(canary_process, exe_buf[i], FALSE) == 0) { fprintf(file, "Failed to copy canary file %S to %S\n", canary_process, exe_buf[i]); /* FIXME- continue if file exists from a previous run that didn't clean up */ info->canary_code = CANARY_UNABLE_TO_TEST; goto canary_exit; } add_config_group(policy, new_config_group(canary_process_names[i])); set_config_group_parameter(get_child(canary_process_names[i], policy), L_DYNAMORIO_VAR_RUNUNDER, L"1"); } write_config_group(policy); /* FIXME - monitor eventlog though we should still detect via forensics and/or * exit code (crash/violation). Xref 10322, for now we suppress eventlogs. */ /* FIXME - the verify injection tests need work, should just talk to canary proc. */ /* FIXME - verify canary output - necessary? not clear what action would be */ /* Files are copied, begin runs */ #define DO_RUN(run_flag, core_ops, canary_options, inject, run_name, test_type) do { \ if (TEST(run_flag, info->run_flags)) { \ WCHAR *canary_ops = TEST(run_flag, info->fault_run) ? 
\ info->canary_fault_args : canary_options; \ for (i = 0; i < num_canary_processes; i++) { \ int code = \ run_individual_canary_test(file, log_folder, core_ops, i, policy, \ exe_buf[i], canary_ops, inject, \ run_name, FALSE /* not early */); \ if (code >= 0 && test_type != CANARY_TEST_TYPE_NATIVE) { \ code = \ run_individual_canary_test(file, log_folder, core_ops, i, policy, \ exe_buf[i], canary_ops, inject, \ run_name, TRUE /* early inject*/); \ } \ if (code < 0) { \ if (CANARY_RUN_REQUIRES_PASS(run_flag, info->run_flags)) { \ info->canary_code = GET_CANARY_CODE(test_type, code); \ goto canary_exit; \ } \ break; /* skip remaining tests in run once first failure found */ \ } \ } \ } \ } while (FALSE) /* First the native runs. */ unset_autoinjection(); /* native info gathering run. */ DO_RUN(CANARY_RUN_NATIVE, L"", OPTIONS_CANARY_NATIVE, FALSE, "native", CANARY_TEST_TYPE_NATIVE); set_autoinjection(); /* Going to do the non-native runs now */ /* Now the -thin_client inject run */ DO_RUN(CANARY_RUN_THIN_CLIENT_INJECT, OPTIONS_THIN_CLIENT, OPTIONS_CANARY_INJECT, TRUE, "-thin_client", CANARY_TEST_TYPE_THIN_CLIENT); /* now the full -thin_client run */ DO_RUN(CANARY_RUN_THIN_CLIENT, OPTIONS_THIN_CLIENT, OPTIONS_CANARY_THIN_CLIENT, FALSE, "-thin_client", CANARY_TEST_TYPE_THIN_CLIENT); /* Now the -client run */ DO_RUN(CANARY_RUN_CLIENT, OPTIONS_CLIENT, OPTIONS_CANARY_CLIENT, FALSE, "-client", CANARY_TEST_TYPE_CLIENT); /* Now the MF run */ DO_RUN(CANARY_RUN_MF, OPTIONS_MF, OPTIONS_CANARY_MF, FALSE, "MF", CANARY_TEST_TYPE_MF); #undef DO_RUN canary_exit: if (autoinject_set) set_autoinjection(); else unset_autoinjection(); free_config_group(policy); write_config_group(save_policy); free_config_group(save_policy); fprintf(file, "Canary code 0x%08x\n", info->canary_code); if (info->canary_code >= 0) { info->url = L"ctest"; info->msg = L"Canary success"; } return (info->canary_code >= 0); } /* see utils.h for description */ BOOL run_canary_test(/* INOUT */ CANARY_INFO *info, WCHAR *version_msg) { BOOL result; DWORD res; FILE *report_file; WCHAR scratch_folder[MAX_PATH], canary_process[MAX_PATH]; const WCHAR *dynamorio_home = get_dynamorio_home(); const WCHAR *dynamorio_logdir = get_dynamorio_logdir(); _snwprintf(canary_process, BUFFER_SIZE_ELEMENTS(canary_process), L"%s\\bin\\canary.exe", dynamorio_home); NULL_TERMINATE_BUFFER(canary_process); _snwprintf(scratch_folder, BUFFER_SIZE_ELEMENTS(scratch_folder), L"%s\\canary_test", dynamorio_logdir); NULL_TERMINATE_BUFFER(scratch_folder); /* xref case 10157, let's try to make sure this stays clean */ delete_tree(scratch_folder); CreateDirectory(scratch_folder, NULL); /* FIXME - verify directory created */ /* Using get unique file name since we plan to run this more then once, * though only an issue if the caller doesn't cleanup the report file and * leaves it locked. */ get_unique_filename(dynamorio_logdir, L"canary_report", L".crep", info->buf_report, BUFFER_SIZE_ELEMENTS(info->buf_report)); info->report = info->buf_report; report_file = _wfopen(info->report, L"wb"); /* FIXME - verify file creation */ fprintf(report_file, "%S\n", version_msg == NULL ? 
L"unknown version" : version_msg); result = run_canary_test_ex(report_file, info, scratch_folder, canary_process); res = delete_tree(scratch_folder); fprintf(report_file, "Deleted scratch folder \"%S\", code %d\n", scratch_folder, res); fclose(report_file); return result; } #endif /* WINDOWS */ #else //ifdef UNIT_TEST int main() { set_debuglevel(DL_INFO); set_abortlevel(DL_WARN); /* read/write file */ { char *test1, *test2; char buffy[1024]; WCHAR *fn = L"utils.tst"; SIZE_T needed; BOOL changed; test1 = "This is a stupid file.\r\n\r\nDon't you think?\r\n"; test2 = "foo\r\n"; CHECKED_OPERATION(write_file_contents(fn, test1, TRUE)); DO_ASSERT(ERROR_MORE_DATA == read_file_contents(fn, NULL, 0, &needed)); DO_ASSERT(strlen(test1) + 1 == needed); CHECKED_OPERATION(read_file_contents(fn, buffy, needed, NULL)); DO_ASSERT(0 == strcmp(test1, buffy)); CHECKED_OPERATION(write_file_contents_if_different(fn, test1, &changed)); DO_ASSERT(!changed); CHECKED_OPERATION(write_file_contents_if_different(fn, test2, &changed)); DO_ASSERT(changed); CHECKED_OPERATION(read_file_contents(fn, buffy, 1024, NULL)); DO_ASSERT(0 == strcmp(test2, buffy)); } /* file existence */ { WCHAR *fn = L"tester-file"; DeleteFile(fn); DO_ASSERT(!file_exists(fn)); DO_ASSERT(!file_exists(fn)); CHECKED_OPERATION(write_file_contents(fn, "testing", TRUE)); DO_ASSERT(file_exists(fn)); DeleteFile(fn); DO_ASSERT(file_exists(L"C:\\")); DO_ASSERT(!file_exists(L"%%RY:\\\\zZsduf")); } /* mkdir_with_parents / delete_tree */ { delete_tree(L"__foo_test"); mkdir_with_parents(L"__foo_test"); DO_ASSERT(file_exists(L"__foo_test")); mkdir_with_parents(L"__foo_test\\foo\\bar\\goo"); DO_ASSERT(file_exists(L"__foo_test\\foo\\bar\\goo")); mkdir_with_parents(L"__foo_test/lib/bar/goo/dood"); DO_ASSERT(file_exists(L"__foo_test\\lib\\bar\\goo\\dood")); CHECKED_OPERATION(delete_tree(L"__foo_test")); DO_ASSERT(!file_exists(L"__foo_test")); DO_ASSERT(!file_exists(L"__foo_test\\foo\\bar\\goo")); DO_ASSERT(!file_exists(L"__foo_test\\lib\\bar\\goo\\dood")); } /* setup_installation */ { CHECKED_OPERATION(setup_installation(L"C:\\", TRUE)); CHECKED_OPERATION(setup_installation(L"C:\\foobarra", FALSE)); DO_ASSERT_WSTR_EQ(L"C:\\", get_dynamorio_home()); CHECKED_OPERATION(setup_installation(L"C:\\foobarra", TRUE)); DO_ASSERT_WSTR_EQ(L"C:\\foobarra", get_dynamorio_home()); } { WCHAR piname[MAX_PATH]; BOOL bres = using_system32_for_preinject(NULL); printf("Using SYSTEM32 for preinject: %s\n", bres ? "TRUE" : "FALSE"); CHECKED_OPERATION(get_preinject_name(piname, MAX_PATH)); printf("Preinject name: %S\n", piname); } printf("All Test Passed\n"); return 0; } #endif
1
12797
Build on AArch64 fails because WCHAR is not defined.
DynamoRIO-dynamorio
c
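The review for this record points at Windows-only types such as WCHAR leaking into a non-Windows (AArch64) build. A minimal sketch of the conventional remedy, assuming the `#ifdef WINDOWS` guard convention this file already uses near its end; the actual upstream fix is not shown in this record:

```c
/* Sketch only, not the project's actual fix: fence Windows-only types so
 * non-Windows builds (e.g. AArch64 Linux) never see WCHAR or DWORD.
 * Mirrors the #ifdef WINDOWS guards already present in this file. */
#ifdef WINDOWS
#    include <windows.h> /* provides WCHAR and DWORD */
DWORD
setup_cache_shared_directories(const WCHAR *cache_root);
#else
/* wide-char helpers are not compiled on non-Windows targets */
#endif
```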
@@ -20,6 +20,7 @@ import ( func TestRDWS_Template(t *testing.T) { const ( + envName = "test" manifestFileName = "rdws-manifest.yml" stackTemplateFileName = "rdws.stack.yml" )
1
// +build integration localintegration // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package stack_test import ( "io/ioutil" "path/filepath" "testing" "gopkg.in/yaml.v3" "github.com/aws/copilot-cli/internal/pkg/deploy" "github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation/stack" "github.com/aws/copilot-cli/internal/pkg/manifest" "github.com/stretchr/testify/require" ) func TestRDWS_Template(t *testing.T) { const ( manifestFileName = "rdws-manifest.yml" stackTemplateFileName = "rdws.stack.yml" ) // Read manifest. manifestBytes, err := ioutil.ReadFile(filepath.Join("testdata", "workloads", manifestFileName)) require.NoError(t, err, "read manifest file") mft, err := manifest.UnmarshalWorkload(manifestBytes) require.NoError(t, err, "unmarshal manifest file") envMft, err := mft.ApplyEnv(envName) require.NoError(t, err, "apply test env to manifest") v, ok := envMft.(*manifest.RequestDrivenWebService) require.True(t, ok) // Read wanted stack template. wantedTemplate, err := ioutil.ReadFile(filepath.Join("testdata", "workloads", stackTemplateFileName)) require.NoError(t, err, "read cloudformation stack") // Read actual stack template. serializer, err := stack.NewRequestDrivenWebService(v, envName, deploy.AppInformation{ Name: appName, }, stack.RuntimeConfig{ AccountID: "123456789123", Region: "us-west-2", }) require.NoError(t, err, "create rdws serializer") actualTemplate, err := serializer.Template() require.NoError(t, err, "get cloudformation template for rdws") // Compare the two. wanted := make(map[interface{}]interface{}) require.NoError(t, yaml.Unmarshal(wantedTemplate, wanted), "unmarshal wanted template to map[interface{}]interface{}") actual := make(map[interface{}]interface{}) require.NoError(t, yaml.Unmarshal([]byte(actualTemplate), actual), "unmarshal actual template to map[interface{}]interface{}") require.Equal(t, wanted, actual, "templates do not match") }
1
19363
Do we use this const?
aws-copilot-cli
go
@@ -139,3 +139,9 @@ func SetCertificateRequestFailureTime(p metav1.Time) CertificateRequestModifier cr.Status.FailureTime = &p } } + +func SetAnnotations(annotations map[string]string) CertificateRequestModifier { + return func(cr *v1alpha2.CertificateRequest) { + cr.SetAnnotations(annotations) + } +}
1
/* Copyright 2019 The Jetstack cert-manager contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package gen import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" ) type CertificateRequestModifier func(*v1alpha2.CertificateRequest) func CertificateRequest(name string, mods ...CertificateRequestModifier) *v1alpha2.CertificateRequest { c := &v1alpha2.CertificateRequest{ ObjectMeta: ObjectMeta(name), } for _, mod := range mods { mod(c) } return c } func CertificateRequestFrom(cr *v1alpha2.CertificateRequest, mods ...CertificateRequestModifier) *v1alpha2.CertificateRequest { cr = cr.DeepCopy() for _, mod := range mods { mod(cr) } return cr } // SetIssuer sets the CertificateRequest.spec.issuerRef field func SetCertificateRequestIssuer(o cmmeta.ObjectReference) CertificateRequestModifier { return func(c *v1alpha2.CertificateRequest) { c.Spec.IssuerRef = o } } func SetCertificateRequestCSR(csr []byte) CertificateRequestModifier { return func(cr *v1alpha2.CertificateRequest) { cr.Spec.CSRPEM = csr } } func SetCertificateRequestIsCA(isCA bool) CertificateRequestModifier { return func(cr *v1alpha2.CertificateRequest) { cr.Spec.IsCA = isCA } } func SetCertificateRequestDuration(duration *metav1.Duration) CertificateRequestModifier { return func(cr *v1alpha2.CertificateRequest) { cr.Spec.Duration = duration } } func SetCertificateRequestCA(ca []byte) CertificateRequestModifier { return func(cr *v1alpha2.CertificateRequest) { cr.Status.CA = ca } } func SetCertificateRequestCertificate(cert []byte) CertificateRequestModifier { return func(cr *v1alpha2.CertificateRequest) { cr.Status.Certificate = cert } } func SetCertificateRequestStatusCondition(c v1alpha2.CertificateRequestCondition) CertificateRequestModifier { return func(cr *v1alpha2.CertificateRequest) { if len(cr.Status.Conditions) == 0 { cr.Status.Conditions = []v1alpha2.CertificateRequestCondition{c} return } for i, existingC := range cr.Status.Conditions { if existingC.Type == c.Type { cr.Status.Conditions[i] = c return } } cr.Status.Conditions = append(cr.Status.Conditions, c) } } func SetCertificateRequestNamespace(namespace string) CertificateRequestModifier { return func(cr *v1alpha2.CertificateRequest) { cr.ObjectMeta.Namespace = namespace } } func SetCertificateRequestName(name string) CertificateRequestModifier { return func(cr *v1alpha2.CertificateRequest) { cr.ObjectMeta.Name = name } } func SetCertificateRequestKeyUsages(usages ...v1alpha2.KeyUsage) CertificateRequestModifier { return func(cr *v1alpha2.CertificateRequest) { cr.Spec.Usages = usages } } func AddCertificateRequestAnnotations(annotations map[string]string) CertificateRequestModifier { return func(cr *v1alpha2.CertificateRequest) { // Make sure to do a merge here with new annotations overriding. 
annotationsNew := cr.GetAnnotations() if annotationsNew == nil { annotationsNew = make(map[string]string) } for k, v := range annotations { annotationsNew[k] = v } cr.SetAnnotations(annotationsNew) } } func SetCertificateRequestAnnotations(annotations map[string]string) CertificateRequestModifier { return func(cr *v1alpha2.CertificateRequest) { cr.SetAnnotations(annotations) } } func SetCertificateRequestFailureTime(p metav1.Time) CertificateRequestModifier { return func(cr *v1alpha2.CertificateRequest) { cr.Status.FailureTime = &p } }
1
20330
I think this may need to be `SetCertificateRequestAnnotations` as it returns a `CertificateRequestModifier`.
jetstack-cert-manager
go
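The review asks the new modifier to follow the file's `SetCertificateRequest*` naming convention. A minimal sketch of the rename, assuming the body from the patch is otherwise unchanged and relying on the `v1alpha2` import already present in the file:

```go
// Sketch of the rename the reviewer suggests: identical body to the
// patch, named to match the other modifiers in this file.
func SetCertificateRequestAnnotations(annotations map[string]string) CertificateRequestModifier {
	return func(cr *v1alpha2.CertificateRequest) {
		cr.SetAnnotations(annotations)
	}
}
```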
@@ -234,6 +234,7 @@ class User < ActiveRecord::Base # # Returns UserIdentifier def identifier_for(scheme) + scheme = scheme.instance_of?(IdentifierScheme) ? scheme.name : scheme identifiers.by_scheme_name(scheme, "User").first end
1
# frozen_string_literal: true # == Schema Information # # Table name: users # # id :integer not null, primary key # accept_terms :boolean # active :boolean default(TRUE) # api_token :string # confirmation_sent_at :datetime # confirmation_token :string # confirmed_at :datetime # current_sign_in_at :datetime # current_sign_in_ip :string # email :string(80) default(""), not null # encrypted_password :string # firstname :string # invitation_accepted_at :datetime # invitation_created_at :datetime # invitation_sent_at :datetime # invitation_token :string # invited_by_type :string # last_sign_in_at :datetime # last_sign_in_ip :string # ldap_password :string # ldap_username :string # other_organisation :string # recovery_email :string # remember_created_at :datetime # reset_password_sent_at :datetime # reset_password_token :string # sign_in_count :integer default(0) # surname :string # created_at :datetime not null # updated_at :datetime not null # department_id :integer # invited_by_id :integer # language_id :integer # org_id :integer # # Indexes # # fk_rails_45f4f12508 (language_id) # fk_rails_f29bf9cdf2 (department_id) # index_users_on_email (email) # index_users_on_org_id (org_id) # # Foreign Keys # # fk_rails_... (department_id => departments.id) # fk_rails_... (language_id => languages.id) # fk_rails_... (org_id => orgs.id) # class User < ActiveRecord::Base include ConditionalUserMailer include ValidationMessages include ValidationValues include DateRangeable include Identifiable extend UniqueRandom ## # Devise # Include default devise modules. Others available are: # :token_authenticatable, :confirmable, # :lockable, :timeoutable and :omniauthable devise :invitable, :database_authenticatable, :registerable, :recoverable, :rememberable, :trackable, :validatable, :omniauthable, omniauth_providers: [:shibboleth, :orcid] ## # User Notification Preferences serialize :prefs, Hash # ================ # = Associations = # ================ has_and_belongs_to_many :perms, join_table: :users_perms belongs_to :language belongs_to :org belongs_to :department, required: false has_one :pref has_many :answers has_many :notes has_many :exported_plans has_many :roles, dependent: :destroy has_many :plans, through: :roles has_and_belongs_to_many :notifications, dependent: :destroy, join_table: "notification_acknowledgements" # =============== # = Validations = # =============== validates :active, inclusion: { in: BOOLEAN_VALUES, message: INCLUSION_MESSAGE } validates :firstname, presence: { message: PRESENCE_MESSAGE } validates :surname, presence: { message: PRESENCE_MESSAGE } validates :org, presence: { message: PRESENCE_MESSAGE } # ========== # = Scopes = # ========== default_scope { includes(:org, :perms) } # Retrieves all of the org_admins for the specified org scope :org_admins, -> (org_id) { joins(:perms).where("users.org_id = ? AND perms.name IN (?) AND " + "users.active = ?", org_id, ["grant_permissions", "modify_templates", "modify_guidance", "change_org_details"], true) } scope :search, -> (term) { if date_range?(term: term) by_date_range(:created_at, term) else search_pattern = "%#{term}%" # MySQL does not support standard string concatenation and since concat_ws # or concat functions do not exist for sqlite, we have to come up with this # conditional if ActiveRecord::Base.connection.adapter_name == "Mysql2" where("lower(concat_ws(' ', firstname, surname)) LIKE lower(?) 
OR " + "lower(email) LIKE lower(?)", search_pattern, search_pattern) else joins(:org) .where("lower(firstname || ' ' || surname) LIKE lower(:search_pattern) OR lower(email) LIKE lower(:search_pattern) OR lower(orgs.name) LIKE lower (:search_pattern) OR lower(orgs.abbreviation) LIKE lower (:search_pattern) ", search_pattern: search_pattern) end end } # ============= # = Callbacks = # ============= before_update :clear_other_organisation, if: :org_id_changed? before_update :clear_department_id, if: :org_id_changed? after_update :delete_perms!, if: :org_id_changed?, unless: :can_change_org? after_update :remove_token!, if: :org_id_changed?, unless: :can_change_org? # ================= # = Class methods = # ================= ## # Load the user based on the scheme and id provided by the Omniauth call def self.from_omniauth(auth) Identifier.by_scheme_name(auth.provider.downcase, "User") .where(value: auth.uid) .first&.identifiable end def self.to_csv(users) User::AtCsv.new(users).to_csv end # =========================== # = Public instance methods = # =========================== # This method uses Devise's built-in handling for inactive users # # Returns Boolean def active_for_authentication? super && active? end # EVALUATE CLASS AND INSTANCE METHODS BELOW # # What do they do? do they do it efficiently, and do we need them? # Determines the locale set for the user or the organisation he/she belongs # # Returns String # Returns nil def get_locale if !self.language.nil? self.language.abbreviation elsif !self.org.nil? self.org.get_locale else nil end end # Gives either the name of the user, or the email if name unspecified # # user_email - Use the email if there is no firstname or surname (defaults: true) # # Returns String def name(use_email = true) if (firstname.blank? && surname.blank?) || use_email then email else name = "#{firstname} #{surname}" name.strip end end # The user's identifier for the specified scheme name # # scheme - The identifier scheme name (e.g. ORCID) # # Returns UserIdentifier def identifier_for(scheme) identifiers.by_scheme_name(scheme, "User").first end # Checks if the user is a super admin. If the user has any privelege which requires # them to see the super admin page then they are a super admin. # # Returns Boolean def can_super_admin? self.can_add_orgs? || self.can_grant_api_to_orgs? || self.can_change_org? end # Checks if the user is an organisation admin if the user has any privlege which # requires them to see the org-admin pages then they are an org admin. # # Returns Boolean def can_org_admin? return true if can_super_admin? # Automatically false if the user has no Org or the Org is not managed return false unless org.present? && org.managed? can_grant_permissions? || can_modify_guidance? || can_modify_templates? || can_modify_org_details? || can_review_plans? end # Can the User add new organisations? # # Returns Boolean def can_add_orgs? perms.include? Perm.add_orgs end # Can the User change their organisation affiliations? # # Returns Boolean def can_change_org? perms.include? Perm.change_affiliation end # Can the User can grant their permissions to others? # # Returns Boolean def can_grant_permissions? perms.include? Perm.grant_permissions end # Can the User modify organisation templates? # # Returns Boolean def can_modify_templates? self.perms.include? Perm.modify_templates end # Can the User modify organisation guidance? # # Returns Boolean def can_modify_guidance? perms.include? Perm.modify_guidance end # Can the User use the API? 
# # Returns Boolean def can_use_api? perms.include? Perm.use_api end # Can the User modify their org's details? # # Returns Boolean def can_modify_org_details? perms.include? Perm.change_org_details end ## # Can the User grant the api to organisations? # # Returns Boolean def can_grant_api_to_orgs? perms.include? Perm.grant_api end ## # Can the user review their organisation's plans? # # Returns Boolean def can_review_plans? perms.include? Perm.review_plans end # Removes the api_token from the user # # Returns nil # Returns Boolean def remove_token! return if new_record? update_column(:api_token, nil) end # Generates a new token for the user unless the user already has a token. # # Returns nil # Returns Boolean def keep_or_generate_token! if api_token.nil? || api_token.empty? new_token = User.unique_random(field_name: 'api_token') update_column(:api_token, new_token) unless new_record? end end # The User's preferences for a given base key # # Returns Hash def get_preferences(key) defaults = Pref.default_settings[key.to_sym] || Pref.default_settings[key.to_s] if pref.present? existing = pref.settings[key.to_s].deep_symbolize_keys # Check for new preferences defaults.keys.each do |grp| defaults[grp].keys.each do |pref, v| # If the group isn't present in the saved values add all of it's preferences existing[grp] = defaults[grp] if existing[grp].nil? # If the preference isn't present in the saved values add the default existing[grp][pref] = defaults[grp][pref] if existing[grp][pref].nil? end end existing else defaults end end # Override devise_invitable email title def deliver_invitation(options = {}) super(options.merge(subject: _("A Data Management Plan in " + "%{application_name} has been shared with you") % { application_name: Rails.configuration.branding[:application][:name] }) ) end # Case insensitive search over User model # # field - The name of the field being queried # val - The String to search for, case insensitive. val is duck typed to check # whether or not downcase method exist. # # Returns ActiveRecord::Relation # Raises ArgumentError def self.where_case_insensitive(field, val) unless columns.map(&:name).include?(field.to_s) raise ArgumentError, "Field #{field} is not present on users table" end User.where("LOWER(#{field}) = :value", value: val.to_s.downcase) end # Acknowledge a Notification # # notification - Notification to acknowledge # # Returns ActiveRecord::Associations::CollectionProxy # Returns nil def acknowledge(notification) notifications << notification if notification.dismissable? 
end # remove personal data from the user account and save # leave account in-place, with org for statistics (until we refactor those) # # Returns boolean def archive self.firstname = 'Deleted' self.surname = 'User' self.email = User.unique_random(field_name: 'email', prefix: 'user_', suffix: Rails.configuration.branding[:application].fetch(:archived_accounts_email_suffix, '@example.org'), length: 5) self.recovery_email = nil self.api_token = nil self.encrypted_password = nil self.last_sign_in_ip = nil self.current_sign_in_ip = nil self.active = false return self.save end def merge(to_be_merged) scheme_ids = identifiers.pluck(:identifier_scheme_id) # merge logic # => answers -> map id to_be_merged.answers.update_all(user_id: id) # => notes -> map id to_be_merged.notes.update_all(user_id: id) # => plans -> map on id roles to_be_merged.roles.update_all(user_id: id) # => prefs -> Keep's from self # => auths -> map onto keep id only if keep does not have the identifier to_be_merged.identifiers .where.not(identifier_scheme_id: scheme_ids) .update_all(user_id: id) # => ignore any perms the deleted user has to_be_merged.destroy end private # ============================ # = Private instance methods = # ============================ def delete_perms! perms.destroy_all end def clear_other_organisation self.other_organisation = nil end def clear_department_id self.department_id = nil end end
1
19128
It would likely be better to do this in the `Identifier.by_scheme_name` method itself.
DMPRoadmap-roadmap
rb
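The review suggests pushing the coercion down into `Identifier.by_scheme_name` so every caller benefits, not just `identifier_for`. A hedged Ruby sketch under the assumption that `by_scheme_name` is a two-argument scope on `Identifier`; its real body is not shown in this record, so the join/where shape below is illustrative only:

```ruby
class Identifier < ActiveRecord::Base
  # Accept either an IdentifierScheme or a scheme name, so callers such
  # as User#identifier_for no longer coerce. The query shape here is an
  # assumption, not the project's actual scope definition.
  scope :by_scheme_name, lambda { |scheme, identifiable_type|
    scheme = scheme.name if scheme.is_a?(IdentifierScheme)
    joins(:identifier_scheme)
      .where(identifier_schemes: { name: scheme },
             identifiable_type: identifiable_type)
  }
end
```

With that in place, `identifier_for(scheme)` can stay the original one-liner.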
@@ -129,6 +129,10 @@ class DatasetContext extends RawDKANEntityContext { } switch ($orderby) { + case 'Date created': + $orderby = 'created'; + break; + case 'Date changed': $orderby = 'changed'; break;
1
<?php namespace Drupal\DKANExtension\Context; use Behat\Behat\Hook\Scope\BeforeScenarioScope; use Behat\Gherkin\Node\TableNode; use SearchApiQuery; /** * Defines application features from the specific context. */ class DatasetContext extends RawDKANEntityContext { use ModeratorTrait; /** * */ public function __construct($fields, $labels = array(), $sets = array(), $defaults = array()) { $this->datasetFieldLabels = $labels['labels']; $this->datasetFieldSets = $sets['sets']; $this->datasetFieldDefaults = $defaults['defaults']; parent::__construct( 'node', 'dataset', $fields['fields'], array( 'moderation', 'moderation_date', ) ); } /** * @BeforeScenario */ public function gatherContexts(BeforeScenarioScope $scope) { parent::gatherContexts($scope); $environment = $scope->getEnvironment(); $this->groupContext = $environment->getContext('Drupal\DKANExtension\Context\GroupContext'); $this->dkanContext = $environment->getContext('Drupal\DKANExtension\Context\DKANContext'); } /** * Creates datasets from a table. * * @Given datasets: */ public function addDatasets(TableNode $datasetsTable) { parent::addMultipleFromTable($datasetsTable); } /** * Looks for a dataset in the dataset view with the given name on the current page. * * @Then I should see a dataset called :text * * @throws \Exception * If region or text within it cannot be found. */ public function iShouldSeeADatasetCalled($text) { $session = $this->getSession(); $page = $session->getPage(); $search_region = $page->find('css', '.view-dkan-datasets'); $search_results = $search_region->findAll('css', '.views-row'); $found = FALSE; foreach ($search_results as $search_result) { $title = $search_result->find('css', 'h2'); if ($title->getText() === $text) { $found = TRUE; } } if (!$found) { throw new \Exception(sprintf("The text '%s' was not found", $text)); } } /** * @Then The dataset :title is in :state moderation state */ public function theDatasetIsInModerationState($title, $state) { $node = reset($this->getNodeByTitle($title)); if (!$node) { throw new \Exception(sprintf($title . " node not found.")); } $this->isNodeInModerationState($node, $state); } /** * */ public function pre_save($wrapper, $fields) { $this->preSaveModerate($wrapper, $fields); parent::pre_save($wrapper, $fields); } /** * */ public function post_save($wrapper, $fields) { parent::post_save($wrapper, $fields); $this->moderate($wrapper, $fields); } /** * @Then I should see the local preview link */ public function iShouldSeeTheLocalPreviewLink() { $this->assertSession()->pageTextContains(variable_get('dkan_dataset_teaser_preview_label', '') . ' ' . t('Preview')); } /** * @Given I should see the first :number dataset items in :orderby :sortdirection order. */ public function iShouldSeeTheFirstDatasetListInOrder($number, $orderby, $sortdirection) { $number = (int) $number; // Search the list of datasets actually on the page (up to $number items) $dataset_list = array(); $count = 0; while (($count < $number) && ($row = $this->getSession()->getPage()->find('css', '.views-row-' . ($count + 1))) !== NULL) { $row = $row->find('css', 'h2'); $dataset_list[] = $row->getText(); $count++; } if ($count !== $number) { throw new \Exception("Couldn't find $number datasets on the page. 
Found $count."); } switch ($orderby) { case 'Date changed': $orderby = 'changed'; break; case 'Title': $orderby = 'title'; break; default: throw new \Exception("Ordering by '$orderby' is not supported by this step."); } $index = search_api_index_load('datasets'); $query = new SearchApiQuery($index); $results = $query->condition('type', 'dataset') ->condition('status', '1') ->sort($orderby, strtoupper($sortdirection)) ->range(0, $number) ->execute(); $count = count($results['results']); if (count($results['results']) !== $number) { throw new \Exception("Couldn't find $number datasets in the database. Found $count."); } foreach ($results['results'] as $nid => $result) { $dataset = node_load($nid); $found_title = array_shift($dataset_list); // Drupal removes extra spacing on titles somehow so reproducing here. $title = trim($dataset->title); if ($found_title !== $title) { throw new \Exception("Does not match order of list, $found_title was next on page but expected $dataset->title"); } } } /** * @Given /^I add a Dataset Filtered List$/ */ public function iAddADatasetFilteredList() { $add_button = $this->getXPathElement("//fieldset[@class='widget-preview panel panel-default'][3]//a"); $add_button->click(); } /** * @When I empty the resources field :locator * * Empty the 'Resources' autocomplete field on a Dataset form. */ public function iEmptyTheResourcesField($locator) { $session = $this->getSession(); $page = $session->getPage(); $field = $page->find('xpath', '//div[@id="' . $locator . '"]'); if ($field === NULL) { throw new \InvalidArgumentException(sprintf('Cannot find chosen field: "%s"', $locator)); } $field_choices = $field->findAll('css', '.chosen-choices .search-choice'); foreach ($field_choices as $field_choice) { $remove_button = $field_choice->find('css', '.search-choice-close'); if ($remove_button) { $remove_button->click(); } } } /** * @Then I should see all published datasets */ public function iShouldSeeAllPublishedDatasets() { $session = $this->getSession(); $page = $session->getPage(); $search_region = $page->find('css', '.view-dkan-datasets'); $search_results = $search_region->findAll('css', '.view-header'); $index = search_api_index_load('datasets'); $query = new SearchApiQuery($index); $results = $query->condition('type', 'dataset') ->condition('status', '1') ->execute(); $total = count($results['results']); $text = $total . " results"; foreach ($search_results as $search_result) { $found = $search_result->getText(); } if ($found !== $text) { throw new \Exception("Found $found in the page but total is $total."); } } /** * @Then I should see all published search content */ public function iShouldSeeAllPublishedSearchContent(){ $session = $this->getSession(); $page = $session->getPage(); $search_region = $page->find('css', '.view-dkan-datasets'); $search_results = $search_region->findAll('css', '.view-header'); $indices = array('datasets'); $indexes = search_api_index_load_multiple($indices); $results = array(); foreach ($indexes as $index) { $query = new SearchApiQuery($index); $result = $query->condition('status', '1') ->execute(); $results[] = $result; } $total = 0; foreach ($results as $result) { $total = $total + count($result['results']); } $text = $total . 
" results"; foreach ($search_results as $search_result) { $found = $search_result->getText(); } if ($found !== $text) { throw new \Exception("Found $found in the page but total is $total."); } } /** * @Then I should see all the dataset fields in the form */ public function iShouldSeeAllTheDatasetFieldsInTheForm() { $form_css_selector = 'form#dataset-node-form'; // We could use field_info_instances() to get the list of fields for the 'dataset' content // type but that would not cover the case where a field is removed accidentally. $dataset_fields = $this->datasetFieldLabels; $dataset_fieldsets = $this->datasetFieldSets; // Get all available form fields. // Searching by the Label as a text on the page is not enough since a text like 'Resources' // could appear because other reasons. $session = $this->getSession(); $page = $session->getPage(); $form_region = $page->find('css', $form_css_selector); $form_fieldset_elements = $form_region->findAll('css', 'fieldset div.fieldset-legend'); // Clean found fieldsets. Some of them are empty values. $available_form_fieldsets = array(); foreach ($form_fieldset_elements as $form_fieldset_element) { $label = $form_fieldset_element->getText(); if (!empty($label)) { $available_form_fieldsets[] = $label; } } $query_script = "jQuery('.form-item label', jQuery('$form_css_selector')) .map(function(){ return jQuery(this).text().trim(); })"; $available_form_fields = $session->evaluateScript($query_script); foreach ($dataset_fields as $key => $field_name) { // Add way for sites to skip specific fields. if (empty($field_name)) { continue; } if (!in_array($field_name, $available_form_fields)) { throw new \Exception("Field $field_name was not found in the form with CSS selector '$form_css_selector'"); } } // Check that all form fieldsets are present. foreach ($dataset_fieldsets as $key => $fieldset_name) { if (empty($fieldset_name)) { continue; } if (!in_array($fieldset_name, $available_form_fieldsets)) { throw new \Exception("Field set $fieldset_name was not found in the form with CSS selector '$form_css_selector'"); } } } /** * @Given I :operation the :option on DKAN Dataset Forms */ public function iTheOnDkanDatasetForms($operation, $option) { $enabled = 0; if ($operation === "enable") { $enabled = 1; } switch ($option) { case 'Strict POD validation': variable_set('dkan_dataset_form_pod_validation', $enabled); break; case 'Groups validation': variable_set('dkan_dataset_form_group_validation', $enabled); break; default: break; } } /** * @Then I should see the :option groups option */ public function iShouldSeeTheGroupsOption($option) { $element = $this->find_select_option('og_group_ref[und][]', $option); if (!$element) { throw new \Exception(sprintf('The %s option could not be found.', $option)); } } /** * @Then I should not see the :option groups option */ public function iShouldNotSeeTheGroupsOption($option) { $element = $this->find_select_option('og_group_ref[und][]', $option); if ($element) { throw new \Exception(sprintf('The %s option was found.', $option)); } } /** * Helper function to search for an option element inside a select element. */ private function find_select_option($select_name, $option) { $session = $this->getSession(); $xpath = "//select[@name='" . $select_name . "']//option[text()='" . $option . "']"; return $session->getPage()->find('xpath', $session->getSelectorsHandler()->selectorToXpath('xpath', $xpath)); } }
1
20604
@janette looking at this test code, I'm pretty sure that it's going to give a false positive. Where is it actually checking the contents of the first four datasets against expected values? All the assertion at the end of the test seems to check is that at least four datasets exist.
GetDKAN-dkan
php
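The review argues the step cross-checks the page against a second live Search API query, so it cannot catch an ordering that is wrong in both places. A hedged Behat-style sketch of a step that pins expected titles from the scenario table instead; the step name and table shape are illustrative, not part of the project:

```php
/**
 * Illustrative only: assert against fixed titles supplied by the
 * scenario, rather than re-querying the index the page was built from.
 *
 * @Then I should see the following datasets in order:
 */
public function iShouldSeeTheFollowingDatasetsInOrder(TableNode $expected) {
  $position = 1;
  foreach ($expected->getHash() as $row) {
    $title = $this->getSession()->getPage()
      ->find('css', '.views-row-' . $position . ' h2');
    if ($title === NULL || trim($title->getText()) !== $row['title']) {
      throw new \Exception(sprintf("Expected '%s' at position %d but found '%s'.",
        $row['title'], $position,
        $title === NULL ? '(no row)' : $title->getText()));
    }
    $position++;
  }
}
```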
@@ -68,6 +68,14 @@ module.exports = function (defaults) { optional: ['es6.spec.symbols'], includePolyfill: true }, + 'ember-service-worker': { + rootUrl: '/ghost/' + }, + 'esw-cache-fallback': { + patterns: [ + '/ghost/api/(.+)' + ], + }, outputPaths: { app: { js: assetLocation('ghost.js')
1
/* eslint-disable */ /* global require, module */ var EmberApp = require('ember-cli/lib/broccoli/ember-app'), concat = require('broccoli-concat'), mergeTrees = require('broccoli-merge-trees'), uglify = require('broccoli-uglify-js'), cleanCSS = require('broccoli-clean-css'), environment = EmberApp.env(), isProduction = environment === 'production', mythCompress = isProduction || environment === 'test', disabled = {enabled: false}, assetLocation, codemirrorAssets; assetLocation = function (fileName) { if (isProduction) { fileName = fileName.replace('.', '.min.'); } return '/assets/' + fileName; }; codemirrorAssets = function () { var codemirrorFiles = [ 'lib/codemirror.css', 'theme/xq-light.css', 'lib/codemirror.js', 'mode/htmlmixed/htmlmixed.js', 'mode/xml/xml.js', 'mode/css/css.js', 'mode/javascript/javascript.js' ]; if (environment === 'test') { return {import: codemirrorFiles}; } return { public: { include: codemirrorFiles, destDir: '/', processTree: function (tree) { var jsTree = concat(tree, { outputFile: 'assets/codemirror/codemirror.js', headerFiles: ['lib/codemirror.js'], inputFiles: ['mode/**/*'], sourceMapConfig: {enabled: false} }); var cssTree = concat(tree, { outputFile: 'assets/codemirror/codemirror.css', inputFiles: ['**/*.css'] }); if (isProduction) { jsTree = uglify(jsTree); cssTree = cleanCSS(cssTree); } return mergeTrees([jsTree, cssTree]); } } }; }; module.exports = function (defaults) { var app = new EmberApp(defaults, { "ember-cli-babel": { optional: ['es6.spec.symbols'], includePolyfill: true }, outputPaths: { app: { js: assetLocation('ghost.js') }, vendor: { js: assetLocation('vendor.js'), css: assetLocation('vendor.css') } }, mythOptions: { source: './app/styles/app.css', inputFile: 'app.css', browsers: 'last 2 versions', // @TODO: enable sourcemaps for development without including them in the release sourcemap: false, compress: mythCompress, outputFile: isProduction ? 
'ghost.min.css' : 'ghost.css' }, fingerprint: disabled, nodeAssets: { 'blueimp-md5': { import: ['js/md5.js'] }, codemirror: codemirrorAssets(), 'jquery-deparam': { import: ['jquery-deparam.js'] }, moment: { import: ['moment.js'] }, 'moment-timezone': { import: ['builds/moment-timezone-with-data.js'] }, 'password-generator': { import: ['lib/password-generator.js'] } }, 'ember-cli-selectize': { theme: false } }); // 'dem Scripts app.import('bower_components/validator-js/validator.js'); app.import('bower_components/rangyinputs/rangyinputs-jquery-src.js'); app.import('bower_components/showdown-ghost/src/showdown.js'); app.import('bower_components/showdown-ghost/src/extensions/ghostgfm.js'); app.import('bower_components/showdown-ghost/src/extensions/ghostimagepreview.js'); app.import('bower_components/showdown-ghost/src/extensions/footnotes.js'); app.import('bower_components/showdown-ghost/src/extensions/highlight.js'); app.import('bower_components/keymaster/keymaster.js'); app.import('bower_components/devicejs/lib/device.js'); // jquery-ui partial build app.import('bower_components/jquery-ui/ui/core.js'); app.import('bower_components/jquery-ui/ui/widget.js'); app.import('bower_components/jquery-ui/ui/mouse.js'); app.import('bower_components/jquery-ui/ui/draggable.js'); app.import('bower_components/jquery-ui/ui/droppable.js'); app.import('bower_components/jquery-ui/ui/sortable.js'); app.import('bower_components/jquery-file-upload/js/jquery.fileupload.js'); app.import('bower_components/blueimp-load-image/js/load-image.all.min.js'); app.import('bower_components/jquery-file-upload/js/jquery.fileupload-process.js'); app.import('bower_components/jquery-file-upload/js/jquery.fileupload-image.js'); app.import('bower_components/google-caja/html-css-sanitizer-bundle.js'); app.import('bower_components/jqueryui-touch-punch/jquery.ui.touch-punch.js'); if (app.env === 'test') { app.import(app.bowerDirectory + '/jquery.simulate.drag-sortable/jquery.simulate.drag-sortable.js', {type: 'test'}); } return app.toTree(); };
1
7,844
Would this break things if Ghost is run in a subdirectory? (See the sketch after this row.)
TryGhost-Admin
js
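The review comment on this row points at the hard-coded '/assets/' prefix in assetLocation above. Below is a minimal, language-agnostic sketch, written in Python, of the pattern the reviewer is hinting at: deriving the prefix from a configurable URL root so the build keeps working when Ghost is served from a subdirectory. The asset_location helper and its root parameter are hypothetical, not part of the Brocfile.

# Hypothetical helper; 'root' stands in for a configurable URL root such as
# '/blog'. The actual Brocfile hard-codes the '/assets/' prefix instead.
def asset_location(file_name, is_production, root="/"):
    if is_production:
        # mirrors the Brocfile's fileName.replace('.', '.min.') on the first dot
        name, ext = file_name.split(".", 1)
        file_name = name + ".min." + ext
    return root.rstrip("/") + "/assets/" + file_name

assert asset_location("ghost.css", True, "/blog") == "/blog/assets/ghost.min.css"
assert asset_location("vendor.js", False) == "/assets/vendor.js"

The point is only the string logic: subdirectory deployments stay intact as long as the root is prepended consistently everywhere assetLocation is used.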
@@ -24,7 +24,11 @@ class ProposalUpdateRecorder end def update_comment_format(key) - "#{bullet}*#{property_name(key)}* was changed " + former_value(key) + "to #{new_value(key)}" + if key !~ /id/ + "#{bullet}*#{property_name(key)}* was changed " + former_value(key) + "to #{new_value(key)}" + else + "#{bullet}*#{association_name(key)}* was changed " + former_association_value(key) + "to #{new_association_value(key)}" + end end def bullet
1
class ProposalUpdateRecorder
  include ValueHelper

  def initialize(client_data)
    @client_data = client_data
  end

  def run
    comment_texts = changed_attributes.map do |key, _value|
      update_comment_format(key)
    end
    if comment_texts.any?
      create_comment(comment_texts)
    end
  end

  private

  attr_accessor :client_data

  def changed_attributes
    @changed_attributes ||= client_data.changed_attributes.except(:updated_at)
  end

  def update_comment_format(key)
    "#{bullet}*#{property_name(key)}* was changed " + former_value(key) + "to #{new_value(key)}"
  end

  def bullet
    if changed_attributes.length > 1
      "- "
    else
      ""
    end
  end

  def property_name(key)
    client_data.class.human_attribute_name(key)
  end

  def former_value(key)
    value = property_to_s(client_data.send(key + "_was"))
    if value.present?
      "from #{value} "
    else
      ""
    end
  end

  def new_value(key)
    value = property_to_s(client_data[key])
    if value.empty?
      "*empty*"
    else
      value
    end
  end

  def create_comment(comment_texts)
    if client_data.approved?
      comment_texts << "_Modified post-approval_"
    end
    proposal.comments.create(
      comment_text: comment_texts.join("\n"),
      update_comment: true,
      user: client_data.modifier || client_data.requester
    )
  end

  def proposal
    client_data.proposal
  end
end
1
16,312
Maybe switch the order to make this a positive assertion instead of a negative one, i.e. check `if key =~ /id/` first (see the sketch after this row).
18F-C2
rb
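A small sketch of the reordering the reviewer suggests, ported to Python with re.search standing in for Ruby's `=~`. The two helper functions are placeholders for the corresponding methods in ProposalUpdateRecorder; their bodies are invented for illustration, not 18F code.

import re

# Placeholder helpers standing in for the association/property comment
# builders in the class above.
def association_comment(key):
    return "*" + key + " association* was changed"

def property_comment(key):
    return "*" + key + "* was changed"

def update_comment_format(key):
    # Positive assertion first, as suggested (Ruby: `if key =~ /id/`).
    if re.search(r"id", key):
        return association_comment(key)
    return property_comment(key)

assert update_comment_format("approver_id") == "*approver_id association* was changed"
assert update_comment_format("amount") == "*amount* was changed"

Leading with the positive match reads more directly and avoids the double negation of `!~` plus an else branch.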
@@ -20,6 +20,7 @@ from selenium.common.exceptions import NoSuchFrameException from selenium.common.exceptions import StaleElementReferenceException from selenium.common.exceptions import WebDriverException from selenium.common.exceptions import NoAlertPresentException +from selenium.common.exceptions import ElementNotVisibleException """ * Canned "Expected Conditions" which are generally useful within webdriver
1
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import NoAlertPresentException

"""
 * Canned "Expected Conditions" which are generally useful within webdriver
 * tests.
"""


class title_is(object):
    """An expectation for checking the title of a page.
    title is the expected title, which must be an exact match
    returns True if the title matches, false otherwise."""
    def __init__(self, title):
        self.title = title

    def __call__(self, driver):
        return self.title == driver.title


class title_contains(object):
    """ An expectation for checking that the title contains a case-sensitive
    substring. title is the fragment of title expected
    returns True when the title matches, False otherwise
    """
    def __init__(self, title):
        self.title = title

    def __call__(self, driver):
        return self.title in driver.title


class presence_of_element_located(object):
    """ An expectation for checking that an element is present on the DOM
    of a page. This does not necessarily mean that the element is visible.
    locator - used to find the element
    returns the WebElement once it is located
    """
    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        return _find_element(driver, self.locator)


class visibility_of_element_located(object):
    """ An expectation for checking that an element is present on the DOM of a
    page and visible. Visibility means that the element is not only displayed
    but also has a height and width that is greater than 0.
    locator - used to find the element
    returns the WebElement once it is located and visible
    """
    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        try:
            return _element_if_visible(_find_element(driver, self.locator))
        except StaleElementReferenceException:
            return False


class visibility_of(object):
    """ An expectation for checking that an element, known to be present on the
    DOM of a page, is visible. Visibility means that the element is not only
    displayed but also has a height and width that is greater than 0.
    element is the WebElement
    returns the (same) WebElement once it is visible
    """
    def __init__(self, element):
        self.element = element

    def __call__(self, ignored):
        return _element_if_visible(self.element)


def _element_if_visible(element, visibility=True):
    return element if element.is_displayed() == visibility else False


class presence_of_all_elements_located(object):
    """ An expectation for checking that there is at least one element present
    on a web page.
    locator is used to find the element
    returns the list of WebElements once they are located
    """
    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        return _find_elements(driver, self.locator)


class visibility_of_any_elements_located(object):
    """ An expectation for checking that there is at least one element visible
    on a web page.
    locator is used to find the element
    returns the list of WebElements once they are located
    """
    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        return [element for element in _find_elements(driver, self.locator) if _element_if_visible(element)]


class text_to_be_present_in_element(object):
    """ An expectation for checking if the given text is present in the
    specified element.
    locator, text
    """
    def __init__(self, locator, text_):
        self.locator = locator
        self.text = text_

    def __call__(self, driver):
        try:
            element_text = _find_element(driver, self.locator).text
            return self.text in element_text
        except StaleElementReferenceException:
            return False


class text_to_be_present_in_element_value(object):
    """
    An expectation for checking if the given text is present in the element's
    locator, text
    """
    def __init__(self, locator, text_):
        self.locator = locator
        self.text = text_

    def __call__(self, driver):
        try:
            element_text = _find_element(driver, self.locator).get_attribute("value")
            if element_text:
                return self.text in element_text
            else:
                return False
        except StaleElementReferenceException:
            return False


class frame_to_be_available_and_switch_to_it(object):
    """ An expectation for checking whether the given frame is available to
    switch to. If the frame is available it switches the given driver to the
    specified frame.
    """
    def __init__(self, locator):
        self.frame_locator = locator

    def __call__(self, driver):
        try:
            if isinstance(self.frame_locator, tuple):
                driver.switch_to.frame(_find_element(driver, self.frame_locator))
            else:
                driver.switch_to.frame(self.frame_locator)
            return True
        except NoSuchFrameException:
            return False


class invisibility_of_element_located(object):
    """ An Expectation for checking that an element is either invisible or not
    present on the DOM.
    locator used to find the element
    """
    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        try:
            return _element_if_visible(_find_element(driver, self.locator), False)
        except (NoSuchElementException, StaleElementReferenceException):
            # In the case of NoSuchElement, returns true because the element is
            # not present in DOM. The try block checks if the element is present
            # but is invisible.
            # In the case of StaleElementReference, returns true because stale
            # element reference implies that element is no longer visible.
            return True


class element_to_be_clickable(object):
    """ An Expectation for checking an element is visible and enabled such that
    you can click it."""
    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        element = visibility_of_element_located(self.locator)(driver)
        if element and element.is_enabled():
            return element
        else:
            return False


class staleness_of(object):
    """ Wait until an element is no longer attached to the DOM.
    element is the element to wait for.
    returns False if the element is still attached to the DOM, true otherwise.
    """
    def __init__(self, element):
        self.element = element

    def __call__(self, ignored):
        try:
            # Calling any method forces a staleness check
            self.element.is_enabled()
            return False
        except StaleElementReferenceException:
            return True


class element_to_be_selected(object):
    """ An expectation for checking the selection is selected.
    element is WebElement object
    """
    def __init__(self, element):
        self.element = element

    def __call__(self, ignored):
        return self.element.is_selected()


class element_located_to_be_selected(object):
    """An expectation for the element to be located is selected.
    locator is a tuple of (by, path)"""
    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        return _find_element(driver, self.locator).is_selected()


class element_selection_state_to_be(object):
    """ An expectation for checking if the given element is selected.
    element is WebElement object
    is_selected is a Boolean."
    """
    def __init__(self, element, is_selected):
        self.element = element
        self.is_selected = is_selected

    def __call__(self, ignored):
        return self.element.is_selected() == self.is_selected


class element_located_selection_state_to_be(object):
    """ An expectation to locate an element and check if the selection state
    specified is in that state.
    locator is a tuple of (by, path)
    is_selected is a boolean
    """
    def __init__(self, locator, is_selected):
        self.locator = locator
        self.is_selected = is_selected

    def __call__(self, driver):
        try:
            element = _find_element(driver, self.locator)
            return element.is_selected() == self.is_selected
        except StaleElementReferenceException:
            return False


class number_of_windows_to_be(object):
    """ An expectation for the number of windows to be a certain value."""
    def __init__(self, num_windows):
        self.num_windows = num_windows

    def __call__(self, driver):
        return len(driver.window_handles) == self.num_windows


class new_window_is_opened(object):
    """ An expectation that a new window will be opened and have the number of
    windows handles increase"""
    def __init__(self, current_handles):
        self.current_handles = current_handles

    def __call__(self, driver):
        return len(driver.window_handles) > len(self.current_handles)


class alert_is_present(object):
    """ Expect an alert to be present."""
    def __init__(self):
        pass

    def __call__(self, driver):
        try:
            alert = driver.switch_to.alert
            alert.text
            return alert
        except NoAlertPresentException:
            return False


def _find_element(driver, by):
    """Looks up an element. Logs and re-raises ``WebDriverException``
    if thrown."""
    try:
        return driver.find_element(*by)
    except NoSuchElementException as e:
        raise e
    except WebDriverException as e:
        raise e


def _find_elements(driver, by):
    try:
        return driver.find_elements(*by)
    except WebDriverException as e:
        raise e
1
14,271
Is this import an oversight, or was it meant for some code in the body that isn't there yet? (A hypothetical use is sketched after this row.)
SeleniumHQ-selenium
py
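The comment on this row notes that the diff imports ElementNotVisibleException without referencing it anywhere in the module body. If a condition was meant to accompany the import, it could plausibly look like the sketch below. The class element_to_be_interactable is invented for illustration; only the import, find_element, and the exception-on-click behaviour are real selenium 3 APIs.

from selenium.common.exceptions import ElementNotVisibleException

class element_to_be_interactable(object):
    """Hypothetical condition (not part of the module): resolves to the
    element once clicking it no longer raises ElementNotVisibleException."""

    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        try:
            element = driver.find_element(*self.locator)
            element.click()  # selenium 3 raises ElementNotVisibleException for hidden elements
            return element
        except ElementNotVisibleException:
            return False

It would be used like the module's other conditions, e.g. WebDriverWait(driver, 10).until(element_to_be_interactable((By.ID, 'submit'))); note that clicking inside a wait condition has side effects, so this is illustrative only.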
@@ -922,6 +922,8 @@ public class AddProductActivity extends AppCompatActivity { toast.show(); mOfflineSavedProductDao.deleteInTx(mOfflineSavedProductDao.queryBuilder().where(OfflineSavedProductDao.Properties.Barcode.eq(code)).list()); Intent intent = new Intent(); + state.setProduct(mProduct); + intent.putExtra("state", state); intent.putExtra("uploadedToServer", true); setResult(RESULT_OK, intent); finish();
1
package openfoodfacts.github.scrachx.openfood.views; import android.content.Context; import android.content.Intent; import android.content.SharedPreferences; import android.os.Build; import android.os.Bundle; import android.support.design.widget.Snackbar; import android.support.v4.app.ActivityOptionsCompat; import android.support.v4.view.ViewPager; import android.support.v7.app.ActionBar; import android.support.v7.app.AppCompatActivity; import android.util.Log; import android.view.Gravity; import android.view.MenuItem; import android.view.View; import android.widget.ImageView; import android.widget.ProgressBar; import android.widget.TextView; import android.widget.Toast; import com.afollestad.materialdialogs.MaterialDialog; import com.fasterxml.jackson.databind.JsonNode; import com.squareup.picasso.Callback; import com.squareup.picasso.Picasso; import java.io.File; import java.io.IOException; import java.util.HashMap; import java.util.Map; import javax.inject.Inject; import butterknife.BindView; import butterknife.ButterKnife; import butterknife.OnClick; import butterknife.OnPageChange; import io.reactivex.SingleObserver; import io.reactivex.android.schedulers.AndroidSchedulers; import io.reactivex.disposables.Disposable; import io.reactivex.schedulers.Schedulers; import okhttp3.MediaType; import okhttp3.RequestBody; import openfoodfacts.github.scrachx.openfood.BuildConfig; import openfoodfacts.github.scrachx.openfood.R; import openfoodfacts.github.scrachx.openfood.fragments.AddProductIngredientsFragment; import openfoodfacts.github.scrachx.openfood.fragments.AddProductNutritionFactsFragment; import openfoodfacts.github.scrachx.openfood.fragments.AddProductOverviewFragment; import openfoodfacts.github.scrachx.openfood.fragments.AddProductPhotosFragment; import openfoodfacts.github.scrachx.openfood.models.OfflineSavedProduct; import openfoodfacts.github.scrachx.openfood.models.OfflineSavedProductDao; import openfoodfacts.github.scrachx.openfood.models.Product; import openfoodfacts.github.scrachx.openfood.models.ProductImage; import openfoodfacts.github.scrachx.openfood.models.ProductImageField; import openfoodfacts.github.scrachx.openfood.models.State; import openfoodfacts.github.scrachx.openfood.models.ToUploadProduct; import openfoodfacts.github.scrachx.openfood.models.ToUploadProductDao; import openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIService; import openfoodfacts.github.scrachx.openfood.utils.Utils; import openfoodfacts.github.scrachx.openfood.views.adapters.ProductFragmentPagerAdapter; import static openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIService.PRODUCT_API_COMMENT; import static openfoodfacts.github.scrachx.openfood.utils.Utils.isExternalStorageWritable; public class AddProductActivity extends AppCompatActivity { private final Map<String, String> productDetails = new HashMap<>(); @Inject OpenFoodAPIService client; @BindView(R.id.overview_indicator) View overviewIndicator; @BindView(R.id.ingredients_indicator) View ingredientsIndicator; @BindView(R.id.nutrition_facts_indicator) View nutritionFactsIndicator; @BindView(R.id.text_nutrition_facts_indicator) TextView nutritionFactsIndicatorText; @BindView(R.id.viewpager) ViewPager viewPager; AddProductOverviewFragment addProductOverviewFragment = new AddProductOverviewFragment(); AddProductIngredientsFragment addProductIngredientsFragment = new AddProductIngredientsFragment(); AddProductNutritionFactsFragment addProductNutritionFactsFragment = new AddProductNutritionFactsFragment(); AddProductPhotosFragment 
addProductPhotosFragment = new AddProductPhotosFragment(); private Product mProduct; private ToUploadProductDao mToUploadProductDao; private OfflineSavedProductDao mOfflineSavedProductDao; private Disposable disposable; private String[] imagesFilePath = new String[3]; private OfflineSavedProduct offlineSavedProduct; private Bundle bundle = new Bundle(); private MaterialDialog dialog; private boolean image_front_uploaded; private boolean image_ingredients_uploaded; private boolean image_nutrition_facts_uploaded; private boolean edit_product; // These fields are used to compare the existing values of a product already present on the server with the product which was saved offline and is being uploaded. private String ingredientsTextOnServer; private String productNameOnServer; private String quantityOnServer; private String linkOnServer; private String ingredientsImageOnServer; public static File getCameraPicLocation(Context context) { File cacheDir = context.getCacheDir(); if (isExternalStorageWritable()) { cacheDir = context.getExternalCacheDir(); } File dir = new File(cacheDir, "EasyImage"); if (!dir.exists()) { if (dir.mkdirs()) { Log.i(AddProductActivity.class.getSimpleName(), "Directory created"); } else { Log.i(AddProductActivity.class.getSimpleName(), "Couldn't create directory"); } } return dir; } public static void clearCachedCameraPic(Context context) { File[] files = getCameraPicLocation(context).listFiles(); for (File file : files) { if (file.delete()) { Log.i(AddProductActivity.class.getSimpleName(), "Deleted cached photo"); } else { Log.i(AddProductActivity.class.getSimpleName(), "Couldn't delete cached photo"); } } } @OnPageChange(value = R.id.viewpager, callback = OnPageChange.Callback.PAGE_SELECTED) void onPageSelected(int position) { switch (position) { case 0: updateTimelineIndicator(1, 0, 0); break; case 1: updateTimelineIndicator(2, 1, 0); break; case 2: updateTimelineIndicator(2, 2, 1); break; default: updateTimelineIndicator(1, 0, 0); } } /** * This method is used to update the timeline. 
* 0 means inactive stage, 1 means active stage and 2 means completed stage * * @param overviewStage change the state of overview indicator * @param ingredientsStage change the state of ingredients indicator * @param nutritionFactsStage change the state of nutrition facts indicator */ private void updateTimelineIndicator(int overviewStage, int ingredientsStage, int nutritionFactsStage) { switch (overviewStage) { case 0: overviewIndicator.setBackgroundResource(R.drawable.stage_inactive); break; case 1: overviewIndicator.setBackgroundResource(R.drawable.stage_active); break; case 2: overviewIndicator.setBackgroundResource(R.drawable.stage_complete); break; } switch (ingredientsStage) { case 0: ingredientsIndicator.setBackgroundResource(R.drawable.stage_inactive); break; case 1: ingredientsIndicator.setBackgroundResource(R.drawable.stage_active); break; case 2: ingredientsIndicator.setBackgroundResource(R.drawable.stage_complete); break; } switch (nutritionFactsStage) { case 0: nutritionFactsIndicator.setBackgroundResource(R.drawable.stage_inactive); break; case 1: nutritionFactsIndicator.setBackgroundResource(R.drawable.stage_active); break; case 2: nutritionFactsIndicator.setBackgroundResource(R.drawable.stage_complete); break; } } @Override public void onBackPressed() { if (offlineSavedProduct != null) { checkFields(); } else { new MaterialDialog.Builder(this) .content(R.string.save_product) .positiveText(R.string.txtSave) .negativeText(R.string.txtPictureNeededDialogNo) .onPositive((dialog, which) -> checkFields()) .onNegative((dialog, which) -> super.onBackPressed()) .show(); } } @Override public boolean onOptionsItemSelected(MenuItem item) { if (item.getItemId() == android.R.id.home) { if (offlineSavedProduct != null) { checkFields(); } else { new MaterialDialog.Builder(this) .content(R.string.save_product) .positiveText(R.string.txtSave) .negativeText(R.string.txtPictureNeededDialogNo) .onPositive((dialog, which) -> checkFields()) .onNegative((dialog, which) -> finish()) .show(); } } return super.onOptionsItemSelected(item); } @Override protected void onCreate(Bundle savedInstanceState) { OFFApplication.getAppComponent().inject(this); super.onCreate(savedInstanceState); setContentView(R.layout.activity_add_product); ButterKnife.bind(this); setTitle(R.string.offline_product_addition_title); ActionBar actionBar = getSupportActionBar(); if (actionBar != null) { getSupportActionBar().setDisplayHomeAsUpEnabled(true); } mToUploadProductDao = Utils.getAppDaoSession(this).getToUploadProductDao(); mOfflineSavedProductDao = Utils.getAppDaoSession(this).getOfflineSavedProductDao(); final State state = (State) getIntent().getSerializableExtra("state"); offlineSavedProduct = (OfflineSavedProduct) getIntent().getSerializableExtra("edit_offline_product"); Product mEditProduct = (Product) getIntent().getSerializableExtra("edit_product"); if (state != null) { mProduct = state.getProduct(); // Search if the barcode already exists in the OfflineSavedProducts db offlineSavedProduct = mOfflineSavedProductDao.queryBuilder().where(OfflineSavedProductDao.Properties.Barcode.eq(mProduct.getCode())).unique(); } if (mEditProduct != null) { setTitle(R.string.edit_product_title); mProduct = mEditProduct; edit_product = true; bundle.putBoolean("edit_product", true); } else if (offlineSavedProduct != null) { bundle.putSerializable("edit_offline_product", offlineSavedProduct); // Save the already existing images in productDetails for UI imagesFilePath[0] = offlineSavedProduct.getProductDetailsMap().get("image_front"); 
imagesFilePath[1] = offlineSavedProduct.getProductDetailsMap().get("image_ingredients"); imagesFilePath[2] = offlineSavedProduct.getProductDetailsMap().get("image_nutrition_facts"); // get the status of images from productDetailsMap, whether uploaded or not String image_front_status = offlineSavedProduct.getProductDetailsMap().get("image_front_uploaded"); String image_ingredients_status = offlineSavedProduct.getProductDetailsMap().get("image_ingredients_uploaded"); String image_nutrition_facts_status = offlineSavedProduct.getProductDetailsMap().get("image_nutrition_facts_uploaded"); image_front_uploaded = image_front_status != null && image_front_status.equals("true"); image_ingredients_uploaded = image_ingredients_status != null && image_ingredients_status.equals("true"); image_nutrition_facts_uploaded = image_nutrition_facts_status != null && image_nutrition_facts_status.equals("true"); } if (state == null && offlineSavedProduct == null && mEditProduct == null) { Toast.makeText(this, R.string.error_adding_product, Toast.LENGTH_SHORT).show(); finish(); } setupViewPager(viewPager); } @Override public void onDestroy() { super.onDestroy(); if (dialog != null && dialog.isShowing()) { dialog.dismiss(); } if (disposable != null && !disposable.isDisposed()) { disposable.dispose(); } clearCachedCameraPic(this); } private void setupViewPager(ViewPager viewPager) { ProductFragmentPagerAdapter adapterResult = new ProductFragmentPagerAdapter(getSupportFragmentManager()); bundle.putSerializable("product", mProduct); addProductOverviewFragment.setArguments(bundle); addProductIngredientsFragment.setArguments(bundle); adapterResult.addFragment(addProductOverviewFragment, "Overview"); adapterResult.addFragment(addProductIngredientsFragment, "Ingredients"); if (BuildConfig.FLAVOR.equals("off") || BuildConfig.FLAVOR.equals("opff")) { addProductNutritionFactsFragment.setArguments(bundle); adapterResult.addFragment(addProductNutritionFactsFragment, "Nutrition Facts"); } else if (BuildConfig.FLAVOR.equals("obf") || BuildConfig.FLAVOR.equals("opf")) { nutritionFactsIndicatorText.setText(R.string.photos); addProductPhotosFragment.setArguments(bundle); adapterResult.addFragment(addProductPhotosFragment, "Photos"); } viewPager.setOffscreenPageLimit(2); viewPager.setAdapter(adapterResult); } private void saveProduct() { addProductOverviewFragment.getDetails(); addProductIngredientsFragment.getDetails(); if (BuildConfig.FLAVOR.equals("off") || BuildConfig.FLAVOR.equals("opff")) { addProductNutritionFactsFragment.getDetails(); } final SharedPreferences settings = getSharedPreferences("login", 0); final String login = settings.getString("user", ""); final String password = settings.getString("pass", ""); if (!login.isEmpty() && !password.isEmpty()) { productDetails.put("user_id", login); productDetails.put("password", password); } String code = productDetails.get("code"); String fields = "link,quantity,image_ingredients_url,ingredients_text_" + getProductLanguage() + ",product_name_" + getProductLanguage(); client.getExistingProductDetails(code, fields, Utils.getUserAgent(Utils.HEADER_USER_AGENT_SEARCH)) .subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(new SingleObserver<State>() { @Override public void onSubscribe(Disposable d) { MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.toastSending) .content(R.string.please_wait) .cancelable(false) .progress(true, 0); dialog = builder.build(); dialog.show(); } @Override public void 
onSuccess(State state) { dialog.dismiss(); if (state.getStatus() == 0) { // Product doesn't exist yet on the server. Add as it is. checkFrontImageUploadStatus(); } else { // Product already exists on the server. Compare values saved locally with the values existing on server. ingredientsTextOnServer = state.getProduct().getIngredientsText(getProductLanguage()); productNameOnServer = state.getProduct().getProductName(getProductLanguage()); quantityOnServer = state.getProduct().getQuantity(); linkOnServer = state.getProduct().getManufactureUrl(); ingredientsImageOnServer = state.getProduct().getImageIngredientsUrl(); checkForExistingIngredients(); } } @Override public void onError(Throwable e) { dialog.dismiss(); saveProductOffline(); } }); } /** * Checks if ingredients already exist on server and compare it with the ingredients stored locally. */ private void checkForExistingIngredients() { String lc = productDetails.get("lang") != null ? productDetails.get("lang") : "en"; if (ingredientsTextOnServer != null && !ingredientsTextOnServer.isEmpty() && productDetails.get("ingredients_text" + "_" + lc) != null) { MaterialDialog.Builder builder = new MaterialDialog.Builder(this) .title(R.string.ingredients_overwrite) .customView(R.layout.dialog_compare_ingredients, true) .positiveText(R.string.choose_mine) .negativeText(R.string.keep_previous_version) .onPositive((dialog, which) -> { dialog.dismiss(); checkForExistingProductName(); }) .onNegative((dialog, which) -> { dialog.dismiss(); productDetails.remove("ingredients_text" + "_" + lc); productDetails.remove("image_ingredients"); imagesFilePath[1] = null; checkForExistingProductName(); }); MaterialDialog dialog = builder.build(); dialog.show(); View view = dialog.getCustomView(); if (view != null) { ImageView imageLocal = view.findViewById(R.id.image_ingredients_local); ImageView imageServer = view.findViewById(R.id.image_ingredients_server); TextView ingredientsLocal = view.findViewById(R.id.txt_ingredients_local); TextView ingredientsServer = view.findViewById(R.id.txt_ingredients_server); ProgressBar imageProgressServer = view.findViewById(R.id.image_progress_server); ProgressBar imageProgressLocal = view.findViewById(R.id.image_progress_local); ingredientsLocal.setText(productDetails.get("ingredients_text" + "_" + lc)); ingredientsServer.setText(ingredientsTextOnServer); Picasso.with(this) .load(ingredientsImageOnServer) .error(R.drawable.placeholder_thumb) .into(imageServer, new Callback() { @Override public void onSuccess() { imageProgressServer.setVisibility(View.GONE); // Add option to zoom image. imageServer.setOnClickListener(v -> { Intent intent = new Intent(AddProductActivity.this, FullScreenImage.class); Bundle bundle = new Bundle(); bundle.putString("imageurl", ingredientsImageOnServer); intent.putExtras(bundle); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) { ActivityOptionsCompat options = ActivityOptionsCompat. makeSceneTransitionAnimation(AddProductActivity.this, imageServer, getString(R.string.product_transition)); startActivity(intent, options.toBundle()); } else { startActivity(intent); } }); } @Override public void onError() { imageProgressServer.setVisibility(View.GONE); } }); Picasso.with(this) .load("file://" + imagesFilePath[1]) .error(R.drawable.placeholder_thumb) .into(imageLocal, new Callback() { @Override public void onSuccess() { imageProgressLocal.setVisibility(View.GONE); // Add option to zoom image. 
imageLocal.setOnClickListener(v -> { Intent intent = new Intent(AddProductActivity.this, FullScreenImage.class); Bundle bundle = new Bundle(); bundle.putString("imageurl", "file://" + imagesFilePath[1]); intent.putExtras(bundle); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) { ActivityOptionsCompat options = ActivityOptionsCompat. makeSceneTransitionAnimation(AddProductActivity.this, imageLocal, getString(R.string.product_transition)); startActivity(intent, options.toBundle()); } else { startActivity(intent); } }); } @Override public void onError() { imageProgressLocal.setVisibility(View.GONE); } }); } } else { checkForExistingProductName(); } } /** * Checks if product name already exist on server and compare it with the product name stored locally. */ private void checkForExistingProductName() { String lc = productDetails.get("lang") != null ? productDetails.get("lang") : "en"; if (productNameOnServer != null && !productNameOnServer.isEmpty() && productDetails.get("product_name" + "_" + lc) != null) { new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.product_name_overwrite) .content(getString(R.string.yours) + productDetails.get("product_name" + "_" + lc) + "\n" + getString(R.string.currently_on, getString(R.string.app_name_long)) + productNameOnServer) .positiveText(R.string.choose_mine) .negativeText(R.string.keep_previous_version) .onPositive((dialog, which) -> { dialog.dismiss(); checkForExistingQuantity(); }) .onNegative((dialog, which) -> { dialog.dismiss(); productDetails.remove("product_name" + "_" + lc); checkForExistingQuantity(); }) .build() .show(); } else { checkForExistingQuantity(); } } /** * Checks if quantity already exist on server and compare it with the quantity stored locally. */ private void checkForExistingQuantity() { if (quantityOnServer != null && !quantityOnServer.isEmpty() && productDetails.get("quantity") != null) { new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.quantity_overwrite) .content(getString(R.string.yours) + productDetails.get("quantity") + "\n" + getString(R.string.currently_on, getString(R.string.app_name_long)) + quantityOnServer) .positiveText(R.string.choose_mine) .negativeText(R.string.keep_previous_version) .onPositive((dialog, which) -> { dialog.dismiss(); checkForExistingLink(); }) .onNegative((dialog, which) -> { dialog.dismiss(); productDetails.remove("quantity"); checkForExistingLink(); }) .build() .show(); } else { checkForExistingLink(); } } /** * Checks if link already exist on server and compare it with the link stored locally. */ private void checkForExistingLink() { if (linkOnServer != null && !linkOnServer.isEmpty() && productDetails.get("link") != null) { new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.link_overwrite) .content(getString(R.string.yours) + productDetails.get("link") + "\n" + getString(R.string.currently_on, getString(R.string.app_name_long)) + linkOnServer) .positiveText(R.string.choose_mine) .negativeText(R.string.keep_previous_version) .onPositive((dialog, which) -> { dialog.dismiss(); checkFrontImageUploadStatus(); }) .onNegative((dialog, which) -> { dialog.dismiss(); productDetails.remove("link"); checkFrontImageUploadStatus(); }) .build() .show(); } else { checkFrontImageUploadStatus(); } } /** * Upload and set the front image if it is not uploaded already. 
*/ private void checkFrontImageUploadStatus() { String code = productDetails.get("code"); if (!image_front_uploaded && imagesFilePath[0] != null && !imagesFilePath[0].isEmpty()) { // front image is not yet uploaded. File photoFile = new File(imagesFilePath[0]); Map<String, RequestBody> imgMap = new HashMap<>(); RequestBody barcode = RequestBody.create(MediaType.parse("text/plain"), code); RequestBody imageField = RequestBody.create(MediaType.parse("text/plain"), ProductImageField.FRONT.toString() + '_' + getProductLanguage()); RequestBody image = RequestBody.create(MediaType.parse("image/*"), photoFile); imgMap.put("code", barcode); imgMap.put("imagefield", imageField); imgMap.put("imgupload_front\"; filename=\"front_" + getProductLanguage() + ".png\"", image); // Attribute the upload to the connected user final SharedPreferences settings = getSharedPreferences("login", 0); final String login = settings.getString("user", ""); final String password = settings.getString("pass", ""); if (!login.isEmpty() && !password.isEmpty()) { imgMap.put("user_id", RequestBody.create(MediaType.parse("text/plain"), login)); imgMap.put("password", RequestBody.create(MediaType.parse("text/plain"), password)); } client.saveImageSingle(imgMap) .subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(new SingleObserver<JsonNode>() { @Override public void onSubscribe(Disposable d) { MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.uploading_front_image) .content(R.string.please_wait) .cancelable(false) .progress(true, 0); dialog = builder.build(); dialog.show(); } @Override public void onSuccess(JsonNode jsonNode) { String status = jsonNode.get("status").asText(); if (status.equals("status not ok")) { dialog.dismiss(); String error = jsonNode.get("error").asText(); if (error.equals("This picture has already been sent.")) { image_front_uploaded = true; checkIngredientsImageUploadStatus(); } } else { image_front_uploaded = true; String imagefield = jsonNode.get("imagefield").asText(); String imgid = jsonNode.get("image").get("imgid").asText(); Map<String, String> queryMap = new HashMap<>(); queryMap.put("imgid", imgid); queryMap.put("id", imagefield); client.editImageSingle(code, queryMap) .subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(new SingleObserver<JsonNode>() { @Override public void onSubscribe(Disposable d) { } @Override public void onSuccess(JsonNode jsonNode) { dialog.dismiss(); checkIngredientsImageUploadStatus(); } @Override public void onError(Throwable e) { dialog.dismiss(); if (!edit_product) { saveProductOffline(); } else { MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.device_offline_dialog_title) .positiveText(R.string.txt_try_again) .negativeText(R.string.dialog_cancel) .onPositive((dialog, which) -> checkFrontImageUploadStatus()) .onNegative((dialog, which) -> dialog.dismiss()); dialog = builder.build(); dialog.show(); } } }); } } @Override public void onError(Throwable e) { dialog.dismiss(); if (!edit_product) { saveProductOffline(); } else { MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.device_offline_dialog_title) .positiveText(R.string.txt_try_again) .negativeText(R.string.dialog_cancel) .onPositive((dialog, which) -> checkFrontImageUploadStatus()) .onNegative((dialog, which) -> dialog.dismiss()); dialog = builder.build(); dialog.show(); } } }); } else { // front image 
is uploaded, check the status of ingredients image. checkIngredientsImageUploadStatus(); } } /** * Upload and set the ingredients image if it is not uploaded already. */ private void checkIngredientsImageUploadStatus() { String code = productDetails.get("code"); if (!image_ingredients_uploaded && imagesFilePath[1] != null && !imagesFilePath[1].isEmpty()) { // ingredients image is not yet uploaded. File photoFile = new File(imagesFilePath[1]); Map<String, RequestBody> imgMap = new HashMap<>(); RequestBody barcode = RequestBody.create(MediaType.parse("text/plain"), code); RequestBody imageField = RequestBody.create(MediaType.parse("text/plain"), ProductImageField.INGREDIENTS.toString() + '_' + getProductLanguage()); RequestBody image = RequestBody.create(MediaType.parse("image/*"), photoFile); imgMap.put("code", barcode); imgMap.put("imagefield", imageField); imgMap.put("imgupload_ingredients\"; filename=\"ingredients_" + getProductLanguage() + ".png\"", image); // Attribute the upload to the connected user final SharedPreferences settings = getSharedPreferences("login", 0); final String login = settings.getString("user", ""); final String password = settings.getString("pass", ""); if (!login.isEmpty() && !password.isEmpty()) { imgMap.put("user_id", RequestBody.create(MediaType.parse("text/plain"), login)); imgMap.put("password", RequestBody.create(MediaType.parse("text/plain"), password)); } client.saveImageSingle(imgMap) .subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(new SingleObserver<JsonNode>() { @Override public void onSubscribe(Disposable d) { MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.uploading_ingredients_image) .content(R.string.please_wait) .cancelable(false) .progress(true, 0); dialog = builder.build(); dialog.show(); } @Override public void onSuccess(JsonNode jsonNode) { String status = jsonNode.get("status").asText(); if (status.equals("status not ok")) { dialog.dismiss(); String error = jsonNode.get("error").asText(); if (error.equals("This picture has already been sent.")) { image_ingredients_uploaded = true; checkNutritionFactsImageUploadStatus(); } } else { image_ingredients_uploaded = true; String imagefield = jsonNode.get("imagefield").asText(); String imgid = jsonNode.get("image").get("imgid").asText(); Map<String, String> queryMap = new HashMap<>(); queryMap.put("imgid", imgid); queryMap.put("id", imagefield); client.editImageSingle(code, queryMap) .subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(new SingleObserver<JsonNode>() { @Override public void onSubscribe(Disposable d) { } @Override public void onSuccess(JsonNode jsonNode) { dialog.dismiss(); checkNutritionFactsImageUploadStatus(); } @Override public void onError(Throwable e) { dialog.dismiss(); if (!edit_product) { saveProductOffline(); } else { MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.device_offline_dialog_title) .positiveText(R.string.txt_try_again) .negativeText(R.string.dialog_cancel) .onPositive((dialog, which) -> checkFrontImageUploadStatus()) .onNegative((dialog, which) -> dialog.dismiss()); dialog = builder.build(); dialog.show(); } } }); } } @Override public void onError(Throwable e) { dialog.dismiss(); if (!edit_product) { saveProductOffline(); } else { MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.device_offline_dialog_title) 
.positiveText(R.string.txt_try_again) .negativeText(R.string.dialog_cancel) .onPositive((dialog, which) -> checkFrontImageUploadStatus()) .onNegative((dialog, which) -> dialog.dismiss()); dialog = builder.build(); dialog.show(); } } }); } else { // ingredients image is uploaded, check the status of nutrition facts image. checkNutritionFactsImageUploadStatus(); } } /** * Upload and set the nutrition facts image if it is not uploaded already. */ private void checkNutritionFactsImageUploadStatus() { String code = productDetails.get("code"); if (!image_nutrition_facts_uploaded && imagesFilePath[2] != null && !imagesFilePath[2].isEmpty()) { // nutrition facts image is not yet uploaded. File photoFile = new File(imagesFilePath[2]); Map<String, RequestBody> imgMap = new HashMap<>(); RequestBody barcode = RequestBody.create(MediaType.parse("text/plain"), code); RequestBody imageField = RequestBody.create(MediaType.parse("text/plain"), ProductImageField.NUTRITION.toString() + '_' + getProductLanguage()); RequestBody image = RequestBody.create(MediaType.parse("image/*"), photoFile); imgMap.put("code", barcode); imgMap.put("imagefield", imageField); imgMap.put("imgupload_nutrition\"; filename=\"nutrition_" + getProductLanguage() + ".png\"", image); // Attribute the upload to the connected user final SharedPreferences settings = getSharedPreferences("login", 0); final String login = settings.getString("user", ""); final String password = settings.getString("pass", ""); if (!login.isEmpty() && !password.isEmpty()) { imgMap.put("user_id", RequestBody.create(MediaType.parse("text/plain"), login)); imgMap.put("password", RequestBody.create(MediaType.parse("text/plain"), password)); } client.saveImageSingle(imgMap) .subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(new SingleObserver<JsonNode>() { @Override public void onSubscribe(Disposable d) { MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.uploading_nutrition_image) .content(R.string.please_wait) .cancelable(false) .progress(true, 0); dialog = builder.build(); dialog.show(); } @Override public void onSuccess(JsonNode jsonNode) { String status = jsonNode.get("status").asText(); if (status.equals("status not ok")) { dialog.dismiss(); String error = jsonNode.get("error").asText(); if (error.equals("This picture has already been sent.")) { image_nutrition_facts_uploaded = true; addProductToServer(); } } else { image_nutrition_facts_uploaded = true; String imagefield = jsonNode.get("imagefield").asText(); String imgid = jsonNode.get("image").get("imgid").asText(); Map<String, String> queryMap = new HashMap<>(); queryMap.put("imgid", imgid); queryMap.put("id", imagefield); client.editImageSingle(code, queryMap) .subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(new SingleObserver<JsonNode>() { @Override public void onSubscribe(Disposable d) { } @Override public void onSuccess(JsonNode jsonNode) { dialog.dismiss(); addProductToServer(); } @Override public void onError(Throwable e) { dialog.dismiss(); if (!edit_product) { saveProductOffline(); } else { MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.device_offline_dialog_title) .positiveText(R.string.txt_try_again) .negativeText(R.string.dialog_cancel) .onPositive((dialog, which) -> checkFrontImageUploadStatus()) .onNegative((dialog, which) -> dialog.dismiss()); dialog = builder.build(); dialog.show(); } } }); } } @Override public void 
onError(Throwable e) { dialog.dismiss(); if (!edit_product) { saveProductOffline(); } else { MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.device_offline_dialog_title) .positiveText(R.string.txt_try_again) .negativeText(R.string.dialog_cancel) .onPositive((dialog, which) -> checkFrontImageUploadStatus()) .onNegative((dialog, which) -> dialog.dismiss()); dialog = builder.build(); dialog.show(); } } }); } else { // nutrition facts image is uploaded, upload the product to server. addProductToServer(); } } /** * Performs network call and uploads the product to the server or stores it locally if there is no internet connection. */ private void addProductToServer() { String code = productDetails.get("code"); for (Map.Entry<String, String> entry : productDetails.entrySet()) { String key = entry.getKey(); String value = entry.getValue(); Log.d(key, value); } client.saveProductSingle(code, productDetails, PRODUCT_API_COMMENT + " " + Utils.getVersionName(this)) .observeOn(AndroidSchedulers.mainThread()) .subscribe(new SingleObserver<State>() { @Override public void onSubscribe(Disposable d) { disposable = d; MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.toastSending) .content(R.string.please_wait) .progress(true, 0) .cancelable(false); dialog = builder.build(); dialog.show(); } @Override public void onSuccess(State state) { dialog.dismiss(); Toast toast = Toast.makeText(OFFApplication.getInstance(), R.string.product_uploaded_successfully, Toast.LENGTH_LONG); toast.setGravity(Gravity.CENTER, 0, 0); View view = toast.getView(); TextView textView = view.findViewById(android.R.id.message); textView.setTextSize(18); view.setBackgroundColor(getResources().getColor(R.color.green_500)); toast.setDuration(Toast.LENGTH_SHORT); toast.show(); mOfflineSavedProductDao.deleteInTx(mOfflineSavedProductDao.queryBuilder().where(OfflineSavedProductDao.Properties.Barcode.eq(code)).list()); Intent intent = new Intent(); intent.putExtra("uploadedToServer", true); setResult(RESULT_OK, intent); finish(); } @Override public void onError(Throwable e) { dialog.dismiss(); Log.e(AddProductActivity.class.getSimpleName(), e.getMessage()); // A network error happened if (e instanceof IOException) { if (!edit_product) { saveProductOffline(); } else { MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.device_offline_dialog_title) .positiveText(R.string.txt_try_again) .negativeText(R.string.dialog_cancel) .onPositive((dialog, which) -> checkFrontImageUploadStatus()) .onNegative((dialog, which) -> dialog.dismiss()); dialog = builder.build(); dialog.show(); } } // Not a network error else { if (!edit_product) { Toast.makeText(AddProductActivity.this, e.getMessage(), Toast.LENGTH_SHORT).show(); saveProductOffline(); } else { MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this) .title(R.string.error_adding_product) .positiveText(R.string.txt_try_again) .negativeText(R.string.dialog_cancel) .onPositive((dialog, which) -> checkFrontImageUploadStatus()) .onNegative((dialog, which) -> dialog.dismiss()); dialog = builder.build(); dialog.show(); } } } }); } /** * save the current product in the offline db */ private void saveProductOffline() { // Add the images to the productDetails to display them in UI later. 
productDetails.put("image_front", imagesFilePath[0]); productDetails.put("image_ingredients", imagesFilePath[1]); productDetails.put("image_nutrition_facts", imagesFilePath[2]); // Add the status of images to the productDetails, whether uploaded or not if (image_front_uploaded) { productDetails.put("image_front_uploaded", "true"); } if (image_ingredients_uploaded) { productDetails.put("image_ingredients_uploaded", "true"); } if (image_nutrition_facts_uploaded) { productDetails.put("image_nutrition_facts_uploaded", "true"); } OfflineSavedProduct offlineSavedProduct = new OfflineSavedProduct(); offlineSavedProduct.setBarcode(productDetails.get("code")); offlineSavedProduct.setProductDetailsMap(productDetails); mOfflineSavedProductDao.insertOrReplace(offlineSavedProduct); Toast.makeText(OFFApplication.getInstance(), R.string.txtDialogsContentInfoSave, Toast.LENGTH_LONG).show(); Intent intent = new Intent(); intent.putExtra("uploadedToServer", false); setResult(RESULT_OK, intent); finish(); } public void proceed() { switch (viewPager.getCurrentItem()) { case 0: viewPager.setCurrentItem(1, true); break; case 1: viewPager.setCurrentItem(2, true); break; case 2: checkFields(); break; } } private void checkFields() { if (!edit_product) { if (addProductOverviewFragment.areRequiredFieldsEmpty()) { viewPager.setCurrentItem(0, true); } else if ((BuildConfig.FLAVOR.equals("off") || BuildConfig.FLAVOR.equals("opff")) && addProductNutritionFactsFragment.nutritionCheckFailed()) { viewPager.setCurrentItem(2, true); } else { saveProduct(); } } else { // edit mode, therefore do not check whether front image is empty or not however do check the nutrition facts values. if ((BuildConfig.FLAVOR.equals("off") || BuildConfig.FLAVOR.equals("opff")) && addProductNutritionFactsFragment.nutritionCheckFailed()) { viewPager.setCurrentItem(2, true); } else { saveEditedProduct(); } } } private void saveEditedProduct() { addProductOverviewFragment.getAllDetails(); addProductIngredientsFragment.getAllDetails(); if (BuildConfig.FLAVOR.equals("off") || BuildConfig.FLAVOR.equals("opff")) { addProductNutritionFactsFragment.getAllDetails(); } final SharedPreferences settings = getSharedPreferences("login", 0); final String login = settings.getString("user", ""); final String password = settings.getString("pass", ""); if (!login.isEmpty() && !password.isEmpty()) { productDetails.put("user_id", login); productDetails.put("password", password); } checkFrontImageUploadStatus(); } @OnClick(R.id.overview_indicator) void switchToOverviewPage() { viewPager.setCurrentItem(0, true); } @OnClick(R.id.ingredients_indicator) void switchToIngredientsPage() { viewPager.setCurrentItem(1, true); } @OnClick(R.id.nutrition_facts_indicator) void switchToNutritionFactsPage() { viewPager.setCurrentItem(2, true); } public void addToMap(String key, String value) { productDetails.put(key, value); } public void addToPhotoMap(ProductImage image, int position) { String lang = getProductLanguage(); boolean ocr = false; Map<String, RequestBody> imgMap = new HashMap<>(); imgMap.put("code", image.getCode()); RequestBody imageField = RequestBody.create(MediaType.parse("text/plain"), image.getImageField().toString() + '_' + lang); imgMap.put("imagefield", imageField); if (image.getImguploadFront() != null) { imagesFilePath[0] = image.getFilePath(); imgMap.put("imgupload_front\"; filename=\"front_" + lang + ".png\"", image.getImguploadFront()); } if (image.getImguploadIngredients() != null) { imgMap.put("imgupload_ingredients\"; filename=\"ingredients_" + lang + 
".png\"", image.getImguploadIngredients()); ocr = true; imagesFilePath[1] = image.getFilePath(); } if (image.getImguploadNutrition() != null) { imgMap.put("imgupload_nutrition\"; filename=\"nutrition_" + lang + ".png\"", image.getImguploadNutrition()); imagesFilePath[2] = image.getFilePath(); } if (image.getImguploadOther() != null) imgMap.put("imgupload_other\"; filename=\"other_" + lang + ".png\"", image.getImguploadOther()); // Attribute the upload to the connected user final SharedPreferences settings = getSharedPreferences("login", 0); final String login = settings.getString("user", ""); final String password = settings.getString("pass", ""); if (!login.isEmpty() && !password.isEmpty()) { imgMap.put("user_id", RequestBody.create(MediaType.parse("text/plain"), login)); imgMap.put("password", RequestBody.create(MediaType.parse("text/plain"), password)); } savePhoto(imgMap, image, position, ocr); } private void savePhoto(Map<String, RequestBody> imgMap, ProductImage image, int position, boolean ocr) { client.saveImageSingle(imgMap) .observeOn(AndroidSchedulers.mainThread()) .doOnSubscribe(disposable -> showImageProgress(position)) .subscribe(new SingleObserver<JsonNode>() { @Override public void onSubscribe(Disposable d) { disposable = d; } @Override public void onSuccess(JsonNode jsonNode) { String status = jsonNode.get("status").asText(); if (status.equals("status not ok")) { String error = jsonNode.get("error").asText(); if (error.equals("This picture has already been sent.") && ocr) { hideImageProgress(position, false, getString(R.string.image_uploaded_successfully)); performOCR(image.getBarcode(), "ingredients_" + getProductLanguage()); } else { hideImageProgress(position, true, error); } } else { if (image.getImageField() == ProductImageField.FRONT) { image_front_uploaded = true; } else if (image.getImageField() == ProductImageField.INGREDIENTS) { image_ingredients_uploaded = true; } else if (image.getImageField() == ProductImageField.NUTRITION) { image_nutrition_facts_uploaded = true; } hideImageProgress(position, false, getString(R.string.image_uploaded_successfully)); String imagefield = jsonNode.get("imagefield").asText(); String imgid = jsonNode.get("image").get("imgid").asText(); if (position != 3 && position != 4) { // Not OTHER image setPhoto(image, imagefield, imgid, ocr); } } } @Override public void onError(Throwable e) { // A network error happened if (e instanceof IOException) { hideImageProgress(position, false, getString(R.string.no_internet_connection)); Log.e(AddProductActivity.class.getSimpleName(), e.getMessage()); if (image.getImageField() == ProductImageField.OTHER) { ToUploadProduct product = new ToUploadProduct(image.getBarcode(), image.getFilePath(), image.getImageField().toString()); mToUploadProductDao.insertOrReplace(product); } } else { hideImageProgress(position, true, e.getMessage()); Log.i(this.getClass().getSimpleName(), e.getMessage()); Toast.makeText(OFFApplication.getInstance(), e.getMessage(), Toast.LENGTH_SHORT).show(); } } }); } private void setPhoto(ProductImage image, String imagefield, String imgid, boolean ocr) { Map<String, String> queryMap = new HashMap<>(); queryMap.put("imgid", imgid); queryMap.put("id", imagefield); client.editImageSingle(image.getBarcode(), queryMap) .observeOn(AndroidSchedulers.mainThread()) .subscribe(new SingleObserver<JsonNode>() { @Override public void onSubscribe(Disposable d) { } @Override public void onSuccess(JsonNode jsonNode) { String status = jsonNode.get("status").asText(); if (status.equals("status ok")) 
{ if (ocr) { performOCR(image.getBarcode(), imagefield); } } } @Override public void onError(Throwable e) { if (e instanceof IOException) { if (ocr) { View view = findViewById(R.id.coordinator_layout); Snackbar.make(view, R.string.no_internet_unable_to_extract_ingredients, Snackbar.LENGTH_INDEFINITE) .setAction(R.string.txt_try_again, v -> setPhoto(image, imagefield, imgid, true)).show(); } } else { Log.i(this.getClass().getSimpleName(), e.getMessage()); Toast.makeText(OFFApplication.getInstance(), e.getMessage(), Toast.LENGTH_SHORT).show(); } } }); } public void performOCR(String code, String imageField) { client.getIngredients(code, imageField) .observeOn(AndroidSchedulers.mainThread()) .doOnSubscribe(disposable -> addProductIngredientsFragment.showOCRProgress()) .subscribe(new SingleObserver<JsonNode>() { @Override public void onSubscribe(Disposable d) { } @Override public void onSuccess(JsonNode jsonNode) { addProductIngredientsFragment.hideOCRProgress(); String status = jsonNode.get("status").toString(); if (status.equals("0")) { String ocrResult = jsonNode.get("ingredients_text_from_image").asText(); addProductIngredientsFragment.setIngredients(status, ocrResult); } else { addProductIngredientsFragment.setIngredients(status, null); } } @Override public void onError(Throwable e) { addProductIngredientsFragment.hideOCRProgress(); if (e instanceof IOException) { View view = findViewById(R.id.coordinator_layout); Snackbar.make(view, R.string.no_internet_unable_to_extract_ingredients, Snackbar.LENGTH_INDEFINITE) .setAction(R.string.txt_try_again, v -> performOCR(code, imageField)).show(); } else { Log.i(this.getClass().getSimpleName(), e.getMessage()); Toast.makeText(AddProductActivity.this, e.getMessage(), Toast.LENGTH_SHORT).show(); } } }); } private void hideImageProgress(int position, boolean errorUploading, String message) { switch (position) { case 0: addProductOverviewFragment.hideImageProgress(errorUploading, message); break; case 1: addProductIngredientsFragment.hideImageProgress(errorUploading, message); break; case 2: addProductNutritionFactsFragment.hideImageProgress(errorUploading, message); break; case 3: addProductOverviewFragment.hideOtherImageProgress(errorUploading, message); break; case 4: addProductPhotosFragment.hideImageProgress(errorUploading, message); } } private void showImageProgress(int position) { switch (position) { case 0: addProductOverviewFragment.showImageProgress(); break; case 1: addProductIngredientsFragment.showImageProgress(); break; case 2: addProductNutritionFactsFragment.showImageProgress(); break; case 3: addProductOverviewFragment.showOtherImageProgress(); break; case 4: addProductPhotosFragment.showImageProgress(); } } public String getProductLanguage() { return productDetails.get("lang"); } public void setIngredients(String status, String ingredients) { addProductIngredientsFragment.setIngredients(status, ingredients); } }
1
66,410
What is the reason for adding this? I couldn't find where it is retrieved back on the receiving side.
openfoodfacts-openfoodfacts-androidapp
java
@@ -62,6 +62,10 @@ static void setup_globals(mrb_state *mrb) h2o_mruby_eval_expr(mrb, "$LOAD_PATH << \"#{$H2O_ROOT}/share/h2o/mruby\""); h2o_mruby_assert(mrb); + + /* require core modules and include built-in libraries */ + h2o_mruby_eval_expr(mrb, "require \"preloads.rb\""); + h2o_mruby_assert(mrb); } mrb_value h2o_mruby_to_str(mrb_state *mrb, mrb_value v)
1
/* * Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Ryosuke Matsumoto, * Masayoshi Takahashi * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <errno.h> #include <inttypes.h> #include <stdio.h> #include <stdlib.h> #include <mruby.h> #include <mruby/proc.h> #include <mruby/array.h> #include <mruby/class.h> #include <mruby/compile.h> #include <mruby/error.h> #include <mruby/hash.h> #include <mruby/string.h> #include <mruby/throw.h> #include <mruby/variable.h> #include <mruby_input_stream.h> #include "h2o.h" #include "h2o/mruby_.h" #define STATUS_FALLTHRU 399 #define FALLTHRU_SET_PREFIX "x-fallthru-set-" #define FREEZE_STRING(v) RSTR_SET_FROZEN_FLAG(mrb_str_ptr(v)) __thread h2o_mruby_generator_t *h2o_mruby_current_generator = NULL; void h2o_mruby__assert_failed(mrb_state *mrb, const char *file, int line) { mrb_value obj = mrb_funcall(mrb, mrb_obj_value(mrb->exc), "inspect", 0); struct RString *error = mrb_str_ptr(obj); fprintf(stderr, "unexpected ruby error at file: \"%s\", line %d: %s", file, line, error->as.heap.ptr); abort(); } static void setup_globals(mrb_state *mrb) { const char *root = getenv("H2O_ROOT"); if (root == NULL) root = H2O_TO_STR(H2O_ROOT); mrb_gv_set(mrb, mrb_intern_lit(mrb, "$H2O_ROOT"), mrb_str_new(mrb, root, strlen(root))); h2o_mruby_eval_expr(mrb, "$LOAD_PATH << \"#{$H2O_ROOT}/share/h2o/mruby\""); h2o_mruby_assert(mrb); } mrb_value h2o_mruby_to_str(mrb_state *mrb, mrb_value v) { if (!mrb_string_p(v)) H2O_MRUBY_EXEC_GUARD({ v = mrb_str_to_str(mrb, v); }); return v; } mrb_value h2o_mruby_eval_expr(mrb_state *mrb, const char *expr) { return mrb_funcall(mrb, mrb_top_self(mrb), "eval", 1, mrb_str_new_cstr(mrb, expr)); } void h2o_mruby_define_callback(mrb_state *mrb, const char *name, int id) { char buf[1024]; sprintf(buf, "module Kernel\n" " def %s(*args)\n" " ret = Fiber.yield([\n" " %d,\n" " _h2o_create_resumer(),\n" " args,\n" " ])\n" " if ret.kind_of? 
Exception\n" " raise ret\n" " end\n" " ret\n" " end\n" "end", name, id); h2o_mruby_eval_expr(mrb, buf); if (mrb->exc != NULL) { fprintf(stderr, "failed to define mruby function: %s\n", name); h2o_mruby_assert(mrb); } } mrb_value h2o_mruby_create_data_instance(mrb_state *mrb, mrb_value class_obj, void *ptr, const mrb_data_type *type) { struct RClass *klass = mrb_class_ptr(class_obj); struct RData *data = mrb_data_object_alloc(mrb, klass, ptr, type); return mrb_obj_value(data); } mrb_value h2o_mruby_compile_code(mrb_state *mrb, h2o_mruby_config_vars_t *config, char *errbuf) { mrbc_context *cxt; struct mrb_parser_state *parser; struct RProc *proc = NULL; mrb_value result = mrb_nil_value(); setup_globals(mrb); /* parse */ if ((cxt = mrbc_context_new(mrb)) == NULL) { fprintf(stderr, "%s: no memory\n", H2O_MRUBY_MODULE_NAME); abort(); } if (config->path != NULL) mrbc_filename(mrb, cxt, config->path); cxt->capture_errors = 1; cxt->lineno = config->lineno; if ((parser = mrb_parse_nstring(mrb, config->source.base, (int)config->source.len, cxt)) == NULL) { fprintf(stderr, "%s: no memory\n", H2O_MRUBY_MODULE_NAME); abort(); } /* return erro if errbuf is supplied, or abort */ if (parser->nerr != 0) { if (errbuf == NULL) { fprintf(stderr, "%s: internal error (unexpected state)\n", H2O_MRUBY_MODULE_NAME); abort(); } snprintf(errbuf, 256, "line %d:%s", parser->error_buffer[0].lineno, parser->error_buffer[0].message); strcat(errbuf, "\n\n"); if (h2o_str_at_position(errbuf + strlen(errbuf), config->source.base, config->source.len, parser->error_buffer[0].lineno - config->lineno + 1, parser->error_buffer[0].column) != 0) { /* remove trailing "\n\n" in case we failed to append the source code at the error location */ errbuf[strlen(errbuf) - 2] = '\0'; } goto Exit; } /* generate code */ if ((proc = mrb_generate_code(mrb, parser)) == NULL) { fprintf(stderr, "%s: internal error (mrb_generate_code failed)\n", H2O_MRUBY_MODULE_NAME); abort(); } result = mrb_run(mrb, proc, mrb_top_self(mrb)); if (mrb->exc != NULL) { mrb_value obj = mrb_funcall(mrb, mrb_obj_value(mrb->exc), "inspect", 0); struct RString *error = mrb_str_ptr(obj); snprintf(errbuf, 256, "%s", error->as.heap.ptr); mrb->exc = 0; result = mrb_nil_value(); goto Exit; } else if (mrb_nil_p(result)) { snprintf(errbuf, 256, "returned value is not callable"); goto Exit; } Exit: mrb_parser_free(parser); mrbc_context_free(mrb, cxt); return result; } static h2o_iovec_t convert_header_name_to_env(h2o_mem_pool_t *pool, const char *name, size_t len) { #define KEY_PREFIX "HTTP_" #define KEY_PREFIX_LEN (sizeof(KEY_PREFIX) - 1) h2o_iovec_t ret; ret.len = len + KEY_PREFIX_LEN; ret.base = h2o_mem_alloc_pool(pool, ret.len); memcpy(ret.base, KEY_PREFIX, KEY_PREFIX_LEN); char *d = ret.base + KEY_PREFIX_LEN; for (; len != 0; ++name, --len) *d++ = *name == '-' ? 
'_' : h2o_toupper(*name); return ret; #undef KEY_PREFIX #undef KEY_PREFIX_LEN } static mrb_value build_constants(mrb_state *mrb, const char *server_name, size_t server_name_len) { mrb_value ary = mrb_ary_new_capa(mrb, H2O_MRUBY_NUM_CONSTANTS); mrb_int i; int gc_arena = mrb_gc_arena_save(mrb); { h2o_mem_pool_t pool; h2o_mem_init_pool(&pool); for (i = 0; i != H2O_MAX_TOKENS; ++i) { const h2o_token_t *token = h2o__tokens + i; mrb_value lit = mrb_nil_value(); if (token == H2O_TOKEN_CONTENT_TYPE) { lit = mrb_str_new_lit(mrb, "CONTENT_TYPE"); } else if (token->buf.len != 0) { h2o_iovec_t n = convert_header_name_to_env(&pool, token->buf.base, token->buf.len); lit = mrb_str_new(mrb, n.base, n.len); } if (mrb_string_p(lit)) { FREEZE_STRING(lit); mrb_ary_set(mrb, ary, i, lit); } } h2o_mem_clear_pool(&pool); } #define SET_STRING(idx, value) \ do { \ mrb_value lit = (value); \ FREEZE_STRING(lit); \ mrb_ary_set(mrb, ary, idx, lit); \ } while (0) #define SET_LITERAL(idx, str) SET_STRING(idx, mrb_str_new_lit(mrb, str)) SET_LITERAL(H2O_MRUBY_LIT_REQUEST_METHOD, "REQUEST_METHOD"); SET_LITERAL(H2O_MRUBY_LIT_SCRIPT_NAME, "SCRIPT_NAME"); SET_LITERAL(H2O_MRUBY_LIT_PATH_INFO, "PATH_INFO"); SET_LITERAL(H2O_MRUBY_LIT_QUERY_STRING, "QUERY_STRING"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_NAME, "SERVER_NAME"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_ADDR, "SERVER_ADDR"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_PORT, "SERVER_PORT"); SET_LITERAL(H2O_MRUBY_LIT_CONTENT_LENGTH, "CONTENT_LENGTH"); SET_LITERAL(H2O_MRUBY_LIT_REMOTE_ADDR, "REMOTE_ADDR"); SET_LITERAL(H2O_MRUBY_LIT_REMOTE_PORT, "REMOTE_PORT"); SET_LITERAL(H2O_MRUBY_LIT_REMOTE_USER, "REMOTE_USER"); SET_LITERAL(H2O_MRUBY_LIT_RACK_URL_SCHEME, "rack.url_scheme"); SET_LITERAL(H2O_MRUBY_LIT_RACK_MULTITHREAD, "rack.multithread"); SET_LITERAL(H2O_MRUBY_LIT_RACK_MULTIPROCESS, "rack.multiprocess"); SET_LITERAL(H2O_MRUBY_LIT_RACK_RUN_ONCE, "rack.run_once"); SET_LITERAL(H2O_MRUBY_LIT_RACK_HIJACK_, "rack.hijack?"); SET_LITERAL(H2O_MRUBY_LIT_RACK_INPUT, "rack.input"); SET_LITERAL(H2O_MRUBY_LIT_RACK_ERRORS, "rack.errors"); SET_LITERAL(H2O_MRUBY_LIT_SERVER_SOFTWARE, "SERVER_SOFTWARE"); SET_STRING(H2O_MRUBY_LIT_SERVER_SOFTWARE_VALUE, mrb_str_new(mrb, server_name, server_name_len)); SET_LITERAL(H2O_MRUBY_LIT_SEPARATOR_COMMA, ", "); SET_LITERAL(H2O_MRUBY_LIT_SEPARATOR_SEMICOLON, "; "); #undef SET_LITERAL #undef SET_STRING mrb_ary_set(mrb, ary, H2O_MRUBY_PROC_EACH_TO_ARRAY, h2o_mruby_eval_expr(mrb, "Proc.new do |o|\n" " a = []\n" " o.each do |x|\n" " a << x\n" " end\n" " a\n" "end")); h2o_mruby_assert(mrb); /* sends exception using H2O_MRUBY_CALLBACK_ID_EXCEPTION_RAISED */ mrb_ary_set(mrb, ary, H2O_MRUBY_PROC_APP_TO_FIBER, h2o_mruby_eval_expr(mrb, "Proc.new do |app|\n" " cached = nil\n" " Proc.new do |req|\n" " fiber = cached\n" " cached = nil\n" " if !fiber\n" " fiber = Fiber.new do\n" " self_fiber = Fiber.current\n" " req = Fiber.yield\n" " while 1\n" " begin\n" " while 1\n" " resp = app.call(req)\n" " cached = self_fiber\n" " req = Fiber.yield(resp)\n" " end\n" " rescue => e\n" " cached = self_fiber\n" " req = Fiber.yield([-1, e])\n" " end\n" " end\n" " end\n" " fiber.resume\n" " end\n" " fiber.resume(req)\n" " end\n" "end")); h2o_mruby_assert(mrb); h2o_mruby_eval_expr(mrb, "module Kernel\n" " def _h2o_create_resumer()\n" " me = Fiber.current\n" " Proc.new do |v|\n" " me.resume(v)\n" " end\n" " end\n" "end"); h2o_mruby_assert(mrb); mrb_gc_arena_restore(mrb, gc_arena); return ary; } static void on_context_init(h2o_handler_t *_handler, h2o_context_t *ctx) { h2o_mruby_handler_t *handler = (void 
*)_handler; h2o_mruby_context_t *handler_ctx = h2o_mem_alloc(sizeof(*handler_ctx)); handler_ctx->handler = handler; /* init mruby in every thread */ if ((handler_ctx->mrb = mrb_open()) == NULL) { fprintf(stderr, "%s: no memory\n", H2O_MRUBY_MODULE_NAME); abort(); } handler_ctx->constants = build_constants(handler_ctx->mrb, ctx->globalconf->server_name.base, ctx->globalconf->server_name.len); handler_ctx->symbols.sym_call = mrb_intern_lit(handler_ctx->mrb, "call"); handler_ctx->symbols.sym_close = mrb_intern_lit(handler_ctx->mrb, "close"); handler_ctx->symbols.sym_method = mrb_intern_lit(handler_ctx->mrb, "method"); handler_ctx->symbols.sym_headers = mrb_intern_lit(handler_ctx->mrb, "headers"); handler_ctx->symbols.sym_body = mrb_intern_lit(handler_ctx->mrb, "body"); handler_ctx->symbols.sym_async = mrb_intern_lit(handler_ctx->mrb, "async"); h2o_mruby_send_chunked_init_context(handler_ctx); h2o_mruby_http_request_init_context(handler_ctx); /* compile code (must be done for each thread) */ int arena = mrb_gc_arena_save(handler_ctx->mrb); mrb_value proc = h2o_mruby_compile_code(handler_ctx->mrb, &handler->config, NULL); handler_ctx->proc = mrb_funcall_argv(handler_ctx->mrb, mrb_ary_entry(handler_ctx->constants, H2O_MRUBY_PROC_APP_TO_FIBER), handler_ctx->symbols.sym_call, 1, &proc); h2o_mruby_assert(handler_ctx->mrb); mrb_gc_arena_restore(handler_ctx->mrb, arena); mrb_gc_protect(handler_ctx->mrb, handler_ctx->proc); h2o_context_set_handler_context(ctx, &handler->super, handler_ctx); } static void on_context_dispose(h2o_handler_t *_handler, h2o_context_t *ctx) { h2o_mruby_handler_t *handler = (void *)_handler; h2o_mruby_context_t *handler_ctx = h2o_context_get_handler_context(ctx, &handler->super); if (handler_ctx == NULL) return; mrb_close(handler_ctx->mrb); free(handler_ctx); } static void on_handler_dispose(h2o_handler_t *_handler) { h2o_mruby_handler_t *handler = (void *)_handler; free(handler->config.source.base); free(handler->config.path); free(handler); } static void report_exception(h2o_req_t *req, mrb_state *mrb) { mrb_value obj = mrb_funcall(mrb, mrb_obj_value(mrb->exc), "inspect", 0); struct RString *error = mrb_str_ptr(obj); h2o_req_log_error(req, H2O_MRUBY_MODULE_NAME, "mruby raised: %s\n", error->as.heap.ptr); mrb->exc = NULL; } static void stringify_address(h2o_conn_t *conn, socklen_t (*cb)(h2o_conn_t *conn, struct sockaddr *), mrb_state *mrb, mrb_value *host, mrb_value *port) { struct sockaddr_storage ss; socklen_t sslen; char buf[NI_MAXHOST]; *host = mrb_nil_value(); *port = mrb_nil_value(); if ((sslen = cb(conn, (void *)&ss)) == 0) return; size_t l = h2o_socket_getnumerichost((void *)&ss, sslen, buf); if (l != SIZE_MAX) *host = mrb_str_new(mrb, buf, l); int32_t p = h2o_socket_getport((void *)&ss); if (p != -1) { l = (int)sprintf(buf, "%" PRIu16, (uint16_t)p); *port = mrb_str_new(mrb, buf, l); } } static void on_rack_input_free(mrb_state *mrb, const char *base, mrb_int len, void *_input_stream) { /* reset ref to input_stream */ mrb_value *input_stream = _input_stream; *input_stream = mrb_nil_value(); } static int build_env_sort_header_cb(const void *_x, const void *_y) { const h2o_header_t *x = (const h2o_header_t *)_x, *y = (const h2o_header_t *)_y; if (x->name->len < y->name->len) return -1; if (x->name->len > y->name->len) return 1; if (x->name->base == y->name->base) return 0; return memcmp(x->name->base, y->name->base, x->name->len); } static mrb_value build_env(h2o_mruby_generator_t *generator) { mrb_state *mrb = generator->ctx->mrb; mrb_value env = 
mrb_hash_new_capa(mrb, 16); /* environment */ mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_REQUEST_METHOD), mrb_str_new(mrb, generator->req->method.base, generator->req->method.len)); size_t confpath_len_wo_slash = generator->req->pathconf->path.len; if (generator->req->pathconf->path.base[generator->req->pathconf->path.len - 1] == '/') --confpath_len_wo_slash; mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_SCRIPT_NAME), mrb_str_new(mrb, generator->req->pathconf->path.base, confpath_len_wo_slash)); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_PATH_INFO), mrb_str_new(mrb, generator->req->path_normalized.base + confpath_len_wo_slash, generator->req->path_normalized.len - confpath_len_wo_slash)); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_QUERY_STRING), generator->req->query_at != SIZE_MAX ? mrb_str_new(mrb, generator->req->path.base + generator->req->query_at + 1, generator->req->path.len - (generator->req->query_at + 1)) : mrb_str_new_lit(mrb, "")); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_SERVER_NAME), mrb_str_new(mrb, generator->req->hostconf->authority.host.base, generator->req->hostconf->authority.host.len)); { mrb_value h, p; stringify_address(generator->req->conn, generator->req->conn->callbacks->get_sockname, mrb, &h, &p); if (!mrb_nil_p(h)) mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_SERVER_ADDR), h); if (!mrb_nil_p(p)) mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_SERVER_PORT), p); } mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_TOKEN_HOST - h2o__tokens), mrb_str_new(mrb, generator->req->authority.base, generator->req->authority.len)); if (generator->req->entity.base != NULL) { char buf[32]; int l = sprintf(buf, "%zu", generator->req->entity.len); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_CONTENT_LENGTH), mrb_str_new(mrb, buf, l)); generator->rack_input = mrb_input_stream_value(mrb, NULL, 0); mrb_input_stream_set_data(mrb, generator->rack_input, generator->req->entity.base, (mrb_int)generator->req->entity.len, 0, on_rack_input_free, &generator->rack_input); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_INPUT), generator->rack_input); } { mrb_value h, p; stringify_address(generator->req->conn, generator->req->conn->callbacks->get_peername, mrb, &h, &p); if (!mrb_nil_p(h)) mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_REMOTE_ADDR), h); if (!mrb_nil_p(p)) mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_REMOTE_PORT), p); } { size_t i; for (i = 0; i != generator->req->env.size; i += 2) { h2o_iovec_t *name = generator->req->env.entries + i, *value = name + 1; mrb_hash_set(mrb, env, mrb_str_new(mrb, name->base, name->len), mrb_str_new(mrb, value->base, value->len)); } } { /* headers */ h2o_header_t *headers_sorted = alloca(sizeof(*headers_sorted) * generator->req->headers.size); memcpy(headers_sorted, generator->req->headers.entries, sizeof(*headers_sorted) * generator->req->headers.size); qsort(headers_sorted, generator->req->headers.size, sizeof(*headers_sorted), build_env_sort_header_cb); size_t i = 0; for (i = 0; i != generator->req->headers.size; ++i) { const h2o_header_t *header = headers_sorted + i; mrb_value n, v; if (h2o_iovec_is_token(header->name)) { const h2o_token_t *token = 
H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, header->name); if (token == H2O_TOKEN_TRANSFER_ENCODING) continue; n = mrb_ary_entry(generator->ctx->constants, (mrb_int)(token - h2o__tokens)); } else { h2o_iovec_t vec = convert_header_name_to_env(&generator->req->pool, header->name->base, header->name->len); n = mrb_str_new(mrb, vec.base, vec.len); } v = mrb_str_new(mrb, header->value.base, header->value.len); while (i < generator->req->headers.size - 1) { if (!h2o_memis(headers_sorted[i + 1].name->base, headers_sorted[i + 1].name->len, header->name->base, header->name->len)) break; header = headers_sorted + ++i; v = mrb_str_append(mrb, v, mrb_ary_entry(generator->ctx->constants, header->name == &H2O_TOKEN_COOKIE->buf ? H2O_MRUBY_LIT_SEPARATOR_SEMICOLON : H2O_MRUBY_LIT_SEPARATOR_COMMA)); v = mrb_str_append(mrb, v, mrb_str_new(mrb, header->value.base, header->value.len)); } mrb_hash_set(mrb, env, n, v); } } /* rack.* */ /* TBD rack.version? */ mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_URL_SCHEME), mrb_str_new(mrb, generator->req->scheme->name.base, generator->req->scheme->name.len)); /* we are using shared-none architecture, and therefore declare ourselves as multiprocess */ mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_MULTITHREAD), mrb_false_value()); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_MULTIPROCESS), mrb_true_value()); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_RUN_ONCE), mrb_false_value()); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_HIJACK_), mrb_false_value()); mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_RACK_ERRORS), mrb_gv_get(mrb, mrb_intern_lit(mrb, "$stderr"))); /* server name */ mrb_hash_set(mrb, env, mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_SERVER_SOFTWARE), mrb_ary_entry(generator->ctx->constants, H2O_MRUBY_LIT_SERVER_SOFTWARE_VALUE)); return env; } static int handle_response_header(h2o_mruby_context_t *handler_ctx, h2o_iovec_t name, h2o_iovec_t value, void *_req) { h2o_req_t *req = _req; const h2o_token_t *token; static const h2o_iovec_t fallthru_set_prefix = {H2O_STRLIT(FALLTHRU_SET_PREFIX)}; /* convert name to lowercase */ name = h2o_strdup(&req->pool, name.base, name.len); h2o_strtolower(name.base, name.len); if ((token = h2o_lookup_token(name.base, name.len)) != NULL) { if (token->proxy_should_drop) { /* skip */ } else if (token == H2O_TOKEN_CONTENT_LENGTH) { req->res.content_length = h2o_strtosize(value.base, value.len); } else { if (token == H2O_TOKEN_LINK) h2o_push_path_in_link_header(req, value.base, value.len); value = h2o_strdup(&req->pool, value.base, value.len); h2o_add_header(&req->pool, &req->res.headers, token, value.base, value.len); } } else if (name.len > fallthru_set_prefix.len && h2o_memis(name.base, fallthru_set_prefix.len, fallthru_set_prefix.base, fallthru_set_prefix.len)) { /* register environment variables (with the name converted to uppercase, and using `_`) */ size_t i; name.base += fallthru_set_prefix.len; name.len -= fallthru_set_prefix.len; for (i = 0; i != name.len; ++i) name.base[i] = name.base[i] == '-' ? 
'_' : h2o_toupper(name.base[i]); h2o_iovec_t *slot = h2o_req_getenv(req, name.base, name.len, 1); *slot = h2o_strdup(&req->pool, value.base, value.len); } else { value = h2o_strdup(&req->pool, value.base, value.len); h2o_add_header_by_str(&req->pool, &req->res.headers, name.base, name.len, 0, value.base, value.len); } return 0; } static void clear_rack_input(h2o_mruby_generator_t *generator) { if (!mrb_nil_p(generator->rack_input)) mrb_input_stream_set_data(generator->ctx->mrb, generator->rack_input, NULL, -1, 0, NULL, NULL); } static void on_generator_dispose(void *_generator) { h2o_mruby_generator_t *generator = _generator; clear_rack_input(generator); generator->req = NULL; if (generator->chunked != NULL) h2o_mruby_send_chunked_dispose(generator); } static int on_req(h2o_handler_t *_handler, h2o_req_t *req) { h2o_mruby_handler_t *handler = (void *)_handler; h2o_mruby_context_t *handler_ctx = h2o_context_get_handler_context(req->conn->ctx, &handler->super); int gc_arena = mrb_gc_arena_save(handler_ctx->mrb); h2o_mruby_generator_t *generator = h2o_mem_alloc_shared(&req->pool, sizeof(*generator), on_generator_dispose); generator->super.proceed = NULL; generator->super.stop = NULL; generator->req = req; generator->ctx = h2o_context_get_handler_context(req->conn->ctx, &handler->super); generator->rack_input = mrb_nil_value(); generator->chunked = NULL; mrb_value env = build_env(generator); int is_delegate = 0; h2o_mruby_run_fiber(generator, generator->ctx->proc, env, &is_delegate); mrb_gc_arena_restore(handler_ctx->mrb, gc_arena); if (is_delegate) return -1; return 0; } static void send_response(h2o_mruby_generator_t *generator, mrb_int status, mrb_value resp, int *is_delegate) { mrb_state *mrb = generator->ctx->mrb; mrb_value body; h2o_iovec_t content = {NULL}; /* set status */ generator->req->res.status = (int)status; /* set headers */ if (h2o_mruby_iterate_headers(generator->ctx, mrb_ary_entry(resp, 1), handle_response_header, generator->req) != 0) { assert(mrb->exc != NULL); goto GotException; } /* return without processing body, if status is fallthru */ if (generator->req->res.status == STATUS_FALLTHRU) { if (is_delegate != NULL) *is_delegate = 1; else h2o_delegate_request_deferred(generator->req, &generator->ctx->handler->super); return; } /* obtain body */ body = mrb_ary_entry(resp, 2); /* flatten body if possible */ if (mrb_array_p(body)) { mrb_int i, len = mrb_ary_len(mrb, body); /* calculate the length of the output, while at the same time converting the elements of the output array to string */ content.len = 0; for (i = 0; i != len; ++i) { mrb_value e = mrb_ary_entry(body, i); if (!mrb_string_p(e)) { e = h2o_mruby_to_str(mrb, e); if (mrb->exc != NULL) goto GotException; mrb_ary_set(mrb, body, i, e); } content.len += RSTRING_LEN(e); } /* allocate memory, and copy the response */ char *dst = content.base = h2o_mem_alloc_pool(&generator->req->pool, content.len); for (i = 0; i != len; ++i) { mrb_value e = mrb_ary_entry(body, i); assert(mrb_string_p(e)); memcpy(dst, RSTRING_PTR(e), RSTRING_LEN(e)); dst += RSTRING_LEN(e); } /* reset body to nil, now that we have read all data */ body = mrb_nil_value(); } /* use fiber in case we need to call #each */ if (!mrb_nil_p(body)) { h2o_start_response(generator->req, &generator->super); mrb_value receiver = h2o_mruby_send_chunked_init(generator, body); if (!mrb_nil_p(receiver)) h2o_mruby_run_fiber(generator, receiver, body, 0); return; } /* send the entire response immediately */ if (h2o_memis(generator->req->input.method.base, 
generator->req->input.method.len, H2O_STRLIT("HEAD"))) { h2o_start_response(generator->req, &generator->super); h2o_send(generator->req, NULL, 0, 1); } else { if (content.len < generator->req->res.content_length) { generator->req->res.content_length = content.len; } else { content.len = generator->req->res.content_length; } h2o_start_response(generator->req, &generator->super); h2o_send(generator->req, &content, 1, 1); } return; GotException: report_exception(generator->req, mrb); h2o_send_error_500(generator->req, "Internal Server Error", "Internal Server Error", 0); } void h2o_mruby_run_fiber(h2o_mruby_generator_t *generator, mrb_value receiver, mrb_value input, int *is_delegate) { mrb_state *mrb = generator->ctx->mrb; mrb_value output; mrb_int status; if (!mrb_obj_eq(mrb, generator->ctx->proc, receiver)) { mrb_gc_unregister(mrb, receiver); mrb_gc_protect(mrb, receiver); } h2o_mruby_current_generator = generator; while (1) { /* send input to fiber */ output = mrb_funcall_argv(mrb, receiver, generator->ctx->symbols.sym_call, 1, &input); if (mrb->exc != NULL) goto GotException; if (!mrb_array_p(output)) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "rack app did not return an array")); goto GotException; } /* fetch status */ mrb_value v = mrb_to_int(mrb, mrb_ary_entry(output, 0)); if (mrb->exc != NULL) goto GotException; status = mrb_fixnum(v); /* take special action depending on the status code */ if (status < 0) { if (status == H2O_MRUBY_CALLBACK_ID_EXCEPTION_RAISED) { mrb->exc = mrb_obj_ptr(mrb_ary_entry(output, 1)); goto GotException; } receiver = mrb_ary_entry(output, 1); int next_action = H2O_MRUBY_CALLBACK_NEXT_ACTION_IMMEDIATE; mrb_value args = mrb_ary_entry(output, 2); if (mrb_array_p(args)) { switch (status) { case H2O_MRUBY_CALLBACK_ID_SEND_CHUNKED_EOS: input = h2o_mruby_send_chunked_eos_callback(generator, receiver, args, &next_action); break; case H2O_MRUBY_CALLBACK_ID_HTTP_JOIN_RESPONSE: input = h2o_mruby_http_join_response_callback(generator, receiver, args, &next_action); break; case H2O_MRUBY_CALLBACK_ID_HTTP_FETCH_CHUNK: input = h2o_mruby_http_fetch_chunk_callback(generator, receiver, args, &next_action); break; default: input = mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "unexpected callback id sent from rack app"); break; } } else { input = mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "callback from rack app did not receive an array arg"); } switch (next_action) { case H2O_MRUBY_CALLBACK_NEXT_ACTION_STOP: return; case H2O_MRUBY_CALLBACK_NEXT_ACTION_ASYNC: goto Async; default: assert(next_action == H2O_MRUBY_CALLBACK_NEXT_ACTION_IMMEDIATE); break; } goto Next; } /* if no special actions were necessary, then the output is a rack response */ break; Next: mrb_gc_protect(mrb, receiver); mrb_gc_protect(mrb, input); } h2o_mruby_current_generator = NULL; if (!(100 <= status && status <= 999)) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "status returned from rack app is out of range")); goto GotException; } /* send the response (unless req is already closed) */ if (generator->req == NULL) return; if (generator->req->_generator != NULL) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "unexpectedly received a rack response")); goto GotException; } send_response(generator, status, output, is_delegate); return; GotException: h2o_mruby_current_generator = NULL; if (generator->req != NULL) { report_exception(generator->req, mrb); if (generator->req->_generator == NULL) { h2o_send_error_500(generator->req, "Internal Server 
Error", "Internal Server Error", 0); } else { h2o_mruby_send_chunked_close(generator); } } return; Async: h2o_mruby_current_generator = NULL; if (!mrb_obj_eq(mrb, generator->ctx->proc, receiver)) mrb_gc_register(mrb, receiver); return; } h2o_mruby_handler_t *h2o_mruby_register(h2o_pathconf_t *pathconf, h2o_mruby_config_vars_t *vars) { h2o_mruby_handler_t *handler = (void *)h2o_create_handler(pathconf, sizeof(*handler)); handler->super.on_context_init = on_context_init; handler->super.on_context_dispose = on_context_dispose; handler->super.dispose = on_handler_dispose; handler->super.on_req = on_req; handler->config.source = h2o_strdup(NULL, vars->source.base, vars->source.len); if (vars->path != NULL) handler->config.path = h2o_strdup(NULL, vars->path, SIZE_MAX).base; return handler; } mrb_value h2o_mruby_each_to_array(h2o_mruby_context_t *handler_ctx, mrb_value src) { return mrb_funcall_argv(handler_ctx->mrb, mrb_ary_entry(handler_ctx->constants, H2O_MRUBY_PROC_EACH_TO_ARRAY), handler_ctx->symbols.sym_call, 1, &src); } static int iterate_headers_handle_pair(h2o_mruby_context_t *handler_ctx, mrb_value name, mrb_value value, int (*cb)(h2o_mruby_context_t *, h2o_iovec_t, h2o_iovec_t, void *), void *cb_data) { /* convert name and value to string */ name = h2o_mruby_to_str(handler_ctx->mrb, name); if (handler_ctx->mrb->exc != NULL) return -1; value = h2o_mruby_to_str(handler_ctx->mrb, value); if (handler_ctx->mrb->exc != NULL) return -1; /* call the callback, splitting the values with '\n' */ const char *vstart = RSTRING_PTR(value), *vend = vstart + RSTRING_LEN(value), *eol; while (1) { for (eol = vstart; eol != vend; ++eol) if (*eol == '\n') break; if (cb(handler_ctx, h2o_iovec_init(RSTRING_PTR(name), RSTRING_LEN(name)), h2o_iovec_init(vstart, eol - vstart), cb_data) != 0) return -1; if (eol == vend) break; vstart = eol + 1; } return 0; } int h2o_mruby_iterate_headers(h2o_mruby_context_t *handler_ctx, mrb_value headers, int (*cb)(h2o_mruby_context_t *, h2o_iovec_t, h2o_iovec_t, void *), void *cb_data) { mrb_state *mrb = handler_ctx->mrb; if (!(mrb_hash_p(headers) || mrb_array_p(headers))) { headers = h2o_mruby_each_to_array(handler_ctx, headers); if (mrb->exc != NULL) return -1; assert(mrb_array_p(headers)); } if (mrb_hash_p(headers)) { mrb_value keys = mrb_hash_keys(mrb, headers); mrb_int i, len = mrb_ary_len(mrb, keys); for (i = 0; i != len; ++i) { mrb_value k = mrb_ary_entry(keys, i); mrb_value v = mrb_hash_get(mrb, headers, k); if (iterate_headers_handle_pair(handler_ctx, k, v, cb, cb_data) != 0) return -1; } } else { assert(mrb_array_p(headers)); mrb_int i, len = mrb_ary_len(mrb, headers); for (i = 0; i != len; ++i) { mrb_value pair = mrb_ary_entry(headers, i); if (!mrb_array_p(pair)) { mrb->exc = mrb_obj_ptr(mrb_exc_new_str_lit(mrb, E_ARGUMENT_ERROR, "array element of headers MUST by an array")); return -1; } if (iterate_headers_handle_pair(handler_ctx, mrb_ary_entry(pair, 0), mrb_ary_entry(pair, 1), cb, cb_data) != 0) return -1; } } return 0; }
1
11,305
Can we expect adequate information to be emitted to the log if either requiring preloads.rb itself fails, or one of the requires inside preloads.rb fails? If the answer is yes, I think we can merge this PR right away.
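For what it's worth, one way to guarantee that would be to guard the requires inside preloads.rb itself. A rough sketch in mruby-compatible Ruby (the library names are placeholders, since I don't know what preloads.rb actually pulls in, and it assumes `$stderr` is available in this embedded interpreter):

```ruby
# hypothetical preloads.rb: the required names below are placeholders
%w(core_ext.rb rack_support.rb).each do |lib|
  begin
    require lib
  rescue => e
    # name the failing library before propagating the error
    $stderr.puts "preloads.rb: failed to require #{lib}: #{e.message}"
    raise # re-raise so the h2o_mruby_assert() after the eval still aborts
  end
end
```

That way the log would identify the failing file instead of only showing whatever exception `h2o_mruby_assert()` prints.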
h2o-h2o
c
@@ -1,2 +1,3 @@
-class PagesController < ApplicationController
-end
+class PagesController < HighVoltage::PagesController
+  layout false
+end
1
class PagesController < ApplicationController
end
1
6,422
How about an `app/views/layouts/pages.html.erb` layout that contains the HTML head, body, and wrappers, and yields the `new-topics` template into it? I think we might be able to delete the `app/controllers/pages_controller.rb` file at that point.
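Roughly what I have in mind (untested; the title and wrapper class are just guesses):

```erb
<%# app/views/layouts/pages.html.erb %>
<!DOCTYPE html>
<html>
  <head>
    <title>Upcase</title>
    <%= stylesheet_link_tag "application" %>
    <%= csrf_meta_tags %>
  </head>
  <body>
    <div class="page-wrapper">
      <%= yield %>
    </div>
  </body>
</html>
```

With HighVoltage rendering each page template into that layout, the `layout false` override (and maybe the whole controller) should become unnecessary.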
thoughtbot-upcase
rb
@@ -2218,7 +2218,9 @@ Document.prototype.isSelected = function isSelected(path) {
   if (this.$__.selected == null) {
     return true;
   }
-
+  if (!path) {
+    return false;
+  }
   if (path === '_id') {
     return this.$__.selected._id !== 0;
   }
1
'use strict'; /*! * Module dependencies. */ const EventEmitter = require('events').EventEmitter; const InternalCache = require('./internal'); const MongooseError = require('./error/index'); const MixedSchema = require('./schema/mixed'); const ObjectExpectedError = require('./error/objectExpected'); const ObjectParameterError = require('./error/objectParameter'); const ParallelValidateError = require('./error/parallelValidate'); const Schema = require('./schema'); const StrictModeError = require('./error/strict'); const ValidationError = require('./error/validation'); const ValidatorError = require('./error/validator'); const VirtualType = require('./virtualtype'); const promiseOrCallback = require('./helpers/promiseOrCallback'); const cleanModifiedSubpaths = require('./helpers/document/cleanModifiedSubpaths'); const compile = require('./helpers/document/compile').compile; const defineKey = require('./helpers/document/compile').defineKey; const flatten = require('./helpers/common').flatten; const flattenObjectWithDottedPaths = require('./helpers/path/flattenObjectWithDottedPaths'); const get = require('./helpers/get'); const getEmbeddedDiscriminatorPath = require('./helpers/document/getEmbeddedDiscriminatorPath'); const getKeysInSchemaOrder = require('./helpers/schema/getKeysInSchemaOrder'); const handleSpreadDoc = require('./helpers/document/handleSpreadDoc'); const immediate = require('./helpers/immediate'); const isDefiningProjection = require('./helpers/projection/isDefiningProjection'); const isExclusive = require('./helpers/projection/isExclusive'); const inspect = require('util').inspect; const internalToObjectOptions = require('./options').internalToObjectOptions; const markArraySubdocsPopulated = require('./helpers/populate/markArraySubdocsPopulated'); const mpath = require('mpath'); const queryhelpers = require('./queryhelpers'); const utils = require('./utils'); const isPromise = require('./helpers/isPromise'); const clone = utils.clone; const deepEqual = utils.deepEqual; const isMongooseObject = utils.isMongooseObject; const arrayAtomicsBackupSymbol = require('./helpers/symbols').arrayAtomicsBackupSymbol; const arrayAtomicsSymbol = require('./helpers/symbols').arrayAtomicsSymbol; const documentArrayParent = require('./helpers/symbols').documentArrayParent; const documentIsModified = require('./helpers/symbols').documentIsModified; const documentModifiedPaths = require('./helpers/symbols').documentModifiedPaths; const documentSchemaSymbol = require('./helpers/symbols').documentSchemaSymbol; const getSymbol = require('./helpers/symbols').getSymbol; const populateModelSymbol = require('./helpers/symbols').populateModelSymbol; const scopeSymbol = require('./helpers/symbols').scopeSymbol; const schemaMixedSymbol = require('./schema/symbols').schemaMixedSymbol; const parentPaths = require('./helpers/path/parentPaths'); let DocumentArray; let MongooseArray; let Embedded; const specialProperties = utils.specialProperties; /** * The core Mongoose document constructor. You should not call this directly, * the Mongoose [Model constructor](./api.html#Model) calls this for you. * * @param {Object} obj the values to set * @param {Object} [fields] optional object containing the fields which were selected in the query returning this document and any populated paths data * @param {Object} [options] various configuration options for the document * @param {Boolean} [options.defaults=true] if `false`, skip applying default values to this document. 
* @inherits NodeJS EventEmitter http://nodejs.org/api/events.html#events_class_events_eventemitter * @event `init`: Emitted on a document after it has been retrieved from the db and fully hydrated by Mongoose. * @event `save`: Emitted when the document is successfully saved * @api private */ function Document(obj, fields, skipId, options) { if (typeof skipId === 'object' && skipId != null) { options = skipId; skipId = options.skipId; } options = Object.assign({}, options); // Support `browserDocument.js` syntax if (this.$__schema == null) { const _schema = utils.isObject(fields) && !fields.instanceOfSchema ? new Schema(fields) : fields; this.$__setSchema(_schema); fields = skipId; skipId = options; options = arguments[4] || {}; } this.$__ = new InternalCache; this.$__.emitter = new EventEmitter(); this.$isNew = 'isNew' in options ? options.isNew : true; if ('priorDoc' in options) { this.$__.priorDoc = options.priorDoc; } if (obj != null && typeof obj !== 'object') { throw new ObjectParameterError(obj, 'obj', 'Document'); } let defaults = true; if (options.defaults !== undefined) { this.$__.defaults = options.defaults; defaults = options.defaults; } const schema = this.$__schema; if (typeof fields === 'boolean' || fields === 'throw') { this.$__.strictMode = fields; fields = undefined; } else { this.$__.strictMode = schema.options.strict; if (fields !== undefined) { this.$__.selected = fields; } } const requiredPaths = schema.requiredPaths(true); for (const path of requiredPaths) { this.$__.activePaths.require(path); } this.$__.emitter.setMaxListeners(0); let exclude = null; // determine if this doc is a result of a query with // excluded fields if (utils.isPOJO(fields)) { exclude = isExclusive(fields); } const hasIncludedChildren = exclude === false && fields ? $__hasIncludedChildren(fields) : {}; if (this._doc == null) { this.$__buildDoc(obj, fields, skipId, exclude, hasIncludedChildren, false); // By default, defaults get applied **before** setting initial values // Re: gh-6155 if (defaults) { $__applyDefaults(this, fields, skipId, exclude, hasIncludedChildren, true, { isNew: this.$isNew }); } } if (obj) { // Skip set hooks if (this.$__original_set) { this.$__original_set(obj, undefined, true); } else { this.$set(obj, undefined, true); } if (obj instanceof Document) { this.$isNew = obj.$isNew; } } // Function defaults get applied **after** setting initial values so they // see the full doc rather than an empty one, unless they opt out. // Re: gh-3781, gh-6155 if (options.willInit && defaults) { EventEmitter.prototype.once.call(this, 'init', () => { $__applyDefaults(this, fields, skipId, exclude, hasIncludedChildren, false, options.skipDefaults, { isNew: this.$isNew }); }); } else if (defaults) { $__applyDefaults(this, fields, skipId, exclude, hasIncludedChildren, false, options.skipDefaults, { isNew: this.$isNew }); } this.$__._id = this._id; if (!this.$__.strictMode && obj) { const _this = this; const keys = Object.keys(this._doc); keys.forEach(function(key) { // Avoid methods, virtuals, existing fields, and `$` keys. The latter is to avoid overwriting // Mongoose internals. 
if (!(key in schema.tree) && !(key in schema.methods) && !(key in schema.virtuals) && !key.startsWith('$')) { defineKey({ prop: key, subprops: null, prototype: _this }); } }); } applyQueue(this); } Object.defineProperty(Document.prototype, 'isNew', { get: function() { return this.$isNew; }, set: function(value) { this.$isNew = value; } }); Object.defineProperty(Document.prototype, 'errors', { get: function() { return this.$errors; }, set: function(value) { this.$errors = value; } }); /*! * Document exposes the NodeJS event emitter API, so you can use * `on`, `once`, etc. */ utils.each( ['on', 'once', 'emit', 'listeners', 'removeListener', 'setMaxListeners', 'removeAllListeners', 'addListener'], function(emitterFn) { Document.prototype[emitterFn] = function() { return this.$__.emitter[emitterFn].apply(this.$__.emitter, arguments); }; Document.prototype[`$${emitterFn}`] = Document.prototype[emitterFn]; }); Document.prototype.constructor = Document; for (const i in EventEmitter.prototype) { Document[i] = EventEmitter.prototype[i]; } /** * The document's internal schema. * * @api private * @property schema * @memberOf Document * @instance */ Document.prototype.$__schema; /** * The document's schema. * * @api public * @property schema * @memberOf Document * @instance */ Document.prototype.schema; /** * Empty object that you can use for storing properties on the document. This * is handy for passing data to middleware without conflicting with Mongoose * internals. * * ####Example: * * schema.pre('save', function() { * // Mongoose will set `isNew` to `false` if `save()` succeeds * this.$locals.wasNew = this.isNew; * }); * * schema.post('save', function() { * // Prints true if `isNew` was set before `save()` * console.log(this.$locals.wasNew); * }); * * @api public * @property $locals * @memberOf Document * @instance */ Object.defineProperty(Document.prototype, '$locals', { configurable: false, enumerable: false, get: function() { if (this.$__.locals == null) { this.$__.locals = {}; } return this.$__.locals; }, set: function(v) { this.$__.locals = v; } }); /** * Boolean flag specifying if the document is new. * * @api public * @property $isNew * @memberOf Document * @instance */ Document.prototype.$isNew; /** * Boolean flag specifying if the document is new. * * @api public * @property isNew * @memberOf Document * @instance */ Document.prototype.isNew; /** * Set this property to add additional query filters when Mongoose saves this document and `isNew` is false. * * ####Example: * * // Make sure `save()` never updates a soft deleted document. * schema.pre('save', function() { * this.$where = { isDeleted: false }; * }); * * @api public * @property $where * @memberOf Document * @instance */ Object.defineProperty(Document.prototype, '$where', { configurable: false, enumerable: false, writable: true }); /** * The string version of this documents _id. * * ####Note: * * This getter exists on all documents by default. The getter can be disabled by setting the `id` [option](/docs/guide.html#id) of its `Schema` to false at construction time. * * new Schema({ name: String }, { id: false }); * * @api public * @see Schema options /docs/guide.html#options * @property id * @memberOf Document * @instance */ Document.prototype.id; /** * Hash containing current validation $errors. * * @api public * @property $errors * @memberOf Document * @instance */ Document.prototype.$errors; /** * Hash containing current validation errors. 
* * @api public * @property errors * @memberOf Document * @instance */ Document.prototype.errors; /** * A string containing the current operation that Mongoose is executing * on this document. May be `null`, `'save'`, `'validate'`, or `'remove'`. * * ####Example: * * const doc = new Model({ name: 'test' }); * doc.$op; // null * * const promise = doc.save(); * doc.$op; // 'save' * * await promise; * doc.$op; // null * * @api public * @property $op * @memberOf Document * @instance */ Object.defineProperty(Document.prototype, '$op', { get: function() { return this.$__.op || null; }, set: function(value) { this.$__.op = value; } }); /*! * ignore */ function $__hasIncludedChildren(fields) { const hasIncludedChildren = {}; const keys = Object.keys(fields); for (const key of keys) { const parts = key.split('.'); const c = []; for (const part of parts) { c.push(part); hasIncludedChildren[c.join('.')] = 1; } } return hasIncludedChildren; } /*! * ignore */ function $__applyDefaults(doc, fields, skipId, exclude, hasIncludedChildren, isBeforeSetters, pathsToSkip) { const paths = Object.keys(doc.$__schema.paths); const plen = paths.length; for (let i = 0; i < plen; ++i) { let def; let curPath = ''; const p = paths[i]; if (p === '_id' && skipId) { continue; } const type = doc.$__schema.paths[p]; const path = type.splitPath(); const len = path.length; let included = false; let doc_ = doc._doc; for (let j = 0; j < len; ++j) { if (doc_ == null) { break; } const piece = path[j]; curPath += (!curPath.length ? '' : '.') + piece; if (exclude === true) { if (curPath in fields) { break; } } else if (exclude === false && fields && !included) { if (curPath in fields) { included = true; } else if (!hasIncludedChildren[curPath]) { break; } } if (j === len - 1) { if (doc_[piece] !== void 0) { break; } if (typeof type.defaultValue === 'function') { if (!type.defaultValue.$runBeforeSetters && isBeforeSetters) { break; } if (type.defaultValue.$runBeforeSetters && !isBeforeSetters) { break; } } else if (!isBeforeSetters) { // Non-function defaults should always run **before** setters continue; } if (pathsToSkip && pathsToSkip[curPath]) { break; } if (fields && exclude !== null) { if (exclude === true) { // apply defaults to all non-excluded fields if (p in fields) { continue; } try { def = type.getDefault(doc, false); } catch (err) { doc.invalidate(p, err); break; } if (typeof def !== 'undefined') { doc_[piece] = def; doc.$__.activePaths.default(p); } } else if (included) { // selected field try { def = type.getDefault(doc, false); } catch (err) { doc.invalidate(p, err); break; } if (typeof def !== 'undefined') { doc_[piece] = def; doc.$__.activePaths.default(p); } } } else { try { def = type.getDefault(doc, false); } catch (err) { doc.invalidate(p, err); break; } if (typeof def !== 'undefined') { doc_[piece] = def; doc.$__.activePaths.default(p); } } } else { doc_ = doc_[piece]; } } } } /*! * ignore */ function $applyDefaultsToNested(val, path, doc) { if (val == null) { return; } flattenObjectWithDottedPaths(val); const paths = Object.keys(doc.$__schema.paths); const plen = paths.length; const pathPieces = path.indexOf('.') === -1 ? 
[path] : path.split('.'); for (let i = 0; i < plen; ++i) { let curPath = ''; const p = paths[i]; if (!p.startsWith(path + '.')) { continue; } const type = doc.$__schema.paths[p]; const pieces = type.splitPath().slice(pathPieces.length); const len = pieces.length; if (type.defaultValue === void 0) { continue; } let cur = val; for (let j = 0; j < len; ++j) { if (cur == null) { break; } const piece = pieces[j]; if (j === len - 1) { if (cur[piece] !== void 0) { break; } try { const def = type.getDefault(doc, false); if (def !== void 0) { cur[piece] = def; } } catch (err) { doc.invalidate(path + '.' + curPath, err); break; } break; } curPath += (!curPath.length ? '' : '.') + piece; cur[piece] = cur[piece] || {}; cur = cur[piece]; } } } /** * Builds the default doc structure * * @param {Object} obj * @param {Object} [fields] * @param {Boolean} [skipId] * @api private * @method $__buildDoc * @memberOf Document * @instance */ Document.prototype.$__buildDoc = function(obj, fields, skipId, exclude, hasIncludedChildren) { const doc = {}; const paths = Object.keys(this.$__schema.paths). // Don't build up any paths that are underneath a map, we don't know // what the keys will be filter(p => !p.includes('$*')); const plen = paths.length; let ii = 0; for (; ii < plen; ++ii) { const p = paths[ii]; if (p === '_id') { if (skipId) { continue; } if (obj && '_id' in obj) { continue; } } const path = this.$__schema.paths[p].splitPath(); const len = path.length; const last = len - 1; let curPath = ''; let doc_ = doc; let included = false; for (let i = 0; i < len; ++i) { const piece = path[i]; curPath += (!curPath.length ? '' : '.') + piece; // support excluding intermediary levels if (exclude === true) { if (curPath in fields) { break; } } else if (exclude === false && fields && !included) { if (curPath in fields) { included = true; } else if (!hasIncludedChildren[curPath]) { break; } } if (i < last) { doc_ = doc_[piece] || (doc_[piece] = {}); } } } this._doc = doc; }; /*! * Converts to POJO when you use the document for querying */ Document.prototype.toBSON = function() { return this.toObject(internalToObjectOptions); }; /** * Initializes the document without setters or marking anything modified. * * Called internally after a document is returned from mongodb. Normally, * you do **not** need to call this function on your own. * * This function triggers `init` [middleware](/docs/middleware.html). * Note that `init` hooks are [synchronous](/docs/middleware.html#synchronous). * * @param {Object} doc document returned by mongo * @api public * @memberOf Document * @instance */ Document.prototype.init = function(doc, opts, fn) { if (typeof opts === 'function') { fn = opts; opts = null; } this.$__init(doc, opts); if (fn) { fn(null, this); } return this; }; Document.prototype.$init = function() { return this.constructor.prototype.init.apply(this, arguments); }; /*! 
* ignore */ Document.prototype.$__init = function(doc, opts) { this.$isNew = false; opts = opts || {}; // handle docs with populated paths // If doc._id is not null or undefined if (doc._id != null && opts.populated && opts.populated.length) { const id = String(doc._id); for (const item of opts.populated) { if (item.isVirtual) { this.$populated(item.path, utils.getValue(item.path, doc), item); } else { this.$populated(item.path, item._docs[id], item); } if (item._childDocs == null) { continue; } for (const child of item._childDocs) { if (child == null || child.$__ == null) { continue; } child.$__.parent = this; } item._childDocs = []; } } init(this, doc, this._doc, opts); markArraySubdocsPopulated(this, opts.populated); this.$emit('init', this); this.constructor.emit('init', this); this.$__._id = this._id; return this; }; /*! * Init helper. * * @param {Object} self document instance * @param {Object} obj raw mongodb doc * @param {Object} doc object we are initializing * @api private */ function init(self, obj, doc, opts, prefix) { prefix = prefix || ''; const keys = Object.keys(obj); const len = keys.length; let schema; let path; let i; let index = 0; const strict = self.$__.strictMode; while (index < len) { _init(index++); } function _init(index) { i = keys[index]; path = prefix + i; schema = self.$__schema.path(path); // Should still work if not a model-level discriminator, but should not be // necessary. This is *only* to catch the case where we queried using the // base model and the discriminated model has a projection if (self.$__schema.$isRootDiscriminator && !self.$__isSelected(path)) { return; } if (!schema && utils.isPOJO(obj[i])) { // assume nested object if (!doc[i]) { doc[i] = {}; } init(self, obj[i], doc[i], opts, path + '.'); } else if (!schema) { doc[i] = obj[i]; if (!strict && !prefix) { // Set top-level properties that aren't in the schema if strict is false self[i] = obj[i]; } } else { // Retain order when overwriting defaults if (doc.hasOwnProperty(i) && obj[i] !== void 0) { delete doc[i]; } if (obj[i] === null) { doc[i] = schema._castNullish(null); } else if (obj[i] !== undefined) { const intCache = obj[i].$__ || {}; const wasPopulated = intCache.wasPopulated || null; if (schema && !wasPopulated) { try { doc[i] = schema.cast(obj[i], self, true); } catch (e) { self.invalidate(e.path, new ValidatorError({ path: e.path, message: e.message, type: 'cast', value: e.value, reason: e })); } } else { doc[i] = obj[i]; } } // mark as hydrated if (!self.$isModified(path)) { self.$__.activePaths.init(path); } } } } /** * Sends an update command with this document `_id` as the query selector. * * ####Example: * * weirdCar.update({$inc: {wheels:1}}, { w: 1 }, callback); * * ####Valid options: * * - same as in [Model.update](#model_Model.update) * * @see Model.update #model_Model.update * @param {Object} doc * @param {Object} options * @param {Function} callback * @return {Query} * @api public * @memberOf Document * @instance */ Document.prototype.update = function update() { const args = utils.args(arguments); args.unshift({ _id: this._id }); const query = this.constructor.update.apply(this.constructor, args); if (this.$session() != null) { if (!('session' in query.options)) { query.options.session = this.$session(); } } return query; }; /** * Sends an updateOne command with this document `_id` as the query selector. 
* * ####Example: * * weirdCar.updateOne({$inc: {wheels:1}}, { w: 1 }, callback); * * ####Valid options: * * - same as in [Model.updateOne](#model_Model.updateOne) * * @see Model.updateOne #model_Model.updateOne * @param {Object} doc * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Object} [options.lean] if truthy, mongoose will return the document as a plain JavaScript object rather than a mongoose document. See [`Query.lean()`](/docs/api.html#query_Query-lean) and the [Mongoose lean tutorial](/docs/tutorials/lean.html). * @param {Boolean|String} [options.strict] overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) * @param {Boolean} [options.timestamps=null] If set to `false` and [schema-level timestamps](/docs/guide.html#timestamps) are enabled, skip timestamps for this update. Note that this allows you to overwrite timestamps. Does nothing if schema-level timestamps are not set. * @param {Function} callback * @return {Query} * @api public * @memberOf Document * @instance */ Document.prototype.updateOne = function updateOne(doc, options, callback) { const query = this.constructor.updateOne({ _id: this._id }, doc, options); query.pre(cb => { this.constructor._middleware.execPre('updateOne', this, [this], cb); }); query.post(cb => { this.constructor._middleware.execPost('updateOne', this, [this], {}, cb); }); if (this.$session() != null) { if (!('session' in query.options)) { query.options.session = this.$session(); } } if (callback != null) { return query.exec(callback); } return query; }; /** * Sends a replaceOne command with this document `_id` as the query selector. * * ####Valid options: * * - same as in [Model.replaceOne](https://mongoosejs.com/docs/api/model.html#model_Model.replaceOne) * * @see Model.replaceOne #model_Model.replaceOne * @param {Object} doc * @param {Object} options * @param {Function} callback * @return {Query} * @api public * @memberOf Document * @instance */ Document.prototype.replaceOne = function replaceOne() { const args = utils.args(arguments); args.unshift({ _id: this._id }); return this.constructor.replaceOne.apply(this.constructor, args); }; /** * Getter/setter around the session associated with this document. Used to * automatically set `session` if you `save()` a doc that you got from a * query with an associated session. * * ####Example: * * const session = MyModel.startSession(); * const doc = await MyModel.findOne().session(session); * doc.$session() === session; // true * doc.$session(null); * doc.$session() === null; // true * * If this is a top-level document, setting the session propagates to all child * docs. * * @param {ClientSession} [session] overwrite the current session * @return {ClientSession} * @method $session * @api public * @memberOf Document */ Document.prototype.$session = function $session(session) { if (arguments.length === 0) { if (this.$__.session != null && this.$__.session.hasEnded) { this.$__.session = null; return null; } return this.$__.session; } if (session != null && session.hasEnded) { throw new MongooseError('Cannot set a document\'s session to a session that has ended. 
Make sure you haven\'t ' + 'called `endSession()` on the session you are passing to `$session()`.'); } if (session == null && this.$__.session == null) { return; } this.$__.session = session; if (!this.$isSubdocument) { const subdocs = this.$getAllSubdocs(); for (const child of subdocs) { child.$session(session); } } return session; }; /** * Overwrite all values in this document with the values of `obj`, except * for immutable properties. Behaves similarly to `set()`, except for it * unsets all properties that aren't in `obj`. * * @param {Object} obj the object to overwrite this document with * @method overwrite * @name overwrite * @memberOf Document * @instance * @api public */ Document.prototype.overwrite = function overwrite(obj) { const keys = Array.from(new Set(Object.keys(this._doc).concat(Object.keys(obj)))); for (const key of keys) { if (key === '_id') { continue; } // Explicitly skip version key if (this.$__schema.options.versionKey && key === this.$__schema.options.versionKey) { continue; } if (this.$__schema.options.discriminatorKey && key === this.$__schema.options.discriminatorKey) { continue; } this.$set(key, obj[key]); } return this; }; /** * Alias for `set()`, used internally to avoid conflicts * * @param {String|Object} path path or object of key/vals to set * @param {Any} val the value to set * @param {Schema|String|Number|Buffer|*} [type] optionally specify a type for "on-the-fly" attributes * @param {Object} [options] optionally specify options that modify the behavior of the set * @method $set * @name $set * @memberOf Document * @instance * @api public */ Document.prototype.$set = function $set(path, val, type, options) { if (utils.isPOJO(type)) { options = type; type = undefined; } options = options || {}; const merge = options.merge; const adhoc = type && type !== true; const constructing = type === true; const typeKey = this.$__schema.options.typeKey; let adhocs; let keys; let i = 0; let pathtype; let key; let prefix; const strict = 'strict' in options ? options.strict : this.$__.strictMode; if (adhoc) { adhocs = this.$__.adhocPaths || (this.$__.adhocPaths = {}); adhocs[path] = this.$__schema.interpretAsType(path, type, this.$__schema.options); } if (path == null) { [path, val] = [val, path]; } else if (typeof path !== 'string') { // new Document({ key: val }) if (path instanceof Document) { if (path.$__isNested) { path = path.toObject(); } else { path = path._doc; } } if (path == null) { [path, val] = [val, path]; } prefix = val ? val + '.' : ''; keys = getKeysInSchemaOrder(this.$__schema, path); const len = keys.length; // `_skipMinimizeTopLevel` is because we may have deleted the top-level // nested key to ensure key order. const _skipMinimizeTopLevel = get(options, '_skipMinimizeTopLevel', false); if (len === 0 && _skipMinimizeTopLevel) { delete options._skipMinimizeTopLevel; if (val) { this.$set(val, {}); } return this; } for (let i = 0; i < len; ++i) { key = keys[i]; const pathName = prefix + key; pathtype = this.$__schema.pathType(pathName); const valForKey = path[key]; // On initial set, delete any nested keys if we're going to overwrite // them to ensure we keep the user's key order. 
if (type === true && !prefix && path[key] != null && pathtype === 'nested' && this._doc[key] != null) { delete this._doc[key]; // Make sure we set `{}` back even if we minimize re: gh-8565 options = Object.assign({}, options, { _skipMinimizeTopLevel: true }); } else { // Make sure we set `{_skipMinimizeTopLevel: false}` if don't have overwrite: gh-10441 options = Object.assign({}, options, { _skipMinimizeTopLevel: false }); } if (utils.isNonBuiltinObject(valForKey) && pathtype === 'nested') { $applyDefaultsToNested(path[key], prefix + key, this); this.$set(prefix + key, path[key], constructing, Object.assign({}, options, { _skipMarkModified: true })); continue; } else if (strict) { // Don't overwrite defaults with undefined keys (gh-3981) (gh-9039) if (constructing && path[key] === void 0 && this.$get(pathName) !== void 0) { continue; } if (pathtype === 'adhocOrUndefined') { pathtype = getEmbeddedDiscriminatorPath(this, pathName, { typeOnly: true }); } if (pathtype === 'real' || pathtype === 'virtual') { const p = path[key]; this.$set(prefix + key, p, constructing, options); } else if (pathtype === 'nested' && path[key] instanceof Document) { this.$set(prefix + key, path[key].toObject({ transform: false }), constructing, options); } else if (strict === 'throw') { if (pathtype === 'nested') { throw new ObjectExpectedError(key, path[key]); } else { throw new StrictModeError(key); } } } else if (path[key] !== void 0) { this.$set(prefix + key, path[key], constructing, options); } } // Ensure all properties are in correct order by deleting and recreating every property. for (const key of Object.keys(this.$__schema.tree)) { if (this._doc.hasOwnProperty(key)) { const val = this._doc[key]; delete this._doc[key]; this._doc[key] = val; } } return this; } let pathType = this.$__schema.pathType(path); if (pathType === 'adhocOrUndefined') { pathType = getEmbeddedDiscriminatorPath(this, path, { typeOnly: true }); } // Assume this is a Mongoose document that was copied into a POJO using // `Object.assign()` or `{...doc}` val = handleSpreadDoc(val); // if this doc is being constructed we should not trigger getters const priorVal = (() => { if (this.$__.priorDoc != null) { return this.$__.priorDoc.$__getValue(path); } if (constructing) { return void 0; } return this.$__getValue(path); })(); if (pathType === 'nested' && val) { if (typeof val === 'object' && val != null) { if (val.$__ != null) { val = val.toObject(internalToObjectOptions); } if (val == null) { this.invalidate(path, new MongooseError.CastError('Object', val, path)); return this; } const hasInitialVal = this.$__.savedState != null && this.$__.savedState.hasOwnProperty(path); if (this.$__.savedState != null && !this.$isNew && !this.$__.savedState.hasOwnProperty(path)) { const initialVal = this.$__getValue(path); this.$__.savedState[path] = initialVal; const keys = Object.keys(initialVal || {}); for (const key of keys) { this.$__.savedState[path + '.' + key] = initialVal[key]; } } if (!merge) { this.$__setValue(path, null); cleanModifiedSubpaths(this, path); } else { return this.$set(val, path, constructing); } const keys = getKeysInSchemaOrder(this.$__schema, val, path); this.$__setValue(path, {}); for (const key of keys) { this.$set(path + '.' + key, val[key], constructing, options); } if (priorVal != null && utils.deepEqual(hasInitialVal ? 
            this.$__.savedState[path] : priorVal, val)) {
        this.unmarkModified(path);
      } else {
        this.markModified(path);
      }
      cleanModifiedSubpaths(this, path, { skipDocArrays: true });
      return this;
    }

    this.invalidate(path, new MongooseError.CastError('Object', val, path));
    return this;
  }

  let schema;
  const parts = path.indexOf('.') === -1 ? [path] : path.split('.');

  // Might need to change path for top-level alias
  if (typeof this.$__schema.aliases[parts[0]] == 'string') {
    parts[0] = this.$__schema.aliases[parts[0]];
  }

  if (pathType === 'adhocOrUndefined' && strict) {
    // check for roots that are Mixed types
    let mixed;

    for (i = 0; i < parts.length; ++i) {
      const subpath = parts.slice(0, i + 1).join('.');

      // If path is underneath a virtual, bypass everything and just set it.
      if (i + 1 < parts.length && this.$__schema.pathType(subpath) === 'virtual') {
        mpath.set(path, val, this);
        return this;
      }

      schema = this.$__schema.path(subpath);
      if (schema == null) {
        continue;
      }

      if (schema instanceof MixedSchema) {
        // allow changes to sub paths of mixed types
        mixed = true;
        break;
      }
    }

    if (schema == null) {
      // Check for embedded discriminators
      schema = getEmbeddedDiscriminatorPath(this, path);
    }

    if (!mixed && !schema) {
      if (strict === 'throw') {
        throw new StrictModeError(path);
      }
      return this;
    }
  } else if (pathType === 'virtual') {
    schema = this.$__schema.virtualpath(path);
    schema.applySetters(val, this);
    return this;
  } else {
    schema = this.$__path(path);
  }

  // gh-4578, if setting a deeply nested path that doesn't exist yet, create it
  let cur = this._doc;
  let curPath = '';
  for (i = 0; i < parts.length - 1; ++i) {
    cur = cur[parts[i]];
    curPath += (curPath.length > 0 ? '.' : '') + parts[i];
    if (!cur) {
      this.$set(curPath, {});
      // Hack re: gh-5800. If nested field is not selected, it probably exists
      // so `MongoServerError: cannot use the part (nested of nested.num) to
      // traverse the element ({nested: null})` is not likely. If the user gets
      // that error, it's their fault for now. We should reconsider disallowing
      // modifying unselected paths for 6.x
      if (!this.$__isSelected(curPath)) {
        this.unmarkModified(curPath);
      }
      cur = this.$__getValue(curPath);
    }
  }

  let pathToMark;

  // When using the $set operator the path to the field must already exist.
  // Else mongodb throws: "LEFT_SUBFIELD only supports Object"

  if (parts.length <= 1) {
    pathToMark = path;
  } else {
    for (i = 0; i < parts.length; ++i) {
      const subpath = parts.slice(0, i + 1).join('.');
      if (this.$get(subpath, null, { getters: false }) === null) {
        pathToMark = subpath;
        break;
      }
    }

    if (!pathToMark) {
      pathToMark = path;
    }
  }

  if (!schema) {
    this.$__set(pathToMark, path, options, constructing, parts, schema, val, priorVal);
    return this;
  }

  // If overwriting a subdocument path, make sure to clear out
  // any errors _before_ setting, so new errors that happen
  // get persisted. Re: #9080
  if (schema.$isSingleNested || schema.$isMongooseArray) {
    _markValidSubpaths(this, path);
  }

  if (schema.$isSingleNested && val != null && merge) {
    if (val instanceof Document) {
      val = val.toObject({ virtuals: false, transform: false });
    }
    const keys = Object.keys(val);
    for (const key of keys) {
      this.$set(path + '.'
+ key, val[key], constructing, options); } return this; } let shouldSet = true; try { // If the user is trying to set a ref path to a document with // the correct model name, treat it as populated const refMatches = (() => { if (schema.options == null) { return false; } if (!(val instanceof Document)) { return false; } const model = val.constructor; // Check ref const ref = schema.options.ref; if (ref != null && (ref === model.modelName || ref === model.baseModelName)) { return true; } // Check refPath const refPath = schema.options.refPath; if (refPath == null) { return false; } const modelName = val.get(refPath); return modelName === model.modelName || modelName === model.baseModelName; })(); let didPopulate = false; if (refMatches && val instanceof Document) { this.$populated(path, val._id, { [populateModelSymbol]: val.constructor }); val.$__.wasPopulated = true; didPopulate = true; } let popOpts; if (schema.options && Array.isArray(schema.options[typeKey]) && schema.options[typeKey].length && schema.options[typeKey][0].ref && _isManuallyPopulatedArray(val, schema.options[typeKey][0].ref)) { popOpts = { [populateModelSymbol]: val[0].constructor }; this.$populated(path, val.map(function(v) { return v._id; }), popOpts); for (const doc of val) { doc.$__.wasPopulated = true; } didPopulate = true; } if (this.$__schema.singleNestedPaths[path] == null && (!refMatches || !schema.$isSingleNested || !val.$__)) { // If this path is underneath a single nested schema, we'll call the setter // later in `$__set()` because we don't take `_doc` when we iterate through // a single nested doc. That's to make sure we get the correct context. // Otherwise we would double-call the setter, see gh-7196. val = schema.applySetters(val, this, false, priorVal); } if (schema.$isMongooseDocumentArray && Array.isArray(val) && val.length > 0 && val[0] != null && val[0].$__ != null && val[0].$__.populated != null) { const populatedPaths = Object.keys(val[0].$__.populated); for (const populatedPath of populatedPaths) { this.$populated(path + '.' + populatedPath, val.map(v => v.$populated(populatedPath)), val[0].$__.populated[populatedPath].options); } didPopulate = true; } if (!didPopulate && this.$__.populated) { // If this array partially contains populated documents, convert them // all to ObjectIds re: #8443 if (Array.isArray(val) && this.$__.populated[path]) { for (let i = 0; i < val.length; ++i) { if (val[i] instanceof Document) { val.set(i, val[i]._id, true); } } } delete this.$__.populated[path]; } if (schema.$isSingleNested && val != null) { _checkImmutableSubpaths(val, schema, priorVal); } this.$markValid(path); } catch (e) { if (e instanceof MongooseError.StrictModeError && e.isImmutableError) { this.invalidate(path, e); } else if (e instanceof MongooseError.CastError) { this.invalidate(e.path, e); if (e.$originalErrorPath) { this.invalidate(path, new MongooseError.CastError(schema.instance, val, path, e.$originalErrorPath)); } } else { this.invalidate(path, new MongooseError.CastError(schema.instance, val, path, e)); } shouldSet = false; } if (shouldSet) { const doc = this.$isSubdocument ? this.ownerDocument() : this; const savedState = doc.$__.savedState; const savedStatePath = this.$isSubdocument ? this.$__.fullPath + '.' + path : path; if (savedState != null) { const firstDot = savedStatePath.indexOf('.'); const topLevelPath = firstDot === -1 ? 
savedStatePath : savedStatePath.slice(0, firstDot); if (!savedState.hasOwnProperty(topLevelPath)) { savedState[topLevelPath] = utils.clone(doc.$__getValue(topLevelPath)); } } this.$__set(pathToMark, path, options, constructing, parts, schema, val, priorVal); if (savedState != null && savedState.hasOwnProperty(savedStatePath) && utils.deepEqual(val, savedState[savedStatePath])) { this.unmarkModified(path); } } if (schema.$isSingleNested && (this.isDirectModified(path) || val == null)) { cleanModifiedSubpaths(this, path); } return this; }; /*! * ignore */ function _isManuallyPopulatedArray(val, ref) { if (!Array.isArray(val)) { return false; } if (val.length === 0) { return false; } for (const el of val) { if (!(el instanceof Document)) { return false; } const modelName = el.constructor.modelName; if (modelName == null) { return false; } if (el.constructor.modelName != ref && el.constructor.baseModelName != ref) { return false; } } return true; } /** * Sets the value of a path, or many paths. * * ####Example: * * // path, value * doc.set(path, value) * * // object * doc.set({ * path : value * , path2 : { * path : value * } * }) * * // on-the-fly cast to number * doc.set(path, value, Number) * * // on-the-fly cast to string * doc.set(path, value, String) * * // changing strict mode behavior * doc.set(path, value, { strict: false }); * * @param {String|Object} path path or object of key/vals to set * @param {Any} val the value to set * @param {Schema|String|Number|Buffer|*} [type] optionally specify a type for "on-the-fly" attributes * @param {Object} [options] optionally specify options that modify the behavior of the set * @api public * @method set * @memberOf Document * @instance */ Document.prototype.set = Document.prototype.$set; /** * Determine if we should mark this change as modified. * * @return {Boolean} * @api private * @method $__shouldModify * @memberOf Document * @instance */ Document.prototype.$__shouldModify = function(pathToMark, path, options, constructing, parts, schema, val, priorVal) { if (options._skipMarkModified) { return false; } if (this.$isNew) { return true; } // Re: the note about gh-7196, `val` is the raw value without casting or // setters if the full path is under a single nested subdoc because we don't // want to double run setters. So don't set it as modified. See gh-7264. if (this.$__schema.singleNestedPaths[path] != null) { return false; } if (val === void 0 && !this.$__isSelected(path)) { // when a path is not selected in a query, its initial // value will be undefined. return true; } if (val === void 0 && path in this.$__.activePaths.states.default) { // we're just unsetting the default value which was never saved return false; } // gh-3992: if setting a populated field to a doc, don't mark modified // if they have the same _id if (this.$populated(path) && val instanceof Document && deepEqual(val._id, priorVal)) { return false; } if (!deepEqual(val, priorVal || utils.getValue(path, this))) { return true; } if (!constructing && val !== null && val !== undefined && path in this.$__.activePaths.states.default && deepEqual(val, schema.getDefault(this, constructing))) { // a path with a default was $unset on the server // and the user is setting it to the same value again return true; } return false; }; /** * Handles the actual setting of the value and marking the path modified if appropriate. 
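 *
 * For illustration only (a minimal sketch; the `name` path is a placeholder),
 * public setters delegate here:
 *
 *     doc.name = 'changed';   // routes through `$set()` into `$__set()`
 *     doc.isModified('name'); // true when `$__shouldModify()` returned true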
* * @api private * @method $__set * @memberOf Document * @instance */ Document.prototype.$__set = function(pathToMark, path, options, constructing, parts, schema, val, priorVal) { Embedded = Embedded || require('./types/ArraySubdocument'); const shouldModify = this.$__shouldModify(pathToMark, path, options, constructing, parts, schema, val, priorVal); const _this = this; if (shouldModify) { this.markModified(pathToMark); // handle directly setting arrays (gh-1126) MongooseArray || (MongooseArray = require('./types/array')); if (val && val.isMongooseArray) { val._registerAtomic('$set', val); // Update embedded document parent references (gh-5189) if (val.isMongooseDocumentArray) { val.forEach(function(item) { item && item.__parentArray && (item.__parentArray = val); }); } // Small hack for gh-1638: if we're overwriting the entire array, ignore // paths that were modified before the array overwrite this.$__.activePaths.forEach(function(modifiedPath) { if (modifiedPath.startsWith(path + '.')) { _this.$__.activePaths.ignore(modifiedPath); } }); } } else if (Array.isArray(val) && val.isMongooseArray && Array.isArray(priorVal) && priorVal.isMongooseArray) { val[arrayAtomicsSymbol] = priorVal[arrayAtomicsSymbol]; val[arrayAtomicsBackupSymbol] = priorVal[arrayAtomicsBackupSymbol]; } let obj = this._doc; let i = 0; const l = parts.length; let cur = ''; for (; i < l; i++) { const next = i + 1; const last = next === l; cur += (cur ? '.' + parts[i] : parts[i]); if (specialProperties.has(parts[i])) { return; } if (last) { if (obj instanceof Map) { obj.set(parts[i], val); } else { obj[parts[i]] = val; } } else { if (utils.isPOJO(obj[parts[i]])) { obj = obj[parts[i]]; } else if (obj[parts[i]] && obj[parts[i]] instanceof Embedded) { obj = obj[parts[i]]; } else if (obj[parts[i]] && obj[parts[i]].$isSingleNested) { obj = obj[parts[i]]; } else if (obj[parts[i]] && Array.isArray(obj[parts[i]])) { obj = obj[parts[i]]; } else { obj[parts[i]] = obj[parts[i]] || {}; obj = obj[parts[i]]; } } } }; /** * Gets a raw value from a path (no getters) * * @param {String} path * @api private */ Document.prototype.$__getValue = function(path) { return utils.getValue(path, this._doc); }; /** * Sets a raw value for a path (no casting, setters, transformations) * * @param {String} path * @param {Object} value * @api private */ Document.prototype.$__setValue = function(path, val) { utils.setValue(path, val, this._doc); return this; }; /** * Returns the value of a path. * * ####Example * * // path * doc.get('age') // 47 * * // dynamic casting to a string * doc.get('age', String) // "47" * * @param {String} path * @param {Schema|String|Number|Buffer|*} [type] optionally specify a type for on-the-fly attributes * @param {Object} [options] * @param {Boolean} [options.virtuals=false] Apply virtuals before getting this path * @param {Boolean} [options.getters=true] If false, skip applying getters and just get the raw value * @api public */ Document.prototype.get = function(path, type, options) { let adhoc; options = options || {}; if (type) { adhoc = this.$__schema.interpretAsType(path, type, this.$__schema.options); } let schema = this.$__path(path); if (schema == null) { schema = this.$__schema.virtualpath(path); } if (schema instanceof MixedSchema) { const virtual = this.$__schema.virtualpath(path); if (virtual != null) { schema = virtual; } } const pieces = path.indexOf('.') === -1 ? 
    [path] : path.split('.');

  let obj = this._doc;

  if (schema instanceof VirtualType) {
    return schema.applyGetters(void 0, this);
  }

  // Might need to change path for top-level alias
  if (typeof this.$__schema.aliases[pieces[0]] == 'string') {
    pieces[0] = this.$__schema.aliases[pieces[0]];
  }

  for (let i = 0, l = pieces.length; i < l; i++) {
    if (obj && obj._doc) {
      obj = obj._doc;
    }

    if (obj == null) {
      obj = void 0;
    } else if (obj instanceof Map) {
      obj = obj.get(pieces[i], { getters: false });
    } else if (i === l - 1) {
      obj = utils.getValue(pieces[i], obj);
    } else {
      obj = obj[pieces[i]];
    }
  }

  if (adhoc) {
    obj = adhoc.cast(obj);
  }

  if (schema != null && options.getters !== false) {
    obj = schema.applyGetters(obj, this);
  } else if (this.$__schema.nested[path] && options.virtuals) {
    // Might need to apply virtuals if this is a nested path
    return applyVirtuals(this, utils.clone(obj) || {}, { path: path });
  }

  return obj;
};

/*!
 * ignore
 */

Document.prototype[getSymbol] = Document.prototype.get;
Document.prototype.$get = Document.prototype.get;

/**
 * Returns the schematype for the given `path`.
 *
 * @param {String} path
 * @api private
 * @method $__path
 * @memberOf Document
 * @instance
 */

Document.prototype.$__path = function(path) {
  const adhocs = this.$__.adhocPaths;
  const adhocType = adhocs && adhocs.hasOwnProperty(path) ? adhocs[path] : null;

  if (adhocType) {
    return adhocType;
  }
  return this.$__schema.path(path);
};

/**
 * Marks the path as having pending changes to write to the db.
 *
 * _Very helpful when using [Mixed](https://mongoosejs.com/docs/schematypes.html#mixed) types._
 *
 * ####Example:
 *
 *     doc.mixed.type = 'changed';
 *     doc.markModified('mixed.type');
 *     doc.save() // changes to mixed.type are now persisted
 *
 * @param {String} path the path to mark modified
 * @param {Document} [scope] the scope to run validators with
 * @api public
 */

Document.prototype.markModified = function(path, scope) {
  this.$__.activePaths.modify(path);
  if (scope != null && !this.$isSubdocument) {
    this.$__.pathsToScopes = this.$__.pathsToScopes || {};
    this.$__.pathsToScopes[path] = scope;
  }
};

/**
 * Clears the modified state on the specified path.
 *
 * ####Example:
 *
 *     doc.foo = 'bar';
 *     doc.unmarkModified('foo');
 *     doc.save(); // changes to foo will not be persisted
 *
 * @param {String} path the path to unmark modified
 * @api public
 */

Document.prototype.unmarkModified = function(path) {
  this.$__.activePaths.init(path);
  if (this.$__.pathsToScopes != null) {
    delete this.$__.pathsToScopes[path];
  }
};

/**
 * Don't run validation on this path or persist changes to this path.
 *
 * ####Example:
 *
 *     doc.foo = null;
 *     doc.$ignore('foo');
 *     doc.save(); // changes to foo will not be persisted and validators won't be run
 *
 * @memberOf Document
 * @instance
 * @method $ignore
 * @param {String} path the path to ignore
 * @api public
 */

Document.prototype.$ignore = function(path) {
  this.$__.activePaths.ignore(path);
};

/**
 * Returns the list of paths that have been directly modified. A directly
 * modified path is a path that you explicitly set, whether via `doc.foo = 'bar'`,
 * `Object.assign(doc, { foo: 'bar' })`, or `doc.set('foo', 'bar')`.
 *
 * A path `a` may be in `modifiedPaths()` but not in `directModifiedPaths()`
 * because a child of `a` was directly modified.
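 * In other words, every path returned by `directModifiedPaths()` also
 * appears in `modifiedPaths()`.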
* * ####Example * const schema = new Schema({ foo: String, nested: { bar: String } }); * const Model = mongoose.model('Test', schema); * await Model.create({ foo: 'original', nested: { bar: 'original' } }); * * const doc = await Model.findOne(); * doc.nested.bar = 'modified'; * doc.directModifiedPaths(); // ['nested.bar'] * doc.modifiedPaths(); // ['nested', 'nested.bar'] * * @return {Array} * @api public */ Document.prototype.directModifiedPaths = function() { return Object.keys(this.$__.activePaths.states.modify); }; /** * Returns true if the given path is nullish or only contains empty objects. * Useful for determining whether this subdoc will get stripped out by the * [minimize option](/docs/guide.html#minimize). * * ####Example: * const schema = new Schema({ nested: { foo: String } }); * const Model = mongoose.model('Test', schema); * const doc = new Model({}); * doc.$isEmpty('nested'); // true * doc.nested.$isEmpty(); // true * * doc.nested.foo = 'bar'; * doc.$isEmpty('nested'); // false * doc.nested.$isEmpty(); // false * * @memberOf Document * @instance * @api public * @method $isEmpty * @return {Boolean} */ Document.prototype.$isEmpty = function(path) { const isEmptyOptions = { minimize: true, virtuals: false, getters: false, transform: false }; if (arguments.length > 0) { const v = this.$get(path); if (v == null) { return true; } if (typeof v !== 'object') { return false; } if (utils.isPOJO(v)) { return _isEmpty(v); } return Object.keys(v.toObject(isEmptyOptions)).length === 0; } return Object.keys(this.toObject(isEmptyOptions)).length === 0; }; function _isEmpty(v) { if (v == null) { return true; } if (typeof v !== 'object' || Array.isArray(v)) { return false; } for (const key of Object.keys(v)) { if (!_isEmpty(v[key])) { return false; } } return true; } /** * Returns the list of paths that have been modified. * * @param {Object} [options] * @param {Boolean} [options.includeChildren=false] if true, returns children of modified paths as well. For example, if false, the list of modified paths for `doc.colors = { primary: 'blue' };` will **not** contain `colors.primary`. If true, `modifiedPaths()` will return an array that contains `colors.primary`. * @return {Array} * @api public */ Document.prototype.modifiedPaths = function(options) { options = options || {}; const directModifiedPaths = Object.keys(this.$__.activePaths.states.modify); const _this = this; return directModifiedPaths.reduce(function(list, path) { const parts = path.split('.'); list = list.concat(parts.reduce(function(chains, part, i) { return chains.concat(parts.slice(0, i).concat(part).join('.')); }, []).filter(function(chain) { return (list.indexOf(chain) === -1); })); if (!options.includeChildren) { return list; } let cur = _this.$get(path); if (cur != null && typeof cur === 'object') { if (cur._doc) { cur = cur._doc; } if (Array.isArray(cur)) { const len = cur.length; for (let i = 0; i < len; ++i) { if (list.indexOf(path + '.' + i) === -1) { list.push(path + '.' + i); if (cur[i] != null && cur[i].$__) { const modified = cur[i].modifiedPaths(); for (const childPath of modified) { list.push(path + '.' + i + '.' + childPath); } } } } } else { Object.keys(cur). filter(function(key) { return list.indexOf(path + '.' + key) === -1; }). forEach(function(key) { list.push(path + '.' + key); }); } } return list; }, []); }; Document.prototype[documentModifiedPaths] = Document.prototype.modifiedPaths; /** * Returns true if any of the given paths is modified, else false. 
If no arguments, returns `true` if any path * in this document is modified. * * If `path` is given, checks if a path or any full path containing `path` as part of its path chain has been modified. * * ####Example * * doc.set('documents.0.title', 'changed'); * doc.isModified() // true * doc.isModified('documents') // true * doc.isModified('documents.0.title') // true * doc.isModified('documents otherProp') // true * doc.isDirectModified('documents') // false * * @param {String} [path] optional * @return {Boolean} * @api public */ Document.prototype.isModified = function(paths, modifiedPaths) { if (paths) { if (!Array.isArray(paths)) { paths = paths.split(' '); } const modified = modifiedPaths || this[documentModifiedPaths](); const directModifiedPaths = Object.keys(this.$__.activePaths.states.modify); const isModifiedChild = paths.some(function(path) { return !!~modified.indexOf(path); }); return isModifiedChild || paths.some(function(path) { return directModifiedPaths.some(function(mod) { return mod === path || path.startsWith(mod + '.'); }); }); } return this.$__.activePaths.some('modify'); }; Document.prototype.$isModified = Document.prototype.isModified; Document.prototype[documentIsModified] = Document.prototype.isModified; /** * Checks if a path is set to its default. * * ####Example * * MyModel = mongoose.model('test', { name: { type: String, default: 'Val '} }); * const m = new MyModel(); * m.$isDefault('name'); // true * * @memberOf Document * @instance * @method $isDefault * @param {String} [path] * @return {Boolean} * @api public */ Document.prototype.$isDefault = function(path) { if (path == null) { return this.$__.activePaths.some('default'); } if (typeof path === 'string' && path.indexOf(' ') === -1) { return this.$__.activePaths.states.default.hasOwnProperty(path); } let paths = path; if (!Array.isArray(paths)) { paths = paths.split(' '); } return paths.some(path => this.$__.activePaths.states.default.hasOwnProperty(path)); }; /** * Getter/setter, determines whether the document was removed or not. * * ####Example: * const product = await product.remove(); * product.$isDeleted(); // true * product.remove(); // no-op, doesn't send anything to the db * * product.$isDeleted(false); * product.$isDeleted(); // false * product.remove(); // will execute a remove against the db * * * @param {Boolean} [val] optional, overrides whether mongoose thinks the doc is deleted * @return {Boolean} whether mongoose thinks this doc is deleted. * @method $isDeleted * @memberOf Document * @instance * @api public */ Document.prototype.$isDeleted = function(val) { if (arguments.length === 0) { return !!this.$__.isDeleted; } this.$__.isDeleted = !!val; return this; }; /** * Returns true if `path` was directly set and modified, else false. * * ####Example * * doc.set('documents.0.title', 'changed'); * doc.isDirectModified('documents.0.title') // true * doc.isDirectModified('documents') // false * * @param {String|Array<String>} path * @return {Boolean} * @api public */ Document.prototype.isDirectModified = function(path) { if (path == null) { return this.$__.activePaths.some('modify'); } if (typeof path === 'string' && path.indexOf(' ') === -1) { return this.$__.activePaths.states.modify.hasOwnProperty(path); } let paths = path; if (!Array.isArray(paths)) { paths = paths.split(' '); } return paths.some(path => this.$__.activePaths.states.modify.hasOwnProperty(path)); }; /** * Checks if `path` is in the `init` state, that is, it was set by `Document#init()` and not modified since. 
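 *
 * For example (a minimal sketch; `Person` and its `name` path are placeholders):
 *
 *     const doc = await Person.findOne();
 *     doc.isInit('name'); // true, `name` was hydrated by `init()`
 *     doc.name = 'changed';
 *     doc.isInit('name'); // false, `name` was modified after init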
* * @param {String} path * @return {Boolean} * @api public */ Document.prototype.isInit = function(path) { if (path == null) { return this.$__.activePaths.some('init'); } if (typeof path === 'string' && path.indexOf(' ') === -1) { return this.$__.activePaths.states.init.hasOwnProperty(path); } let paths = path; if (!Array.isArray(paths)) { paths = paths.split(' '); } return paths.some(path => this.$__.activePaths.states.init.hasOwnProperty(path)); }; /** * Checks if `path` was selected in the source query which initialized this document. * * ####Example * * const doc = await Thing.findOne().select('name'); * doc.isSelected('name') // true * doc.isSelected('age') // false * * @param {String|Array<String>} path * @return {Boolean} * @api public */ Document.prototype.isSelected = function isSelected(path) { if (this.$__.selected == null) { return true; } if (path === '_id') { return this.$__.selected._id !== 0; } if (path.indexOf(' ') !== -1) { path = path.split(' '); } if (Array.isArray(path)) { return path.some(p => this.$__isSelected(p)); } const paths = Object.keys(this.$__.selected); let inclusive = null; if (paths.length === 1 && paths[0] === '_id') { // only _id was selected. return this.$__.selected._id === 0; } for (const cur of paths) { if (cur === '_id') { continue; } if (!isDefiningProjection(this.$__.selected[cur])) { continue; } inclusive = !!this.$__.selected[cur]; break; } if (inclusive === null) { return true; } if (path in this.$__.selected) { return inclusive; } const pathDot = path + '.'; for (const cur of paths) { if (cur === '_id') { continue; } if (cur.startsWith(pathDot)) { return inclusive || cur !== pathDot; } if (pathDot.startsWith(cur + '.')) { return inclusive; } } return !inclusive; }; Document.prototype.$__isSelected = Document.prototype.isSelected; /** * Checks if `path` was explicitly selected. If no projection, always returns * true. * * ####Example * * Thing.findOne().select('nested.name').exec(function (err, doc) { * doc.isDirectSelected('nested.name') // true * doc.isDirectSelected('nested.otherName') // false * doc.isDirectSelected('nested') // false * }) * * @param {String} path * @return {Boolean} * @api public */ Document.prototype.isDirectSelected = function isDirectSelected(path) { if (this.$__.selected == null) { return true; } if (path === '_id') { return this.$__.selected._id !== 0; } if (path.indexOf(' ') !== -1) { path = path.split(' '); } if (Array.isArray(path)) { return path.some(p => this.isDirectSelected(p)); } const paths = Object.keys(this.$__.selected); let inclusive = null; if (paths.length === 1 && paths[0] === '_id') { // only _id was selected. return this.$__.selected._id === 0; } for (const cur of paths) { if (cur === '_id') { continue; } if (!isDefiningProjection(this.$__.selected[cur])) { continue; } inclusive = !!this.$__.selected[cur]; break; } if (inclusive === null) { return true; } if (this.$__.selected.hasOwnProperty(path)) { return inclusive; } return !inclusive; }; /** * Executes registered validation rules for this document. * * ####Note: * * This method is called `pre` save and if a validation rule is violated, [save](#model_Model-save) is aborted and the error is returned to your `callback`. * * ####Example: * * doc.validate(function (err) { * if (err) handleError(err); * else // validation passed * }); * * @param {Array|String} [pathsToValidate] list of paths to validate. If set, Mongoose will validate only the modified paths that are in the given list. 
* @param {Object} [options] internal options * @param {Boolean} [options.validateModifiedOnly=false] if `true` mongoose validates only modified paths. * @param {Array|string} [options.pathsToSkip] list of paths to skip. If set, Mongoose will validate every modified path that is not in this list. * @param {Function} [callback] optional callback called after validation completes, passing an error if one occurred * @return {Promise} Promise * @api public */ Document.prototype.validate = function(pathsToValidate, options, callback) { let parallelValidate; this.$op = 'validate'; if (this.$isSubdocument != null) { // Skip parallel validate check for subdocuments } else if (this.$__.validating) { parallelValidate = new ParallelValidateError(this, { parentStack: options && options.parentStack, conflictStack: this.$__.validating.stack }); } else { this.$__.validating = new ParallelValidateError(this, { parentStack: options && options.parentStack }); } if (arguments.length === 1) { if (typeof arguments[0] === 'object' && !Array.isArray(arguments[0])) { options = arguments[0]; callback = null; pathsToValidate = null; } else if (typeof arguments[0] === 'function') { callback = arguments[0]; options = null; pathsToValidate = null; } } else if (typeof pathsToValidate === 'function') { callback = pathsToValidate; options = null; pathsToValidate = null; } else if (typeof options === 'function') { callback = options; options = pathsToValidate; pathsToValidate = null; } if (options && typeof options.pathsToSkip === 'string') { const isOnePathOnly = options.pathsToSkip.indexOf(' ') === -1; options.pathsToSkip = isOnePathOnly ? [options.pathsToSkip] : options.pathsToSkip.split(' '); } return promiseOrCallback(callback, cb => { if (parallelValidate != null) { return cb(parallelValidate); } this.$__validate(pathsToValidate, options, (error) => { this.$op = null; cb(error); }); }, this.constructor.events); }; Document.prototype.$validate = Document.prototype.validate; /*! * ignore */ function _evaluateRequiredFunctions(doc) { Object.keys(doc.$__.activePaths.states.require).forEach(path => { const p = doc.$__schema.path(path); if (p != null && typeof p.originalRequiredValue === 'function') { doc.$__.cachedRequired = doc.$__.cachedRequired || {}; try { doc.$__.cachedRequired[path] = p.originalRequiredValue.call(doc, doc); } catch (err) { doc.invalidate(path, err); } } }); } /*! 
* ignore */ function _getPathsToValidate(doc) { const skipSchemaValidators = {}; _evaluateRequiredFunctions(doc); // only validate required fields when necessary let paths = new Set(Object.keys(doc.$__.activePaths.states.require).filter(function(path) { if (!doc.$__isSelected(path) && !doc.$isModified(path)) { return false; } if (doc.$__.cachedRequired != null && path in doc.$__.cachedRequired) { return doc.$__.cachedRequired[path]; } return true; })); Object.keys(doc.$__.activePaths.states.init).forEach(addToPaths); Object.keys(doc.$__.activePaths.states.modify).forEach(addToPaths); Object.keys(doc.$__.activePaths.states.default).forEach(addToPaths); function addToPaths(p) { paths.add(p); } const subdocs = doc.$getAllSubdocs(); const modifiedPaths = doc.modifiedPaths(); for (const subdoc of subdocs) { if (subdoc.$basePath) { // Remove child paths for now, because we'll be validating the whole // subdoc for (const p of paths) { if (p === null || p.startsWith(subdoc.$basePath + '.')) { paths.delete(p); } } if (doc.$isModified(subdoc.$basePath, modifiedPaths) && !doc.isDirectModified(subdoc.$basePath) && !doc.$isDefault(subdoc.$basePath)) { paths.add(subdoc.$basePath); skipSchemaValidators[subdoc.$basePath] = true; } } } // from here on we're not removing items from paths // gh-661: if a whole array is modified, make sure to run validation on all // the children as well for (const path of paths) { const _pathType = doc.$__schema.path(path); if (!_pathType || !_pathType.$isMongooseArray || // To avoid potential performance issues, skip doc arrays whose children // are not required. `getPositionalPathType()` may be slow, so avoid // it unless we have a case of #6364 (_pathType.$isMongooseDocumentArray && !get(_pathType, 'schemaOptions.required'))) { continue; } const val = doc.$__getValue(path); _pushNestedArrayPaths(val, paths, path); } function _pushNestedArrayPaths(val, paths, path) { if (val != null) { const numElements = val.length; for (let j = 0; j < numElements; ++j) { if (Array.isArray(val[j])) { _pushNestedArrayPaths(val[j], paths, path + '.' + j); } else { paths.add(path + '.' + j); } } } } const flattenOptions = { skipArrays: true }; for (const pathToCheck of paths) { if (doc.$__schema.nested[pathToCheck]) { let _v = doc.$__getValue(pathToCheck); if (isMongooseObject(_v)) { _v = _v.toObject({ transform: false }); } const flat = flatten(_v, pathToCheck, flattenOptions, doc.$__schema); Object.keys(flat).forEach(addToPaths); } } for (const path of paths) { // Single nested paths (paths embedded under single nested subdocs) will // be validated on their own when we call `validate()` on the subdoc itself. // Re: gh-8468 if (doc.$__schema.singleNestedPaths.hasOwnProperty(path)) { paths.delete(path); continue; } const _pathType = doc.$__schema.path(path); if (!_pathType || !_pathType.$isSchemaMap) { continue; } const val = doc.$__getValue(path); if (val == null) { continue; } for (const key of val.keys()) { paths.add(path + '.' + key); } } paths = Array.from(paths); return [paths, skipSchemaValidators]; } /*! 
* ignore */ Document.prototype.$__validate = function(pathsToValidate, options, callback) { if (typeof pathsToValidate === 'function') { callback = pathsToValidate; options = null; pathsToValidate = null; } else if (typeof options === 'function') { callback = options; options = null; } const hasValidateModifiedOnlyOption = options && (typeof options === 'object') && ('validateModifiedOnly' in options); const pathsToSkip = get(options, 'pathsToSkip', null); let shouldValidateModifiedOnly; if (hasValidateModifiedOnlyOption) { shouldValidateModifiedOnly = !!options.validateModifiedOnly; } else { shouldValidateModifiedOnly = this.$__schema.options.validateModifiedOnly; } const _this = this; const _complete = () => { let validationError = this.$__.validationError; this.$__.validationError = undefined; if (shouldValidateModifiedOnly && validationError != null) { // Remove any validation errors that aren't from modified paths const errors = Object.keys(validationError.errors); for (const errPath of errors) { if (!this.$isModified(errPath)) { delete validationError.errors[errPath]; } } if (Object.keys(validationError.errors).length === 0) { validationError = void 0; } } this.$__.cachedRequired = {}; this.$emit('validate', _this); this.constructor.emit('validate', _this); if (validationError) { for (const key in validationError.errors) { // Make sure cast errors persist if (!this[documentArrayParent] && validationError.errors[key] instanceof MongooseError.CastError) { this.invalidate(key, validationError.errors[key]); } } return validationError; } }; // only validate required fields when necessary const pathDetails = _getPathsToValidate(this); let paths = shouldValidateModifiedOnly ? pathDetails[0].filter((path) => this.$isModified(path)) : pathDetails[0]; const skipSchemaValidators = pathDetails[1]; if (typeof pathsToValidate === 'string') { pathsToValidate = pathsToValidate.split(' '); } if (Array.isArray(pathsToValidate)) { paths = _handlePathsToValidate(paths, pathsToValidate); } else if (pathsToSkip) { paths = _handlePathsToSkip(paths, pathsToSkip); } if (paths.length === 0) { return immediate(function() { const error = _complete(); if (error) { return _this.$__schema.s.hooks.execPost('validate:error', _this, [_this], { error: error }, function(error) { callback(error); }); } callback(null, _this); }); } const validated = {}; let total = 0; for (const path of paths) { validatePath(path); } function validatePath(path) { if (path == null || validated[path]) { return; } validated[path] = true; total++; immediate(function() { const schemaType = _this.$__schema.path(path); if (!schemaType) { return --total || complete(); } // If user marked as invalid or there was a cast error, don't validate if (!_this.$isValid(path)) { --total || complete(); return; } // If setting a path under a mixed path, avoid using the mixed path validator (gh-10141) if (schemaType[schemaMixedSymbol] != null && path !== schemaType.path) { return --total || complete(); } let val = _this.$__getValue(path); // If you `populate()` and get back a null value, required validators // shouldn't fail (gh-8018). We should always fall back to the populated // value. let pop; if ((pop = _this.$populated(path))) { val = pop; } else if (val != null && val.$__ != null && val.$__.wasPopulated) { // Array paths, like `somearray.1`, do not show up as populated with `$populated()`, // so in that case pull out the document's id val = val._id; } const scope = _this.$__.pathsToScopes != null && path in _this.$__.pathsToScopes ? 
_this.$__.pathsToScopes[path] : _this; const doValidateOptions = { skipSchemaValidators: skipSchemaValidators[path], path: path, validateModifiedOnly: shouldValidateModifiedOnly }; schemaType.doValidate(val, function(err) { if (err) { const isSubdoc = schemaType.$isSingleNested || schemaType.$isArraySubdocument || schemaType.$isMongooseDocumentArray; if (isSubdoc && err instanceof ValidationError) { return --total || complete(); } _this.invalidate(path, err, undefined, true); } --total || complete(); }, scope, doValidateOptions); }); } function complete() { const error = _complete(); if (error) { return _this.$__schema.s.hooks.execPost('validate:error', _this, [_this], { error: error }, function(error) { callback(error); }); } callback(null, _this); } }; /*! * ignore */ function _handlePathsToValidate(paths, pathsToValidate) { const _pathsToValidate = new Set(pathsToValidate); const parentPaths = new Map([]); for (const path of pathsToValidate) { if (path.indexOf('.') === -1) { continue; } const pieces = path.split('.'); let cur = pieces[0]; for (let i = 1; i < pieces.length; ++i) { // Since we skip subpaths under single nested subdocs to // avoid double validation, we need to add back the // single nested subpath if the user asked for it (gh-8626) parentPaths.set(cur, path); cur = cur + '.' + pieces[i]; } } const ret = []; for (const path of paths) { if (_pathsToValidate.has(path)) { ret.push(path); } else if (parentPaths.has(path)) { ret.push(parentPaths.get(path)); } } return ret; } /*! * ignore */ function _handlePathsToSkip(paths, pathsToSkip) { pathsToSkip = new Set(pathsToSkip); paths = paths.filter(p => !pathsToSkip.has(p)); return paths; } /** * Executes registered validation rules (skipping asynchronous validators) for this document. * * ####Note: * * This method is useful if you need synchronous validation. * * ####Example: * * const err = doc.validateSync(); * if (err) { * handleError(err); * } else { * // validation passed * } * * @param {Array|string} pathsToValidate only validate the given paths * @param {Object} [options] options for validation * @param {Boolean} [options.validateModifiedOnly=false] If `true`, Mongoose will only validate modified paths, as opposed to modified paths and `required` paths. * @param {Array|string} [options.pathsToSkip] list of paths to skip. If set, Mongoose will validate every modified path that is not in this list. * @return {ValidationError|undefined} ValidationError if there are errors during validation, or undefined if there is no error. * @api public */ Document.prototype.validateSync = function(pathsToValidate, options) { const _this = this; if (arguments.length === 1 && typeof arguments[0] === 'object' && !Array.isArray(arguments[0])) { options = arguments[0]; pathsToValidate = null; } const hasValidateModifiedOnlyOption = options && (typeof options === 'object') && ('validateModifiedOnly' in options); let shouldValidateModifiedOnly; if (hasValidateModifiedOnlyOption) { shouldValidateModifiedOnly = !!options.validateModifiedOnly; } else { shouldValidateModifiedOnly = this.$__schema.options.validateModifiedOnly; } let pathsToSkip = options && options.pathsToSkip; if (typeof pathsToValidate === 'string') { const isOnePathOnly = pathsToValidate.indexOf(' ') === -1; pathsToValidate = isOnePathOnly ? 
[pathsToValidate] : pathsToValidate.split(' '); } else if (typeof pathsToSkip === 'string' && pathsToSkip.indexOf(' ') !== -1) { pathsToSkip = pathsToSkip.split(' '); } // only validate required fields when necessary const pathDetails = _getPathsToValidate(this); let paths = shouldValidateModifiedOnly ? pathDetails[0].filter((path) => this.$isModified(path)) : pathDetails[0]; const skipSchemaValidators = pathDetails[1]; if (Array.isArray(pathsToValidate)) { paths = _handlePathsToValidate(paths, pathsToValidate); } else if (Array.isArray(pathsToSkip)) { paths = _handlePathsToSkip(paths, pathsToSkip); } const validating = {}; paths.forEach(function(path) { if (validating[path]) { return; } validating[path] = true; const p = _this.$__schema.path(path); if (!p) { return; } if (!_this.$isValid(path)) { return; } const val = _this.$__getValue(path); const err = p.doValidateSync(val, _this, { skipSchemaValidators: skipSchemaValidators[path], path: path, validateModifiedOnly: shouldValidateModifiedOnly }); if (err) { const isSubdoc = p.$isSingleNested || p.$isArraySubdocument || p.$isMongooseDocumentArray; if (isSubdoc && err instanceof ValidationError) { return; } _this.invalidate(path, err, undefined, true); } }); const err = _this.$__.validationError; _this.$__.validationError = undefined; _this.$emit('validate', _this); _this.constructor.emit('validate', _this); if (err) { for (const key in err.errors) { // Make sure cast errors persist if (err.errors[key] instanceof MongooseError.CastError) { _this.invalidate(key, err.errors[key]); } } } return err; }; /** * Marks a path as invalid, causing validation to fail. * * The `errorMsg` argument will become the message of the `ValidationError`. * * The `value` argument (if passed) will be available through the `ValidationError.value` property. * * doc.invalidate('size', 'must be less than 20', 14); * doc.validate(function (err) { * console.log(err) * // prints * { message: 'Validation failed', * name: 'ValidationError', * errors: * { size: * { message: 'must be less than 20', * name: 'ValidatorError', * path: 'size', * type: 'user defined', * value: 14 } } } * }) * * @param {String} path the field to invalidate. For array elements, use the `array.i.field` syntax, where `i` is the 0-based index in the array. * @param {String|Error} errorMsg the error which states the reason `path` was invalid * @param {Object|String|Number|any} value optional invalid value * @param {String} [kind] optional `kind` property for the error * @return {ValidationError} the current ValidationError, with all currently invalidated paths * @api public */ Document.prototype.invalidate = function(path, err, val, kind) { if (!this.$__.validationError) { this.$__.validationError = new ValidationError(this); } if (this.$__.validationError.errors[path]) { return; } if (!err || typeof err === 'string') { err = new ValidatorError({ path: path, message: err, type: kind || 'user defined', value: val }); } if (this.$__.validationError === err) { return this.$__.validationError; } this.$__.validationError.addError(path, err); return this.$__.validationError; }; /** * Marks a path as valid, removing existing validation errors. 
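 *
 * For example (a minimal sketch; the `age` path is a placeholder):
 *
 *     doc.invalidate('age', 'must be at least 18', 16);
 *     doc.$isValid('age'); // false
 *     doc.$markValid('age');
 *     doc.$isValid('age'); // true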
 *
 * @param {String} path the field to mark as valid
 * @api public
 * @memberOf Document
 * @instance
 * @method $markValid
 */

Document.prototype.$markValid = function(path) {
  if (!this.$__.validationError || !this.$__.validationError.errors[path]) {
    return;
  }

  delete this.$__.validationError.errors[path];
  if (Object.keys(this.$__.validationError.errors).length === 0) {
    this.$__.validationError = null;
  }
};

/*!
 * ignore
 */

function _markValidSubpaths(doc, path) {
  if (!doc.$__.validationError) {
    return;
  }

  const keys = Object.keys(doc.$__.validationError.errors);
  for (const key of keys) {
    if (key.startsWith(path + '.')) {
      delete doc.$__.validationError.errors[key];
    }
  }

  if (Object.keys(doc.$__.validationError.errors).length === 0) {
    doc.$__.validationError = null;
  }
}

/*!
 * ignore
 */

function _checkImmutableSubpaths(subdoc, schematype, priorVal) {
  const schema = schematype.schema;
  if (schema == null) {
    return;
  }

  for (const key of Object.keys(schema.paths)) {
    const path = schema.paths[key];
    if (path.$immutableSetter == null) {
      continue;
    }
    const oldVal = priorVal == null ? void 0 : priorVal.$__getValue(key);
    // Calling immutableSetter with `oldVal` even though it expects `newVal`
    // is intentional. That's because `$immutableSetter` compares its param
    // to the current value.
    path.$immutableSetter.call(subdoc, oldVal);
  }
}

/**
 * Saves this document by inserting a new document into the database if [document.isNew](/docs/api.html#document_Document-isNew) is `true`,
 * or sends an [updateOne](/docs/api.html#document_Document-updateOne) operation **only** with the modifications to the database;
 * it does not replace the whole document in the latter case.
 *
 * ####Example:
 *
 *     product.sold = Date.now();
 *     product = await product.save();
 *
 * If save is successful, the returned promise will fulfill with the document
 * saved.
 *
 * ####Example:
 *
 *     const newProduct = await product.save();
 *     newProduct === product; // true
 *
 * @param {Object} [options] optional options
 * @param {Session} [options.session=null] the [session](https://docs.mongodb.com/manual/reference/server-sessions/) associated with this save operation. If not specified, defaults to the [document's associated session](api.html#document_Document-$session).
 * @param {Object} [options.safe] (DEPRECATED) overrides [schema's safe option](http://mongoosejs.com/docs/guide.html#safe). Use the `w` option instead.
 * @param {Boolean} [options.validateBeforeSave] set to false to save without validating.
 * @param {Boolean} [options.validateModifiedOnly=false] If `true`, Mongoose will only validate modified paths, as opposed to modified paths and `required` paths.
 * @param {Number|String} [options.w] set the [write concern](https://docs.mongodb.com/manual/reference/write-concern/#w-option). Overrides the [schema-level `writeConcern` option](/docs/guide.html#writeConcern)
 * @param {Boolean} [options.j] set to true for MongoDB to wait until this `save()` has been [journaled before resolving the returned promise](https://docs.mongodb.com/manual/reference/write-concern/#j-option). Overrides the [schema-level `writeConcern` option](/docs/guide.html#writeConcern)
 * @param {Number} [options.wtimeout] sets a [timeout for the write concern](https://docs.mongodb.com/manual/reference/write-concern/#wtimeout). Overrides the [schema-level `writeConcern` option](/docs/guide.html#writeConcern).
 * @param {Boolean} [options.checkKeys=true] the MongoDB driver prevents you from saving keys that start with '$' or contain '.' by default.
 * Set this option to `false` to skip that check. See [restrictions on field names](https://docs.mongodb.com/manual/reference/limits/#Restrictions-on-Field-Names)
 * @param {Boolean} [options.timestamps=true] if `false` and [timestamps](./guide.html#timestamps) are enabled, skip timestamps for this `save()`.
 * @param {Function} [fn] optional callback
 * @method save
 * @memberOf Document
 * @instance
 * @throws {DocumentNotFoundError} if this [save updates an existing document](api.html#document_Document-isNew) but the document doesn't exist in the database. For example, you will get this error if the document is [deleted between when you retrieved the document and when you saved it](documents.html#updating).
 * @return {Promise|undefined} Returns undefined if used with a callback, or a Promise otherwise.
 * @api public
 * @see middleware http://mongoosejs.com/docs/middleware.html
 */

/**
 * Checks if a path is valid, i.e. it has no validation errors.
 *
 * @param {String|Array<String>} path the field to check
 * @method $isValid
 * @memberOf Document
 * @instance
 * @api private
 */

Document.prototype.$isValid = function(path) {
  if (this.$__.validationError == null || Object.keys(this.$__.validationError.errors).length === 0) {
    return true;
  }
  if (path == null) {
    return false;
  }

  if (path.indexOf(' ') !== -1) {
    path = path.split(' ');
  }
  if (Array.isArray(path)) {
    return path.some(p => this.$__.validationError.errors[p] == null);
  }

  return this.$__.validationError.errors[path] == null;
};

/**
 * Resets the internal modified state of this document.
 *
 * @api private
 * @return {Document}
 * @method $__reset
 * @memberOf Document
 * @instance
 */

Document.prototype.$__reset = function reset() {
  let _this = this;
  DocumentArray || (DocumentArray = require('./types/DocumentArray'));

  this.$__.activePaths
    .map('init', 'modify', function(i) {
      return _this.$__getValue(i);
    })
    .filter(function(val) {
      return val && val instanceof Array && val.isMongooseDocumentArray && val.length;
    })
    .forEach(function(array) {
      let i = array.length;
      while (i--) {
        const doc = array[i];
        if (!doc) {
          continue;
        }
        doc.$__reset();
      }

      _this.$__.activePaths.init(array.$path());

      array[arrayAtomicsBackupSymbol] = array[arrayAtomicsSymbol];
      array[arrayAtomicsSymbol] = {};
    });

  this.$__.activePaths.
    map('init', 'modify', function(i) {
      return _this.$__getValue(i);
    }).
    filter(function(val) {
      return val && val.$isSingleNested;
    }).
    forEach(function(doc) {
      doc.$__reset();
      if (doc.$parent() === _this) {
        _this.$__.activePaths.init(doc.$basePath);
      } else if (doc.$parent() != null && doc.$parent().$isSubdocument) {
        // If map path underneath subdocument, may end up with a case where
        // map path is modified but parent still needs to be reset. See gh-10295
        doc.$parent().$__reset();
      }
    });

  // clear atomics
  this.$__dirty().forEach(function(dirt) {
    const type = dirt.value;

    if (type && type[arrayAtomicsSymbol]) {
      type[arrayAtomicsBackupSymbol] = type[arrayAtomicsSymbol];
      type[arrayAtomicsSymbol] = {};
    }
  });

  this.$__.backup = {};
  this.$__.backup.activePaths = {
    modify: Object.assign({}, this.$__.activePaths.states.modify),
    default: Object.assign({}, this.$__.activePaths.states.default)
  };
  this.$__.backup.validationError = this.$__.validationError;
  this.$__.backup.errors = this.$errors;

  // Clear 'dirty' cache
  this.$__.activePaths.clear('modify');
  this.$__.activePaths.clear('default');
  this.$__.validationError = undefined;
  this.$errors = undefined;

  _this = this;
  this.$__schema.requiredPaths().forEach(function(path) {
    _this.$__.activePaths.require(path);
  });

  return this;
};

/*!
 * ignore
 */

Document.prototype.$__undoReset = function $__undoReset() {
  if (this.$__.backup == null || this.$__.backup.activePaths == null) {
    return;
  }

  this.$__.activePaths.states.modify = this.$__.backup.activePaths.modify;
  this.$__.activePaths.states.default = this.$__.backup.activePaths.default;

  this.$__.validationError = this.$__.backup.validationError;
  this.$errors = this.$__.backup.errors;

  for (const dirt of this.$__dirty()) {
    const type = dirt.value;

    if (type && type[arrayAtomicsSymbol] && type[arrayAtomicsBackupSymbol]) {
      type[arrayAtomicsSymbol] = type[arrayAtomicsBackupSymbol];
    }
  }

  for (const subdoc of this.$getAllSubdocs()) {
    subdoc.$__undoReset();
  }
};

/**
 * Returns this document's dirty paths / vals.
 *
 * @api private
 * @method $__dirty
 * @memberOf Document
 * @instance
 */

Document.prototype.$__dirty = function() {
  const _this = this;

  let all = this.$__.activePaths.map('modify', function(path) {
    return {
      path: path,
      value: _this.$__getValue(path),
      schema: _this.$__path(path)
    };
  });

  // gh-2558: if we had to set a default and the value is not undefined,
  // we have to save as well
  all = all.concat(this.$__.activePaths.map('default', function(path) {
    if (path === '_id' || _this.$__getValue(path) == null) {
      return;
    }
    return {
      path: path,
      value: _this.$__getValue(path),
      schema: _this.$__path(path)
    };
  }));

  const allPaths = new Map(all.filter((el) => el != null).map((el) => [el.path, el.value]));
  // Ignore "foo.a" if "foo" is dirty already.
  const minimal = [];

  all.forEach(function(item) {
    if (!item) {
      return;
    }

    let top = null;

    const array = parentPaths(item.path);
    for (let i = 0; i < array.length - 1; i++) {
      if (allPaths.has(array[i])) {
        top = allPaths.get(array[i]);
        break;
      }
    }
    if (top == null) {
      minimal.push(item);
    } else if (top != null &&
        top[arrayAtomicsSymbol] != null &&
        top.hasAtomics()) {
      // special case for top level MongooseArrays
      // the `top` array itself and a sub path of `top` are being set.
      // the only way to honor all of both modifications is through a $set
      // of the entire array.
      top[arrayAtomicsSymbol] = {};
      top[arrayAtomicsSymbol].$set = top;
    }
  });

  return minimal;
};

/**
 * Assigns/compiles `schema` into this document's prototype.
 *
 * @param {Schema} schema
 * @api private
 * @method $__setSchema
 * @memberOf Document
 * @instance
 */

Document.prototype.$__setSchema = function(schema) {
  compile(schema.tree, this, undefined, schema.options);

  // Apply default getters if virtual doesn't have any (gh-6262)
  for (const key of Object.keys(schema.virtuals)) {
    schema.virtuals[key]._applyDefaultGetters();
  }
  if (schema.path('schema') == null) {
    this.schema = schema;
  }
  this.$__schema = schema;
  this[documentSchemaSymbol] = schema;
};

/**
 * Get active paths that were changed and are arrays
 *
 * @api private
 * @method $__getArrayPathsToValidate
 * @memberOf Document
 * @instance
 */

Document.prototype.$__getArrayPathsToValidate = function() {
  DocumentArray || (DocumentArray = require('./types/DocumentArray'));

  // validate all document arrays.
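  // Note: the chain below walks every initialized or modified path, keeps
  // only the non-empty document arrays, and flattens them into a single
  // array of subdocuments (empty slots are filtered out at the end).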
return this.$__.activePaths .map('init', 'modify', function(i) { return this.$__getValue(i); }.bind(this)) .filter(function(val) { return val && val instanceof Array && val.isMongooseDocumentArray && val.length; }).reduce(function(seed, array) { return seed.concat(array); }, []) .filter(function(doc) { return doc; }); }; /** * Get all subdocs (by bfs) * * @api public * @method $getAllSubdocs * @memberOf Document * @instance */ Document.prototype.$getAllSubdocs = function() { DocumentArray || (DocumentArray = require('./types/DocumentArray')); Embedded = Embedded || require('./types/ArraySubdocument'); function docReducer(doc, seed, path) { let val = doc; let isNested = false; if (path) { if (doc instanceof Document && doc[documentSchemaSymbol].paths[path]) { val = doc._doc[path]; } else if (doc instanceof Document && doc[documentSchemaSymbol].nested[path]) { val = doc._doc[path]; isNested = true; } else { val = doc[path]; } } if (val instanceof Embedded) { seed.push(val); } else if (val instanceof Map) { seed = Array.from(val.keys()).reduce(function(seed, path) { return docReducer(val.get(path), seed, null); }, seed); } else if (val && val.$isSingleNested) { seed = Object.keys(val._doc).reduce(function(seed, path) { return docReducer(val._doc, seed, path); }, seed); seed.push(val); } else if (val && val.isMongooseDocumentArray) { val.forEach(function _docReduce(doc) { if (!doc || !doc._doc) { return; } seed = Object.keys(doc._doc).reduce(function(seed, path) { return docReducer(doc._doc, seed, path); }, seed); if (doc instanceof Embedded) { seed.push(doc); } }); } else if (isNested && val != null) { for (const path of Object.keys(val)) { docReducer(val, seed, path); } } return seed; } const subDocs = []; for (const path of Object.keys(this._doc)) { docReducer(this, subDocs, path); } return subDocs; }; /*! * Runs queued functions */ function applyQueue(doc) { const q = doc.$__schema && doc.$__schema.callQueue; if (!q.length) { return; } for (const pair of q) { if (pair[0] !== 'pre' && pair[0] !== 'post' && pair[0] !== 'on') { doc[pair[0]].apply(doc, pair[1]); } } } /*! * ignore */ Document.prototype.$__handleReject = function handleReject(err) { // emit on the Model if listening if (this.$listeners('error').length) { this.$emit('error', err); } else if (this.constructor.listeners && this.constructor.listeners('error').length) { this.constructor.emit('error', err); } }; /** * Internal helper for toObject() and toJSON() that doesn't manipulate options * * @api private * @method $toObject * @memberOf Document * @instance */ Document.prototype.$toObject = function(options, json) { let defaultOptions = { transform: true, flattenDecimals: true }; const path = json ? 'toJSON' : 'toObject'; const baseOptions = get(this, 'constructor.base.options.' + path, {}); const schemaOptions = get(this, '$__schema.options', {}); // merge base default options with Schema's set default options if available. // `clone` is necessary here because `utils.options` directly modifies the second input. defaultOptions = utils.options(defaultOptions, clone(baseOptions)); defaultOptions = utils.options(defaultOptions, clone(schemaOptions[path] || {})); // If options do not exist or is not an object, set it to empty object options = utils.isPOJO(options) ? 
clone(options) : {}; options._calledWithOptions = options._calledWithOptions || clone(options); let _minimize; if (options._calledWithOptions.minimize != null) { _minimize = options.minimize; } else if (defaultOptions.minimize != null) { _minimize = defaultOptions.minimize; } else { _minimize = schemaOptions.minimize; } let flattenMaps; if (options._calledWithOptions.flattenMaps != null) { flattenMaps = options.flattenMaps; } else if (defaultOptions.flattenMaps != null) { flattenMaps = defaultOptions.flattenMaps; } else { flattenMaps = schemaOptions.flattenMaps; } // The original options that will be passed to `clone()`. Important because // `clone()` will recursively call `$toObject()` on embedded docs, so we // need the original options the user passed in, plus `_isNested` and // `_parentOptions` for checking whether we need to depopulate. const cloneOptions = Object.assign(utils.clone(options), { _isNested: true, json: json, minimize: _minimize, flattenMaps: flattenMaps }); if (utils.hasUserDefinedProperty(options, 'getters')) { cloneOptions.getters = options.getters; } if (utils.hasUserDefinedProperty(options, 'virtuals')) { cloneOptions.virtuals = options.virtuals; } const depopulate = options.depopulate || get(options, '_parentOptions.depopulate', false); // _isNested will only be true if this is not the top level document, we // should never depopulate if (depopulate && options._isNested && this.$__.wasPopulated) { // populated paths that we set to a document return clone(this._id, cloneOptions); } // merge default options with input options. options = utils.options(defaultOptions, options); options._isNested = true; options.json = json; options.minimize = _minimize; cloneOptions._parentOptions = options; cloneOptions._skipSingleNestedGetters = true; const gettersOptions = Object.assign({}, cloneOptions); gettersOptions._skipSingleNestedGetters = false; // remember the root transform function // to save it from being overwritten by sub-transform functions const originalTransform = options.transform; let ret = clone(this._doc, cloneOptions) || {}; if (options.getters) { applyGetters(this, ret, gettersOptions); if (options.minimize) { ret = minimize(ret) || {}; } } if (options.virtuals || (options.getters && options.virtuals !== false)) { applyVirtuals(this, ret, gettersOptions, options); } if (options.versionKey === false && this.$__schema.options.versionKey) { delete ret[this.$__schema.options.versionKey]; } let transform = options.transform; // In the case where a subdocument has its own transform function, we need to // check and see if the parent has a transform (options.transform) and if the // child schema has a transform (this.schema.options.toObject) In this case, // we need to adjust options.transform to be the child schema's transform and // not the parent schema's if (transform) { applySchemaTypeTransforms(this, ret); } if (options.useProjection) { omitDeselectedFields(this, ret); } if (transform === true || (schemaOptions.toObject && transform)) { const opts = options.json ? schemaOptions.toJSON : schemaOptions.toObject; if (opts) { transform = (typeof options.transform === 'function' ? options.transform : opts.transform); } } else { options.transform = originalTransform; } if (typeof transform === 'function') { const xformed = transform(this, ret, options); if (typeof xformed !== 'undefined') { ret = xformed; } } return ret; }; /** * Converts this document into a plain-old JavaScript object ([POJO](https://masteringjs.io/tutorials/fundamentals/pojo)). 
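 *
 * For example (a minimal sketch; `Person` is a placeholder model):
 *
 *     const doc = new Person({ name: 'Val' });
 *     const obj = doc.toObject();
 *     obj.name;              // 'Val'
 *     obj instanceof Person; // false, `obj` is a plain object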
* * Buffers are converted to instances of [mongodb.Binary](http://mongodb.github.com/node-mongodb-native/api-bson-generated/binary.html) for proper storage. * * ####Options: * * - `getters` apply all getters (path and virtual getters), defaults to false * - `aliases` apply all aliases if `virtuals=true`, defaults to true * - `virtuals` apply virtual getters (can override `getters` option), defaults to false * - `minimize` remove empty objects, defaults to true * - `transform` a transform function to apply to the resulting document before returning * - `depopulate` depopulate any populated paths, replacing them with their original refs, defaults to false * - `versionKey` whether to include the version key, defaults to true * - `flattenMaps` convert Maps to POJOs. Useful if you want to JSON.stringify() the result of toObject(), defaults to false * - `useProjection` set to `true` to omit fields that are excluded in this document's projection. Unless you specified a projection, this will omit any field that has `select: false` in the schema. * * ####Getters/Virtuals * * Example of only applying path getters * * doc.toObject({ getters: true, virtuals: false }) * * Example of only applying virtual getters * * doc.toObject({ virtuals: true }) * * Example of applying both path and virtual getters * * doc.toObject({ getters: true }) * * To apply these options to every document of your schema by default, set your [schemas](#schema_Schema) `toObject` option to the same argument. * * schema.set('toObject', { virtuals: true }) * * ####Transform * * We may need to perform a transformation of the resulting object based on some criteria, say to remove some sensitive information or return a custom object. In this case we set the optional `transform` function. * * Transform functions receive three arguments * * function (doc, ret, options) {} * * - `doc` The mongoose document which is being converted * - `ret` The plain object representation which has been converted * - `options` The options in use (either schema options or the options passed inline) * * ####Example * * // specify the transform schema option * if (!schema.options.toObject) schema.options.toObject = {}; * schema.options.toObject.transform = function (doc, ret, options) { * // remove the _id of every document before returning the result * delete ret._id; * return ret; * } * * // without the transformation in the schema * doc.toObject(); // { _id: 'anId', name: 'Wreck-it Ralph' } * * // with the transformation * doc.toObject(); // { name: 'Wreck-it Ralph' } * * With transformations we can do a lot more than remove properties. 
We can even return completely new customized objects:
 *
 *     if (!schema.options.toObject) schema.options.toObject = {};
 *     schema.options.toObject.transform = function (doc, ret, options) {
 *       return { movie: ret.name }
 *     }
 *
 *     // without the transformation in the schema
 *     doc.toObject(); // { _id: 'anId', name: 'Wreck-it Ralph' }
 *
 *     // with the transformation
 *     doc.toObject(); // { movie: 'Wreck-it Ralph' }
 *
 * _Note: if a transform function returns `undefined`, the return value will be ignored._
 *
 * Transformations may also be applied inline, overriding any transform set in the options:
 *
 *     function xform (doc, ret, options) {
 *       return { inline: ret.name, custom: true }
 *     }
 *
 *     // pass the transform as an inline option
 *     doc.toObject({ transform: xform }); // { inline: 'Wreck-it Ralph', custom: true }
 *
 * If you want to skip transformations, use `transform: false`:
 *
 *     schema.options.toObject.hide = '_id';
 *     schema.options.toObject.transform = function (doc, ret, options) {
 *       if (options.hide) {
 *         options.hide.split(' ').forEach(function (prop) {
 *           delete ret[prop];
 *         });
 *       }
 *       return ret;
 *     }
 *
 *     const doc = new Doc({ _id: 'anId', secret: 47, name: 'Wreck-it Ralph' });
 *     doc.toObject();                                         // { secret: 47, name: 'Wreck-it Ralph' }
 *     doc.toObject({ hide: 'secret _id', transform: false }); // { _id: 'anId', secret: 47, name: 'Wreck-it Ralph' }
 *     doc.toObject({ hide: 'secret _id', transform: true });  // { name: 'Wreck-it Ralph' }
 *
 * If you pass a transform in `toObject()` options, Mongoose will apply the transform
 * to [subdocuments](/docs/subdocs.html) in addition to the top-level document.
 * Similarly, `transform: false` skips transforms for all subdocuments.
 * Note that this behavior is different for transforms defined in the schema:
 * if you define a transform in `schema.options.toObject.transform`, that transform
 * will **not** apply to subdocuments.
 *
 *     const memberSchema = new Schema({ name: String, email: String });
 *     const groupSchema = new Schema({ members: [memberSchema], name: String, email: String });
 *     const Group = mongoose.model('Group', groupSchema);
 *
 *     const doc = new Group({
 *       name: 'Engineering',
 *       email: 'engineering@example.com',
 *       members: [{ name: 'Val', email: 'val@example.com' }]
 *     });
 *
 *     // Removes `email` from both top-level document **and** array elements
 *     // { name: 'Engineering', members: [{ name: 'Val' }] }
 *     doc.toObject({ transform: (doc, ret) => { delete ret.email; return ret; } });
 *
 * Transforms, like all of these options, are also available for `toJSON`. See [this guide to `JSON.stringify()`](https://thecodebarbarian.com/the-80-20-guide-to-json-stringify-in-javascript.html) to learn why `toJSON()` and `toObject()` are separate functions.
 *
 * See [schema options](/docs/guide.html#toObject) for some more details.
 *
 * _During save, no custom options are applied to the document before being sent to the database._
 *
 * @param {Object} [options]
 * @param {Boolean} [options.getters=false] if true, apply all getters, including virtuals
 * @param {Boolean} [options.virtuals=false] if true, apply virtuals, including aliases. Use `{ getters: true, virtuals: false }` to just apply getters, not virtuals
 * @param {Boolean} [options.aliases=true] if `options.virtuals = true`, you can set `options.aliases = false` to skip applying aliases. This option is a no-op if `options.virtuals = false`.
* @param {Boolean} [options.minimize=true] if true, omit any empty objects from the output
 * @param {Function|null} [options.transform=null] if set, mongoose will call this function to allow you to transform the returned object
 * @param {Boolean} [options.depopulate=false] if true, replace any conventionally populated paths with the original id in the output. Has no effect on virtual populated paths.
 * @param {Boolean} [options.versionKey=true] if false, exclude the version key (`__v` by default) from the output
 * @param {Boolean} [options.flattenMaps=false] if true, convert Maps to POJOs. Useful if you want to `JSON.stringify()` the result of `toObject()`.
 * @param {Boolean} [options.useProjection=false] - If true, omits fields that are excluded in this document's projection. Unless you specified a projection, this will omit any field that has `select: false` in the schema.
 * @return {Object} js object
 * @see mongodb.Binary http://mongodb.github.com/node-mongodb-native/api-bson-generated/binary.html
 * @api public
 * @memberOf Document
 * @instance
 */

Document.prototype.toObject = function(options) {
  return this.$toObject(options);
};

/*!
 * Minimizes an object, removing undefined values and empty objects
 *
 * @param {Object} object to minimize
 * @return {Object}
 */

function minimize(obj) {
  const keys = Object.keys(obj);
  let i = keys.length;
  let hasKeys;
  let key;
  let val;

  while (i--) {
    key = keys[i];
    val = obj[key];

    if (utils.isPOJO(val)) {
      obj[key] = minimize(val);
    }

    if (undefined === obj[key]) {
      delete obj[key];
      continue;
    }

    hasKeys = true;
  }

  return hasKeys
    ? obj
    : undefined;
}

/*!
 * Applies virtual properties to `json`.
 */

function applyVirtuals(self, json, options, toObjectOptions) {
  const schema = self.$__schema;
  const paths = Object.keys(schema.virtuals);
  let i = paths.length;
  const numPaths = i;
  let path;
  let assignPath;
  let cur = self._doc;
  let v;
  const aliases = get(toObjectOptions, 'aliases', true);

  let virtualsToApply = null;
  if (Array.isArray(options.virtuals)) {
    virtualsToApply = new Set(options.virtuals);
  } else if (options.virtuals && options.virtuals.pathsToSkip) {
    virtualsToApply = new Set(paths);
    for (let i = 0; i < options.virtuals.pathsToSkip.length; i++) {
      if (virtualsToApply.has(options.virtuals.pathsToSkip[i])) {
        virtualsToApply.delete(options.virtuals.pathsToSkip[i]);
      }
    }
  }

  if (!cur) {
    return json;
  }

  options = options || {};
  for (i = 0; i < numPaths; ++i) {
    path = paths[i];

    if (virtualsToApply != null && !virtualsToApply.has(path)) {
      continue;
    }

    // Allow skipping aliases with `toObject({ virtuals: true, aliases: false })`
    if (!aliases && schema.aliases.hasOwnProperty(path)) {
      continue;
    }

    // We may be applying virtuals to a nested object, for example if calling
    // `doc.nestedProp.toJSON()`. If so, the path we assign to, `assignPath`,
    // will be a trailing substring of the `path`.
    assignPath = path;
    if (options.path != null) {
      if (!path.startsWith(options.path + '.')) {
        continue;
      }
      assignPath = path.substr(options.path.length + 1);
    }
    const parts = assignPath.split('.');
    v = clone(self.get(path), options);
    if (v === void 0) {
      continue;
    }
    const plen = parts.length;
    cur = json;
    for (let j = 0; j < plen - 1; ++j) {
      cur[parts[j]] = cur[parts[j]] || {};
      cur = cur[parts[j]];
    }
    cur[parts[plen - 1]] = v;
  }

  return json;
}

/*!
 * Applies getters to properties on `json`.
* * @param {Document} self * @param {Object} json * @return {Object} `json` */ function applyGetters(self, json, options) { const schema = self.$__schema; const paths = Object.keys(schema.paths); let i = paths.length; let path; let cur = self._doc; let v; if (!cur) { return json; } while (i--) { path = paths[i]; const parts = path.split('.'); const plen = parts.length; const last = plen - 1; let branch = json; let part; cur = self._doc; if (!self.$__isSelected(path)) { continue; } for (let ii = 0; ii < plen; ++ii) { part = parts[ii]; v = cur[part]; if (ii === last) { const val = self.$get(path); branch[part] = clone(val, options); } else if (v == null) { if (part in cur) { branch[part] = v; } break; } else { branch = branch[part] || (branch[part] = {}); } cur = v; } } return json; } /*! * Applies schema type transforms to `json`. * * @param {Document} self * @param {Object} json * @return {Object} `json` */ function applySchemaTypeTransforms(self, json) { const schema = self.$__schema; const paths = Object.keys(schema.paths || {}); const cur = self._doc; if (!cur) { return json; } for (const path of paths) { const schematype = schema.paths[path]; if (typeof schematype.options.transform === 'function') { const val = self.$get(path); const transformedValue = schematype.options.transform.call(self, val); throwErrorIfPromise(path, transformedValue); utils.setValue(path, transformedValue, json); } else if (schematype.$embeddedSchemaType != null && typeof schematype.$embeddedSchemaType.options.transform === 'function') { const vals = [].concat(self.$get(path)); const transform = schematype.$embeddedSchemaType.options.transform; for (let i = 0; i < vals.length; ++i) { const transformedValue = transform.call(self, vals[i]); vals[i] = transformedValue; throwErrorIfPromise(path, transformedValue); } json[path] = vals; } } return json; } function throwErrorIfPromise(path, transformedValue) { if (isPromise(transformedValue)) { throw new Error('`transform` function must be synchronous, but the transform on path `' + path + '` returned a promise.'); } } /*! * ignore */ function omitDeselectedFields(self, json) { const schema = self.$__schema; const paths = Object.keys(schema.paths || {}); const cur = self._doc; if (!cur) { return json; } let selected = self.$__.selected; if (selected === void 0) { selected = {}; queryhelpers.applyPaths(selected, schema); } if (selected == null || Object.keys(selected).length === 0) { return json; } for (const path of paths) { if (selected[path] != null && !selected[path]) { delete json[path]; } } return json; } /** * The return value of this method is used in calls to JSON.stringify(doc). * * This method accepts the same options as [Document#toObject](#document_Document-toObject). To apply the options to every document of your schema by default, set your [schemas](#schema_Schema) `toJSON` option to the same argument. * * schema.set('toJSON', { virtuals: true }) * * See [schema options](/docs/guide.html#toJSON) for details. * * @param {Object} options * @return {Object} * @see Document#toObject #document_Document-toObject * @see JSON.stringify() in JavaScript https://thecodebarbarian.com/the-80-20-guide-to-json-stringify-in-javascript.html * @api public * @memberOf Document * @instance */ Document.prototype.toJSON = function(options) { return this.$toObject(options, true); }; Document.prototype.ownerDocument = function() { return this; }; /** * If this document is a subdocument or populated document, returns the document's * parent. 
Returns the original document if there is no parent. * * @api public * @method parent * @memberOf Document * @instance */ Document.prototype.parent = function() { if (this.$isSubdocument || this.$__.wasPopulated) { return this.$__.parent; } return this; }; /** * Alias for `parent()`. If this document is a subdocument or populated * document, returns the document's parent. Returns `undefined` otherwise. * * @api public * @method $parent * @memberOf Document * @instance */ Document.prototype.$parent = Document.prototype.parent; /** * Helper for console.log * * @api public * @method inspect * @memberOf Document * @instance */ Document.prototype.inspect = function(options) { const isPOJO = utils.isPOJO(options); let opts; if (isPOJO) { opts = options; opts.minimize = false; } const ret = this.toObject(opts); if (ret == null) { // If `toObject()` returns null, `this` is still an object, so if `inspect()` // prints out null this can cause some serious confusion. See gh-7942. return 'MongooseDocument { ' + ret + ' }'; } return ret; }; if (inspect.custom) { /*! * Avoid Node deprecation warning DEP0079 */ Document.prototype[inspect.custom] = Document.prototype.inspect; } /** * Helper for console.log * * @api public * @method toString * @memberOf Document * @instance */ Document.prototype.toString = function() { const ret = this.inspect(); if (typeof ret === 'string') { return ret; } return inspect(ret); }; /** * Returns true if this document is equal to another document. * * Documents are considered equal when they have matching `_id`s, unless neither * document has an `_id`, in which case this function falls back to using * `deepEqual()`. * * @param {Document} doc a document to compare * @return {Boolean} * @api public * @memberOf Document * @instance */ Document.prototype.equals = function(doc) { if (!doc) { return false; } const tid = this.$__getValue('_id'); const docid = doc.$__ != null ? doc.$__getValue('_id') : doc; if (!tid && !docid) { return deepEqual(this, doc); } return tid && tid.equals ? tid.equals(docid) : tid === docid; }; /** * Populates paths on an existing document. * * ####Example: * * await doc.populate([ * 'stories', * { path: 'fans', sort: { name: -1 } } * ]); * doc.populated('stories'); // Array of ObjectIds * doc.stories[0].title; // 'Casino Royale' * doc.populated('fans'); // Array of ObjectIds * * await doc.populate('fans', '-email'); * doc.fans[0].email // not populated * * await doc.populate('author fans', '-email'); * doc.author.email // not populated * doc.fans[0].email // not populated * * @param {String|Object|Array} path either the path to populate or an object specifying all parameters, or either an array of those * @param {Object|String} [select] Field selection for the population query * @param {Model} [model] The model you wish to use for population. If not specified, populate will look up the model by the name in the Schema's `ref` field. * @param {Object} [match] Conditions for the population query * @param {Object} [options] Options for the population query (sort, etc) * @param {String} [options.path=null] The path to populate. * @param {boolean} [options.retainNullValues=false] by default, Mongoose removes null and undefined values from populated arrays. Use this option to make `populate()` retain `null` and `undefined` array entries. * @param {boolean} [options.getters=false] if true, Mongoose will call any getters defined on the `localField`. By default, Mongoose gets the raw value of `localField`. 
For example, you would need to set this option to `true` if you wanted to [add a `lowercase` getter to your `localField`](/docs/schematypes.html#schematype-options). * @param {boolean} [options.clone=false] When you do `BlogPost.find().populate('author')`, blog posts with the same author will share 1 copy of an `author` doc. Enable this option to make Mongoose clone populated docs before assigning them. * @param {Object|Function} [options.match=null] Add an additional filter to the populate query. Can be a filter object containing [MongoDB query syntax](https://docs.mongodb.com/manual/tutorial/query-documents/), or a function that returns a filter object. * @param {Function} [options.transform=null] Function that Mongoose will call on every populated document that allows you to transform the populated document. * @param {Object} [options.options=null] Additional options like `limit` and `lean`. * @param {Function} [callback] Callback * @see population ./populate.html * @see Query#select #query_Query-select * @see Model.populate #model_Model.populate * @memberOf Document * @instance * @return {Promise|null} * @api public */ Document.prototype.populate = function populate() { const pop = {}; const args = utils.args(arguments); let fn; if (args.length > 0) { if (typeof args[args.length - 1] === 'function') { fn = args.pop(); } // use hash to remove duplicate paths const res = utils.populate.apply(null, args); for (const populateOptions of res) { pop[populateOptions.path] = populateOptions; } } const paths = utils.object.vals(pop); let topLevelModel = this.constructor; if (this.$__isNested) { topLevelModel = this.$__[scopeSymbol].constructor; const nestedPath = this.$__.nestedPath; paths.forEach(function(populateOptions) { populateOptions.path = nestedPath + '.' + populateOptions.path; }); } // Use `$session()` by default if the document has an associated session // See gh-6754 if (this.$session() != null) { const session = this.$session(); paths.forEach(path => { if (path.options == null) { path.options = { session: session }; return; } if (!('session' in path.options)) { path.options.session = session; } }); } paths.forEach(p => { p._localModel = topLevelModel; }); return topLevelModel.populate(this, paths, fn); }; /** * Gets all populated documents associated with this document. * * @api public * @return {Array<Document>} array of populated documents. Empty array if there are no populated documents associated with this document. * @memberOf Document * @instance */ Document.prototype.$getPopulatedDocs = function $getPopulatedDocs() { let keys = []; if (this.$__.populated != null) { keys = keys.concat(Object.keys(this.$__.populated)); } let result = []; for (const key of keys) { const value = this.$get(key); if (Array.isArray(value)) { result = result.concat(value); } else if (value instanceof Document) { result.push(value); } } return result; }; /** * Gets _id(s) used during population of the given `path`. * * ####Example: * * Model.findOne().populate('author').exec(function (err, doc) { * console.log(doc.author.name) // Dr.Seuss * console.log(doc.populated('author')) // '5144cf8050f071d979c118a7' * }) * * If the path was not populated, returns `undefined`. 
* * @param {String} path * @return {Array|ObjectId|Number|Buffer|String|undefined} * @memberOf Document * @instance * @api public */ Document.prototype.populated = function(path, val, options) { // val and options are internal if (val == null || val === true) { if (!this.$__.populated) { return undefined; } if (typeof path !== 'string') { return undefined; } // Map paths can be populated with either `path.$*` or just `path` const _path = path.endsWith('.$*') ? path.replace(/\.\$\*$/, '') : path; const v = this.$__.populated[_path]; if (v) { return val === true ? v : v.value; } return undefined; } this.$__.populated || (this.$__.populated = {}); this.$__.populated[path] = { value: val, options: options }; // If this was a nested populate, make sure each populated doc knows // about its populated children (gh-7685) const pieces = path.split('.'); for (let i = 0; i < pieces.length - 1; ++i) { const subpath = pieces.slice(0, i + 1).join('.'); const subdoc = this.$get(subpath); if (subdoc != null && subdoc.$__ != null && this.$populated(subpath)) { const rest = pieces.slice(i + 1).join('.'); subdoc.$populated(rest, val, options); // No need to continue because the above recursion should take care of // marking the rest of the docs as populated break; } } return val; }; Document.prototype.$populated = Document.prototype.populated; /** * Takes a populated field and returns it to its unpopulated state. * * ####Example: * * Model.findOne().populate('author').exec(function (err, doc) { * console.log(doc.author.name); // Dr.Seuss * console.log(doc.depopulate('author')); * console.log(doc.author); // '5144cf8050f071d979c118a7' * }) * * If the path was not provided, then all populated fields are returned to their unpopulated state. * * @param {String} path * @return {Document} this * @see Document.populate #document_Document-populate * @api public * @memberOf Document * @instance */ Document.prototype.depopulate = function(path) { if (typeof path === 'string') { path = path.indexOf(' ') === -1 ? [path] : path.split(' '); } let populatedIds; const virtualKeys = this.$$populatedVirtuals ? Object.keys(this.$$populatedVirtuals) : []; const populated = get(this, '$__.populated', {}); if (arguments.length === 0) { // Depopulate all for (const virtualKey of virtualKeys) { delete this.$$populatedVirtuals[virtualKey]; delete this._doc[virtualKey]; delete populated[virtualKey]; } const keys = Object.keys(populated); for (const key of keys) { populatedIds = this.$populated(key); if (!populatedIds) { continue; } delete populated[key]; utils.setValue(key, populatedIds, this._doc); } return this; } for (const singlePath of path) { populatedIds = this.$populated(singlePath); delete populated[singlePath]; if (virtualKeys.indexOf(singlePath) !== -1) { delete this.$$populatedVirtuals[singlePath]; delete this._doc[singlePath]; } else if (populatedIds) { utils.setValue(singlePath, populatedIds, this._doc); } } return this; }; /** * Returns the full path to this document. * * @param {String} [path] * @return {String} * @api private * @method $__fullPath * @memberOf Document * @instance */ Document.prototype.$__fullPath = function(path) { // overridden in SubDocuments return path || ''; }; /** * Returns the changes that happened to the document * in the format that will be sent to MongoDB. 
*
 * #### Example:
 *
 *     const userSchema = new Schema({
 *       name: String,
 *       age: Number,
 *       country: String
 *     });
 *     const User = mongoose.model('User', userSchema);
 *     const user = await User.create({
 *       name: 'Hafez',
 *       age: 25,
 *       country: 'Egypt'
 *     });
 *
 *     // returns an empty object, no changes happened yet
 *     user.getChanges(); // { }
 *
 *     user.country = undefined;
 *     user.age = 26;
 *
 *     user.getChanges(); // { $set: { age: 26 }, $unset: { country: 1 } }
 *
 *     await user.save();
 *
 *     user.getChanges(); // { }
 *
 * Modifying the object that `getChanges()` returns does not affect the document's
 * change tracking state. Even if you `delete user.getChanges().$set`, Mongoose
 * will still send a `$set` to the server.
 *
 * @return {Object}
 * @api public
 * @method getChanges
 * @memberOf Document
 * @instance
 */

Document.prototype.getChanges = function() {
  const delta = this.$__delta();
  const changes = delta ? delta[1] : {};
  return changes;
};

/*!
 * Module exports.
 */

Document.ValidationError = ValidationError;
module.exports = exports = Document;
1
14918
This is a coarse solution. This check is helpful, but you should also add a check in `$__version()` to avoid calling `isSelected()` when `key === false`.
Automattic-mongoose
js
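A minimal sketch of the guard this review comment asks for. The method shape (`$__version(where, delta)` and how the version bump is applied) is assumed here for illustration; only the `key === false` early return is the reviewer's point:

// Hypothetical shape of `$__version()`; only the early return is the suggestion.
Document.prototype.$__version = function(where, delta) {
  const key = this.$__schema.options.versionKey;
  if (key === false) {
    // Versioning disabled on this schema: skip isSelected() entirely.
    return;
  }
  if (this.isSelected(key)) {
    // ...increment the version key in `delta` as before...
  }
};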
@@ -37,8 +37,10 @@ type Planner interface { } type Input struct { - // Readonly deployment model. - Deployment *model.Deployment + ApplicationID string + ApplicationName string + GitPath model.ApplicationGitPath + Trigger model.DeploymentTrigger MostRecentSuccessfulCommitHash string PipedConfig *config.PipedSpec TargetDSP deploysource.Provider
1
// Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package planner provides a piped component
// that decides the deployment pipeline of a deployment.
// The planner bases its decision on the changes from git commits,
// then builds the deployment manifests to know the behavior of the deployment.
// From that behavior the planner can decide which pipeline should be applied.
package planner

import (
	"context"
	"strings"

	"go.uber.org/zap"

	"github.com/pipe-cd/pipe/pkg/app/piped/deploysource"
	"github.com/pipe-cd/pipe/pkg/cache"
	"github.com/pipe-cd/pipe/pkg/config"
	"github.com/pipe-cd/pipe/pkg/model"
	"github.com/pipe-cd/pipe/pkg/regexpool"
)

type Planner interface {
	Plan(ctx context.Context, in Input) (Output, error)
}

type Input struct {
	// Readonly deployment model.
	Deployment                     *model.Deployment
	MostRecentSuccessfulCommitHash string
	PipedConfig                    *config.PipedSpec
	TargetDSP                      deploysource.Provider
	RunningDSP                     deploysource.Provider
	AppManifestsCache              cache.Cache
	RegexPool                      *regexpool.Pool
	Logger                         *zap.Logger
}

type Output struct {
	Version      string
	SyncStrategy model.SyncStrategy
	Summary      string
	Stages       []*model.PipelineStage
}

// MakeInitialStageMetadata makes the initial metadata for the given state configuration.
func MakeInitialStageMetadata(cfg config.PipelineStage) map[string]string {
	switch cfg.Name {
	case model.StageWaitApproval:
		return map[string]string{
			"Approvers": strings.Join(cfg.WaitApprovalStageOptions.Approvers, ","),
		}
	default:
		return nil
	}
}
1
17742
Passing only the needed data so that it can be reused by the `planpreview` package, where there is no deployment data.
pipe-cd-pipe
go
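To see the comment's point, here is a rough sketch of a hypothetical `planpreview`-side caller: with the patched `Input`, the planner can be driven from the application and trigger data alone, before any `model.Deployment` exists. Import paths, field access, and all variable names below are assumptions, not PipeCD's actual code:

package planpreview // hypothetical call site

import (
	"context"

	"github.com/pipe-cd/pipe/pkg/app/piped/deploysource"
	"github.com/pipe-cd/pipe/pkg/app/piped/planner"
	"github.com/pipe-cd/pipe/pkg/config"
	"github.com/pipe-cd/pipe/pkg/model"
)

// planForPreview assembles planner.Input without a deployment, using only
// the application and the commit being previewed.
func planForPreview(ctx context.Context, p planner.Planner, app *model.Application,
	trigger model.DeploymentTrigger, lastGoodHash string,
	cfg *config.PipedSpec, target deploysource.Provider) (planner.Output, error) {

	in := planner.Input{
		ApplicationID:                  app.Id,
		ApplicationName:                app.Name,
		GitPath:                        *app.GitPath, // assuming the application carries a *ApplicationGitPath
		Trigger:                        trigger,      // built from the head commit being previewed
		MostRecentSuccessfulCommitHash: lastGoodHash,
		PipedConfig:                    cfg,
		TargetDSP:                      target,
	}
	return p.Plan(ctx, in)
}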
@@ -71,6 +71,9 @@ type Options struct { TLSCaCert string `json:"-"` TLSConfig *tls.Config `json:"-"` WriteDeadline time.Duration `json:"-"` + + CustomClientAuth Auth `json:"-"` + CustomRouterAuth Auth `json:"-"` } // Clone performs a deep copy of the Options struct, returning a new clone
1
// Copyright 2012-2017 Apcera Inc. All rights reserved. package server import ( "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "net" "net/url" "os" "strconv" "strings" "time" "github.com/nats-io/gnatsd/conf" "github.com/nats-io/gnatsd/util" ) // Options for clusters. type ClusterOpts struct { Host string `json:"addr"` Port int `json:"cluster_port"` Username string `json:"-"` Password string `json:"-"` AuthTimeout float64 `json:"auth_timeout"` TLSTimeout float64 `json:"-"` TLSConfig *tls.Config `json:"-"` ListenStr string `json:"-"` NoAdvertise bool `json:"-"` ConnectRetries int `json:"-"` } // Options block for gnatsd server. type Options struct { ConfigFile string `json:"-"` Host string `json:"addr"` Port int `json:"port"` Trace bool `json:"-"` Debug bool `json:"-"` NoLog bool `json:"-"` NoSigs bool `json:"-"` Logtime bool `json:"-"` MaxConn int `json:"max_connections"` Users []*User `json:"-"` Username string `json:"-"` Password string `json:"-"` Authorization string `json:"-"` PingInterval time.Duration `json:"ping_interval"` MaxPingsOut int `json:"ping_max"` HTTPHost string `json:"http_host"` HTTPPort int `json:"http_port"` HTTPSPort int `json:"https_port"` AuthTimeout float64 `json:"auth_timeout"` MaxControlLine int `json:"max_control_line"` MaxPayload int `json:"max_payload"` Cluster ClusterOpts `json:"cluster"` ProfPort int `json:"-"` PidFile string `json:"-"` LogFile string `json:"-"` Syslog bool `json:"-"` RemoteSyslog string `json:"-"` Routes []*url.URL `json:"-"` RoutesStr string `json:"-"` TLSTimeout float64 `json:"tls_timeout"` TLS bool `json:"-"` TLSVerify bool `json:"-"` TLSCert string `json:"-"` TLSKey string `json:"-"` TLSCaCert string `json:"-"` TLSConfig *tls.Config `json:"-"` WriteDeadline time.Duration `json:"-"` } // Clone performs a deep copy of the Options struct, returning a new clone // with all values copied. func (o *Options) Clone() *Options { if o == nil { return nil } clone := &Options{} *clone = *o if o.Users != nil { clone.Users = make([]*User, len(o.Users)) for i, user := range o.Users { clone.Users[i] = user.clone() } } if o.Routes != nil { clone.Routes = make([]*url.URL, len(o.Routes)) for i, route := range o.Routes { routeCopy := &url.URL{} *routeCopy = *route clone.Routes[i] = routeCopy } } if o.TLSConfig != nil { clone.TLSConfig = util.CloneTLSConfig(o.TLSConfig) } if o.Cluster.TLSConfig != nil { clone.Cluster.TLSConfig = util.CloneTLSConfig(o.Cluster.TLSConfig) } return clone } // Configuration file authorization section. type authorization struct { // Singles user string pass string token string // Multiple Users users []*User timeout float64 defaultPermissions *Permissions } // TLSConfigOpts holds the parsed tls config information, // used with flag parsing type TLSConfigOpts struct { CertFile string KeyFile string CaFile string Verify bool Timeout float64 Ciphers []uint16 CurvePreferences []tls.CurveID } var tlsUsage = ` TLS configuration is specified in the tls section of a configuration file: e.g. tls { cert_file: "./certs/server-cert.pem" key_file: "./certs/server-key.pem" ca_file: "./certs/ca.pem" verify: true cipher_suites: [ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" ] curve_preferences: [ "CurveP256", "CurveP384", "CurveP521" ] } Available cipher suites include: ` // ProcessConfigFile processes a configuration file. 
// FIXME(dlc): Hacky func ProcessConfigFile(configFile string) (*Options, error) { opts := &Options{ConfigFile: configFile} if configFile == "" { return opts, nil } m, err := conf.ParseFile(configFile) if err != nil { return nil, err } for k, v := range m { switch strings.ToLower(k) { case "listen": hp, err := parseListen(v) if err != nil { return nil, err } opts.Host = hp.host opts.Port = hp.port case "port": opts.Port = int(v.(int64)) case "host", "net": opts.Host = v.(string) case "debug": opts.Debug = v.(bool) case "trace": opts.Trace = v.(bool) case "logtime": opts.Logtime = v.(bool) case "authorization": am := v.(map[string]interface{}) auth, err := parseAuthorization(am) if err != nil { return nil, err } opts.Username = auth.user opts.Password = auth.pass opts.Authorization = auth.token if (auth.user != "" || auth.pass != "") && auth.token != "" { return nil, fmt.Errorf("Cannot have a user/pass and token") } opts.AuthTimeout = auth.timeout // Check for multiple users defined if auth.users != nil { if auth.user != "" { return nil, fmt.Errorf("Can not have a single user/pass and a users array") } if auth.token != "" { return nil, fmt.Errorf("Can not have a token and a users array") } opts.Users = auth.users } case "http": hp, err := parseListen(v) if err != nil { return nil, err } opts.HTTPHost = hp.host opts.HTTPPort = hp.port case "https": hp, err := parseListen(v) if err != nil { return nil, err } opts.HTTPHost = hp.host opts.HTTPSPort = hp.port case "http_port", "monitor_port": opts.HTTPPort = int(v.(int64)) case "https_port": opts.HTTPSPort = int(v.(int64)) case "cluster": cm := v.(map[string]interface{}) if err := parseCluster(cm, opts); err != nil { return nil, err } case "logfile", "log_file": opts.LogFile = v.(string) case "syslog": opts.Syslog = v.(bool) case "remote_syslog": opts.RemoteSyslog = v.(string) case "pidfile", "pid_file": opts.PidFile = v.(string) case "prof_port": opts.ProfPort = int(v.(int64)) case "max_control_line": opts.MaxControlLine = int(v.(int64)) case "max_payload": opts.MaxPayload = int(v.(int64)) case "max_connections", "max_conn": opts.MaxConn = int(v.(int64)) case "ping_interval": opts.PingInterval = time.Duration(int(v.(int64))) * time.Second case "ping_max": opts.MaxPingsOut = int(v.(int64)) case "tls": tlsm := v.(map[string]interface{}) tc, err := parseTLS(tlsm) if err != nil { return nil, err } if opts.TLSConfig, err = GenTLSConfig(tc); err != nil { return nil, err } opts.TLSTimeout = tc.Timeout case "write_deadline": wd, ok := v.(string) if ok { dur, err := time.ParseDuration(wd) if err != nil { return nil, fmt.Errorf("error parsing write_deadline: %v", err) } opts.WriteDeadline = dur } else { // Backward compatible with old type, assume this is the // number of seconds. opts.WriteDeadline = time.Duration(v.(int64)) * time.Second fmt.Printf("WARNING: write_deadline should be converted to a duration\n") } } } return opts, nil } // hostPort is simple struct to hold parsed listen/addr strings. 
type hostPort struct { host string port int } // parseListen will parse listen option which is replacing host/net and port func parseListen(v interface{}) (*hostPort, error) { hp := &hostPort{} switch v.(type) { // Only a port case int64: hp.port = int(v.(int64)) case string: host, port, err := net.SplitHostPort(v.(string)) if err != nil { return nil, fmt.Errorf("Could not parse address string %q", v) } hp.port, err = strconv.Atoi(port) if err != nil { return nil, fmt.Errorf("Could not parse port %q", port) } hp.host = host } return hp, nil } // parseCluster will parse the cluster config. func parseCluster(cm map[string]interface{}, opts *Options) error { for mk, mv := range cm { switch strings.ToLower(mk) { case "listen": hp, err := parseListen(mv) if err != nil { return err } opts.Cluster.Host = hp.host opts.Cluster.Port = hp.port case "port": opts.Cluster.Port = int(mv.(int64)) case "host", "net": opts.Cluster.Host = mv.(string) case "authorization": am := mv.(map[string]interface{}) auth, err := parseAuthorization(am) if err != nil { return err } if auth.users != nil { return fmt.Errorf("Cluster authorization does not allow multiple users") } opts.Cluster.Username = auth.user opts.Cluster.Password = auth.pass opts.Cluster.AuthTimeout = auth.timeout case "routes": ra := mv.([]interface{}) opts.Routes = make([]*url.URL, 0, len(ra)) for _, r := range ra { routeURL := r.(string) url, err := url.Parse(routeURL) if err != nil { return fmt.Errorf("error parsing route url [%q]", routeURL) } opts.Routes = append(opts.Routes, url) } case "tls": tlsm := mv.(map[string]interface{}) tc, err := parseTLS(tlsm) if err != nil { return err } if opts.Cluster.TLSConfig, err = GenTLSConfig(tc); err != nil { return err } // For clusters, we will force strict verification. We also act // as both client and server, so will mirror the rootCA to the // clientCA pool. opts.Cluster.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert opts.Cluster.TLSConfig.RootCAs = opts.Cluster.TLSConfig.ClientCAs opts.Cluster.TLSTimeout = tc.Timeout case "no_advertise": opts.Cluster.NoAdvertise = mv.(bool) case "connect_retries": opts.Cluster.ConnectRetries = int(mv.(int64)) } } return nil } // Helper function to parse Authorization configs. func parseAuthorization(am map[string]interface{}) (*authorization, error) { auth := &authorization{} for mk, mv := range am { switch strings.ToLower(mk) { case "user", "username": auth.user = mv.(string) case "pass", "password": auth.pass = mv.(string) case "token": auth.token = mv.(string) case "timeout": at := float64(1) switch mv.(type) { case int64: at = float64(mv.(int64)) case float64: at = mv.(float64) } auth.timeout = at case "users": users, err := parseUsers(mv) if err != nil { return nil, err } auth.users = users case "default_permission", "default_permissions": pm, ok := mv.(map[string]interface{}) if !ok { return nil, fmt.Errorf("Expected default permissions to be a map/struct, got %+v", mv) } permissions, err := parseUserPermissions(pm) if err != nil { return nil, err } auth.defaultPermissions = permissions } // Now check for permission defaults with multiple users, etc. if auth.users != nil && auth.defaultPermissions != nil { for _, user := range auth.users { if user.Permissions == nil { user.Permissions = auth.defaultPermissions } } } } return auth, nil } // Helper function to parse multiple users array with optional permissions. 
func parseUsers(mv interface{}) ([]*User, error) { // Make sure we have an array uv, ok := mv.([]interface{}) if !ok { return nil, fmt.Errorf("Expected users field to be an array, got %v", mv) } users := []*User{} for _, u := range uv { // Check its a map/struct um, ok := u.(map[string]interface{}) if !ok { return nil, fmt.Errorf("Expected user entry to be a map/struct, got %v", u) } user := &User{} for k, v := range um { switch strings.ToLower(k) { case "user", "username": user.Username = v.(string) case "pass", "password": user.Password = v.(string) case "permission", "permissions", "authorization": pm, ok := v.(map[string]interface{}) if !ok { return nil, fmt.Errorf("Expected user permissions to be a map/struct, got %+v", v) } permissions, err := parseUserPermissions(pm) if err != nil { return nil, err } user.Permissions = permissions } } // Check to make sure we have at least username and password if user.Username == "" || user.Password == "" { return nil, fmt.Errorf("User entry requires a user and a password") } users = append(users, user) } return users, nil } // Helper function to parse user/account permissions func parseUserPermissions(pm map[string]interface{}) (*Permissions, error) { p := &Permissions{} for k, v := range pm { switch strings.ToLower(k) { case "pub", "publish": subjects, err := parseSubjects(v) if err != nil { return nil, err } p.Publish = subjects case "sub", "subscribe": subjects, err := parseSubjects(v) if err != nil { return nil, err } p.Subscribe = subjects default: return nil, fmt.Errorf("Unknown field %s parsing permissions", k) } } return p, nil } // Helper function to parse subject singeltons and/or arrays func parseSubjects(v interface{}) ([]string, error) { var subjects []string switch v.(type) { case string: subjects = append(subjects, v.(string)) case []string: subjects = v.([]string) case []interface{}: for _, i := range v.([]interface{}) { subject, ok := i.(string) if !ok { return nil, fmt.Errorf("Subject in permissions array cannot be cast to string") } subjects = append(subjects, subject) } default: return nil, fmt.Errorf("Expected subject permissions to be a subject, or array of subjects, got %T", v) } return checkSubjectArray(subjects) } // Helper function to validate subjects, etc for account permissioning. func checkSubjectArray(sa []string) ([]string, error) { for _, s := range sa { if !IsValidSubject(s) { return nil, fmt.Errorf("Subject %q is not a valid subject", s) } } return sa, nil } // PrintTLSHelpAndDie prints TLS usage and exits. func PrintTLSHelpAndDie() { fmt.Printf("%s", tlsUsage) for k := range cipherMap { fmt.Printf(" %s\n", k) } fmt.Printf("\nAvailable curve preferences include:\n") for k := range curvePreferenceMap { fmt.Printf(" %s\n", k) } os.Exit(0) } func parseCipher(cipherName string) (uint16, error) { cipher, exists := cipherMap[cipherName] if !exists { return 0, fmt.Errorf("Unrecognized cipher %s", cipherName) } return cipher, nil } func parseCurvePreferences(curveName string) (tls.CurveID, error) { curve, exists := curvePreferenceMap[curveName] if !exists { return 0, fmt.Errorf("Unrecognized curve preference %s", curveName) } return curve, nil } // Helper function to parse TLS configs. 
func parseTLS(tlsm map[string]interface{}) (*TLSConfigOpts, error) { tc := TLSConfigOpts{} for mk, mv := range tlsm { switch strings.ToLower(mk) { case "cert_file": certFile, ok := mv.(string) if !ok { return nil, fmt.Errorf("error parsing tls config, expected 'cert_file' to be filename") } tc.CertFile = certFile case "key_file": keyFile, ok := mv.(string) if !ok { return nil, fmt.Errorf("error parsing tls config, expected 'key_file' to be filename") } tc.KeyFile = keyFile case "ca_file": caFile, ok := mv.(string) if !ok { return nil, fmt.Errorf("error parsing tls config, expected 'ca_file' to be filename") } tc.CaFile = caFile case "verify": verify, ok := mv.(bool) if !ok { return nil, fmt.Errorf("error parsing tls config, expected 'verify' to be a boolean") } tc.Verify = verify case "cipher_suites": ra := mv.([]interface{}) if len(ra) == 0 { return nil, fmt.Errorf("error parsing tls config, 'cipher_suites' cannot be empty") } tc.Ciphers = make([]uint16, 0, len(ra)) for _, r := range ra { cipher, err := parseCipher(r.(string)) if err != nil { return nil, err } tc.Ciphers = append(tc.Ciphers, cipher) } case "curve_preferences": ra := mv.([]interface{}) if len(ra) == 0 { return nil, fmt.Errorf("error parsing tls config, 'curve_preferences' cannot be empty") } tc.CurvePreferences = make([]tls.CurveID, 0, len(ra)) for _, r := range ra { cps, err := parseCurvePreferences(r.(string)) if err != nil { return nil, err } tc.CurvePreferences = append(tc.CurvePreferences, cps) } case "timeout": at := float64(0) switch mv.(type) { case int64: at = float64(mv.(int64)) case float64: at = mv.(float64) } tc.Timeout = at default: return nil, fmt.Errorf("error parsing tls config, unknown field [%q]", mk) } } // If cipher suites were not specified then use the defaults if tc.Ciphers == nil { tc.Ciphers = defaultCipherSuites() } // If curve preferences were not specified, then use the defaults if tc.CurvePreferences == nil { tc.CurvePreferences = defaultCurvePreferences() } return &tc, nil } // GenTLSConfig loads TLS related configuration parameters. func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) { // Now load in cert and private key cert, err := tls.LoadX509KeyPair(tc.CertFile, tc.KeyFile) if err != nil { return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err) } cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) if err != nil { return nil, fmt.Errorf("error parsing certificate: %v", err) } // Create TLSConfig // We will determine the cipher suites that we prefer. config := tls.Config{ CurvePreferences: tc.CurvePreferences, Certificates: []tls.Certificate{cert}, PreferServerCipherSuites: true, MinVersion: tls.VersionTLS12, CipherSuites: tc.Ciphers, } // Require client certificates as needed if tc.Verify { config.ClientAuth = tls.RequireAndVerifyClientCert } // Add in CAs if applicable. if tc.CaFile != "" { rootPEM, err := ioutil.ReadFile(tc.CaFile) if err != nil || rootPEM == nil { return nil, err } pool := x509.NewCertPool() ok := pool.AppendCertsFromPEM(rootPEM) if !ok { return nil, fmt.Errorf("failed to parse root ca certificate") } config.ClientCAs = pool } return &config, nil } // MergeOptions will merge two options giving preference to the flagOpts // if the item is present. 
func MergeOptions(fileOpts, flagOpts *Options) *Options { if fileOpts == nil { return flagOpts } if flagOpts == nil { return fileOpts } // Merge the two, flagOpts override opts := *fileOpts if flagOpts.Port != 0 { opts.Port = flagOpts.Port } if flagOpts.Host != "" { opts.Host = flagOpts.Host } if flagOpts.Username != "" { opts.Username = flagOpts.Username } if flagOpts.Password != "" { opts.Password = flagOpts.Password } if flagOpts.Authorization != "" { opts.Authorization = flagOpts.Authorization } if flagOpts.HTTPPort != 0 { opts.HTTPPort = flagOpts.HTTPPort } if flagOpts.Debug { opts.Debug = true } if flagOpts.Trace { opts.Trace = true } if flagOpts.Logtime { opts.Logtime = true } if flagOpts.LogFile != "" { opts.LogFile = flagOpts.LogFile } if flagOpts.PidFile != "" { opts.PidFile = flagOpts.PidFile } if flagOpts.ProfPort != 0 { opts.ProfPort = flagOpts.ProfPort } if flagOpts.Cluster.ListenStr != "" { opts.Cluster.ListenStr = flagOpts.Cluster.ListenStr } if flagOpts.Cluster.NoAdvertise { opts.Cluster.NoAdvertise = true } if flagOpts.Cluster.ConnectRetries != 0 { opts.Cluster.ConnectRetries = flagOpts.Cluster.ConnectRetries } if flagOpts.RoutesStr != "" { mergeRoutes(&opts, flagOpts) } return &opts } // RoutesFromStr parses route URLs from a string func RoutesFromStr(routesStr string) []*url.URL { routes := strings.Split(routesStr, ",") if len(routes) == 0 { return nil } routeUrls := []*url.URL{} for _, r := range routes { r = strings.TrimSpace(r) u, _ := url.Parse(r) routeUrls = append(routeUrls, u) } return routeUrls } // This will merge the flag routes and override anything that was present. func mergeRoutes(opts, flagOpts *Options) { routeUrls := RoutesFromStr(flagOpts.RoutesStr) if routeUrls == nil { return } opts.Routes = routeUrls opts.RoutesStr = flagOpts.RoutesStr } // RemoveSelfReference removes this server from an array of routes func RemoveSelfReference(clusterPort int, routes []*url.URL) ([]*url.URL, error) { var cleanRoutes []*url.URL cport := strconv.Itoa(clusterPort) selfIPs, err := getInterfaceIPs() if err != nil { return nil, err } for _, r := range routes { host, port, err := net.SplitHostPort(r.Host) if err != nil { return nil, err } ipList, err := getURLIP(host) if err != nil { return nil, err } if cport == port && isIPInList(selfIPs, ipList) { continue } cleanRoutes = append(cleanRoutes, r) } return cleanRoutes, nil } func isIPInList(list1 []net.IP, list2 []net.IP) bool { for _, ip1 := range list1 { for _, ip2 := range list2 { if ip1.Equal(ip2) { return true } } } return false } func getURLIP(ipStr string) ([]net.IP, error) { ipList := []net.IP{} ip := net.ParseIP(ipStr) if ip != nil { ipList = append(ipList, ip) return ipList, nil } hostAddr, err := net.LookupHost(ipStr) if err != nil { return nil, fmt.Errorf("Error looking up host with route hostname: %v", err) } for _, addr := range hostAddr { ip = net.ParseIP(addr) if ip != nil { ipList = append(ipList, ip) } } return ipList, nil } func getInterfaceIPs() ([]net.IP, error) { var localIPs []net.IP interfaceAddr, err := net.InterfaceAddrs() if err != nil { return nil, fmt.Errorf("Error getting self referencing address: %v", err) } for i := 0; i < len(interfaceAddr); i++ { interfaceIP, _, _ := net.ParseCIDR(interfaceAddr[i].String()) if net.ParseIP(interfaceIP.String()) != nil { localIPs = append(localIPs, interfaceIP) } else { return nil, fmt.Errorf("Error parsing self referencing address: %v", err) } } return localIPs, nil } func processOptions(opts *Options) { // Setup non-standard Go defaults if opts.Host == "" 
{ opts.Host = DEFAULT_HOST } if opts.HTTPHost == "" { // Default to same bind from server if left undefined opts.HTTPHost = opts.Host } if opts.Port == 0 { opts.Port = DEFAULT_PORT } else if opts.Port == RANDOM_PORT { // Choose randomly inside of net.Listen opts.Port = 0 } if opts.MaxConn == 0 { opts.MaxConn = DEFAULT_MAX_CONNECTIONS } if opts.PingInterval == 0 { opts.PingInterval = DEFAULT_PING_INTERVAL } if opts.MaxPingsOut == 0 { opts.MaxPingsOut = DEFAULT_PING_MAX_OUT } if opts.TLSTimeout == 0 { opts.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second) } if opts.AuthTimeout == 0 { opts.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second) } if opts.Cluster.Host == "" { opts.Cluster.Host = DEFAULT_HOST } if opts.Cluster.TLSTimeout == 0 { opts.Cluster.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second) } if opts.Cluster.AuthTimeout == 0 { opts.Cluster.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second) } if opts.MaxControlLine == 0 { opts.MaxControlLine = MAX_CONTROL_LINE_SIZE } if opts.MaxPayload == 0 { opts.MaxPayload = MAX_PAYLOAD_SIZE } if opts.WriteDeadline == time.Duration(0) { opts.WriteDeadline = DEFAULT_FLUSH_DEADLINE } }
1
7272
Could we rename those to `CustomClientAuthentication` (same for Router) to remove ambiguity between Authentication and Authorization (permissions)?
nats-io-nats-server
go
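Sketched against the patched struct, the rename would read as follows (proposal only; `Auth` is the type the patch already references):

// Reviewer's proposed field names: spelling out "Authentication" avoids
// any reading of "Auth" as authorization (permissions).
type Options struct {
	// ...existing fields unchanged...

	CustomClientAuthentication Auth `json:"-"`
	CustomRouterAuthentication Auth `json:"-"`
}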
@@ -1,11 +1,15 @@ using System; +using MvvmCross.Core.Views; using MvvmCross.Forms.Platform; namespace MvvmCross.Forms.Views { - public interface IMvxFormsViewPresenter + public interface IMvxFormsViewPresenter : IMvxAttributeViewPresenter { MvxFormsApplication FormsApplication { get; set; } IMvxFormsPagePresenter FormsPagePresenter { get; set; } + + bool ClosePlatformViews(); + bool ShowPlatformHost(Type hostViewModel = null); } }
1
using System; using MvvmCross.Forms.Platform; namespace MvvmCross.Forms.Views { public interface IMvxFormsViewPresenter { MvxFormsApplication FormsApplication { get; set; } IMvxFormsPagePresenter FormsPagePresenter { get; set; } } }
1
13646
Is the name `IMvxFormsViewPresenter` too close to `IMvxFormsPagePresenter`? Perhaps something like `IMvxFormsNativeViewPresenter` or `IMvxFormsPlatformViewPresenter`, or, in line with Forms naming, `IMvxFormsOnPlatformViewPresenter`?
MvvmCross-MvvmCross
.cs
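Applied to the patched interface, one of the proposed renames would read as follows (the name is only the reviewer's suggestion, not the shipped API):

using System;
using MvvmCross.Core.Views;
using MvvmCross.Forms.Platform;

namespace MvvmCross.Forms.Views
{
    // One of the reviewer's proposed names, kept visually distinct
    // from IMvxFormsPagePresenter.
    public interface IMvxFormsPlatformViewPresenter : IMvxAttributeViewPresenter
    {
        MvxFormsApplication FormsApplication { get; set; }
        IMvxFormsPagePresenter FormsPagePresenter { get; set; }

        bool ClosePlatformViews();
        bool ShowPlatformHost(Type hostViewModel = null);
    }
}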
@@ -28,8 +28,8 @@ export function h(nodeName, attributes) { if ((child = stack.pop()) instanceof Array) { for (i=child.length; i--; ) stack.push(child[i]); } - else if (child!=null && child!==false) { - if (typeof child=='number' || child===true) child = String(child); + else if (child!=null && child!==true && child!==false) { + if (typeof child=='number') child = String(child); simple = typeof child=='string'; if (simple && lastSimple) { children[children.length-1] += child;
1
import { VNode } from './vnode'; import options from './options'; const stack = []; /** JSX/hyperscript reviver * Benchmarks: https://esbench.com/bench/57ee8f8e330ab09900a1a1a0 * @see http://jasonformat.com/wtf-is-jsx * @public * @example * /** @jsx h *\/ * import { render, h } from 'preact'; * render(<span>foo</span>, document.body); */ export function h(nodeName, attributes) { let children = [], lastSimple, child, simple, i; for (i=arguments.length; i-- > 2; ) { stack.push(arguments[i]); } if (attributes && attributes.children) { if (!stack.length) stack.push(attributes.children); delete attributes.children; } while (stack.length) { if ((child = stack.pop()) instanceof Array) { for (i=child.length; i--; ) stack.push(child[i]); } else if (child!=null && child!==false) { if (typeof child=='number' || child===true) child = String(child); simple = typeof child=='string'; if (simple && lastSimple) { children[children.length-1] += child; } else { children.push(child); lastSimple = simple; } } } let p = new VNode(nodeName, attributes || undefined, children); // if a "vnode hook" is defined, pass every created VNode to it if (options.vnode) options.vnode(p); return p; }
1
10285
`typeof child != 'boolean'` maybe?
preactjs-preact
js
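The patched two-comparison condition and the reviewer's single `typeof` check accept exactly the same children; a standalone sanity check (not preact code):

// The patched condition vs. the reviewer's proposed one.
const patched = c => c != null && c !== true && c !== false;
const proposed = c => c != null && typeof c !== 'boolean';

// They agree on every kind of child h() can encounter.
for (const c of [null, undefined, true, false, 0, 1, '', 'text', [], {}]) {
  console.assert(patched(c) === proposed(c), `mismatch for ${String(c)}`);
}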
@@ -220,7 +220,7 @@ public class ErrorHandler { private Throwable rebuildServerError(Map<String, Object> rawErrorData, int responseStatus) { - if (!rawErrorData.containsKey(CLASS) && !rawErrorData.containsKey(STACK_TRACE)) { + if (rawErrorData.get(CLASS) == null || rawErrorData.get(STACK_TRACE) == null) { // Not enough information for us to try to rebuild an error. return null; }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.remote; import static org.openqa.selenium.remote.ErrorCodes.SUCCESS; import com.google.common.base.Throwables; import com.google.common.primitives.Ints; import org.openqa.selenium.UnhandledAlertException; import org.openqa.selenium.WebDriverException; import java.lang.reflect.Constructor; import java.math.BigDecimal; import java.math.RoundingMode; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.function.Function; /** * Maps exceptions to status codes for sending over the wire. */ public class ErrorHandler { private static final String MESSAGE = "message"; private static final String SCREEN_SHOT = "screen"; private static final String CLASS = "class"; private static final String STACK_TRACE = "stackTrace"; private static final String LINE_NUMBER = "lineNumber"; private static final String METHOD_NAME = "methodName"; private static final String CLASS_NAME = "className"; private static final String FILE_NAME = "fileName"; private static final String UNKNOWN_CLASS = "<anonymous class>"; private static final String UNKNOWN_METHOD = "<anonymous method>"; private static final String UNKNOWN_FILE = null; private ErrorCodes errorCodes; private boolean includeServerErrors; public ErrorHandler() { this(true); } /** * @param includeServerErrors Whether to include server-side details in thrown exceptions if the * information is available. */ public ErrorHandler(boolean includeServerErrors) { this.includeServerErrors = includeServerErrors; this.errorCodes = new ErrorCodes(); } /** * @param includeServerErrors Whether to include server-side details in thrown exceptions if the * information is available. * @param codes The ErrorCodes object to use for linking error codes to exceptions. */ public ErrorHandler(ErrorCodes codes, boolean includeServerErrors) { this.includeServerErrors = includeServerErrors; this.errorCodes = codes; } public boolean isIncludeServerErrors() { return includeServerErrors; } public void setIncludeServerErrors(boolean includeServerErrors) { this.includeServerErrors = includeServerErrors; } @SuppressWarnings("unchecked") public Response throwIfResponseFailed(Response response, long duration) throws RuntimeException { if (response.getStatus() == null || response.getStatus() == SUCCESS) { return response; } if (response.getValue() instanceof Throwable) { Throwable throwable = (Throwable) response.getValue(); Throwables.throwIfUnchecked(throwable); throw new RuntimeException(throwable); } Class<? 
extends WebDriverException> outerErrorType = errorCodes.getExceptionType(response.getStatus()); Object value = response.getValue(); String message = null; Throwable cause = null; if (value instanceof Map) { Map<String, Object> rawErrorData = (Map<String, Object>) value; if (!rawErrorData.containsKey(MESSAGE) && rawErrorData.containsKey("value")) { try { rawErrorData = (Map<String, Object>) rawErrorData.get("value"); } catch (ClassCastException cce) {} } try { message = (String) rawErrorData.get(MESSAGE); } catch (ClassCastException e) { // Ok, try to recover gracefully. message = String.valueOf(e); } Throwable serverError = rebuildServerError(rawErrorData, response.getStatus()); // If serverError is null, then the server did not provide a className (only expected if // the server is a Java process) or a stack trace. The lack of a className is OK, but // not having a stacktrace really hurts our ability to debug problems. if (serverError == null) { if (includeServerErrors) { // TODO: this should probably link to a wiki article with more info. message += " (WARNING: The server did not provide any stacktrace information)"; } } else if (!includeServerErrors) { // TODO: wiki article with more info. message += " (WARNING: The client has suppressed server-side stacktraces)"; } else { cause = serverError; } if (rawErrorData.get(SCREEN_SHOT) != null) { cause = new ScreenshotException(String.valueOf(rawErrorData.get(SCREEN_SHOT)), cause); } } else if (value != null) { message = String.valueOf(value); } String duration1 = duration(duration); if (message != null && !message.contains(duration1)) { message = message + duration1; } WebDriverException toThrow = null; if (outerErrorType.equals(UnhandledAlertException.class) && value instanceof Map) { toThrow = createUnhandledAlertException(value); } if (toThrow == null) { toThrow = createThrowable(outerErrorType, new Class<?>[] {String.class, Throwable.class, Integer.class}, new Object[] {message, cause, response.getStatus()}); } if (toThrow == null) { toThrow = createThrowable(outerErrorType, new Class<?>[] {String.class, Throwable.class}, new Object[] {message, cause}); } if (toThrow == null) { toThrow = createThrowable(outerErrorType, new Class<?>[] {String.class}, new Object[] {message}); } if (toThrow == null) { toThrow = new WebDriverException(message, cause); } throw toThrow; } @SuppressWarnings("unchecked") private UnhandledAlertException createUnhandledAlertException(Object value) { Map<String, Object> rawErrorData = (Map<String, Object>) value; if (rawErrorData.containsKey("alert") || rawErrorData.containsKey("alertText")) { Object alertText = rawErrorData.get("alertText"); if (alertText == null) { Map<String, Object> alert = (Map<String, Object>) rawErrorData.get("alert"); if (alert != null) { alertText = alert.get("text"); } } return createThrowable(UnhandledAlertException.class, new Class<?>[] {String.class, String.class}, new Object[] {rawErrorData.get("message"), alertText}); } return null; } private String duration(long duration) { String prefix = "\nCommand duration or timeout: "; if (duration < 1000) { return prefix + duration + " milliseconds"; } return prefix + (new BigDecimal(duration).divide(new BigDecimal(1000)).setScale(2, RoundingMode.HALF_UP)) + " seconds"; } private <T extends Throwable> T createThrowable( Class<T> clazz, Class<?>[] parameterTypes, Object[] parameters) { try { Constructor<T> constructor = clazz.getConstructor(parameterTypes); return constructor.newInstance(parameters); } catch (OutOfMemoryError | 
ReflectiveOperationException e) { // Do nothing - fall through. } return null; } private Throwable rebuildServerError(Map<String, Object> rawErrorData, int responseStatus) { if (!rawErrorData.containsKey(CLASS) && !rawErrorData.containsKey(STACK_TRACE)) { // Not enough information for us to try to rebuild an error. return null; } Throwable toReturn = null; String message = (String) rawErrorData.get(MESSAGE); Class<?> clazz = null; // First: allow Remote Driver to specify the Selenium Server internal exception if (rawErrorData.containsKey(CLASS)) { String className = (String) rawErrorData.get(CLASS); try { clazz = Class.forName(className); } catch (ClassNotFoundException ignored) { // Ok, fall-through } } // If the above fails, map Response Status to Exception class if (null == clazz) { clazz = errorCodes.getExceptionType(responseStatus); } if (clazz.equals(UnhandledAlertException.class)) { toReturn = createUnhandledAlertException(rawErrorData); } else if (Throwable.class.isAssignableFrom(clazz)) { @SuppressWarnings({"unchecked"}) Class<? extends Throwable> throwableType = (Class<? extends Throwable>) clazz; toReturn = createThrowable( throwableType, new Class<?>[] {String.class}, new Object[] {message}); } if (toReturn == null) { toReturn = new UnknownServerException(message); } // Note: if we have a class name above, we should always have a stack trace. // The inverse is not always true. StackTraceElement[] stackTrace = new StackTraceElement[0]; if (rawErrorData.containsKey(STACK_TRACE)) { @SuppressWarnings({"unchecked"}) List<Map<String, Object>> stackTraceInfo = (List<Map<String, Object>>) rawErrorData.get(STACK_TRACE); stackTrace = stackTraceInfo.stream() .map(entry -> new FrameInfoToStackFrame().apply(entry)) .filter(Objects::nonNull) .toArray(StackTraceElement[]::new); } toReturn.setStackTrace(stackTrace); return toReturn; } /** * Exception used as a place holder if the server returns an error without a stack trace. */ public static class UnknownServerException extends WebDriverException { private UnknownServerException(String s) { super(s); } } /** * Function that can rebuild a {@link StackTraceElement} from the frame info included with a * WebDriver JSON response. */ private static class FrameInfoToStackFrame implements Function<Map<String, Object>, StackTraceElement> { public StackTraceElement apply(Map<String, Object> frameInfo) { if (frameInfo == null) { return null; } Optional<Number> maybeLineNumberInteger = Optional.empty(); final Object lineNumberObject = frameInfo.get(LINE_NUMBER); if (lineNumberObject instanceof Number) { maybeLineNumberInteger = Optional.of((Number) lineNumberObject); } else if (lineNumberObject != null) { // might be a Number as a String maybeLineNumberInteger = Optional.ofNullable(Ints.tryParse(lineNumberObject.toString())); } // default -1 for unknown, see StackTraceElement constructor javadoc final int lineNumber = maybeLineNumberInteger.orElse(-1).intValue(); // Gracefully handle remote servers that don't (or can't) send back // complete stack trace info. At least some of this information should // be included... String className = frameInfo.containsKey(CLASS_NAME) ? toStringOrNull(frameInfo.get(CLASS_NAME)) : UNKNOWN_CLASS; String methodName = frameInfo.containsKey(METHOD_NAME) ? toStringOrNull(frameInfo.get(METHOD_NAME)) : UNKNOWN_METHOD; String fileName = frameInfo.containsKey(FILE_NAME) ? 
toStringOrNull(frameInfo.get(FILE_NAME)) : UNKNOWN_FILE; return new StackTraceElement( className, methodName, fileName, lineNumber); } private static String toStringOrNull(Object o) { return o == null ? null : o.toString(); } } }
1
15,006
Why was `&&` changed to `||`?
SeleniumHQ-selenium
java
@@ -26,6 +26,11 @@ func init() { panic(err) } + InitAddress, err = NewIDAddress(4) + if err != nil { + panic(err) + } + StorageMarketAddress, err = NewIDAddress(2) if err != nil { panic(err)
1
package address import ( "encoding/base32" "github.com/minio/blake2b-simd" errors "github.com/pkg/errors" ) func init() { var err error TestAddress, err = NewActorAddress([]byte("satoshi")) if err != nil { panic(err) } TestAddress2, err = NewActorAddress([]byte("nakamoto")) if err != nil { panic(err) } NetworkAddress, err = NewIDAddress(1) if err != nil { panic(err) } StorageMarketAddress, err = NewIDAddress(2) if err != nil { panic(err) } PaymentBrokerAddress, err = NewIDAddress(3) if err != nil { panic(err) } BurntFundsAddress, err = NewIDAddress(99) if err != nil { panic(err) } } var ( // TestAddress is an account with some initial funds in it. TestAddress Address // TestAddress2 is an account with some initial funds in it. TestAddress2 Address // NetworkAddress is the filecoin network treasury. NetworkAddress Address // StorageMarketAddress is the hard-coded address of the filecoin storage market actor. StorageMarketAddress Address // PaymentBrokerAddress is the hard-coded address of the filecoin payment broker actor. PaymentBrokerAddress Address // BurntFundsAddress is the hard-coded address of the burnt funds account actor. BurntFundsAddress Address ) var ( // ErrUnknownNetwork is returned when encountering an unknown network in an address. ErrUnknownNetwork = errors.New("unknown address network") // ErrUnknownProtocol is returned when encountering an unknown protocol in an address. ErrUnknownProtocol = errors.New("unknown address protocol") // ErrInvalidPayload is returned when encountering an invalid address payload. ErrInvalidPayload = errors.New("invalid address payload") // ErrInvalidLength is returned when encountering an address of invalid length. ErrInvalidLength = errors.New("invalid address length") // ErrInvalidChecksum is returned when encountering an invalid address checksum. ErrInvalidChecksum = errors.New("invalid address checksum") ) // UndefAddressString is the string used to represent an empty address when encoded to a string. var UndefAddressString = "empty" // PayloadHashLength defines the hash length taken over addresses using the Actor and SECP256K1 protocols. const PayloadHashLength = 20 // ChecksumHashLength defines the hash length used for calculating address checksums. const ChecksumHashLength = 4 // MaxAddressStringLength is the max length of an address encoded as a string // it include the network prefx, protocol, and bls publickey const MaxAddressStringLength = 2 + 84 var payloadHashConfig = &blake2b.Config{Size: PayloadHashLength} var checksumHashConfig = &blake2b.Config{Size: ChecksumHashLength} const encodeStd = "abcdefghijklmnopqrstuvwxyz234567" // AddressEncoding defines the base32 config used for address encoding and decoding. var AddressEncoding = base32.NewEncoding(encodeStd)
1
21,018
nit: not a big deal right now, but the spec assigns ID 0 to `InitAddress`, not 4 (see the sketch after this record).
filecoin-project-venus
go
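For reference, a minimal sketch of what the spec-aligned registration in the nit above would look like, reusing the `NewIDAddress` helper and `InitAddress` variable from this record's file; that ID 0 is otherwise unused in this codebase is an assumption, not something the file confirms:

```go
func init() {
	var err error
	// Per the actors spec referenced in the review, the init actor gets
	// ID 0; the patch above registers it as ID 4 instead.
	InitAddress, err = NewIDAddress(0)
	if err != nil {
		panic(err)
	}
}
```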
@@ -533,13 +533,13 @@ nsCommandProcessor.prototype.execute = function(jsonCommandString, * Changes the context of the caller to the specified window. * @param {fxdriver.CommandResponse} response The response object to send the * command response in. - * @param {{name: string}} parameters The command parameters. + * @param {{handle: string}} parameters The command parameters. * @param {number} opt_searchAttempt Which attempt this is at finding the * window to switch to. */ nsCommandProcessor.prototype.switchToWindow = function(response, parameters, opt_searchAttempt) { - var lookFor = parameters.name; + var lookFor = parameters.handle; var matches = function(win, lookFor) { return !win.closed && (win.top && win.top.fxdriver) &&
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. /** * @fileoverview Contains a Javascript implementation for * nsICommandProcessor.idl. The implemented XPCOM component is exposed to * the content page as a global property so that it can be used from * unpriviledged code. */ goog.provide('fxdriver.CommandResponse'); goog.require('FirefoxDriver'); goog.require('Utils'); goog.require('WebElement'); goog.require('bot.ErrorCode'); goog.require('bot.locators'); goog.require('bot.userAgent'); goog.require('fxdriver.Timer'); goog.require('fxdriver.error'); goog.require('fxdriver.logging'); goog.require('fxdriver.modals'); goog.require('fxdriver.moz'); goog.require('fxdriver.profiler'); goog.require('goog.array'); goog.require('goog.log'); goog.require('wdSessionStoreService'); /** * Encapsulates the result of a command to the {@code nsCommandProcessor}. * @param {Object} command JSON object describing the command to execute. * @param {nsIResponseHandler} responseHandler The handler to send the response * to. * @constructor */ fxdriver.CommandResponse = function(command, responseHandler) { this.statusBarLabel_ = null; this.responseHandler_ = responseHandler; this.json_ = { name: command ? command.name : 'Unknown command', sessionId: command['sessionId'], status: bot.ErrorCode.SUCCESS, value: '' }; if (goog.isObject(this.json_['sessionId'])) { this.json_['sessionId'] = this.json_['sessionId']['value']; } this.session = null; }; fxdriver.CommandResponse.prototype = { /** * Updates the extension status label to indicate we are about to execute a * command. * @param {window} win The content window that the command will be executed on. */ startCommand: function(win) { this.statusBarLabel_ = win.document.getElementById('fxdriver-label'); if (this.statusBarLabel_) { this.statusBarLabel_.style.color = 'red'; } }, /** * Sends the encapsulated response to the registered callback. */ send: function() { if (this.responseSent_) { // We shouldn't ever send the same response twice. return; } // Indicate that we are no longer executing a command. if (this.statusBarLabel_) { this.statusBarLabel_.style.color = 'black'; } this.responseHandler_.handleResponse(JSON.stringify(this.json_)); // Neuter ourselves this.responseSent_ = true; }, /** * Sends a WebDriver error response. * @param {WebDriverError} e The error to send. */ sendError: function(e) { // if (e instanceof WebDriverError) won't work here since // WebDriverError is defined in the utils.js subscript which is // loaded independently in this component and in the main driver // component. this.status = e.isWebDriverError ? 
e.code : bot.ErrorCode.UNKNOWN_ERROR; this.value = fxdriver.error.toJSON(e); this.send(); }, set name(name) { this.json_.name = name; }, get name() { return this.json_.name; }, get sessionId() { return this.json_.sessionId; }, set sessionId(sessionId) { this.json_.sessionId = sessionId; }, set status(newStatus) { this.json_.status = newStatus; }, get status() { return this.json_.status; }, set value(val) { this.json_.value = val; }, get value() { return this.json_.value; } }; /** * Handles executing a command from the {@code CommandProcessor} once the window * has fully loaded. * @param {FirefoxDriver} driver The FirefoxDriver instance to execute the * command with. * @param {Object} command JSON object describing the command to execute. * @param {fxdriver.CommandResponse} response The response object to send the * command response in. * @param {Number} opt_sleepDelay The amount of time to wait before attempting * the command again if the window is not ready. * @constructor */ var DelayedCommand = function(driver, command, response, opt_sleepDelay) { this.driver_ = driver; this.command_ = command; this.response_ = response; this.onBlank_ = false; this.sleepDelay_ = opt_sleepDelay || DelayedCommand.DEFAULT_SLEEP_DELAY; var activeWindow = response.session.getWindow(); try { if (!activeWindow || activeWindow.closed) { this.loadGroup_ = { isPending: function() { return false; } }; } else { var webNav = activeWindow. QueryInterface(Components.interfaces.nsIInterfaceRequestor). getInterface(Components.interfaces.nsIWebNavigation); this.loadGroup_ = webNav. QueryInterface(Components.interfaces.nsIInterfaceRequestor). getInterface(Components.interfaces.nsILoadGroup); } } catch (ex) { // Well this sucks. This can happen if the DOM gets trashed or if the window // is unexpectedly closed. We need to report this error to the user so they // can let us (webdriver-eng) know that the FirefoxDriver is busted. response.sendError(ex); // Re-throw the error so the command will be aborted. throw ex; } }; /** * Default amount of time, in milliseconds, to wait before (re)attempting a * {@code DelayedCommand}. * @type {Number} */ DelayedCommand.DEFAULT_SLEEP_DELAY = 100; /** * @private {goog.log.Logger} * @const */ DelayedCommand.LOG_ = fxdriver.logging.getLogger('fxdriver.DelayedCommand'); /** * Executes the command after the specified delay. * @param {Number} ms The delay in milliseconds. */ DelayedCommand.prototype.execute = function(ms) { if (this.response_.session.getWaitForPageLoad() && !this.yieldedForBackgroundExecution_) { this.yieldedForBackgroundExecution_ = true; fxdriver.profiler.log( {'event': 'YIELD_TO_PAGE_LOAD', 'startorend': 'start'}); } var self = this; this.driver_.window.setTimeout(function() { self.executeInternal_(); }, ms); }; /** * @return {boolean} Whether this instance should delay execution of its * command for a pending request in the current window's nsILoadGroup. */ DelayedCommand.prototype.shouldDelayExecutionForPendingRequest_ = function() { if (!this.response_.session.getWaitForPageLoad()) { return false; } if (this.loadGroup_.isPending()) { var hasOnLoadBlocker = false; var numPending = 0; var requests = this.loadGroup_.requests; while (requests.hasMoreElements()) { var request = null; var rawRequest = requests.getNext(); try { request = rawRequest.QueryInterface(Components.interfaces.nsIRequest); } catch (e) { // This may happen for pages that use WebSockets. 
// See https://bugzilla.mozilla.org/show_bug.cgi?id=765618 goog.log.info(DelayedCommand.LOG_, 'Ignoring non-nsIRequest: ' + rawRequest); continue; } var isPending = false; try { isPending = request.isPending(); } catch (e) { // Normal during page load, which means we should just return "true" return true; } if (isPending) { numPending += 1; hasOnLoadBlocker = hasOnLoadBlocker || (request.name == 'about:document-onload-blocker'); if (numPending > 1) { // More than one pending request, need to wait. return true; } } } if (numPending && !hasOnLoadBlocker) { goog.log.info(DelayedCommand.LOG_, 'Ignoring pending about:document-onload-blocker ' + 'request'); // If we only have one pending request and it is not a // document-onload-blocker, we need to wait. We do not wait for // document-onload-blocker requests since these are created when // one of document.[open|write|writeln] is called. If document.close is // never called, the document-onload-blocker request will not be // completed. return true; } } fxdriver.profiler.log( {'event': 'YIELD_TO_PAGE_LOAD', 'startorend': 'end'}); return false; }; DelayedCommand.prototype.checkPreconditions_ = function(preconditions, respond, parameters) { if (!preconditions) { return; } var toThrow = null; var length = preconditions.length; for (var i = 0; i < length; i++) { toThrow = preconditions[i](respond.session.getDocument(), parameters); if (toThrow) { throw toThrow; } } }; /** * Attempts to execute the command. If the window is not ready for the command * to execute, will set a timeout to try again. * @private */ DelayedCommand.prototype.executeInternal_ = function() { if (this.shouldDelayExecutionForPendingRequest_()) { return this.execute(this.sleepDelay_); } // Ugh! New windows open on "about:blank" before going to their // destination URL. This check attempts to tell the difference between a // newly opened window and someone actually wanting to do something on // about:blank. if (this.driver_.window.location == 'about:blank' && !this.onBlank_) { this.onBlank_ = true; return this.execute(this.sleepDelay_); } else { try { this.response_.name = this.command_.name; // TODO(simon): This is rampantly ugly, but allows an alert to kill the command // TODO(simon): This is never cleared, but _should_ be okay, because send wipes itself this.driver_.response_ = this.response_; var response = this.response_; DelayedCommand.execTimer = new fxdriver.Timer(); var startTime = new Date().getTime(); var endTime = startTime + this.response_.session.getImplicitWait(); var name = this.command_.name; var driverFunction = this.driver_[name] || WebElement[name]; var parameters = this.command_.parameters; var func = goog.bind(driverFunction, this.driver_, this.response_, parameters); var guards = goog.bind(this.checkPreconditions_, this, driverFunction.preconditions, this.response_, parameters); var toExecute = function() { try { guards(); func(); } catch (e) { if (new Date().getTime() < endTime) { DelayedCommand.execTimer.setTimeout(toExecute, 100); } else { if (!e.isWebDriverError) { goog.log.error( DelayedCommand.LOG_, 'Exception caught by driver: ' + name + '(' + parameters + ')', e); } response.sendError(e); } } }; toExecute(); } catch (e) { if (!e.isWebDriverError) { goog.log.error(DelayedCommand.LOG_, 'Exception caught by driver: ' + this.command_.name + '(' + this.command_.parameters + ')', e); } this.response_.sendError(e); } } }; /** * Class for dispatching WebDriver requests. Handles window locating commands * (e.g. 
switching, searching, etc.), all other commands are executed with the * {@code FirefoxDriver} through reflection. Note this is a singleton class. * @constructor */ var nsCommandProcessor = function() { this.wrappedJSObject = this; this.wm = Components.classes['@mozilla.org/appshell/window-mediator;1']. getService(Components.interfaces.nsIWindowMediator); }; /** * @private {goog.log.Logger} * @const */ nsCommandProcessor.LOG_ = fxdriver.logging.getLogger( 'fxdriver.nsCommandProcessor'); /** * Flags for the {@code nsIClassInfo} interface. * @type {Number} */ nsCommandProcessor.prototype.flags = Components.interfaces.nsIClassInfo.DOM_OBJECT; /** * Implementaiton language detail for the {@code nsIClassInfo} interface. * @type {String} */ nsCommandProcessor.prototype.implementationLanguage = Components.interfaces.nsIProgrammingLanguage.JAVASCRIPT; /** * Processes a command request for the {@code FirefoxDriver}. * @param {string} jsonCommandString The command to execute, specified in a * JSON string. * @param {nsIResponseHandler} responseHandler The callback to send the response * to. */ nsCommandProcessor.prototype.execute = function(jsonCommandString, responseHandler) { var command, response; try { command = JSON.parse(jsonCommandString); } catch (ex) { response = JSON.stringify({ 'status': bot.ErrorCode.UNKNOWN_ERROR, 'value': 'Error parsing command: "' + jsonCommandString + '"' }); responseHandler.handleResponse(response); return; } response = new fxdriver.CommandResponse(command, responseHandler); // These commands do not require a session. if (command.name == 'newSession' || command.name == 'quit' || command.name == 'getStatus' || command.name == 'getWindowHandles') { goog.log.info(nsCommandProcessor.LOG_, 'Received command: ' + command.name); try { this[command.name](response, command.parameters); } catch (ex) { response.sendError(ex); } return; } var sessionId = command.sessionId; if (!sessionId) { response.sendError(new WebDriverError(bot.ErrorCode.UNKNOWN_ERROR, 'No session ID specified')); return; } try { response.session = Components. classes['@googlecode.com/webdriver/wdsessionstoreservice;1']. getService(Components.interfaces.nsISupports). wrappedJSObject. getSession(sessionId). wrappedJSObject; } catch (ex) { response.sendError(new WebDriverError(bot.ErrorCode.UNKNOWN_ERROR, 'Session not found: ' + sessionId)); return; } goog.log.info(nsCommandProcessor.LOG_, 'Received command: ' + command.name); if (command.name == 'getSessionCapabilities' || command.name == 'switchToWindow' || command.name == 'getLog' || command.name == 'getAvailableLogTypes') { return this[command.name](response, command.parameters); } var sessionWindow = response.session.getChromeWindow(); var driver = sessionWindow.fxdriver; // TODO(jmleyba): We only need to store an ID on the window! if (!driver) { response.sendError(new WebDriverError(bot.ErrorCode.UNKNOWN_ERROR, 'Session [' + response.session.getId() + '] has no driver.' + ' The browser window may have been closed.')); return; } try { var contentWindow = sessionWindow.getBrowser().contentWindow; if (!contentWindow) { response.sendError(new WebDriverError(bot.ErrorCode.NO_SUCH_WINDOW, 'Window not found. The browser window may have been closed.')); return; } } catch (ff45) { response.sendError(new WebDriverError(bot.ErrorCode.NO_SUCH_WINDOW, 'Window not found. 
The browser window may have been closed.')); return; } if (driver.modalOpen) { if (command.name != 'getAlertText' && command.name != 'setAlertValue' && command.name != 'acceptAlert' && command.name != 'dismissAlert') { var modalText = driver.modalOpen; var unexpectedAlertBehaviour = fxdriver.modals.getUnexpectedAlertBehaviour(); switch (unexpectedAlertBehaviour) { case 'accept': fxdriver.modals.closeUnhandledAlert(response, driver, true); break; case 'ignore': // do nothing, ignore the alert response.sendError(new WebDriverError(bot.ErrorCode.UNEXPECTED_ALERT_OPEN, 'Modal dialog present', {alert: {text: modalText}})); break; // Dismiss is the default case 'dismiss': default: fxdriver.modals.closeUnhandledAlert(response, driver, false); break; } return; } } if (typeof driver[command.name] != 'function' && typeof WebElement[command.name] != 'function') { response.sendError(new WebDriverError(bot.ErrorCode.UNKNOWN_COMMAND, 'Unrecognised command: ' + command.name)); goog.log.error(nsCommandProcessor.LOG_, 'Unknown command: ' + command.name); return; } if(command.name == 'get' || command.name == 'refresh') { response.session.setWaitForPageLoad(false); } // TODO: should we delay commands if the page is reloaded on itself? // var pageLoadTimeout = response.session.getPageLoadTimeout(); // var shouldWaitForPageLoad = response.session.getWaitForPageLoad(); // if (pageLoadTimeout != 0 && shouldWaitForPageLoad) { // driver.window.setTimeout(function () { // response.session.setWaitForPageLoad(false); // }, pageLoadTimeout); // } response.startCommand(sessionWindow); new DelayedCommand(driver, command, response).execute(0); }; /** * Changes the context of the caller to the specified window. * @param {fxdriver.CommandResponse} response The response object to send the * command response in. * @param {{name: string}} parameters The command parameters. * @param {number} opt_searchAttempt Which attempt this is at finding the * window to switch to. */ nsCommandProcessor.prototype.switchToWindow = function(response, parameters, opt_searchAttempt) { var lookFor = parameters.name; var matches = function(win, lookFor) { return !win.closed && (win.top && win.top.fxdriver) && (win.content && win.content.name == lookFor) || (win.top && win.top.fxdriver && win.top.fxdriver.id == lookFor); }; var windowFound = this.searchWindows_('navigator:browser', function(win) { if (matches(win, lookFor)) { win.focus(); if (win.top.fxdriver) { response.session.setChromeWindow(win.top); response.value = response.session.getId(); response.send(); } else { response.sendError(new WebDriverError(bot.ErrorCode.UNKNOWN_ERROR, 'No driver found attached to top window!')); } // Found the desired window, stop the search. return true; } }); // It is possible that the window won't be found on the first attempt. This is // typically true for anchors with a target attribute set. This search could // execute before the target window has finished loaded, meaning the content // window won't have a name or FirefoxDriver instance yet (see matches above). // If we don't find the window, set a timeout and try again. if (!windowFound) { // TODO(jmleyba): We should be sniffing the current windows to detect if // one is still loading vs. a brute force "try again" var searchAttempt = opt_searchAttempt || 0; if (searchAttempt > 3) { response.sendError(new WebDriverError(bot.ErrorCode.NO_SUCH_WINDOW, 'Unable to locate window "' + lookFor + '"')); } else { var self = this; this.wm.getMostRecentWindow('navigator:browser'). 
setTimeout(function() { self.switchToWindow(response, parameters, (searchAttempt + 1)); }, 500); } } }; /** * Retrieves a list of all known FirefoxDriver windows. * @param {fxdriver.CommandResponse} response The response object to send the * command response in. */ nsCommandProcessor.prototype.getWindowHandles = function(response) { var res = []; this.searchWindows_('navigator:browser', function(win) { if (win.top && win.top.fxdriver) { res.push(win.top.fxdriver.id); } }); response.value = res; response.send(); }; /** * Retrieves the log for the given type. * * @param {!fxdriver.CommandResponse} response The response object to send the * response in. * @param {!Object.<string, *>} parameters The parameters for the call. */ nsCommandProcessor.prototype.getLog = function(response, parameters) { var res = fxdriver.logging.getLog(parameters.type); // Convert log level object to string goog.array.forEach(res, function(entry) { entry.level = entry.level.name; }); response.value = res; response.send(); }; /** * Retrieves available log types. * * @param {!fxdriver.CommandResponse} response The response object to send the * response in. * @param {Object.<string, *>} parameters The parameters for the call. */ nsCommandProcessor.prototype.getAvailableLogTypes = function(response, parameters) { response.value = fxdriver.logging.getAvailableLogTypes(); response.send(); }; /** * Searches over a selection of windows, calling a visitor function on each * window found in the search. * @param {?string} search_criteria The category of windows to search or * {@code null} to search all windows. * @param {function(!Window)} visitor_fn A visitor function to call with each * window. The function may return true to indicate that the window search * should abort early. * @return {boolean} Whether the visitor function short circuited the search. */ nsCommandProcessor.prototype.searchWindows_ = function(search_criteria, visitor_fn) { var allWindows = this.wm.getEnumerator(search_criteria); while (allWindows.hasMoreElements()) { var win = allWindows.getNext(); if (visitor_fn(win)) { return true; } } return false; }; /** * Responds with general status information about this process. * @param {fxdriver.CommandResponse} response The object to send the command * response in. */ nsCommandProcessor.prototype.getStatus = function(response) { var xulRuntime = Components.classes['@mozilla.org/xre/app-info;1']. getService(Components.interfaces.nsIXULRuntime); response.value = { 'os': { 'arch': (function() { try { // See https://developer.mozilla.org/en/XPCOM_ABI return (xulRuntime.XPCOMABI || 'unknown').split('-')[0]; } catch (ignored) { return 'unknown'; } })(), // See https://developer.mozilla.org/en/OS_TARGET 'name': xulRuntime.OS, 'version': 'unknown' }, // TODO: load these values from build.properties 'build': { 'revision': 'unknown', 'time': 'unknown', 'version': 'unknown' } }; response.send(); }; /** * Locates the most recently used FirefoxDriver window. * @param {fxdriver.CommandResponse} response The object to send the command * response in. */ nsCommandProcessor.prototype.newSession = function(response, parameters) { var win = this.wm.getMostRecentWindow('navigator:browser'); var driver = win.fxdriver; if (!driver) { response.sendError(new WebDriverError(bot.ErrorCode.UNKNOWN_ERROR, 'No drivers associated with the window')); } else { var sessionStore = Components. classes['@googlecode.com/webdriver/wdsessionstoreservice;1']. 
getService(Components.interfaces.nsISupports); var desiredCapabilities = parameters['desiredCapabilities']; var requiredCapabilities = parameters['requiredCapabilities']; var session = sessionStore.wrappedJSObject.createSession(response, desiredCapabilities, requiredCapabilities, driver); session = session.wrappedJSObject; // XPConnect... session.setChromeWindow(win); if ('elementScrollBehavior' in desiredCapabilities) { session.elementScrollBehavior = desiredCapabilities['elementScrollBehavior']; } response.session = session; response.sessionId = session.getId(); goog.log.info(nsCommandProcessor.LOG_, 'Created a new session with id: ' + session.getId()); this.getSessionCapabilities(response); } response.send(); }; /** * Describes a session. * @param {fxdriver.CommandResponse} response The object to send the command * response in. */ nsCommandProcessor.prototype.getSessionCapabilities = function(response) { var appInfo = Components.classes['@mozilla.org/xre/app-info;1']. getService(Components.interfaces.nsIXULAppInfo); var xulRuntime = Components.classes['@mozilla.org/xre/app-info;1']. getService(Components.interfaces.nsIXULRuntime); response.value = { 'cssSelectorsEnabled': true, 'browserName': 'firefox', 'handlesAlerts': true, 'javascriptEnabled': true, 'nativeEvents': false, // See https://developer.mozilla.org/en/OS_TARGET 'platform': (xulRuntime.OS == 'WINNT' ? 'WINDOWS' : xulRuntime.OS), 'rotatable': false, 'takesScreenshot': true, 'version': appInfo.version }; var prefStore = fxdriver.moz.getService('@mozilla.org/preferences-service;1', 'nsIPrefService'); for (var cap in wdSessionStoreService.CAPABILITY_PREFERENCE_MAPPING) { var pref = wdSessionStoreService.CAPABILITY_PREFERENCE_MAPPING[cap]; try { response.value[cap] = prefStore.getBoolPref(pref); } catch (e) { try { response.value[cap] = prefStore.getIntPref(pref); } catch (e) { try { response.value[cap] = prefStore.getCharPref(pref); } catch (e) { } } } } response.send(); }; /** * Forcefully shuts down the Firefox application. * @param {fxdriver.CommandResponse} response The object to send the command * response in. */ nsCommandProcessor.prototype.quit = function(response) { // Go ahead and respond to the command request to acknowledge that we are // shutting down. We do this because once we force a quit, there's no way // to respond. Clients will just have to trust that this shutdown didn't // fail. Or they could monitor the PID. Either way, not much we can do about // it in here. response.send(); wdSession.quitBrowser(500); }; nsCommandProcessor.prototype.getInterfaces = function(count) { var ifaces = [ Components.interfaces.nsICommandProcessor, Components.interfaces.nsISupports ]; count.value = ifaces.length; return ifaces; }; nsCommandProcessor.prototype.QueryInterface = function(aIID) { if (!aIID.equals(Components.interfaces.nsICommandProcessor) && !aIID.equals(Components.interfaces.nsISupports)) { throw Components.results.NS_ERROR_NO_INTERFACE; } return this; }; nsCommandProcessor.CLASS_ID = Components.ID('{692e5117-a4a2-4b00-99f7-0685285b4db5}'); nsCommandProcessor.CLASS_NAME = 'Firefox WebDriver CommandProcessor'; nsCommandProcessor.CONTRACT_ID = '@googlecode.com/webdriver/command-processor;1'; /** * Factory object for obtaining a reference to the singleton instance of * {@code CommandProcessor}. 
*/ nsCommandProcessor.Factory = { instance_: null, createInstance: function(aOuter, aIID) { if (aOuter != null) { throw Components.results.NS_ERROR_NO_AGGREGATION; } if (!this.instance_) { this.instance_ = new nsCommandProcessor(); } return this.instance_.QueryInterface(aIID); } }; /** * Module definition for registering this XPCOM component. */ nsCommandProcessor.Module = { firstTime_: true, registerSelf: function(aCompMgr, aFileSpec, aLocation, aType) { if (this.firstTime_) { this.firstTime_ = false; throw Components.results.NS_ERROR_FACTORY_REGISTER_AGAIN; } aCompMgr.QueryInterface(Components.interfaces.nsIComponentRegistrar). registerFactoryLocation( nsCommandProcessor.CLASS_ID, nsCommandProcessor.CLASS_NAME, nsCommandProcessor.CONTRACT_ID, aFileSpec, aLocation, aType); }, unregisterSelf: function(aCompMgr, aLocation) { aCompMgr.QueryInterface(Components.interfaces.nsIComponentRegistrar). unregisterFactoryLocation(nsCommandProcessor.CLASS_ID, aLocation); }, getClassObject: function(aCompMgr, aCID, aIID) { if (!aIID.equals(Components.interfaces.nsIFactory)) { throw Components.results.NS_ERROR_NOT_IMPLEMENTED; } else if (!aCID.equals(nsCommandProcessor.CLASS_ID)) { throw Components.results.NS_ERROR_NO_INTERFACE; } return nsCommandProcessor.Factory; }, canUnload: function() { return true; } }; /** * Module initialization. */ NSGetModule = function() { return nsCommandProcessor.Module; }; nsCommandProcessor.prototype.classID = nsCommandProcessor.CLASS_ID; fxdriver.moz.load('resource://gre/modules/XPCOMUtils.jsm'); if (XPCOMUtils.generateNSGetFactory) { /** @const */ NSGetFactory = XPCOMUtils.generateNSGetFactory([nsCommandProcessor]); }
1
13,586
/javascript/firefox-driver is Selenium's own WebDriver implementation for Firefox. Since it generally isn't W3C compatible, it shouldn't change to the W3C parameter names; we can just drop this change. (A sketch of a lookup tolerant of both spellings follows this record, purely for reference.)
SeleniumHQ-selenium
js
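Purely for reference, and not what the review recommends (which is dropping the patch): a hedged sketch of what a parameter lookup tolerant of both wire formats could look like inside `switchToWindow`. `parameters.handle` is the W3C spelling; `parameters.name` is the legacy JSON wire protocol spelling that fxdriver actually implements.

```js
// Sketch only: accept either parameter key instead of switching outright.
// Prefers the W3C `handle` key if a caller sends it, and falls back to the
// legacy `name` key, which is what this driver supports today.
var lookFor = parameters.handle || parameters.name;
```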
@@ -361,6 +361,11 @@ class WebView(QWebView): message.info(self.win_id, "Zoom level: {}%".format(perc)) self._default_zoom_changed = True + def setZoomFactor(self, fact): + self._zoom.fuzzyval = int(fact * 100) + super().setZoomFactor(fact) + self._default_zoom_changed = True + def zoom(self, offset): """Increase/Decrease the zoom level.
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """The main browser widgets.""" import sys import itertools import functools from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QTimer, QUrl from PyQt5.QtWidgets import QApplication, QStyleFactory from PyQt5.QtWebKit import QWebSettings from PyQt5.QtWebKitWidgets import QWebView, QWebPage from qutebrowser.config import config from qutebrowser.keyinput import modeman from qutebrowser.utils import message, log, usertypes, utils, qtutils, objreg from qutebrowser.browser import webpage, hints, webelem from qutebrowser.commands import cmdexc LoadStatus = usertypes.enum('LoadStatus', ['none', 'success', 'error', 'warn', 'loading']) tab_id_gen = itertools.count(0) class WebView(QWebView): """One browser tab in TabbedBrowser. Our own subclass of a QWebView with some added bells and whistles. Attributes: hintmanager: The HintManager instance for this view. progress: loading progress of this page. scroll_pos: The current scroll position as (x%, y%) tuple. statusbar_message: The current javascript statusbar message. inspector: The QWebInspector used for this webview. load_status: loading status of this page (index into LoadStatus) viewing_source: Whether the webview is currently displaying source code. keep_icon: Whether the (e.g. cloned) icon should not be cleared on page load. registry: The ObjectRegistry associated with this tab. tab_id: The tab ID of the view. win_id: The window ID of the view. search_text: The text of the last search. search_flags: The search flags of the last search. _cur_url: The current URL (accessed via cur_url property). _has_ssl_errors: Whether SSL errors occurred during loading. _zoom: A NeighborList with the zoom levels. _old_scroll_pos: The old scroll position. _check_insertmode: If True, in mouseReleaseEvent we should check if we need to enter/leave insert mode. _default_zoom_changed: Whether the zoom was changed from the default. Signals: scroll_pos_changed: Scroll percentage of current tab changed. arg 1: x-position in %. arg 2: y-position in %. linkHovered: QWebPages linkHovered signal exposed. load_status_changed: The loading status changed url_text_changed: Current URL string changed. shutting_down: Emitted when the view is shutting down. 
""" scroll_pos_changed = pyqtSignal(int, int) linkHovered = pyqtSignal(str, str, str) load_status_changed = pyqtSignal(str) url_text_changed = pyqtSignal(str) shutting_down = pyqtSignal() def __init__(self, win_id, parent=None): super().__init__(parent) if sys.platform == 'darwin' and qtutils.version_check('5.4'): # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-42948 # See https://github.com/The-Compiler/qutebrowser/issues/462 self.setStyle(QStyleFactory.create('Fusion')) self.win_id = win_id self.load_status = LoadStatus.none self._check_insertmode = False self.inspector = None self.scroll_pos = (-1, -1) self.statusbar_message = '' self._old_scroll_pos = (-1, -1) self._zoom = None self._has_ssl_errors = False self.keep_icon = False self.search_text = None self.search_flags = 0 self.selection_enabled = False self.init_neighborlist() cfg = objreg.get('config') cfg.changed.connect(self.init_neighborlist) # For some reason, this signal doesn't get disconnected automatically # when the WebView is destroyed on older PyQt versions. # See https://github.com/The-Compiler/qutebrowser/issues/390 self.destroyed.connect(functools.partial( cfg.changed.disconnect, self.init_neighborlist)) self._cur_url = None self.cur_url = QUrl() self.progress = 0 self.registry = objreg.ObjectRegistry() self.tab_id = next(tab_id_gen) tab_registry = objreg.get('tab-registry', scope='window', window=win_id) tab_registry[self.tab_id] = self objreg.register('webview', self, registry=self.registry) page = self._init_page() hintmanager = hints.HintManager(win_id, self.tab_id, self) hintmanager.mouse_event.connect(self.on_mouse_event) hintmanager.start_hinting.connect(page.on_start_hinting) hintmanager.stop_hinting.connect(page.on_stop_hinting) objreg.register('hintmanager', hintmanager, registry=self.registry) mode_manager = objreg.get('mode-manager', scope='window', window=win_id) mode_manager.entered.connect(self.on_mode_entered) mode_manager.left.connect(self.on_mode_left) self.viewing_source = False self.setZoomFactor(float(config.get('ui', 'default-zoom')) / 100) self._default_zoom_changed = False if config.get('input', 'rocker-gestures'): self.setContextMenuPolicy(Qt.PreventContextMenu) self.urlChanged.connect(self.on_url_changed) self.loadProgress.connect(lambda p: setattr(self, 'progress', p)) objreg.get('config').changed.connect(self.on_config_changed) def _init_page(self): """Initialize the QWebPage used by this view.""" page = webpage.BrowserPage(self.win_id, self.tab_id, self) self.setPage(page) page.linkHovered.connect(self.linkHovered) page.mainFrame().loadStarted.connect(self.on_load_started) page.mainFrame().loadFinished.connect(self.on_load_finished) page.statusBarMessage.connect( lambda msg: setattr(self, 'statusbar_message', msg)) page.networkAccessManager().sslErrors.connect( lambda *args: setattr(self, '_has_ssl_errors', True)) return page def __repr__(self): url = utils.elide(self.url().toDisplayString(), 50) return utils.get_repr(self, tab_id=self.tab_id, url=url) def __del__(self): # Explicitely releasing the page here seems to prevent some segfaults # when quitting. 
# Copied from: # https://code.google.com/p/webscraping/source/browse/webkit.py#325 try: self.setPage(None) except RuntimeError: # It seems sometimes Qt has already deleted the QWebView and we # get: RuntimeError: wrapped C/C++ object of type WebView has been # deleted pass def _set_load_status(self, val): """Setter for load_status.""" if not isinstance(val, LoadStatus): raise TypeError("Type {} is no LoadStatus member!".format(val)) log.webview.debug("load status for {}: {}".format(repr(self), val)) self.load_status = val self.load_status_changed.emit(val.name) @pyqtSlot(str, str) def on_config_changed(self, section, option): """Reinitialize the zoom neighborlist if related config changed.""" if section == 'ui' and option in ('zoom-levels', 'default-zoom'): if not self._default_zoom_changed: self.setZoomFactor(float(config.get('ui', 'default-zoom')) / 100) self._default_zoom_changed = False self.init_neighborlist() elif section == 'input' and option == 'rocker-gestures': if config.get('input', 'rocker-gestures'): self.setContextMenuPolicy(Qt.PreventContextMenu) else: self.setContextMenuPolicy(Qt.DefaultContextMenu) def init_neighborlist(self): """Initialize the _zoom neighborlist.""" levels = config.get('ui', 'zoom-levels') self._zoom = usertypes.NeighborList( levels, mode=usertypes.NeighborList.Modes.block) self._zoom.fuzzyval = config.get('ui', 'default-zoom') def _mousepress_backforward(self, e): """Handle back/forward mouse button presses. Args: e: The QMouseEvent. """ if e.button() in (Qt.XButton1, Qt.LeftButton): # Back button on mice which have it, or rocker gesture if self.page().history().canGoBack(): self.back() else: message.error(self.win_id, "At beginning of history.", immediately=True) elif e.button() in (Qt.XButton2, Qt.RightButton): # Forward button on mice which have it, or rocker gesture if self.page().history().canGoForward(): self.forward() else: message.error(self.win_id, "At end of history.", immediately=True) def _mousepress_insertmode(self, e): """Switch to insert mode when an editable element was clicked. Args: e: The QMouseEvent. """ pos = e.pos() frame = self.page().frameAt(pos) if frame is None: # This happens when we click inside the webview, but not actually # on the QWebPage - for example when clicking the scrollbar # sometimes. log.mouse.debug("Clicked at {} but frame is None!".format(pos)) return # You'd think we have to subtract frame.geometry().topLeft() from the # position, but it seems QWebFrame::hitTestContent wants a position # relative to the QWebView, not to the frame. This makes no sense to # me, but it works this way. hitresult = frame.hitTestContent(pos) if hitresult.isNull(): # For some reason, the whole hit result can be null sometimes (e.g. # on doodle menu links). If this is the case, we schedule a check # later (in mouseReleaseEvent) which uses webelem.focus_elem. log.mouse.debug("Hitresult is null!") self._check_insertmode = True return try: elem = webelem.WebElementWrapper(hitresult.element()) except webelem.IsNullError: # For some reason, the hit result element can be a null element # sometimes (e.g. when clicking the timetable fields on # http://www.sbb.ch/ ). If this is the case, we schedule a check # later (in mouseReleaseEvent) which uses webelem.focus_elem. 
log.mouse.debug("Hitresult element is null!") self._check_insertmode = True return if ((hitresult.isContentEditable() and elem.is_writable()) or elem.is_editable()): log.mouse.debug("Clicked editable element!") modeman.enter(self.win_id, usertypes.KeyMode.insert, 'click', only_if_normal=True) else: log.mouse.debug("Clicked non-editable element!") if config.get('input', 'auto-leave-insert-mode'): modeman.maybe_leave(self.win_id, usertypes.KeyMode.insert, 'click') def mouserelease_insertmode(self): """If we have an insertmode check scheduled, handle it.""" if not self._check_insertmode: return self._check_insertmode = False try: elem = webelem.focus_elem(self.page().currentFrame()) except (webelem.IsNullError, RuntimeError): log.mouse.warning("Element/page vanished!") return if elem.is_editable(): log.mouse.debug("Clicked editable element (delayed)!") modeman.enter(self.win_id, usertypes.KeyMode.insert, 'click-delayed', only_if_normal=True) else: log.mouse.debug("Clicked non-editable element (delayed)!") if config.get('input', 'auto-leave-insert-mode'): modeman.maybe_leave(self.win_id, usertypes.KeyMode.insert, 'click-delayed') def _mousepress_opentarget(self, e): """Set the open target when something was clicked. Args: e: The QMouseEvent. """ if e.button() == Qt.MidButton or e.modifiers() & Qt.ControlModifier: background_tabs = config.get('tabs', 'background-tabs') if e.modifiers() & Qt.ShiftModifier: background_tabs = not background_tabs if background_tabs: target = usertypes.ClickTarget.tab_bg else: target = usertypes.ClickTarget.tab self.page().open_target = target log.mouse.debug("Middle click, setting target: {}".format(target)) else: self.page().open_target = usertypes.ClickTarget.normal log.mouse.debug("Normal click, setting normal target") def shutdown(self): """Shut down the webview.""" self.shutting_down.emit() # We disable javascript because that prevents some segfaults when # quitting it seems. log.destroy.debug("Shutting down {!r}.".format(self)) settings = self.settings() settings.setAttribute(QWebSettings.JavascriptEnabled, False) self.stop() self.page().shutdown() def openurl(self, url): """Open a URL in the browser. Args: url: The URL to load as QUrl """ qtutils.ensure_valid(url) urlstr = url.toDisplayString() log.webview.debug("New title: {}".format(urlstr)) self.titleChanged.emit(urlstr) self.cur_url = url self.url_text_changed.emit(url.toDisplayString()) self.load(url) if url.scheme() == 'qute': frame = self.page().mainFrame() frame.javaScriptWindowObjectCleared.connect(self.add_js_bridge) def add_js_bridge(self): """Add the javascript bridge for qute:... pages.""" frame = self.sender() if frame.url().scheme() == 'qute': bridge = objreg.get('js-bridge') frame.addToJavaScriptWindowObject('qute', bridge) def zoom_perc(self, perc, fuzzyval=True): """Zoom to a given zoom percentage. Args: perc: The zoom percentage as int. fuzzyval: Whether to set the NeighborLists fuzzyval. """ if fuzzyval: self._zoom.fuzzyval = int(perc) if perc < 0: raise cmdexc.CommandError("Can't zoom {}%!".format(perc)) self.setZoomFactor(float(perc) / 100) message.info(self.win_id, "Zoom level: {}%".format(perc)) self._default_zoom_changed = True def zoom(self, offset): """Increase/Decrease the zoom level. Args: offset: The offset in the zoom level list. """ level = self._zoom.getitem(offset) self.zoom_perc(level, fuzzyval=False) @pyqtSlot('QUrl') def on_url_changed(self, url): """Update cur_url when URL has changed. If the URL is invalid, we just ignore it here. 
""" if url.isValid(): self.cur_url = url self.url_text_changed.emit(url.toDisplayString()) if not self.title(): self.titleChanged.emit(self.url().toDisplayString()) @pyqtSlot('QMouseEvent') def on_mouse_event(self, evt): """Post a new mouse event from a hintmanager.""" log.modes.debug("Hint triggered, focusing {!r}".format(self)) self.setFocus() QApplication.postEvent(self, evt) @pyqtSlot() def on_load_started(self): """Leave insert/hint mode and set vars when a new page is loading.""" self.progress = 0 self.viewing_source = False self._has_ssl_errors = False self._set_load_status(LoadStatus.loading) @pyqtSlot() def on_load_finished(self): """Handle a finished page load. We don't take loadFinished's ok argument here as it always seems to be true when the QWebPage has an ErrorPageExtension implemented. See https://github.com/The-Compiler/qutebrowser/issues/84 """ ok = not self.page().error_occurred if ok and not self._has_ssl_errors: self._set_load_status(LoadStatus.success) elif ok: self._set_load_status(LoadStatus.warn) else: self._set_load_status(LoadStatus.error) if not self.title(): self.titleChanged.emit(self.url().toDisplayString()) self._handle_auto_insert_mode(ok) def _handle_auto_insert_mode(self, ok): """Handle auto-insert-mode after loading finished.""" if not config.get('input', 'auto-insert-mode'): return mode_manager = objreg.get('mode-manager', scope='window', window=self.win_id) cur_mode = mode_manager.mode if cur_mode == usertypes.KeyMode.insert or not ok: return frame = self.page().currentFrame() try: elem = webelem.WebElementWrapper(frame.findFirstElement(':focus')) except webelem.IsNullError: log.webview.debug("Focused element is null!") return log.modes.debug("focus element: {}".format(repr(elem))) if elem.is_editable(): modeman.enter(self.win_id, usertypes.KeyMode.insert, 'load finished', only_if_normal=True) @pyqtSlot(usertypes.KeyMode) def on_mode_entered(self, mode): """Ignore attempts to focus the widget if in any status-input mode.""" if mode in (usertypes.KeyMode.command, usertypes.KeyMode.prompt, usertypes.KeyMode.yesno): log.webview.debug("Ignoring focus because mode {} was " "entered.".format(mode)) self.setFocusPolicy(Qt.NoFocus) elif mode == usertypes.KeyMode.caret: settings = self.settings() settings.setAttribute(QWebSettings.CaretBrowsingEnabled, True) self.selection_enabled = False if self.isVisible(): # Sometimes the caret isn't immediately visible, but unfocusing # and refocusing it fixes that. self.clearFocus() self.setFocus(Qt.OtherFocusReason) self.page().currentFrame().evaluateJavaScript( utils.read_file('javascript/position_caret.js')) @pyqtSlot(usertypes.KeyMode) def on_mode_left(self, mode): """Restore focus policy if status-input modes were left.""" if mode in (usertypes.KeyMode.command, usertypes.KeyMode.prompt, usertypes.KeyMode.yesno): log.webview.debug("Restoring focus policy because mode {} was " "left.".format(mode)) elif mode == usertypes.KeyMode.caret: settings = self.settings() if settings.testAttribute(QWebSettings.CaretBrowsingEnabled): if self.selection_enabled and self.hasSelection(): # Remove selection if it exists self.triggerPageAction(QWebPage.MoveToNextChar) settings.setAttribute(QWebSettings.CaretBrowsingEnabled, False) self.selection_enabled = False self.setFocusPolicy(Qt.WheelFocus) def search(self, text, flags): """Search for text in the current page. Args: text: The text to search for. flags: The QWebPage::FindFlags. 
""" log.webview.debug("Searching with text '{}' and flags " "0x{:04x}.".format(text, int(flags))) old_scroll_pos = self.scroll_pos flags = QWebPage.FindFlags(flags) found = self.findText(text, flags) backward = flags & QWebPage.FindBackward if not found and not flags & QWebPage.HighlightAllOccurrences and text: # User disabled wrapping; but findText() just returns False. If we # have a selection, we know there's a match *somewhere* on the page if (not flags & QWebPage.FindWrapsAroundDocument and self.hasSelection()): if not backward: message.warning(self.win_id, "Search hit BOTTOM without " "match for: {}".format(text), immediately=True) else: message.warning(self.win_id, "Search hit TOP without " "match for: {}".format(text), immediately=True) else: message.error(self.win_id, "Text '{}' not found on " "page!".format(text), immediately=True) else: def check_scroll_pos(): """Check if the scroll position got smaller and show info.""" if not backward and self.scroll_pos < old_scroll_pos: message.info(self.win_id, "Search hit BOTTOM, continuing " "at TOP", immediately=True) elif backward and self.scroll_pos > old_scroll_pos: message.info(self.win_id, "Search hit TOP, continuing at " "BOTTOM", immediately=True) # We first want QWebPage to refresh. QTimer.singleShot(0, check_scroll_pos) def createWindow(self, wintype): """Called by Qt when a page wants to create a new window. This function is called from the createWindow() method of the associated QWebPage, each time the page wants to create a new window of the given type. This might be the result, for example, of a JavaScript request to open a document in a new window. Args: wintype: This enum describes the types of window that can be created by the createWindow() function. QWebPage::WebBrowserWindow: The window is a regular web browser window. QWebPage::WebModalDialog: The window acts as modal dialog. Return: The new QWebView object. """ if wintype == QWebPage.WebModalDialog: log.webview.warning("WebModalDialog requested, but we don't " "support that!") tabbed_browser = objreg.get('tabbed-browser', scope='window', window=self.win_id) return tabbed_browser.tabopen(background=False) def paintEvent(self, e): """Extend paintEvent to emit a signal if the scroll position changed. This is a bit of a hack: We listen to repaint requests here, in the hope a repaint will always be requested when scrolling, and if the scroll position actually changed, we emit a signal. Args: e: The QPaintEvent. Return: The superclass event return value. """ frame = self.page().mainFrame() new_pos = (frame.scrollBarValue(Qt.Horizontal), frame.scrollBarValue(Qt.Vertical)) if self._old_scroll_pos != new_pos: self._old_scroll_pos = new_pos m = (frame.scrollBarMaximum(Qt.Horizontal), frame.scrollBarMaximum(Qt.Vertical)) perc = (round(100 * new_pos[0] / m[0]) if m[0] != 0 else 0, round(100 * new_pos[1] / m[1]) if m[1] != 0 else 0) self.scroll_pos = perc self.scroll_pos_changed.emit(*perc) # Let superclass handle the event super().paintEvent(e) def mousePressEvent(self, e): """Extend QWidget::mousePressEvent(). This does the following things: - Check if a link was clicked with the middle button or Ctrl and set the page's open_target attribute accordingly. - Emit the editable_elem_selected signal if an editable element was clicked. Args: e: The arrived event. Return: The superclass return value. 
""" is_rocker_gesture = (config.get('input', 'rocker-gestures') and e.buttons() == Qt.LeftButton | Qt.RightButton) if e.button() in (Qt.XButton1, Qt.XButton2) or is_rocker_gesture: self._mousepress_backforward(e) super().mousePressEvent(e) return self._mousepress_insertmode(e) self._mousepress_opentarget(e) super().mousePressEvent(e) def mouseReleaseEvent(self, e): """Extend mouseReleaseEvent to enter insert mode if needed.""" super().mouseReleaseEvent(e) # We want to make sure we check the focus element after the WebView is # updated completely. QTimer.singleShot(0, self.mouserelease_insertmode) def contextMenuEvent(self, e): """Save a reference to the context menu so we can close it.""" menu = self.page().createStandardContextMenu() self.shutting_down.connect(menu.close) menu.exec_(e.globalPos()) def wheelEvent(self, e): """Zoom on Ctrl-Mousewheel. Args: e: The QWheelEvent. """ if e.modifiers() & Qt.ControlModifier: e.accept() divider = config.get('input', 'mouse-zoom-divider') factor = self.zoomFactor() + e.angleDelta().y() / divider if factor < 0: return perc = int(100 * factor) message.info(self.win_id, "Zoom level: {}%".format(perc)) self._zoom.fuzzyval = perc self.setZoomFactor(factor) self._default_zoom_changed = True else: super().wheelEvent(e)
1
13,096
There are various places where `setZoomFactor` is used but `fuzzyval` isn't set:

- `__init__` and `on_config_changed` (should be okay, as `init_neighborlist` gets called, which essentially does the same)
- `zoom` (which calls `zoom_perc` with `fuzzyval=False`), used by `:zoom-in` and `:zoom-out`

Are you sure this won't break those calls? (Yes, I know there should be tests for this - I'm working on that.) Other existing places should probably be simplified; one hedged idea is sketched after this record.
qutebrowser-qutebrowser
py
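A minimal sketch of one way the new override could avoid clobbering the `fuzzyval=False` path used by `:zoom-in`/`:zoom-out`. This is a drop-in method for the `WebView` class above; the `update_fuzzyval` keyword is a hypothetical addition, not part of the patch or the file:

```python
# Hypothetical variant of the patched override. Qt and external callers use
# the one-argument form, so fuzzyval is updated by default; internal callers
# that manage fuzzyval themselves (like zoom_perc with fuzzyval=False) could
# opt out explicitly with update_fuzzyval=False.
def setZoomFactor(self, fact, update_fuzzyval=True):
    if update_fuzzyval:
        self._zoom.fuzzyval = int(fact * 100)
    super().setZoomFactor(fact)
    self._default_zoom_changed = True
```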
@@ -889,12 +889,12 @@ namespace pwiz.Skyline.Model string pathForLibraryFiles, // In case we translate libraries etc ConvertToSmallMoleculesMode mode = ConvertToSmallMoleculesMode.formulas, ConvertToSmallMoleculesChargesMode invertChargesMode = ConvertToSmallMoleculesChargesMode.none, - bool ignoreDecoys=false) + bool ignoreDecoys=false, bool addAnnotations = true) { if (mode == ConvertToSmallMoleculesMode.none) return document; var newdoc = new SrmDocument(document.Settings); - var note = new Annotations(TestingConvertedFromProteomic, null, 1); // Mark this as a testing node so we don't sort it + var note = addAnnotations ? new Annotations(TestingConvertedFromProteomic, null, 1) : Annotations.EMPTY; // Optionally mark this as a testing node so we don't sort it var precursorMap = new Dictionary<LibKey, LibKey>(); // Map int,modSeq to adduct,molecule var invertCharges = invertChargesMode == ConvertToSmallMoleculesChargesMode.invert;
1
/* * Original author: Brendan MacLean <brendanx .at. u.washington.edu>, * MacCoss Lab, Department of Genome Sciences, UW * * Copyright 2009 University of Washington - Seattle, WA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using System; using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Linq; using pwiz.Common.DataBinding; using pwiz.Common.SystemUtil; using pwiz.Skyline.Controls.Graphs; using pwiz.Skyline.Model.AuditLog; using pwiz.Skyline.Model.DocSettings; using pwiz.Skyline.Model.DocSettings.Extensions; using pwiz.Skyline.Model.IonMobility; using pwiz.Skyline.Model.Lib; using pwiz.Skyline.Model.Lib.BlibData; using pwiz.Skyline.Model.Results; using pwiz.Skyline.Model.Results.Scoring; using pwiz.Skyline.Properties; using pwiz.Skyline.Util; namespace pwiz.Skyline.Model { [Flags] public enum PickLevel { peptides = 0x1, precursors = 0x2, transitions = 0x4, all = peptides | precursors | transitions } public static class DecoyGeneration { public static string ADD_RANDOM { get { return Resources.DecoyGeneration_ADD_RANDOM_Random_Mass_Shift; } } public static string SHUFFLE_SEQUENCE { get { return Resources.DecoyGeneration_SHUFFLE_SEQUENCE_Shuffle_Sequence; } } public static string REVERSE_SEQUENCE { get { return Resources.DecoyGeneration_REVERSE_SEQUENCE_Reverse_Sequence; } } public static IEnumerable<string> Methods { get { return new[] { SHUFFLE_SEQUENCE, REVERSE_SEQUENCE, ADD_RANDOM }; } } } public sealed class RefinementSettings : AuditLogOperationSettings<RefinementSettings>, IAuditLogComparable { private bool _removeDuplicatePeptides; public RefinementSettings() { NormalizationMethod = AreaCVNormalizationMethod.none; MSLevel = AreaCVMsLevel.products; } public override MessageInfo MessageInfo { get { return new MessageInfo(MessageType.refined_targets, SrmDocument.DOCUMENT_TYPE.none); } } public struct PeptideCharge { public PeptideCharge(Target sequence, Adduct charge) : this() { Sequence = sequence; Charge = charge; } public Target Sequence { get; private set; } public Adduct Charge { get; private set; } } public enum ProteinSpecType { name, accession, preferred } public object GetDefaultObject(ObjectInfo<object> info) { return new RefinementSettings(); } // Document [Track] public int? MinPeptidesPerProtein { get; set; } [Track] public bool RemoveRepeatedPeptides { get; set; } [Track] public bool RemoveDuplicatePeptides { get { return _removeDuplicatePeptides; } set { _removeDuplicatePeptides = value; // Removing duplicate peptides implies removing // repeated peptids. if (_removeDuplicatePeptides) RemoveRepeatedPeptides = true; } } [Track] public bool RemoveMissingLibrary { get; set; } [Track] public int? 
MinTransitionsPepPrecursor { get; set; } private class RefineLabelTypeLocalizer : CustomPropertyLocalizer { private static readonly string ADD_LABEL_TYPE = @"AddRefineLabelType"; private static readonly string REMOVE_LABEL_TYPE = @"RefineLabelType"; public RefineLabelTypeLocalizer() : base(PropertyPath.Parse(@"AddLabelType"), true) { } private string LocalizeInternal(object obj) { if (obj == null || obj.GetType() != typeof(bool)) return null; return (bool) obj ? ADD_LABEL_TYPE : REMOVE_LABEL_TYPE; } protected override string Localize(ObjectPair<object> objectPair) { return LocalizeInternal(objectPair.NewObject) ?? LocalizeInternal(objectPair.OldObject); } public override string[] PossibleResourceNames => new[] {ADD_LABEL_TYPE, REMOVE_LABEL_TYPE}; } [Track(customLocalizer:typeof(RefineLabelTypeLocalizer))] public IsotopeLabelType RefineLabelType { get; set; } public bool AddLabelType { get; set; } public PickLevel AutoPickChildrenAll { get; set; } [Track] public bool AutoPickPeptidesAll { get { return (AutoPickChildrenAll & PickLevel.peptides) != 0; } } [Track] public bool AutoPickPrecursorsAll { get { return (AutoPickChildrenAll & PickLevel.precursors) != 0; } } [Track] public bool AutoPickTransitionsAll { get { return (AutoPickChildrenAll & PickLevel.transitions) != 0; } } // Results [Track] public double? MinPeakFoundRatio { get; set; } [Track] public double? MaxPeakFoundRatio { get; set; } [Track] public double? MaxPepPeakRank { get; set; } [Track] public bool MaxPrecursorPeakOnly { get; set; } [Track] public double? MaxPeakRank { get; set; } public IEnumerable<LibraryKey> AcceptedPeptides { get; set; } public IEnumerable<string> AcceptedProteins { get; set; } public ProteinSpecType AcceptProteinType { get; set; } public bool AcceptModified { get; set; } // Some properties, including this one are not tracked, // since they are not used by the Edit > Refine > Advanced dialog. // These properties create their own log messages public int? MinPrecursorsPerPeptide { get; set; } [Track] public bool PreferLargeIons { get; set; } [Track] public bool RemoveMissingResults { get; set; } [Track] public double? RTRegressionThreshold { get; set; } public int? RTRegressionPrecision { get; set; } [Track] public double? DotProductThreshold { get; set; } [Track] public double? IdotProductThreshold { get; set; } [Track] ReplicateInclusion ReplInclusion { get { return UseBestResult ? ReplicateInclusion.best : ReplicateInclusion.all; } } public bool UseBestResult { get; set; } public bool AutoPickChildrenOff { get; set; } public int NumberOfDecoys { get; set; } public string DecoysMethod { get; set; } public enum ReplicateInclusion { all, best } // Consistency [Track] public double? CVCutoff { get; set; } [Track] public double? QValueCutoff { get; set; } [Track] public int? MinimumDetections { get; set; } [Track] public AreaCVNormalizationMethod NormalizationMethod { get; set; } [Track] public IsotopeLabelType NormalizationLabelType { get; set; } [Track] public AreaCVTransitions Transitions { get; set; } [Track] public int? CountTransitions { get; set; } [Track] public AreaCVMsLevel MSLevel { get; set; } public SrmDocument Refine(SrmDocument document) { return Refine(document, null); } public SrmDocument Refine(SrmDocument document, SrmSettingsChangeMonitor progressMonitor) { HashSet<int> outlierIds = new HashSet<int>(); if (RTRegressionThreshold.HasValue) { // TODO: Move necessary code into Model. 
var outliers = RTLinearRegressionGraphPane.CalcOutliers(document, RTRegressionThreshold.Value, RTRegressionPrecision, UseBestResult); foreach (var nodePep in outliers) outlierIds.Add(nodePep.Id.GlobalIndex); } HashSet<RefinementIdentity> includedPeptides = (RemoveRepeatedPeptides ? new HashSet<RefinementIdentity>() : null); HashSet<RefinementIdentity> repeatedPeptides = (RemoveDuplicatePeptides ? new HashSet<RefinementIdentity>() : null); TargetMap<List<Adduct>> acceptedPeptides = null; if (AcceptedPeptides != null) { var acceptedPeptidesDict = new Dictionary<Target, List<Adduct>>(); foreach (var peptideCharge in AcceptedPeptides) { List<Adduct> charges; if (!acceptedPeptidesDict.TryGetValue(peptideCharge.Target, out charges)) { charges = !peptideCharge.Adduct.IsEmpty ? new List<Adduct> {peptideCharge.Adduct} : null; acceptedPeptidesDict.Add(peptideCharge.Target, charges); } else if (charges != null) { if (!peptideCharge.Adduct.IsEmpty) charges.Add(peptideCharge.Adduct); else acceptedPeptidesDict[peptideCharge.Target] = null; } } acceptedPeptides = new TargetMap<List<Adduct>>(acceptedPeptidesDict); } HashSet<string> acceptedProteins = (AcceptedProteins != null ? new HashSet<string>(AcceptedProteins) : null); var listPepGroups = new List<PeptideGroupDocNode>(); // Excluding proteins with too few peptides, since they can impact results // of the duplicate peptide check. int minPeptides = MinPeptidesPerProtein ?? 0; foreach (PeptideGroupDocNode nodePepGroup in document.Children) { if (progressMonitor != null) progressMonitor.ProcessGroup(nodePepGroup); if (acceptedProteins != null && !acceptedProteins.Contains(GetAcceptProteinKey(nodePepGroup))) continue; PeptideGroupDocNode nodePepGroupRefined = nodePepGroup; // If auto-managing all peptides, make sure this flag is set correctly, // and update the peptides list, if necessary. if (AutoPickPeptidesAll && nodePepGroup.AutoManageChildren == AutoPickChildrenOff) { nodePepGroupRefined = (PeptideGroupDocNode) nodePepGroupRefined.ChangeAutoManageChildren(!AutoPickChildrenOff); var settings = document.Settings; if (!AutoPickChildrenOff && !settings.PeptideSettings.Filter.AutoSelect) settings = settings.ChangePeptideFilter(filter => filter.ChangeAutoSelect(true)); nodePepGroupRefined = nodePepGroupRefined.ChangeSettings(settings, new SrmSettingsDiff(true, false, false, false, false, false, progressMonitor)); } nodePepGroupRefined = Refine(nodePepGroupRefined, document, outlierIds, includedPeptides, repeatedPeptides, acceptedPeptides, progressMonitor); if (nodePepGroupRefined.Children.Count < minPeptides) continue; listPepGroups.Add(nodePepGroupRefined); } // Need a second pass, if all duplicate peptides should be removed, // and duplicates were found. if (repeatedPeptides != null && repeatedPeptides.Count > 0) { var listPepGroupsFiltered = new List<PeptideGroupDocNode>(); foreach (PeptideGroupDocNode nodePepGroup in listPepGroups) { var listPeptides = new List<PeptideDocNode>(); foreach (PeptideDocNode nodePep in nodePepGroup.Children) { var identity = nodePep.Peptide.IsCustomMolecule ? 
new RefinementIdentity(nodePep.Peptide.CustomMolecule) : new RefinementIdentity(document.Settings.GetModifiedSequence(nodePep)); if (!repeatedPeptides.Contains(identity)) listPeptides.Add(nodePep); } PeptideGroupDocNode nodePepGroupRefined = (PeptideGroupDocNode) nodePepGroup.ChangeChildrenChecked(listPeptides.ToArray(), true); if (nodePepGroupRefined.Children.Count < minPeptides) continue; listPepGroupsFiltered.Add(nodePepGroupRefined); } listPepGroups = listPepGroupsFiltered; } var refined = (SrmDocument)document.ChangeChildrenChecked(listPepGroups.ToArray(), true); if (CVCutoff.HasValue || QValueCutoff.HasValue) { if (!document.Settings.HasResults || document.MeasuredResults.Chromatograms.Count < 2) { throw new Exception( Resources.RefinementSettings_Refine_The_document_must_contain_at_least_2_replicates_to_refine_based_on_consistency_); } if (NormalizationMethod == AreaCVNormalizationMethod.global_standards && !document.Settings.HasGlobalStandardArea) { // error throw new Exception(Resources.RefinementSettings_Refine_The_document_does_not_have_a_global_standard_to_normalize_by_); } double cvcutoff = CVCutoff.HasValue ? CVCutoff.Value : double.NaN; double qvalue = QValueCutoff.HasValue ? QValueCutoff.Value : double.NaN; int minDetections = MinimumDetections.HasValue ? MinimumDetections.Value : -1; int ratioIndex = GetLabelIndex(NormalizationLabelType, document); int countTransitions = CountTransitions.HasValue ? CountTransitions.Value : -1; var data = new AreaCVRefinementData(refined, new AreaCVRefinementSettings(cvcutoff, qvalue, minDetections, NormalizationMethod, ratioIndex, Transitions, countTransitions, MSLevel)); refined = data.RemoveAboveCVCutoff(refined); } return refined; } private int GetLabelIndex(IsotopeLabelType type, SrmDocument doc) { if (type != null) { var mods = doc.Settings.PeptideSettings.Modifications.RatioInternalStandardTypes; var idx = mods.IndexOf(mod => Equals(mod.Name, type.Name)); if (idx == -1) { // error throw new Exception(Resources.RefinementSettings_GetLabelIndex_The_document_does_not_contain_the_given_reference_type_); } return idx; } return -1; } private string GetAcceptProteinKey(PeptideGroupDocNode nodePepGroup) { switch (AcceptProteinType) { case ProteinSpecType.accession: return nodePepGroup.ProteinMetadata.Accession; case ProteinSpecType.preferred: return nodePepGroup.ProteinMetadata.PreferredName; } return nodePepGroup.Name; } private PeptideGroupDocNode Refine(PeptideGroupDocNode nodePepGroup, SrmDocument document, ICollection<int> outlierIds, ICollection<RefinementIdentity> includedPeptides, ICollection<RefinementIdentity> repeatedPeptides, TargetMap<List<Adduct>> acceptedPeptides, SrmSettingsChangeMonitor progressMonitor) { var listPeptides = new List<PeptideDocNode>(); int minPrecursors = MinPrecursorsPerPeptide ?? 0; foreach (PeptideDocNode nodePep in nodePepGroup.Children) { if (progressMonitor != null) progressMonitor.ProcessMolecule(nodePep); // Avoid removing standards as part of refinement if (nodePep.GlobalStandardType != null) { listPeptides.Add(nodePep); continue; } if (outlierIds.Contains(nodePep.Id.GlobalIndex)) continue; // If there is a set of accepted peptides, and this is not one of them // then skip it. List<Adduct> acceptedCharges = null; if (acceptedPeptides != null && !acceptedPeptides.TryGetValue(AcceptModified ? nodePep.ModifiedTarget : nodePep.Target, out acceptedCharges)) { continue; } if (RemoveMissingLibrary && !nodePep.HasLibInfo) { continue; } int bestResultIndex = (UseBestResult ? nodePep.BestResult : -1); float? 
peakFoundRatio = nodePep.GetPeakCountRatio(bestResultIndex); if (!peakFoundRatio.HasValue) { if (RemoveMissingResults) continue; } else { if (MinPeakFoundRatio.HasValue) { if (peakFoundRatio < MinPeakFoundRatio.Value) continue; } if (MaxPeakFoundRatio.HasValue) { if (peakFoundRatio > MaxPeakFoundRatio.Value) continue; } } PeptideDocNode nodePepRefined = nodePep; if (AutoPickPrecursorsAll && nodePep.AutoManageChildren == AutoPickChildrenOff) { nodePepRefined = (PeptideDocNode) nodePepRefined.ChangeAutoManageChildren(!AutoPickChildrenOff); var settings = document.Settings; if (!settings.TransitionSettings.Filter.AutoSelect && !AutoPickChildrenOff) settings = settings.ChangeTransitionFilter(filter => filter.ChangeAutoSelect(!AutoPickChildrenOff)); nodePepRefined = nodePepRefined.ChangeSettings(settings, new SrmSettingsDiff(false, false, true, false, AutoPickTransitionsAll, false)); } nodePepRefined = Refine(nodePepRefined, document, bestResultIndex, acceptedCharges); // Always remove peptides if all precursors have been removed by refinement if (!ReferenceEquals(nodePep, nodePepRefined) && nodePepRefined.Children.Count == 0) continue; if (nodePepRefined.Children.Count < minPrecursors) continue; if (includedPeptides != null) { var identity = nodePepRefined.Peptide.IsCustomMolecule ? new RefinementIdentity(nodePep.Peptide.CustomMolecule) : new RefinementIdentity(document.Settings.GetModifiedSequence(nodePepRefined)); // Skip peptides already added if (includedPeptides.Contains(identity)) { // Record repeated peptides for removing duplicate peptides later if (repeatedPeptides != null) repeatedPeptides.Add(identity); continue; } // Record all peptides seen includedPeptides.Add(identity); } listPeptides.Add(nodePepRefined); } if (MaxPepPeakRank.HasValue) { // Calculate the average peak area for each peptide int countPeps = listPeptides.Count; var listAreaIndexes = new List<PepAreaSortInfo>(); var internalStandardTypes = document.Settings.PeptideSettings.Modifications.InternalStandardTypes; var unrankedPeptides = new List<PeptideDocNode>(); for (int i = 0; i < countPeps; i++) { var nodePep = listPeptides[i]; // Only peptides with children can possible be ranked by area // Those without should be removed by this operation if (nodePep.Children.Count == 0) continue; if (nodePep.GlobalStandardType != null || nodePep.TransitionGroups.All(tranGroup=>internalStandardTypes.Contains(tranGroup.LabelType))) { // Peptides which are internal standards get added back no matter what unrankedPeptides.Add(nodePep); continue; } int bestResultIndex = (UseBestResult ? nodePep.BestResult : -1); var sortInfo = new PepAreaSortInfo(nodePep, internalStandardTypes, bestResultIndex, listAreaIndexes.Count); listAreaIndexes.Add(sortInfo); } listAreaIndexes.Sort((p1, p2) => Comparer.Default.Compare(p2.Area, p1.Area)); // Store area ranks var arrayAreaIndexes = new PepAreaSortInfo[listAreaIndexes.Count]; int iRank = 1; foreach (var areaIndex in listAreaIndexes) { areaIndex.Rank = iRank++; arrayAreaIndexes[areaIndex.Index] = areaIndex; } // Add back all peptides with low enough rank. listPeptides.Clear(); listPeptides.AddRange(unrankedPeptides); foreach (var areaIndex in arrayAreaIndexes) { if (areaIndex.Area == 0 || areaIndex.Rank > MaxPepPeakRank.Value) continue; listPeptides.Add(areaIndex.Peptide); } } // Change the children, but only change auto-management, if the child // identities have changed, not if their contents changed. 
var childrenNew = listPeptides.ToArray(); bool updateAutoManage = !PeptideGroupDocNode.AreEquivalentChildren(nodePepGroup.Children, childrenNew); return (PeptideGroupDocNode)nodePepGroup.ChangeChildrenChecked(childrenNew, updateAutoManage); } private PeptideDocNode Refine(PeptideDocNode nodePep, SrmDocument document, int bestResultIndex, List<Adduct> acceptedCharges) { int minTrans = MinTransitionsPepPrecursor ?? 0; bool addedGroups = false; var listGroups = new List<TransitionGroupDocNode>(); foreach (TransitionGroupDocNode nodeGroup in nodePep.Children) { if (acceptedCharges != null && !acceptedCharges.Contains(nodeGroup.TransitionGroup.PrecursorAdduct)) continue; if (!AddLabelType && RefineLabelType != null && Equals(RefineLabelType, nodeGroup.TransitionGroup.LabelType)) continue; double? peakFoundRatio = nodeGroup.GetPeakCountRatio(bestResultIndex); if (!peakFoundRatio.HasValue) { if (RemoveMissingResults) continue; } else { if (MinPeakFoundRatio.HasValue) { if (peakFoundRatio < MinPeakFoundRatio.Value) continue; } if (MaxPeakFoundRatio.HasValue) { if (peakFoundRatio > MaxPeakFoundRatio.Value) continue; } } TransitionGroupDocNode nodeGroupRefined = nodeGroup; if (AutoPickTransitionsAll && nodeGroup.AutoManageChildren == AutoPickChildrenOff) { nodeGroupRefined = (TransitionGroupDocNode) nodeGroupRefined.ChangeAutoManageChildren(!AutoPickChildrenOff); var settings = document.Settings; if (!settings.TransitionSettings.Filter.AutoSelect && !AutoPickChildrenOff) settings = settings.ChangeTransitionFilter(filter => filter.ChangeAutoSelect(!AutoPickChildrenOff)); nodeGroupRefined = nodeGroupRefined.ChangeSettings(settings, nodePep, nodePep.ExplicitMods, new SrmSettingsDiff(false, false, false, false, true, false)); } nodeGroupRefined = Refine(nodeGroupRefined, bestResultIndex, document.Settings.TransitionSettings.Integration.IsIntegrateAll); // Avoid removing a standard precursor because it lacks the minimum number of transitions if (nodeGroupRefined.Children.Count < minTrans && nodePep.GlobalStandardType == null) continue; if (peakFoundRatio.HasValue) { if (DotProductThreshold.HasValue) { float? dotProduct = nodeGroupRefined.GetLibraryDotProduct(bestResultIndex); if (dotProduct.HasValue && dotProduct.Value < DotProductThreshold.Value) continue; } if (IdotProductThreshold.HasValue) { float? idotProduct = nodeGroupRefined.GetIsotopeDotProduct(bestResultIndex); if (idotProduct.HasValue && idotProduct.Value < IdotProductThreshold.Value) continue; } } // If this precursor node is going to be added, check to see if it // should be added with another matching isotope label type. 
var explicitMods = nodePep.ExplicitMods; if (IsLabelTypeRequired(nodePep, nodeGroup, listGroups) && document.Settings.TryGetPrecursorCalc(RefineLabelType, explicitMods) != null) { // CONSIDER: This is a lot like some code in PeptideDocNode.ChangeSettings Debug.Assert(RefineLabelType != null); // Keep ReSharper from warning var tranGroup = new TransitionGroup(nodePep.Peptide, nodeGroup.TransitionGroup.PrecursorAdduct, RefineLabelType, false, nodeGroup.TransitionGroup.DecoyMassShift); var settings = document.Settings; // string sequence = nodePep.Peptide.Sequence; TransitionDocNode[] transitions = nodePep.GetMatchingTransitions( tranGroup, settings, explicitMods); var nodeGroupMatch = new TransitionGroupDocNode(tranGroup, Annotations.EMPTY, settings, explicitMods, nodeGroup.LibInfo, nodeGroup.ExplicitValues, null, // results transitions, transitions == null); nodeGroupMatch = nodeGroupMatch.ChangeSettings(settings, nodePep, explicitMods, SrmSettingsDiff.ALL); // Make sure it is measurable before adding it if (settings.TransitionSettings.IsMeasurablePrecursor(nodeGroupMatch.PrecursorMz)) { listGroups.Add(nodeGroupMatch); addedGroups = true; } } listGroups.Add(nodeGroupRefined); } // If groups were added, make sure everything is in the right order. if (addedGroups) listGroups.Sort(Peptide.CompareGroups); if (MaxPrecursorPeakOnly && listGroups.Count > 1 && listGroups.Select(g => g.PrecursorAdduct.Unlabeled).Distinct().Count() > 1) { var chargeGroups = (from g in listGroups group g by g.TransitionGroup.PrecursorAdduct.Unlabeled into ga select new {Adduct = ga.Key, Area = ga.Sum(gg => gg.AveragePeakArea)}).ToArray(); if (chargeGroups.Any(n => n.Area > 0)) { // Assume that the probability of two measured areas being exactly equal is low // enough that taking just one is not an issue. var bestCharge = chargeGroups.Aggregate((n1, n2) => n1.Area > n2.Area ? n1 : n2); listGroups = listGroups.Where(g => Equals(g.PrecursorAdduct.Unlabeled, bestCharge.Adduct)).ToList(); } } // Change the children, but only change auto-management, if the child // identities have changed, not if their contents changed. 
var childrenNew = listGroups.ToArray(); bool updateAutoManage = !PeptideDocNode.AreEquivalentChildren(nodePep.Children, childrenNew); return (PeptideDocNode) nodePep.ChangeChildrenChecked(childrenNew, updateAutoManage); } // ReSharper disable SuggestBaseTypeForParameter private bool IsLabelTypeRequired(PeptideDocNode nodePep, TransitionGroupDocNode nodeGroup, IEnumerable<TransitionGroupDocNode> listGroups) // ReSharper restore SuggestBaseTypeForParameter { // If not adding a label type, or this precursor is already the label type being added, // then no further work is required if (!AddLabelType || RefineLabelType == null || Equals(RefineLabelType, nodeGroup.TransitionGroup.LabelType)) return false; // If either the peptide or the list of new groups already contains the // label type to be added, then do not add foreach (TransitionGroupDocNode nodeGroupChild in nodePep.Children) { if (nodeGroupChild.TransitionGroup.PrecursorAdduct.Equals(nodeGroup.TransitionGroup.PrecursorAdduct) && Equals(RefineLabelType, nodeGroupChild.TransitionGroup.LabelType)) return false; } foreach (TransitionGroupDocNode nodeGroupAdded in listGroups) { if (nodeGroupAdded.TransitionGroup.PrecursorAdduct.Equals(nodeGroup.TransitionGroup.PrecursorAdduct) && Equals(RefineLabelType, nodeGroupAdded.TransitionGroup.LabelType)) return false; } return true; } // ReSharper disable SuggestBaseTypeForParameter private TransitionGroupDocNode Refine(TransitionGroupDocNode nodeGroup, int bestResultIndex, bool integrateAll) // ReSharper restore SuggestBaseTypeForParameter { var listTrans = new List<TransitionDocNode>(); foreach (TransitionDocNode nodeTran in nodeGroup.Children) { double? peakFoundRatio = nodeTran.GetPeakCountRatio(bestResultIndex, integrateAll); if (!peakFoundRatio.HasValue) { if (RemoveMissingResults) continue; } else { if (MinPeakFoundRatio.HasValue) { if (peakFoundRatio < MinPeakFoundRatio.Value) continue; } if (MaxPeakFoundRatio.HasValue) { if (peakFoundRatio > MaxPeakFoundRatio.Value) continue; } } listTrans.Add(nodeTran); } TransitionGroupDocNode nodeGroupRefined = (TransitionGroupDocNode) nodeGroup.ChangeChildrenChecked(listTrans.ToArray(), true); if (MaxPeakRank.HasValue) { // Calculate the average peak area for each transition int countTrans = nodeGroupRefined.Children.Count; var listAreaIndexes = new List<AreaSortInfo>(); for (int i = 0; i < countTrans; i++) { var nodeTran = (TransitionDocNode) nodeGroupRefined.Children[i]; var sortInfo = new AreaSortInfo(nodeTran.GetPeakArea(bestResultIndex) ?? 0, nodeTran.Transition.Ordinal, nodeTran.Mz > nodeGroup.PrecursorMz, i); listAreaIndexes.Add(sortInfo); } // Sort to area order descending if (PreferLargeIons) { // If prefering large ions, then larger ions get a slight area // advantage over smaller ones listAreaIndexes.Sort((p1, p2) => { float areaAdjusted1 = p1.Area; // If either transition is below the precursor m/z value, // apply the fragment size correction. if (!p1.AbovePrecusorMz || !p2.AbovePrecusorMz) { int deltaOrdinal = Math.Max(-5, Math.Min(5, p1.Ordinal - p2.Ordinal)); if (deltaOrdinal != 0) deltaOrdinal += (deltaOrdinal > 0 ? 
1 : -1); areaAdjusted1 += areaAdjusted1 * 0.05f * deltaOrdinal; } return Comparer.Default.Compare(p2.Area, areaAdjusted1); }); } else { listAreaIndexes.Sort((p1, p2) => Comparer.Default.Compare(p2.Area, p1.Area)); } // Store area ranks by transition index var ranks = new int[countTrans]; for (int i = 0, iRank = 1; i < countTrans; i++) { var areaIndex = listAreaIndexes[i]; // Never keep a transition with no peak area ranks[areaIndex.Index] = (areaIndex.Area > 0 ? iRank++ : int.MaxValue); } // Add back all transitions with low enough rank. listTrans.Clear(); for (int i = 0; i < countTrans; i++) { if (ranks[i] > MaxPeakRank.Value) continue; listTrans.Add((TransitionDocNode) nodeGroupRefined.Children[i]); } nodeGroupRefined = (TransitionGroupDocNode) nodeGroupRefined.ChangeChildrenChecked(listTrans.ToArray(), true); } return nodeGroupRefined; } public enum ConvertToSmallMoleculesMode { none, // No conversion - call to ConvertToSmallMolecules is a no-op formulas, // Convert peptides to custom ions with ion formulas masses_and_names, // Convert peptides to custom ions but retain just the masses, and names for use in ratio calcs masses_only // Convert peptides to custom ions but retain just the masses, no formulas or names so ratio calcs have to work on sorted mz }; public enum ConvertToSmallMoleculesChargesMode { none, // Leave charges alone invert, // Invert charges invert_some // Invert every other transition group } /// <summary> /// Adjust library info for small molecules /// </summary> public static Results<TransitionGroupChromInfo>ConvertTransitionGroupChromInfoLibraryInfoToSmallMolecules(TransitionGroupDocNode transitionGroupDocNode, ConvertToSmallMoleculesMode mode, ConvertToSmallMoleculesChargesMode invertChargesMode) { if (transitionGroupDocNode.Results == null) return null; if (invertChargesMode == ConvertToSmallMoleculesChargesMode.none && mode != ConvertToSmallMoleculesMode.masses_only) { return transitionGroupDocNode.Results; } // No libraries for small molecules without IDs, or when inverting polarity in conversion (too much bother adjusting mz in libs), so lose the dotp var listResultsNew = new List<ChromInfoList<TransitionGroupChromInfo>>(); foreach (var info in transitionGroupDocNode.Results) { var infoNew = new List<TransitionGroupChromInfo>(); foreach (var result in info) { infoNew.Add(result.ChangeLibraryDotProduct(null)); } listResultsNew.Add(new ChromInfoList<TransitionGroupChromInfo>(infoNew)); } var resultsNew = new Results<TransitionGroupChromInfo>(listResultsNew); return resultsNew; } public static CustomMolecule ConvertToSmallMolecule(ConvertToSmallMoleculesMode mode, SrmDocument document, PeptideDocNode nodePep, Dictionary<LibKey, LibKey> smallMoleculeConversionPrecursorMap = null) { Adduct adduct; return ConvertToSmallMolecule(mode, document, nodePep, out adduct, 0, null, smallMoleculeConversionPrecursorMap); } public static CustomMolecule ConvertToSmallMolecule(ConvertToSmallMoleculesMode mode, SrmDocument document, PeptideDocNode nodePep, out Adduct adduct, int precursorCharge, IsotopeLabelType isotopeLabelType, Dictionary<LibKey, LibKey> smallMoleculeConversionPrecursorMap = null) { // We're just using this masscalc to get the ion formula, so mono vs average doesn't matter isotopeLabelType = isotopeLabelType ?? 
IsotopeLabelType.light; var peptideTarget = nodePep.Peptide.Target; var masscalc = document.Settings.TryGetPrecursorCalc(isotopeLabelType, nodePep.ExplicitMods); if (masscalc == null) { // No support in mods for this label type masscalc = new SequenceMassCalc(MassType.Monoisotopic); } // Determine the molecular formula of the charged/labeled peptide var moleculeFormula = masscalc.GetMolecularFormula(peptideTarget.Sequence); // Get molecular formula, possibly with isotopes in it (as with iTraq) adduct = Adduct.NonProteomicProtonatedFromCharge(precursorCharge, BioMassCalc.MONOISOTOPIC.FindIsotopeLabelsInFormula(moleculeFormula)); var customMolecule = new CustomMolecule(moleculeFormula, TestingConvertedFromProteomicPeptideNameDecorator + masscalc.GetModifiedSequence(peptideTarget, false)); // Make sure name isn't a valid peptide seq if (mode == ConvertToSmallMoleculesMode.masses_only) { // No formulas or names, just masses - see how we handle that customMolecule = new CustomMolecule(customMolecule.MonoisotopicMass, customMolecule.AverageMass); } else if (mode == ConvertToSmallMoleculesMode.masses_and_names) { // Just masses and names - see how we handle that customMolecule = new CustomMolecule(customMolecule.MonoisotopicMass, customMolecule.AverageMass, customMolecule.Name); } // Collect information for converting libraries var chargeAndModifiedSequence = new LibKey(masscalc.GetModifiedSequence(peptideTarget, SequenceModFormatType.lib_precision, false), precursorCharge); if (smallMoleculeConversionPrecursorMap != null && !smallMoleculeConversionPrecursorMap.ContainsKey(chargeAndModifiedSequence)) { smallMoleculeConversionPrecursorMap.Add(chargeAndModifiedSequence, new LibKey(customMolecule.GetSmallMoleculeLibraryAttributes(), adduct)); } return customMolecule; } public const string TestingConvertedFromProteomic = "zzzTestingConvertedFromProteomic"; public static string TestingConvertedFromProteomicPeptideNameDecorator = @"pep_"; // Testing aid: use this to make sure name of a converted peptide isn't a valid peptide seq public SrmDocument ConvertToSmallMolecules(SrmDocument document, string pathForLibraryFiles, // In case we translate libraries etc ConvertToSmallMoleculesMode mode = ConvertToSmallMoleculesMode.formulas, ConvertToSmallMoleculesChargesMode invertChargesMode = ConvertToSmallMoleculesChargesMode.none, bool ignoreDecoys=false) { if (mode == ConvertToSmallMoleculesMode.none) return document; var newdoc = new SrmDocument(document.Settings); var note = new Annotations(TestingConvertedFromProteomic, null, 1); // Mark this as a testing node so we don't sort it var precursorMap = new Dictionary<LibKey, LibKey>(); // Map int,modSeq to adduct,molecule var invertCharges = invertChargesMode == ConvertToSmallMoleculesChargesMode.invert; var canConvertLibraries = invertChargesMode == ConvertToSmallMoleculesChargesMode.none && // Too much trouble adjusting mz in libs mode != ConvertToSmallMoleculesMode.masses_only && // Need a proper ID for libraries document.Settings.PeptideSettings.Libraries.IsLoaded; // If original doc never loaded libraries, don't worry about converting // Make small molecule filter settings look like peptide filter settings var ionTypes = new List<IonType>(); foreach (var ionType in document.Settings.TransitionSettings.Filter.PeptideIonTypes) { if (ionType == IonType.precursor) ionTypes.Add(ionType); else if (!ionTypes.Contains(IonType.custom)) ionTypes.Add(IonType.custom); } // Precursor charges var precursorAdducts = new List<Adduct>(); foreach (var charge in 
document.Settings.TransitionSettings.Filter.PeptidePrecursorCharges) { switch (invertChargesMode) { case ConvertToSmallMoleculesChargesMode.invert: precursorAdducts.Add(Adduct.FromCharge(-charge.AdductCharge, Adduct.ADDUCT_TYPE.non_proteomic)); break; case ConvertToSmallMoleculesChargesMode.invert_some: precursorAdducts.Add(Adduct.FromCharge(charge.AdductCharge, Adduct.ADDUCT_TYPE.non_proteomic)); precursorAdducts.Add(Adduct.FromCharge(-charge.AdductCharge, Adduct.ADDUCT_TYPE.non_proteomic)); break; default: precursorAdducts.Add(Adduct.FromCharge(charge.AdductCharge, Adduct.ADDUCT_TYPE.non_proteomic)); break; } } // Fragment charges var fragmentAdducts = new List<Adduct>(); foreach (var charge in document.Settings.TransitionSettings.Filter.PeptideProductCharges) { switch (invertChargesMode) { case ConvertToSmallMoleculesChargesMode.invert: fragmentAdducts.Add(Adduct.FromChargeNoMass(-charge.AdductCharge)); break; case ConvertToSmallMoleculesChargesMode.invert_some: fragmentAdducts.Add(Adduct.FromChargeNoMass(charge.AdductCharge)); fragmentAdducts.Add(Adduct.FromChargeNoMass(-charge.AdductCharge)); break; default: fragmentAdducts.Add(Adduct.FromChargeNoMass(charge.AdductCharge)); break; } } newdoc = newdoc.ChangeSettings(newdoc.Settings.ChangeTransitionSettings(newdoc.Settings.TransitionSettings.ChangeFilter( newdoc.Settings.TransitionSettings.Filter.ChangeSmallMoleculeIonTypes(ionTypes). ChangeSmallMoleculePrecursorAdducts(precursorAdducts).ChangeSmallMoleculeFragmentAdducts(fragmentAdducts)))); foreach (var peptideGroupDocNode in document.MoleculeGroups) { if (!peptideGroupDocNode.IsProteomic) { newdoc = (SrmDocument)newdoc.Add(peptideGroupDocNode); // Already a small molecule } else { var newPeptideGroup = new PeptideGroup(); var newPeptideGroupDocNode = new PeptideGroupDocNode(newPeptideGroup, peptideGroupDocNode.Annotations.Merge(note), peptideGroupDocNode.Name, peptideGroupDocNode.Description, new PeptideDocNode[0], peptideGroupDocNode.AutoManageChildren); foreach (var mol in peptideGroupDocNode.Molecules) { if (invertChargesMode == ConvertToSmallMoleculesChargesMode.invert_some) { invertCharges = !invertCharges; } var peptideAsMolecule = ConvertToSmallMolecule(mode, document, mol); var newPeptide = new Peptide(peptideAsMolecule); var newPeptideDocNode = new PeptideDocNode(newPeptide, newdoc.Settings, mol.ExplicitMods != null && mol.ExplicitMods.HasIsotopeLabels ? mol.ExplicitMods : null, // Custom molecules use modifications - but just the static isotope labels null, mol.GlobalStandardType, mol.Rank, mol.ExplicitRetentionTime, note, mol.Results, new TransitionGroupDocNode[0], mol.AutoManageChildren); foreach (var transitionGroupDocNode in mol.TransitionGroups) { if (transitionGroupDocNode.IsDecoy) { if (ignoreDecoys) continue; throw new Exception(@"There is no translation from decoy to small molecules"); } var precursorCharge = transitionGroupDocNode.TransitionGroup.PrecursorAdduct.AdductCharge * (invertCharges ? 
-1 : 1); var isotopeLabelType = transitionGroupDocNode.TransitionGroup.LabelType; Adduct adduct; ConvertToSmallMolecule(mode, document, mol, out adduct, precursorCharge, isotopeLabelType, precursorMap); var newTransitionGroup = new TransitionGroup(newPeptide, adduct, isotopeLabelType); // Deal with library info - remove now if we can't use it due to charge swap or loss of molecule ID, otherwise clean it up later SpectrumHeaderInfo libInfo; if (canConvertLibraries && transitionGroupDocNode.HasLibInfo) { libInfo = transitionGroupDocNode.LibInfo.LibraryName.Contains(BiblioSpecLiteSpec.DotConvertedToSmallMolecules) ? transitionGroupDocNode.LibInfo : transitionGroupDocNode.LibInfo.ChangeLibraryName(transitionGroupDocNode.LibInfo.LibraryName + BiblioSpecLiteSpec.DotConvertedToSmallMolecules); } else { libInfo = null; } var resultsNew = ConvertTransitionGroupChromInfoLibraryInfoToSmallMolecules(transitionGroupDocNode, mode, invertChargesMode); var newTransitionGroupDocNode = new TransitionGroupDocNode(newTransitionGroup, transitionGroupDocNode.Annotations.Merge(note), document.Settings, null, libInfo, transitionGroupDocNode.ExplicitValues, resultsNew, null, transitionGroupDocNode.AutoManageChildren); var mzShiftPrecursor = invertCharges ? 2.0 * BioMassCalc.MassProton : 0; // We removed hydrogen rather than added var mzShiftFragment = invertCharges ? -2.0 * BioMassCalc.MassElectron : 0; // We will move proton masses to the fragment and use charge-only adducts Assume.IsTrue(Math.Abs(newTransitionGroupDocNode.PrecursorMz.Value + mzShiftPrecursor - transitionGroupDocNode.PrecursorMz.Value) <= 1E-5); foreach (var transition in transitionGroupDocNode.Transitions) { var mass = TypedMass.ZERO_MONO_MASSH; var ionType = IonType.custom; CustomMolecule transitionCustomMolecule; if (transition.Transition.IonType == IonType.precursor) { ionType = IonType.precursor; transitionCustomMolecule = null; // Precursor transition uses the parent molecule if (transition.Transition.MassIndex > 0) { mass = newTransitionGroupDocNode.IsotopeDist.GetMassI(transition.Transition.MassIndex); } else { mass = newTransitionGroupDocNode.GetPrecursorIonMass(); } } else if (transition.Transition.IonType == IonType.custom) { transitionCustomMolecule = transition.Transition.CustomIon; mass = transitionCustomMolecule.MonoisotopicMass; } else { // CONSIDER - try to get fragment formula? var mzMassType = transition.MzMassType.IsMonoisotopic() ? MassType.Monoisotopic : MassType.Average; // Account for adduct mass here, since we're going to replace it with a charge-only adduct to mimic normal small mol use var chargeOnly = Adduct.FromChargeNoMass(transition.Transition.Charge); mass = chargeOnly.MassFromMz(transition.Mz, mzMassType); // We can't really get at both mono and average mass from m/z, but for test purposes this is fine var massMono = new TypedMass(mass.Value, MassType.Monoisotopic); var massAverage = new TypedMass(mass.Value, MassType.Average); var name = transition.HasLoss ? 
string.Format(@"{0}[-{1}]", transition.Transition.FragmentIonName, (int)transition.LostMass) : transition.Transition.FragmentIonName; transitionCustomMolecule = new CustomMolecule(massMono, massAverage, name); } if (ionType != IonType.precursor) { if (mode == ConvertToSmallMoleculesMode.masses_and_names) { // Discard the formula if we're testing the use of mass-with-names (for matching in ratio calcs) target specification transitionCustomMolecule = new CustomMolecule(transitionCustomMolecule.MonoisotopicMass, transitionCustomMolecule.AverageMass, transition.Transition.FragmentIonName); } else if (mode == ConvertToSmallMoleculesMode.masses_only) { // Discard the formula and name if we're testing the use of mass-only target specification transitionCustomMolecule = new CustomMolecule(transitionCustomMolecule.MonoisotopicMass, transitionCustomMolecule.AverageMass); } } // Normally in small molecule world fragment transition adducts are charge only var transitionAdduct = (transition.Transition.IonType == IonType.precursor) ? adduct.ChangeCharge(transition.Transition.Charge*(invertCharges ? -1 : 1)) : Adduct.FromChargeNoMass(transition.Transition.Charge*(invertCharges ? -1 : 1)); // We don't label fragments // Deal with library info - remove now if we can't use it due to charge swap or loss of molecule ID, otherwise clean it up later var transitionLibInfo = transition.HasLibInfo && canConvertLibraries ? transition.LibInfo : null; var newTransition = new Transition(newTransitionGroup, ionType, null, transition.Transition.MassIndex, transitionAdduct, null, transitionCustomMolecule); var newTransitionDocNode = new TransitionDocNode(newTransition, transition.Annotations.Merge(note), null, mass, transition.QuantInfo.ChangeLibInfo(transitionLibInfo), ExplicitTransitionValues.EMPTY, transition.Results); var mzShift = transition.Transition.IonType == IonType.precursor ? 
mzShiftPrecursor : mzShiftFragment; Assume.IsTrue(Math.Abs(newTransitionDocNode.Mz + mzShift - transition.Mz.Value) <= .5 * BioMassCalc.MassElectron, String.Format(@"unexpected mz difference {0}-{1}={2}", newTransitionDocNode.Mz, transition.Mz, newTransitionDocNode.Mz - transition.Mz.Value)); newTransitionGroupDocNode = (TransitionGroupDocNode)newTransitionGroupDocNode.Add(newTransitionDocNode); } if (newPeptideDocNode != null) newPeptideDocNode = (PeptideDocNode)newPeptideDocNode.Add(newTransitionGroupDocNode); } newPeptideGroupDocNode = (PeptideGroupDocNode)newPeptideGroupDocNode.Add(newPeptideDocNode); } newdoc = (SrmDocument)newdoc.Add(newPeptideGroupDocNode); } } if (newdoc.Settings.PeptideSettings.Prediction.IonMobilityPredictor != null && newdoc.Settings.PeptideSettings.Prediction.IonMobilityPredictor.MeasuredMobilityIons != null && newdoc.Settings.PeptideSettings.Prediction.IonMobilityPredictor.MeasuredMobilityIons.Any()) { var mapped = new Dictionary<LibKey, IonMobilityAndCCS>(); foreach (var item in newdoc.Settings.PeptideSettings.Prediction.IonMobilityPredictor.MeasuredMobilityIons) { LibKey smallMolKey; if (precursorMap.TryGetValue(item.Key, out smallMolKey)) { mapped.Add(smallMolKey, item.Value); } } var newpredictorDt = newdoc.Settings.PeptideSettings.Prediction.IonMobilityPredictor.ChangeMeasuredIonMobilityValues(mapped); var newpredictor = newdoc.Settings.PeptideSettings.Prediction.ChangeDriftTimePredictor(newpredictorDt); var newSettings = newdoc.Settings.ChangePeptideSettings(newdoc.Settings.PeptideSettings.ChangePrediction(newpredictor)); newdoc = newdoc.ChangeSettings(newSettings); } if (canConvertLibraries) { // Output a new set of libraries with known charge,modifiedSeq transformed to adduct,molecule var dictOldNamesToNew = new Dictionary<string, string>(); var oldGroupLibInfos = new List<SpectrumHeaderInfo>(); var oldTransitionLibInfos = new List<TransitionLibInfo>(); if (document.Settings.PeptideSettings.Libraries.HasLibraries) { oldGroupLibInfos.AddRange(document.MoleculeTransitionGroups.Select(group => group.LibInfo)); oldTransitionLibInfos.AddRange(document.MoleculeTransitions.Select(t => t.LibInfo)); var newSettings = BlibDb.MinimizeLibrariesAndConvertToSmallMolecules(document, pathForLibraryFiles, Resources.RefinementSettings_ConvertToSmallMolecules_Converted_To_Small_Molecules, precursorMap, dictOldNamesToNew, null).Settings; CloseLibraryStreams(document); newdoc = newdoc.ChangeSettings(newdoc.Settings. 
ChangePeptideLibraries(l => newSettings.PeptideSettings.Libraries)); } if (dictOldNamesToNew.Any()) { // Restore library info for use with revised libraries var oldGroupLibInfoIndex = 0; var oldTransitionLibInfoIndex = 0; newdoc = (SrmDocument)newdoc.ChangeAll(node => { var nodeGroup = node as TransitionGroupDocNode; if (nodeGroup != null) { var groupLibInfo = oldGroupLibInfos[oldGroupLibInfoIndex++]; if (groupLibInfo == null) return node; var libName = groupLibInfo.LibraryName; var libNameNew = dictOldNamesToNew[libName]; if (Equals(libName, libNameNew)) return node; groupLibInfo = groupLibInfo.ChangeLibraryName(libNameNew); return nodeGroup.ChangeLibInfo(groupLibInfo); } var nodeTran = node as TransitionDocNode; if (nodeTran == null) return node; var libInfo = oldTransitionLibInfos[oldTransitionLibInfoIndex++]; if (libInfo == null) return node; return nodeTran.ChangeLibInfo(libInfo); }, (int)SrmDocument.Level.Transitions); } if (document.Settings.HasIonMobilityLibraryPersisted) { var newDbPath = document.Settings.PeptideSettings.Prediction.IonMobilityPredictor .IonMobilityLibrary.PersistMinimized(pathForLibraryFiles, document, precursorMap); var spec = new IonMobilityLibrary(document.Settings.PeptideSettings.Prediction.IonMobilityPredictor.IonMobilityLibrary.Name + @" " + Resources.RefinementSettings_ConvertToSmallMolecules_Converted_To_Small_Molecules, newDbPath); var driftTimePredictor = document.Settings.PeptideSettings.Prediction.IonMobilityPredictor.ChangeLibrary(spec); newdoc = newdoc.ChangeSettings(newdoc.Settings.ChangePeptideSettings(newdoc.Settings.PeptideSettings.ChangePrediction( newdoc.Settings.PeptideSettings.Prediction.ChangeDriftTimePredictor(driftTimePredictor)))); } } // No retention time prediction for small molecules (yet?) newdoc = newdoc.ChangeSettings(newdoc.Settings.ChangePeptideSettings(newdoc.Settings.PeptideSettings.ChangePrediction( newdoc.Settings.PeptideSettings.Prediction.ChangeRetentionTime(null)))); CloseLibraryStreams(newdoc); return newdoc; } /// <summary> /// Closes all library streams on a document. /// Use this when "ChangeSettings" was called on a document that hads libraries, /// and that document is not owned by a DocumentContainer. /// </summary> private void CloseLibraryStreams(SrmDocument doc) { foreach (var library in doc.Settings.PeptideSettings.Libraries.Libraries) { foreach (var stream in library.ReadStreams) { stream.CloseStream(); } } } public SrmDocument ConvertToExplicitRetentionTimes(SrmDocument document, double timeOffset, double winOffset) { for (bool changing = true; changing;) { changing = false; foreach (var peptideGroupDocNode in document.MoleculeGroups) { var pepGroupPath = new IdentityPath(IdentityPath.ROOT, peptideGroupDocNode.Id); foreach (var nodePep in peptideGroupDocNode.Molecules) { var pepPath = new IdentityPath(pepGroupPath, nodePep.Id); var rt = nodePep.AverageMeasuredRetentionTime; if (rt.HasValue) { double? 
rtWin = document.Settings.PeptideSettings.Prediction.MeasuredRTWindow; var explicitRetentionTimeInfo = new ExplicitRetentionTimeInfo(rt.Value+timeOffset, rtWin+winOffset); if (!explicitRetentionTimeInfo.Equals(nodePep.ExplicitRetentionTime)) { document = (SrmDocument)document.ReplaceChild(pepPath.Parent, nodePep.ChangeExplicitRetentionTime(explicitRetentionTimeInfo)); changing = true; break; } } } if (changing) break; } } return document; } public SrmDocument RemoveDecoys(SrmDocument document) { // Remove the existing decoys return (SrmDocument) document.RemoveAll(document.MoleculeGroups.Where(nodePeptideGroup => nodePeptideGroup.IsDecoy) .Select(nodePeptideGroup => nodePeptideGroup.Id.GlobalIndex).ToArray()); } public SrmDocument GenerateDecoys(SrmDocument document) { return GenerateDecoys(document, NumberOfDecoys, DecoysMethod); } public SrmDocument GenerateDecoys(SrmDocument document, int numDecoys, string decoysMethod) { // Remove the existing decoys document = RemoveDecoys(document); if (decoysMethod == DecoyGeneration.SHUFFLE_SEQUENCE) { var random = new Random(RANDOM_SEED); return GenerateDecoysFunc(document, numDecoys, true, m => GetShuffledPeptideSequence(m, random)); } if (decoysMethod == DecoyGeneration.REVERSE_SEQUENCE) return GenerateDecoysFunc(document, numDecoys, false, GetReversedPeptideSequence); return GenerateDecoysFunc(document, numDecoys, false, null); } private struct SequenceMods { public SequenceMods(PeptideDocNode nodePep) : this() { Peptide = nodePep.Peptide; Sequence = Peptide.Target.Sequence; Mods = nodePep.ExplicitMods; } public Peptide Peptide { get; private set; } public string Sequence { get; set; } public ExplicitMods Mods { get; set; } } public static int SuggestDecoyCount(SrmDocument document) { int count = 0; foreach (var nodePep in document.Peptides) { // Exclude any existing decoys and standard peptides if (nodePep.IsDecoy || nodePep.GlobalStandardType != null) continue; count += PeakFeatureEnumerator.ComparableGroups(nodePep).Count(); } return count; } private static SrmDocument GenerateDecoysFunc(SrmDocument document, int numDecoys, bool multiCycle, Func<SequenceMods, SequenceMods> genDecoySequence) { // Loop through the existing tree in random order creating decoys var settings = document.Settings; var enzyme = settings.PeptideSettings.Enzyme; var decoyNodePepList = new List<PeptideDocNode>(); var setDecoyKeys = new HashSet<PeptideModKey>(); var randomShift = new Random(RANDOM_SEED); while (numDecoys > 0) { int startDecoys = numDecoys; foreach (var nodePep in document.Peptides.ToArray().RandomOrder(RANDOM_SEED)) { if (numDecoys == 0) break; // Decoys should not be based on standard peptides if (nodePep.GlobalStandardType != null) continue; // If the non-terminal end of the peptide sequence is all a single character, skip this peptide, // since it can't support decoy generation. 
var sequence = nodePep.Peptide.Sequence; if (genDecoySequence != null && sequence.Substring(0, sequence.Length - 1).Distinct().Count() == 1) continue; var seqMods = new SequenceMods(nodePep); if (genDecoySequence != null) { seqMods = genDecoySequence(seqMods); } var peptide = nodePep.Peptide; var decoyPeptide = new Peptide(null, seqMods.Sequence, null, null, enzyme.CountCleavagePoints(seqMods.Sequence), true); if (seqMods.Mods != null) seqMods.Mods = seqMods.Mods.ChangePeptide(decoyPeptide); foreach (var comparableGroups in PeakFeatureEnumerator.ComparableGroups(nodePep)) { var decoyNodeTranGroupList = GetDecoyGroups(nodePep, decoyPeptide, seqMods.Mods, comparableGroups, document, Equals(seqMods.Sequence, peptide.Sequence), randomShift); if (decoyNodeTranGroupList.Count == 0) continue; var nodePepNew = new PeptideDocNode(decoyPeptide, settings, seqMods.Mods, null, nodePep.ExplicitRetentionTime, decoyNodeTranGroupList.ToArray(), false); if (!Equals(nodePep.ModifiedSequence, nodePepNew.ModifiedSequence)) { var sourceKey = new ModifiedSequenceMods(nodePep.ModifiedSequence, nodePep.ExplicitMods); nodePepNew = nodePepNew.ChangeSourceKey(sourceKey); } // Avoid adding duplicate peptides if (setDecoyKeys.Contains(nodePepNew.Key)) continue; setDecoyKeys.Add(nodePepNew.Key); decoyNodePepList.Add(nodePepNew); numDecoys--; } } // Stop if not multi-cycle or the number of decoys has not changed. if (!multiCycle || startDecoys == numDecoys) break; } var decoyNodePepGroup = new PeptideGroupDocNode(new PeptideGroup(true), Annotations.EMPTY, PeptideGroup.DECOYS, null, decoyNodePepList.ToArray(), false); decoyNodePepGroup = decoyNodePepGroup.ChangeSettings(document.Settings, SrmSettingsDiff.ALL); return (SrmDocument)document.Add(decoyNodePepGroup); } private static List<TransitionGroupDocNode> GetDecoyGroups(PeptideDocNode nodePep, Peptide decoyPeptide, ExplicitMods mods, IEnumerable<TransitionGroupDocNode> comparableGroups, SrmDocument document, bool shiftMass, Random randomShift) { var decoyNodeTranGroupList = new List<TransitionGroupDocNode>(); var chargeToPrecursor = new Tuple<int, TransitionGroupDocNode>[2*(TransitionGroup.MAX_PRECURSOR_CHARGE+1)]; // Allow for negative charges foreach (TransitionGroupDocNode nodeGroup in comparableGroups) { var transGroup = nodeGroup.TransitionGroup; int precursorMassShift; TransitionGroupDocNode nodeGroupPrimary = null; var primaryPrecursor = chargeToPrecursor[TransitionGroup.MAX_PRECURSOR_CHARGE + nodeGroup.TransitionGroup.PrecursorAdduct.AdductCharge]; // Allow for negative charges if (primaryPrecursor != null) { precursorMassShift = primaryPrecursor.Item1; nodeGroupPrimary = primaryPrecursor.Item2; } else if (shiftMass) { precursorMassShift = GetPrecursorMassShift(randomShift); } else { precursorMassShift = TransitionGroup.ALTERED_SEQUENCE_DECOY_MZ_SHIFT; } var decoyGroup = new TransitionGroup(decoyPeptide, transGroup.PrecursorAdduct, transGroup.LabelType, false, precursorMassShift); var decoyNodeTranList = nodeGroupPrimary != null ? 
decoyGroup.GetMatchingTransitions(document.Settings, nodeGroupPrimary, mods) : GetDecoyTransitions(nodeGroup, decoyGroup, shiftMass, randomShift); var nodeGroupDecoy = new TransitionGroupDocNode(decoyGroup, Annotations.EMPTY, document.Settings, mods, nodeGroup.LibInfo, nodeGroup.ExplicitValues, nodeGroup.Results, decoyNodeTranList, false); decoyNodeTranGroupList.Add(nodeGroupDecoy); if (primaryPrecursor == null) { chargeToPrecursor[TransitionGroup.MAX_PRECURSOR_CHARGE + transGroup.PrecursorAdduct.AdductCharge] = // Allow for negative charges new Tuple<int, TransitionGroupDocNode>(precursorMassShift, nodeGroupDecoy); } } return decoyNodeTranGroupList; } private static TransitionDocNode[] GetDecoyTransitions(TransitionGroupDocNode nodeGroup, TransitionGroup decoyGroup, bool shiftMass, Random randomShift) { var decoyNodeTranList = new List<TransitionDocNode>(); foreach (var nodeTran in nodeGroup.Transitions) { var transition = nodeTran.Transition; int productMassShift = 0; if (shiftMass) productMassShift = GetProductMassShift(randomShift); else if (transition.IsPrecursor() && decoyGroup.DecoyMassShift.HasValue) productMassShift = decoyGroup.DecoyMassShift.Value; var decoyTransition = new Transition(decoyGroup, transition.IonType, transition.CleavageOffset, transition.MassIndex, transition.Adduct, productMassShift, transition.CustomIon); decoyNodeTranList.Add(new TransitionDocNode(decoyTransition, nodeTran.Losses, nodeTran.MzMassType.IsAverage() ? TypedMass.ZERO_AVERAGE_MASSH : TypedMass.ZERO_MONO_MASSH, nodeTran.QuantInfo, nodeTran.ExplicitValues)); } return decoyNodeTranList.ToArray(); } private const int RANDOM_SEED = 7*7*7*7*7; // 7^5 recommended by Brian S. private static int GetPrecursorMassShift(Random random) { // Do not allow zero for the mass shift of the precursor int massShift = random.Next(TransitionGroup.MIN_PRECURSOR_DECOY_MASS_SHIFT, TransitionGroup.MAX_PRECURSOR_DECOY_MASS_SHIFT); return massShift < 0 ? massShift : massShift + 1; } private static int GetProductMassShift(Random random) { int massShift = random.Next(Transition.MIN_PRODUCT_DECOY_MASS_SHIFT, Transition.MAX_PRODUCT_DECOY_MASS_SHIFT); // TODO: Validation code (at least 5 from the precursor) return massShift < 0 ? massShift : massShift + 1; } private static TypedExplicitModifications GetStaticTypedMods(Peptide peptide, IList<ExplicitMod> staticMods) { return staticMods != null ? 
new TypedExplicitModifications(peptide, IsotopeLabelType.light, staticMods) : null; } private static SequenceMods GetReversedPeptideSequence(SequenceMods seqMods) { string sequence = seqMods.Sequence; char finalA = sequence.Last(); sequence = sequence.Substring(0, sequence.Length - 1); int lenSeq = sequence.Length; char[] reversedArray = sequence.ToCharArray(); Array.Reverse(reversedArray); seqMods.Sequence = new string(reversedArray) + finalA; if (seqMods.Mods != null) { var reversedStaticMods = GetReversedMods(seqMods.Mods.StaticModifications, lenSeq); var typedStaticMods = GetStaticTypedMods(seqMods.Peptide, reversedStaticMods); seqMods.Mods = new ExplicitMods(seqMods.Peptide, reversedStaticMods, GetReversedHeavyMods(seqMods, typedStaticMods, lenSeq), seqMods.Mods.IsVariableStaticMods); } return seqMods; } private static IList<ExplicitMod> GetReversedMods(IEnumerable<ExplicitMod> mods, int lenSeq) { return GetRearrangedMods(mods, lenSeq, i => lenSeq - i - 1); } private static IEnumerable<TypedExplicitModifications> GetReversedHeavyMods(SequenceMods seqMods, TypedExplicitModifications typedStaticMods, int lenSeq) { var reversedHeavyMods = seqMods.Mods.GetHeavyModifications().Select(typedMod => new TypedExplicitModifications(seqMods.Peptide, typedMod.LabelType, GetReversedMods(typedMod.Modifications, lenSeq))); foreach (var typedMods in reversedHeavyMods) { yield return typedMods.AddModMasses(typedStaticMods); } } private static SequenceMods GetShuffledPeptideSequence(SequenceMods seqMods, Random random) { string sequence = seqMods.Sequence; char finalA = sequence.Last(); string sequencePrefix = sequence.Substring(0, sequence.Length - 1); int lenPrefix = sequencePrefix.Length; // Calculate a random shuffling of the current positions int[] newIndices = new int[lenPrefix]; do { for (int i = 0; i < lenPrefix; i++) newIndices[i] = i; for (int i = 0; i < lenPrefix; i++) Helpers.Swap(ref newIndices[random.Next(newIndices.Length)], ref newIndices[random.Next(newIndices.Length)]); // Move the amino acids to their new positions char[] shuffledArray = new char[lenPrefix]; for (int i = 0; i < lenPrefix; i++) shuffledArray[newIndices[i]] = sequencePrefix[i]; seqMods.Sequence = new string(shuffledArray) + finalA; } // Make sure random shuffling did not just result in the same sequence while (seqMods.Sequence.Equals(sequence)); if (seqMods.Mods != null) { var shuffledStaticMods = GetShuffledMods(seqMods.Mods.StaticModifications, lenPrefix, newIndices); var typedStaticMods = GetStaticTypedMods(seqMods.Peptide, shuffledStaticMods); seqMods.Mods = new ExplicitMods(seqMods.Peptide, shuffledStaticMods, GetShuffledHeavyMods(seqMods, typedStaticMods, lenPrefix, newIndices), seqMods.Mods.IsVariableStaticMods); } return seqMods; } private static IList<ExplicitMod> GetShuffledMods(IEnumerable<ExplicitMod> mods, int lenSeq, int[] newIndices) { return GetRearrangedMods(mods, lenSeq, i => newIndices[i]); } private static IEnumerable<TypedExplicitModifications> GetShuffledHeavyMods(SequenceMods seqMods, TypedExplicitModifications typedStaticMods, int lenSeq, int[] newIndices) { var shuffledHeavyMods = seqMods.Mods.GetHeavyModifications().Select(typedMod => new TypedExplicitModifications(seqMods.Peptide, typedMod.LabelType, GetShuffledMods(typedMod.Modifications, lenSeq, newIndices))); foreach (var typedMods in shuffledHeavyMods) { yield return typedMods.AddModMasses(typedStaticMods); } } private static IList<ExplicitMod> GetRearrangedMods(IEnumerable<ExplicitMod> mods, int lenSeq, Func<int, int> getNewIndex) { if 
(null == mods) { return null; } var arrayMods = mods.ToArray(); for (int i = 0; i < arrayMods.Length; i++) { var mod = arrayMods[i]; if (mod.IndexAA < lenSeq) arrayMods[i] = new ExplicitMod(getNewIndex(mod.IndexAA), mod.Modification); } Array.Sort(arrayMods, (mod1, mod2) => Comparer.Default.Compare(mod1.IndexAA, mod2.IndexAA)); return arrayMods; } private sealed class PepAreaSortInfo { private readonly PeptideDocNode _nodePep; // private readonly Adduct _bestCharge; public PepAreaSortInfo(PeptideDocNode nodePep, ICollection<IsotopeLabelType> internalStandardTypes, int bestResultIndex, int index) { _nodePep = nodePep; // Get transition group areas by charge state var chargeGroups = from nodeGroup in nodePep.TransitionGroups where !internalStandardTypes.Contains(nodeGroup.TransitionGroup.LabelType) group nodeGroup by nodeGroup.TransitionGroup.PrecursorAdduct into g select new {Charge = g.Key, Area = g.Sum(ng => ng.GetPeakArea(bestResultIndex))}; // Store the best charge state and its area var bestChargeGroup = chargeGroups.OrderBy(cg => cg.Area).First(); // _bestCharge = bestChargeGroup.Charge; Area = bestChargeGroup.Area ?? 0; Index = index; } public float Area { get; private set; } public int Index { get; private set; } public int Rank { get; set; } public PeptideDocNode Peptide { get { return _nodePep; } } } private sealed class AreaSortInfo { public AreaSortInfo(float area, int ordinal, bool abovePrecursorMz, int index) { Area = area; Ordinal = ordinal; AbovePrecusorMz = abovePrecursorMz; Index = index; } public float Area { get; private set; } public int Ordinal { get; private set; } public bool AbovePrecusorMz { get; private set; } public int Index { get; private set; } } private sealed class RefinementIdentity { public RefinementIdentity(CustomMolecule customMolecule) { CustomMolecule = customMolecule; } public RefinementIdentity(string sequence) { Sequence = sequence; } public RefinementIdentity(Target id) { CustomMolecule = id.IsProteomic ? null : id.Molecule; Sequence = id.IsProteomic ? id.Sequence : null; } private CustomMolecule CustomMolecule { get; set; } private string Sequence { get; set; } public override bool Equals(object obj) { if (ReferenceEquals(null, obj)) return false; if (ReferenceEquals(this, obj)) return true; if (!(obj is RefinementIdentity)) return false; return Equals((RefinementIdentity) obj); } public override int GetHashCode() { int result = Sequence != null ? Sequence.GetHashCode() : 0; result = (result*397) ^ (CustomMolecule != null ? CustomMolecule.GetHashCode() : 0); return result; } private bool Equals(RefinementIdentity identity) { return Equals(identity.Sequence, Sequence) && Equals(identity.CustomMolecule, CustomMolecule); } public override string ToString() { return CustomMolecule != null ? CustomMolecule.ToString() : Sequence; } } } }
1
12967
Is there a benefit to keeping this in RefinementSettings? Or could we move it into its own class in TestUtil? A rough sketch of what that could look like is shown below.
ProteoWizard-pwiz
.cs
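To make the suggestion concrete, here is a minimal sketch of a test-utility wrapper. Everything here is hypothetical: the `SmallMoleculeTestUtil` class name, the `pwiz.SkylineTestUtil` namespace, and the delegate-first approach are illustrative choices, not existing code; the only grounded part is the method signature, including the `addAnnotations` parameter added by the patch above.

```csharp
using pwiz.Skyline.Model;

namespace pwiz.SkylineTestUtil
{
    /// <summary>
    /// Hypothetical test-only home for the small-molecule document conversion,
    /// so that RefinementSettings no longer carries testing-specific code.
    /// </summary>
    public static class SmallMoleculeTestUtil
    {
        public static SrmDocument ConvertToSmallMolecules(SrmDocument document,
            string pathForLibraryFiles, // In case we translate libraries etc
            RefinementSettings.ConvertToSmallMoleculesMode mode =
                RefinementSettings.ConvertToSmallMoleculesMode.formulas,
            RefinementSettings.ConvertToSmallMoleculesChargesMode invertChargesMode =
                RefinementSettings.ConvertToSmallMoleculesChargesMode.none,
            bool ignoreDecoys = false, bool addAnnotations = true)
        {
            // Delegate to the existing implementation for now; a follow-up
            // change could move the method body (and its helpers) here outright.
            return new RefinementSettings().ConvertToSmallMolecules(document,
                pathForLibraryFiles, mode, invertChargesMode, ignoreDecoys, addAnnotations);
        }
    }
}
```

Test call sites would then switch from `new RefinementSettings().ConvertToSmallMolecules(...)` to `SmallMoleculeTestUtil.ConvertToSmallMolecules(...)`, leaving RefinementSettings free of test-only entry points.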
@@ -6,14 +6,10 @@ import (
 
 // Query represents an active query.
 type Query interface {
-	// Spec returns the spec used to execute this query.
-	// Spec must not be modified.
-	Spec() *Spec
-
-	// Ready returns a channel that will deliver the query results.
+	// Results returns a channel that will deliver the query results.
 	// Its possible that the channel is closed before any results arrive,
 	// in which case the query should be inspected for an error using Err().
-	Ready() <-chan map[string]Result
+	Results() <-chan Result
 
 	// Done must always be called to free resources. It is safe to call Done
 	// multiple times.
1
package flux import ( "time" ) // Query represents an active query. type Query interface { // Spec returns the spec used to execute this query. // Spec must not be modified. Spec() *Spec // Ready returns a channel that will deliver the query results. // Its possible that the channel is closed before any results arrive, // in which case the query should be inspected for an error using Err(). Ready() <-chan map[string]Result // Done must always be called to free resources. It is safe to call Done // multiple times. Done() // Cancel will signal that query execution should stop. // Done must still be called to free resources. // It is safe to call Cancel multiple times. Cancel() // Err reports any error the query may have encountered. Err() error // Statistics reports the statistics for the query. // The statistics are not complete until Done is called. Statistics() Statistics } type Metadata map[string][]interface{} func (md Metadata) Add(key string, value interface{}) { md[key] = append(md[key], value) } func (md Metadata) AddAll(other Metadata) { for key, values := range other { md[key] = append(md[key], values...) } } // Range will iterate over the Metadata. It will invoke the function for each // key/value pair. If there are multiple values for a single key, then this will // be called with the same key once for each value. func (md Metadata) Range(fn func(key string, value interface{}) bool) { for key, values := range md { for _, value := range values { if ok := fn(key, value); !ok { return } } } } func (md Metadata) Del(key string) { delete(md, key) } // Statistics is a collection of statistics about the processing of a query. type Statistics struct { // TotalDuration is the total amount of time in nanoseconds spent. TotalDuration time.Duration `json:"total_duration"` // CompileDuration is the amount of time in nanoseconds spent compiling the query. CompileDuration time.Duration `json:"compile_duration"` // QueueDuration is the amount of time in nanoseconds spent queueing. QueueDuration time.Duration `json:"queue_duration"` // PlanDuration is the amount of time in nanoseconds spent in plannig the query. PlanDuration time.Duration `json:"plan_duration"` // RequeueDuration is the amount of time in nanoseconds spent requeueing. RequeueDuration time.Duration `json:"requeue_duration"` // ExecuteDuration is the amount of time in nanoseconds spent in executing the query. ExecuteDuration time.Duration `json:"execute_duration"` // Concurrency is the number of goroutines allocated to process the query Concurrency int `json:"concurrency"` // MaxAllocated is the maximum number of bytes the query allocated. MaxAllocated int64 `json:"max_allocated"` // Metadata contains metadata key/value pairs that have been attached during execution. Metadata Metadata `json:"metadata"` } // Add returns the sum of s and other. func (s Statistics) Add(other Statistics) Statistics { md := make(Metadata) md.AddAll(s.Metadata) md.AddAll(other.Metadata) return Statistics{ TotalDuration: s.TotalDuration + other.TotalDuration, CompileDuration: s.CompileDuration + other.CompileDuration, QueueDuration: s.QueueDuration + other.QueueDuration, PlanDuration: s.PlanDuration + other.PlanDuration, RequeueDuration: s.RequeueDuration + other.RequeueDuration, ExecuteDuration: s.ExecuteDuration + other.ExecuteDuration, Concurrency: s.Concurrency + other.Concurrency, MaxAllocated: s.MaxAllocated + other.MaxAllocated, Metadata: md, } }
1
10,002
I would have expected the Ready method to change to a `<-chan Result` instead of a `<-chan map[string]Result`. The difference is that the channel can now produce more than one set of results. In fact, the name Ready is inaccurate now, since it's not about the query being ready but is just a mechanism to deliver results. If this should be a follow-up PR, that is fine; let's create an issue for it. (A consumer sketch follows this record.)
influxdata-flux
go
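To make the reviewer's point above concrete, here is a minimal Go sketch of a caller consuming the proposed `Results() <-chan Result` channel. The `consume` function name, the `example` package, and the empty processing body are hypothetical; only the `Results`, `Done`, and `Err` methods come from the patch, and the import path is assumed to be the project's usual one.

package example

import "github.com/influxdata/flux"

// consume drains a query via the proposed Results() channel. Unlike the old
// Ready() <-chan map[string]Result, this channel can deliver several Result
// values before it is closed.
func consume(q flux.Query) error {
	for r := range q.Results() {
		_ = r // process each result set as it arrives (placeholder)
	}
	q.Done()       // always free resources
	return q.Err() // the channel may close early on error; inspect Err afterwards
}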
@@ -36,7 +36,7 @@ public interface ProjectLoader { * @return * @throws ProjectManagerException */ - public List<Project> fetchAllActiveProjects() throws ProjectManagerException; + List<Project> fetchAllActiveProjects() throws ProjectManagerException; /** * Loads whole project, including permissions, by the project id.
1
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.project; import java.io.File; import java.util.Collection; import java.util.List; import java.util.Map; import azkaban.flow.Flow; import azkaban.project.ProjectLogEvent.EventType; import azkaban.user.Permission; import azkaban.user.User; import azkaban.utils.Props; import azkaban.utils.Triple; public interface ProjectLoader { /** * Returns all projects which are active * * @return * @throws ProjectManagerException */ public List<Project> fetchAllActiveProjects() throws ProjectManagerException; /** * Loads whole project, including permissions, by the project id. * * @param id * @return * @throws ProjectManagerException */ public Project fetchProjectById(int id) throws ProjectManagerException; /** * Loads whole project, including permissions, by the project name. * @param name * @return * @throws ProjectManagerException */ public Project fetchProjectByName(String name) throws ProjectManagerException; /** * Should create an empty project with the given name and user and adds it to * the data store. It will auto assign a unique id for this project if * successful. * * If an active project of the same name exists, it will throw an exception. * If the name and description of the project exceeds the store's constraints, * it will throw an exception. * * @param name * @return * @throws ProjectManagerException if an active project of the same name * exists. */ public Project createNewProject(String name, String description, User creator) throws ProjectManagerException; /** * Removes the project by marking it inactive. * * @param project * @throws ProjectManagerException */ public void removeProject(Project project, String user) throws ProjectManagerException; /** * Adds and updates the user permissions. Does not check if the user is valid. * If the permission doesn't exist, it adds. If the permission exists, it * updates. * * @param project * @param name * @param perm * @param isGroup * @throws ProjectManagerException */ public void updatePermission(Project project, String name, Permission perm, boolean isGroup) throws ProjectManagerException; public void removePermission(Project project, String name, boolean isGroup) throws ProjectManagerException; /** * Modifies and commits the project description. * * @param project * @param description * @throws ProjectManagerException */ public void updateDescription(Project project, String description, String user) throws ProjectManagerException; /** * Stores logs for a particular project. Will soft fail rather than throw * exception. * * @param project * @param type * @param message return true if the posting was success. 
*/ public boolean postEvent(Project project, EventType type, String user, String message); /** * Returns all the events for a project sorted * * @param project * @return */ public List<ProjectLogEvent> getProjectEvents(Project project, int num, int skip) throws ProjectManagerException; /** * Will upload the files and return the version number of the file uploaded. */ public void uploadProjectFile(Project project, int version, String filetype, String filename, File localFile, String user) throws ProjectManagerException; /** * Get file that's uploaded. * * @return */ public ProjectFileHandler getUploadedFile(Project project, int version) throws ProjectManagerException; /** * Get file that's uploaded. * * @return */ public ProjectFileHandler getUploadedFile(int projectId, int version) throws ProjectManagerException; /** * Changes and commits different project version. * * @param project * @param version * @throws ProjectManagerException */ public void changeProjectVersion(Project project, int version, String user) throws ProjectManagerException; public void updateFlow(Project project, int version, Flow flow) throws ProjectManagerException; /** * Uploads all computed flows * * @param project * @param version * @param flows * @throws ProjectManagerException */ public void uploadFlows(Project project, int version, Collection<Flow> flows) throws ProjectManagerException; /** * Upload just one flow. * * @param project * @param version * @param flow * @throws ProjectManagerException */ public void uploadFlow(Project project, int version, Flow flow) throws ProjectManagerException; /** * Fetches one particular flow. * * @param project * @param version * @param flowId * @throws ProjectManagerException */ public Flow fetchFlow(Project project, String flowId) throws ProjectManagerException; /** * Fetches all flows. * * @param project * @param version * @param flowId * @throws ProjectManagerException */ public List<Flow> fetchAllProjectFlows(Project project) throws ProjectManagerException; /** * Gets the latest upload version. */ public int getLatestProjectVersion(Project project) throws ProjectManagerException; /** * Upload Project properties * * @param project * @param path * @param properties * @throws ProjectManagerException */ public void uploadProjectProperty(Project project, Props props) throws ProjectManagerException; /** * Upload Project properties. 
Map contains key value of path and properties * * @param project * @param path * @param properties * @throws ProjectManagerException */ public void uploadProjectProperties(Project project, List<Props> properties) throws ProjectManagerException; /** * Fetch project properties * * @param project * @param propsName * @return * @throws ProjectManagerException */ public Props fetchProjectProperty(Project project, String propsName) throws ProjectManagerException; /** * Fetch all project properties * * @param project * @return * @throws ProjectManagerException */ public Map<String, Props> fetchProjectProperties(int projectId, int version) throws ProjectManagerException; /** * Cleans all project versions less tha * * @param projectId * @param version * @throws ProjectManagerException */ public void cleanOlderProjectVersion(int projectId, int version) throws ProjectManagerException; public void updateProjectProperty(Project project, Props props) throws ProjectManagerException; Props fetchProjectProperty(int projectId, int projectVer, String propsName) throws ProjectManagerException; List<Triple<String, Boolean, Permission>> getProjectPermissions(int projectId) throws ProjectManagerException; void updateProjectSettings(Project project) throws ProjectManagerException; }
1
12,796
Why? What if we want to access those methods outside of azkaban-common?
azkaban-azkaban
java
@@ -45,9 +45,9 @@ namespace Nethermind.Merge.Plugin.Test { private async Task<MergeTestBlockchain> CreateBlockChain() => await new MergeTestBlockchain(new ManualTimestamper()).Build(new SingleReleaseSpecProvider(Berlin.Instance, 1)); - private IConsensusRpcModule CreateConsensusModule(MergeTestBlockchain chain) + private IEngineRpcModule CreateConsensusModule(MergeTestBlockchain chain) { - return new ConsensusRpcModule( + return new EngineRpcModule( new AssembleBlockHandler(chain.BlockTree, chain.BlockProductionTrigger, chain.Timestamper, chain.LogManager), new NewBlockHandler(chain.BlockTree, chain.BlockPreprocessorStep, chain.BlockchainProcessor, chain.State, new InitConfig(), chain.LogManager), new SetHeadBlockHandler(chain.BlockTree, chain.State, chain.LogManager),
1
// Copyright (c) 2021 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. // using System.Threading.Tasks; using Nethermind.Api; using Nethermind.Api; using Nethermind.Blockchain; using Nethermind.Blockchain.Comparers; using Nethermind.Blockchain.Processing; using Nethermind.Blockchain.Producers; using Nethermind.Blockchain.Rewards; using Nethermind.Blockchain.Validators; using Nethermind.Consensus; using Nethermind.Core; using Nethermind.Core.Specs; using Nethermind.Core.Test; using Nethermind.Core.Test.Blockchain; using Nethermind.Core.Test.Builders; using Nethermind.Crypto; using Nethermind.Int256; using Nethermind.Logging; using Nethermind.Merge.Plugin.Handlers; using Nethermind.Runner.Ethereum; using Nethermind.Specs; using Nethermind.Specs.Forks; using Nethermind.State; namespace Nethermind.Merge.Plugin.Test { public partial class ConsensusModuleTests { private async Task<MergeTestBlockchain> CreateBlockChain() => await new MergeTestBlockchain(new ManualTimestamper()).Build(new SingleReleaseSpecProvider(Berlin.Instance, 1)); private IConsensusRpcModule CreateConsensusModule(MergeTestBlockchain chain) { return new ConsensusRpcModule( new AssembleBlockHandler(chain.BlockTree, chain.BlockProductionTrigger, chain.Timestamper, chain.LogManager), new NewBlockHandler(chain.BlockTree, chain.BlockPreprocessorStep, chain.BlockchainProcessor, chain.State, new InitConfig(), chain.LogManager), new SetHeadBlockHandler(chain.BlockTree, chain.State, chain.LogManager), new FinaliseBlockHandler(chain.BlockFinder, chain.BlockFinalizationManager, chain.LogManager), chain.LogManager); } private class MergeTestBlockchain : TestBlockchain { public MergeTestBlockchain(ManualTimestamper timestamper) { Timestamper = timestamper; GenesisBlockBuilder = Core.Test.Builders.Build.A.Block.Genesis.Genesis .WithTimestamp(UInt256.One); Signer = new Eth2Signer(MinerAddress); } protected override Task AddBlocksOnStart() => Task.CompletedTask; public override ILogManager LogManager { get; } = new NUnitLogManager(); private IBlockValidator BlockValidator { get; set; } = null!; private ISigner Signer { get; } protected override IBlockProducer CreateTestBlockProducer(TxPoolTxSource txPoolTxSource, ISealer sealer, ITransactionComparerProvider transactionComparerProvider) { MiningConfig miningConfig = new(); TargetAdjustedGasLimitCalculator targetAdjustedGasLimitCalculator = new(SpecProvider, miningConfig); BlockProducerEnvFactory blockProducerEnvFactory = new( DbProvider, BlockTree, ReadOnlyTrieStore, SpecProvider, BlockValidator, NoBlockRewards.Instance, ReceiptStorage, BlockPreprocessorStep, TxPool, transactionComparerProvider, miningConfig, LogManager); return new Eth2TestBlockProducerFactory(targetAdjustedGasLimitCalculator).Create( blockProducerEnvFactory, BlockTree, BlockProductionTrigger, SpecProvider, Signer, 
Timestamper, miningConfig, LogManager); } protected override BlockProcessor CreateBlockProcessor() { BlockValidator = CreateBlockValidator(); return new BlockProcessor( SpecProvider, BlockValidator, NoBlockRewards.Instance, new BlockProcessor.BlockValidationTransactionsExecutor(TxProcessor, State), State, Storage, ReceiptStorage, NullWitnessCollector.Instance, LogManager); } private IBlockValidator CreateBlockValidator() { HeaderValidator headerValidator = new(BlockTree, new Eth2SealEngine(Signer), SpecProvider, LogManager); return new BlockValidator( new TxValidator(SpecProvider.ChainId), headerValidator, Always.Valid, SpecProvider, LogManager); } public Address MinerAddress => TestItem.PrivateKeyA.Address; public IManualBlockFinalizationManager BlockFinalizationManager { get; } = new ManualBlockFinalizationManager(); protected override async Task<TestBlockchain> Build(ISpecProvider? specProvider = null, UInt256? initialValues = null) { TestBlockchain chain = await base.Build(specProvider, initialValues); await chain.BlockchainProcessor.StopAsync(true); Suggester.Dispose(); return chain; } public async Task<MergeTestBlockchain> Build(ISpecProvider? specProvider = null) => (MergeTestBlockchain) await Build(specProvider, null); } } }
1
26,113
Rename the file too.
NethermindEth-nethermind
.cs
@@ -27,7 +27,11 @@ func Unmarshal(r *request.Request) { decoder := xml.NewDecoder(r.HTTPResponse.Body) err := xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { - r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "failed decoding EC2 Query response", err), + r.HTTPResponse.StatusCode, + "", + ) return } }
1
package ec2query //go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go import ( "encoding/xml" "io" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) // UnmarshalHandler is a named request handler for unmarshaling ec2query protocol requests var UnmarshalHandler = request.NamedHandler{Name: "awssdk.ec2query.Unmarshal", Fn: Unmarshal} // UnmarshalMetaHandler is a named request handler for unmarshaling ec2query protocol request metadata var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalMeta", Fn: UnmarshalMeta} // UnmarshalErrorHandler is a named request handler for unmarshaling ec2query protocol request errors var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalError", Fn: UnmarshalError} // Unmarshal unmarshals a response body for the EC2 protocol. func Unmarshal(r *request.Request) { defer r.HTTPResponse.Body.Close() if r.DataFilled() { decoder := xml.NewDecoder(r.HTTPResponse.Body) err := xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err) return } } } // UnmarshalMeta unmarshals response headers for the EC2 protocol. func UnmarshalMeta(r *request.Request) { // TODO implement unmarshaling of request IDs } type xmlErrorResponse struct { XMLName xml.Name `xml:"Response"` Code string `xml:"Errors>Error>Code"` Message string `xml:"Errors>Error>Message"` RequestID string `xml:"RequestID"` } // UnmarshalError unmarshals a response error for the EC2 protocol. func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() resp := &xmlErrorResponse{} err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) if err != nil && err != io.EOF { r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err) } else { r.Error = awserr.NewRequestFailure( awserr.New(resp.Code, resp.Message, nil), r.HTTPResponse.StatusCode, resp.RequestID, ) } }
1
9,367
Need to populate the `RequestID` field for these errors. This can be obtained from `r.RequestID`, I think, but we need to make sure. The `r.RequestID` should have been populated from the `UnmarshalMeta` handler list. (A sketch follows this record.)
aws-aws-sdk-go
go
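Following the review comment above, here is a minimal Go sketch of the suggested change: passing the request ID through when wrapping the deserialization error. It assumes, as the reviewer does, that `r.RequestID` has already been filled in by the `UnmarshalMeta` handler; everything else mirrors the handler shown in this record.

package ec2query

import (
	"encoding/xml"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// Unmarshal, with the reviewer's suggestion applied: the request ID is
// passed to NewRequestFailure instead of the empty string.
func Unmarshal(r *request.Request) {
	defer r.HTTPResponse.Body.Close()
	if r.DataFilled() {
		decoder := xml.NewDecoder(r.HTTPResponse.Body)
		if err := xmlutil.UnmarshalXML(r.Data, decoder, ""); err != nil {
			r.Error = awserr.NewRequestFailure(
				awserr.New("SerializationError", "failed decoding EC2 Query response", err),
				r.HTTPResponse.StatusCode,
				r.RequestID, // previously passed as ""
			)
		}
	}
}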
@@ -35,8 +35,8 @@ public class LongRunningConfigTest { private static final String GAPIC_CONFIG_METADATA_TYPE = "HeaderType"; private static final String ANNOTATIONS_RETURN_TYPE_NAME = "BookType"; private static final String ANNOTATIONS_METADATA_TYPE = "FooterType"; - private static final boolean TEST_IMPLEMENTS_DELETE = false; - private static final boolean TEST_IMPLEMENTS_CANCEL = false; + private static boolean TEST_IMPLEMENTS_DELETE = false; + private static boolean TEST_IMPLEMENTS_CANCEL = false; private static int TEST_INITIAL_POLL_DELAY = 5; private static double TEST_POLL_DELAY_MULTIPLIER = 10; private static long TEST_MAX_POLL_DELAY = 12500;
1
/* Copyright 2018 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.config; import static com.google.common.truth.Truth.assertThat; import com.google.api.codegen.LongRunningConfigProto; import com.google.api.codegen.util.ProtoParser; import com.google.api.tools.framework.model.BoundedDiagCollector; import com.google.api.tools.framework.model.DiagCollector; import com.google.api.tools.framework.model.MessageType; import com.google.api.tools.framework.model.Method; import com.google.api.tools.framework.model.Model; import com.google.api.tools.framework.model.SymbolTable; import com.google.api.tools.framework.model.TypeRef; import com.google.longrunning.OperationTypes; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; public class LongRunningConfigTest { private static final String GAPIC_CONFIG_RETURN_TYPE_NAME = "MethodResponse"; private static final String GAPIC_CONFIG_METADATA_TYPE = "HeaderType"; private static final String ANNOTATIONS_RETURN_TYPE_NAME = "BookType"; private static final String ANNOTATIONS_METADATA_TYPE = "FooterType"; private static final boolean TEST_IMPLEMENTS_DELETE = false; private static final boolean TEST_IMPLEMENTS_CANCEL = false; private static int TEST_INITIAL_POLL_DELAY = 5; private static double TEST_POLL_DELAY_MULTIPLIER = 10; private static long TEST_MAX_POLL_DELAY = 12500; private static int TEST_TOTAL_POLL_TIMEOUT = 50000; private static final ProtoParser protoParser = Mockito.mock(ProtoParser.class); private static final Method simpleMethod = Mockito.mock(Method.class); private static final Method lroAnnotatedMethod = Mockito.mock(Method.class); private static final Model model = Mockito.mock(Model.class); private static final SymbolTable symbolTable = Mockito.mock(SymbolTable.class); private static final MessageType gapicConfigMetadataMessage = Mockito.mock(MessageType.class); private static final MessageType gapicConfigReturnMessage = Mockito.mock(MessageType.class); private static final MessageType annotationsMetadataMessage = Mockito.mock(MessageType.class); private static final MessageType annotationsReturnMessage = Mockito.mock(MessageType.class); private static final TypeRef gapicConfigMetadataType = TypeRef.of(gapicConfigMetadataMessage); private static final TypeRef gapicConfigReturnType = TypeRef.of(gapicConfigReturnMessage); private static final TypeRef annotationsMetadataType = TypeRef.of(annotationsMetadataMessage); private static final TypeRef annotationsReturnType = TypeRef.of(annotationsReturnMessage); private static final LongRunningConfigProto baseLroConfigProto = LongRunningConfigProto.newBuilder() .setMetadataType(GAPIC_CONFIG_METADATA_TYPE) .setReturnType(GAPIC_CONFIG_RETURN_TYPE_NAME) .build(); private static final LongRunningConfigProto lroConfigProtoWithPollSettings = baseLroConfigProto .toBuilder() .setImplementsCancel(TEST_IMPLEMENTS_CANCEL) .setImplementsDelete(TEST_IMPLEMENTS_DELETE) .setInitialPollDelayMillis(TEST_INITIAL_POLL_DELAY) 
.setPollDelayMultiplier(TEST_POLL_DELAY_MULTIPLIER) .setMaxPollDelayMillis(TEST_MAX_POLL_DELAY) .setTotalPollTimeoutMillis(TEST_TOTAL_POLL_TIMEOUT) .build(); @BeforeClass public static void startUp() { Mockito.when(simpleMethod.getModel()).thenReturn(model); Mockito.when(lroAnnotatedMethod.getModel()).thenReturn(model); Mockito.when(model.getSymbolTable()).thenReturn(symbolTable); Mockito.when(protoParser.getLongRunningOperation(lroAnnotatedMethod)) .thenReturn( OperationTypes.newBuilder() .setMetadata(ANNOTATIONS_METADATA_TYPE) .setResponse(ANNOTATIONS_RETURN_TYPE_NAME) .build()); Mockito.when(symbolTable.lookupType(GAPIC_CONFIG_METADATA_TYPE)) .thenReturn(gapicConfigMetadataType); Mockito.when(symbolTable.lookupType(GAPIC_CONFIG_RETURN_TYPE_NAME)) .thenReturn(gapicConfigReturnType); Mockito.when(symbolTable.lookupType(ANNOTATIONS_METADATA_TYPE)) .thenReturn(annotationsMetadataType); Mockito.when(symbolTable.lookupType(ANNOTATIONS_RETURN_TYPE_NAME)) .thenReturn(annotationsReturnType); } @Test public void testCreateLROWithoutGapicConfig() { DiagCollector diagCollector = new BoundedDiagCollector(); LongRunningConfig longRunningConfig = LongRunningConfig.createLongRunningConfig( lroAnnotatedMethod, diagCollector, LongRunningConfigProto.getDefaultInstance(), protoParser); assertThat(diagCollector.getErrorCount()).isEqualTo(0); assertThat(longRunningConfig).isNotNull(); ProtoTypeRef metadataTypeModel = (ProtoTypeRef) longRunningConfig.getMetadataType(); assertThat(metadataTypeModel.getProtoType()).isEqualTo(annotationsMetadataType); ProtoTypeRef returnTypeModel = (ProtoTypeRef) longRunningConfig.getReturnType(); assertThat(returnTypeModel.getProtoType()).isEqualTo(annotationsReturnType); assertThat(longRunningConfig.getInitialPollDelay().toMillis()) .isEqualTo(LongRunningConfig.LRO_INITIAL_POLL_DELAY_MILLIS); assertThat(longRunningConfig.getMaxPollDelay().toMillis()) .isEqualTo(LongRunningConfig.LRO_MAX_POLL_DELAY_MILLIS); assertThat(longRunningConfig.getPollDelayMultiplier()) .isEqualTo(LongRunningConfig.LRO_POLL_DELAY_MULTIPLIER); assertThat(longRunningConfig.getTotalPollTimeout().toMillis()) .isEqualTo(LongRunningConfig.LRO_TOTAL_POLL_TIMEOUT_MILLS); assertThat(longRunningConfig.implementsCancel()) .isEqualTo(LongRunningConfig.LRO_IMPLEMENTS_CANCEL); assertThat(longRunningConfig.implementsDelete()) .isEqualTo(LongRunningConfig.LRO_IMPLEMENTS_DELETE); } @Test public void testCreateLROWithGapicConfigOnly() { DiagCollector diagCollector = new BoundedDiagCollector(); // simpleMethod has no LRO proto annotations. // lroConfigProtoWithPollSettings contains LRO settings. LongRunningConfig longRunningConfig = LongRunningConfig.createLongRunningConfig( simpleMethod, diagCollector, lroConfigProtoWithPollSettings, protoParser); assertThat(diagCollector.getErrorCount()).isEqualTo(0); assertThat(longRunningConfig).isNotNull(); ProtoTypeRef metadataTypeModel = (ProtoTypeRef) longRunningConfig.getMetadataType(); assertThat(metadataTypeModel.getProtoType()).isEqualTo(gapicConfigMetadataType); ProtoTypeRef returnTypeModel = (ProtoTypeRef) longRunningConfig.getReturnType(); assertThat(returnTypeModel.getProtoType()).isEqualTo(gapicConfigReturnType); // These are the values specified by lroConfigProtoWithPollSettings. 
assertThat(longRunningConfig.getInitialPollDelay().toMillis()) .isEqualTo(TEST_INITIAL_POLL_DELAY); assertThat(longRunningConfig.getMaxPollDelay().toMillis()).isEqualTo(TEST_MAX_POLL_DELAY); assertThat(longRunningConfig.getPollDelayMultiplier()).isEqualTo(TEST_POLL_DELAY_MULTIPLIER); assertThat(longRunningConfig.getTotalPollTimeout().toMillis()) .isEqualTo(TEST_TOTAL_POLL_TIMEOUT); assertThat(longRunningConfig.implementsCancel()).isEqualTo(TEST_IMPLEMENTS_CANCEL); assertThat(longRunningConfig.implementsDelete()).isEqualTo(TEST_IMPLEMENTS_DELETE); } @Test public void testCreateLROWithAnnotationsOverridingGapicConfig() { DiagCollector diagCollector = new BoundedDiagCollector(); // lroAnnotatedMethod contains different settings than that in lroConfigProtoWithPollSettings. LongRunningConfig longRunningConfig = LongRunningConfig.createLongRunningConfig( lroAnnotatedMethod, diagCollector, lroConfigProtoWithPollSettings, protoParser); assertThat(diagCollector.getErrorCount()).isEqualTo(0); assertThat(longRunningConfig).isNotNull(); // Assert that proto annotations settings take precendence over gapic config. ProtoTypeRef metadataTypeModel = (ProtoTypeRef) longRunningConfig.getMetadataType(); assertThat(metadataTypeModel.getProtoType()).isEqualTo(annotationsMetadataType); ProtoTypeRef returnTypeModel = (ProtoTypeRef) longRunningConfig.getReturnType(); assertThat(returnTypeModel.getProtoType()).isEqualTo(annotationsReturnType); assertThat(longRunningConfig.getInitialPollDelay().toMillis()) .isEqualTo(LongRunningConfig.LRO_INITIAL_POLL_DELAY_MILLIS); assertThat(longRunningConfig.getMaxPollDelay().toMillis()) .isEqualTo(LongRunningConfig.LRO_MAX_POLL_DELAY_MILLIS); assertThat(longRunningConfig.getPollDelayMultiplier()) .isEqualTo(LongRunningConfig.LRO_POLL_DELAY_MULTIPLIER); assertThat(longRunningConfig.getTotalPollTimeout().toMillis()) .isEqualTo(LongRunningConfig.LRO_TOTAL_POLL_TIMEOUT_MILLS); assertThat(longRunningConfig.implementsCancel()) .isEqualTo(LongRunningConfig.LRO_IMPLEMENTS_CANCEL); assertThat(longRunningConfig.implementsDelete()) .isEqualTo(LongRunningConfig.LRO_IMPLEMENTS_DELETE); } @Test public void testCreateLROWithNonLROMethod() { DiagCollector diagCollector = new BoundedDiagCollector(); LongRunningConfig longRunningConfig = LongRunningConfig.createLongRunningConfig( simpleMethod, diagCollector, LongRunningConfigProto.getDefaultInstance(), protoParser); assertThat(diagCollector.getErrorCount()).isEqualTo(0); assertThat(longRunningConfig).isNull(); } }
1
27,325
Why is the `final` being removed here?
googleapis-gapic-generator
java
@@ -1,13 +1,14 @@ // // Bismillah ar-Rahmaan ar-Raheem // -// Easylogging++ v9.95.0 +// Easylogging++ v9.96.4 // Cross-platform logging library for C++ applications // -// Copyright (c) 2017 muflihun.com +// Copyright (c) 2012-2018 Muflihun Labs +// Copyright (c) 2012-2018 @abumusamq // // This library is released under the MIT Licence. -// http://labs.muflihun.com/easyloggingpp/licence.php +// https://github.com/muflihun/easyloggingpp/blob/master/LICENSE // // https://github.com/muflihun/easyloggingpp // https://muflihun.github.io/easyloggingpp
1
// // Bismillah ar-Rahmaan ar-Raheem // // Easylogging++ v9.95.0 // Cross-platform logging library for C++ applications // // Copyright (c) 2017 muflihun.com // // This library is released under the MIT Licence. // http://labs.muflihun.com/easyloggingpp/licence.php // // https://github.com/muflihun/easyloggingpp // https://muflihun.github.io/easyloggingpp // http://muflihun.com // // Changed by SDF //#include "easylogging++.h" #include "util/Logging.h" #if defined(AUTO_INITIALIZE_EASYLOGGINGPP) INITIALIZE_EASYLOGGINGPP #endif namespace el { // el::base::utils namespace base { namespace utils { /// @brief Aborts application due with user-defined status static void abort(int status, const std::string& reason) { // Both status and reason params are there for debugging with tools like gdb etc ELPP_UNUSED(status); ELPP_UNUSED(reason); #if defined(ELPP_COMPILER_MSVC) && defined(_M_IX86) && defined(_DEBUG) // Ignore msvc critical error dialog - break instead (on debug mode) _asm int 3 #else ::abort(); #endif // defined(ELPP_COMPILER_MSVC) && defined(_M_IX86) && defined(_DEBUG) } } // namespace utils } // namespace base // el // LevelHelper const char* LevelHelper::convertToString(Level level) { // Do not use switch over strongly typed enums because Intel C++ compilers dont support them yet. if (level == Level::Global) return "GLOBAL"; if (level == Level::Debug) return "DEBUG"; if (level == Level::Info) return "INFO"; if (level == Level::Warning) return "WARNING"; if (level == Level::Error) return "ERROR"; if (level == Level::Fatal) return "FATAL"; if (level == Level::Verbose) return "VERBOSE"; if (level == Level::Trace) return "TRACE"; return "UNKNOWN"; } struct StringToLevelItem { const char* levelString; Level level; }; static struct StringToLevelItem stringToLevelMap[] = { { "global", Level::Global }, { "debug", Level::Debug }, { "info", Level::Info }, { "warning", Level::Warning }, { "error", Level::Error }, { "fatal", Level::Fatal }, { "verbose", Level::Verbose }, { "trace", Level::Trace } }; Level LevelHelper::convertFromString(const char* levelStr) { for (auto& item : stringToLevelMap) { if (base::utils::Str::cStringCaseEq(levelStr, item.levelString)) { return item.level; } } return Level::Unknown; } void LevelHelper::forEachLevel(base::type::EnumType* startIndex, const std::function<bool(void)>& fn) { base::type::EnumType lIndexMax = LevelHelper::kMaxValid; do { if (fn()) { break; } *startIndex = static_cast<base::type::EnumType>(*startIndex << 1); } while (*startIndex <= lIndexMax); } // ConfigurationTypeHelper const char* ConfigurationTypeHelper::convertToString(ConfigurationType configurationType) { // Do not use switch over strongly typed enums because Intel C++ compilers dont support them yet. 
if (configurationType == ConfigurationType::Enabled) return "ENABLED"; if (configurationType == ConfigurationType::Filename) return "FILENAME"; if (configurationType == ConfigurationType::Format) return "FORMAT"; if (configurationType == ConfigurationType::ToFile) return "TO_FILE"; if (configurationType == ConfigurationType::ToStandardOutput) return "TO_STANDARD_OUTPUT"; if (configurationType == ConfigurationType::SubsecondPrecision) return "SUBSECOND_PRECISION"; if (configurationType == ConfigurationType::PerformanceTracking) return "PERFORMANCE_TRACKING"; if (configurationType == ConfigurationType::MaxLogFileSize) return "MAX_LOG_FILE_SIZE"; if (configurationType == ConfigurationType::LogFlushThreshold) return "LOG_FLUSH_THRESHOLD"; return "UNKNOWN"; } struct ConfigurationStringToTypeItem { const char* configString; ConfigurationType configType; }; static struct ConfigurationStringToTypeItem configStringToTypeMap[] = { { "enabled", ConfigurationType::Enabled }, { "to_file", ConfigurationType::ToFile }, { "to_standard_output", ConfigurationType::ToStandardOutput }, { "format", ConfigurationType::Format }, { "filename", ConfigurationType::Filename }, { "subsecond_precision", ConfigurationType::SubsecondPrecision }, { "milliseconds_width", ConfigurationType::MillisecondsWidth }, { "performance_tracking", ConfigurationType::PerformanceTracking }, { "max_log_file_size", ConfigurationType::MaxLogFileSize }, { "log_flush_threshold", ConfigurationType::LogFlushThreshold }, }; ConfigurationType ConfigurationTypeHelper::convertFromString(const char* configStr) { for (auto& item : configStringToTypeMap) { if (base::utils::Str::cStringCaseEq(configStr, item.configString)) { return item.configType; } } return ConfigurationType::Unknown; } void ConfigurationTypeHelper::forEachConfigType(base::type::EnumType* startIndex, const std::function<bool(void)>& fn) { base::type::EnumType cIndexMax = ConfigurationTypeHelper::kMaxValid; do { if (fn()) { break; } *startIndex = static_cast<base::type::EnumType>(*startIndex << 1); } while (*startIndex <= cIndexMax); } // Configuration Configuration::Configuration(const Configuration& c) : m_level(c.m_level), m_configurationType(c.m_configurationType), m_value(c.m_value) { } Configuration& Configuration::operator=(const Configuration& c) { if (&c != this) { m_level = c.m_level; m_configurationType = c.m_configurationType; m_value = c.m_value; } return *this; } /// @brief Full constructor used to sets value of configuration Configuration::Configuration(Level level, ConfigurationType configurationType, const std::string& value) : m_level(level), m_configurationType(configurationType), m_value(value) { } void Configuration::log(el::base::type::ostream_t& os) const { os << LevelHelper::convertToString(m_level) << ELPP_LITERAL(" ") << ConfigurationTypeHelper::convertToString(m_configurationType) << ELPP_LITERAL(" = ") << m_value.c_str(); } /// @brief Used to find configuration from configuration (pointers) repository. Avoid using it. 
Configuration::Predicate::Predicate(Level level, ConfigurationType configurationType) : m_level(level), m_configurationType(configurationType) { } bool Configuration::Predicate::operator()(const Configuration* conf) const { return ((conf != nullptr) && (conf->level() == m_level) && (conf->configurationType() == m_configurationType)); } // Configurations Configurations::Configurations(void) : m_configurationFile(std::string()), m_isFromFile(false) { } Configurations::Configurations(const std::string& configurationFile, bool useDefaultsForRemaining, Configurations* base) : m_configurationFile(configurationFile), m_isFromFile(false) { parseFromFile(configurationFile, base); if (useDefaultsForRemaining) { setRemainingToDefault(); } } bool Configurations::parseFromFile(const std::string& configurationFile, Configurations* base) { // We initial assertion with true because if we have assertion diabled, we want to pass this // check and if assertion is enabled we will have values re-assigned any way. bool assertionPassed = true; ELPP_ASSERT((assertionPassed = base::utils::File::pathExists(configurationFile.c_str(), true)) == true, "Configuration file [" << configurationFile << "] does not exist!"); if (!assertionPassed) { return false; } bool success = Parser::parseFromFile(configurationFile, this, base); m_isFromFile = success; return success; } bool Configurations::parseFromText(const std::string& configurationsString, Configurations* base) { bool success = Parser::parseFromText(configurationsString, this, base); if (success) { m_isFromFile = false; } return success; } void Configurations::setFromBase(Configurations* base) { if (base == nullptr || base == this) { return; } base::threading::ScopedLock scopedLock(base->lock()); for (Configuration*& conf : base->list()) { set(conf); } } bool Configurations::hasConfiguration(ConfigurationType configurationType) { base::type::EnumType lIndex = LevelHelper::kMinValid; bool result = false; LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { if (hasConfiguration(LevelHelper::castFromInt(lIndex), configurationType)) { result = true; } return result; }); return result; } bool Configurations::hasConfiguration(Level level, ConfigurationType configurationType) { base::threading::ScopedLock scopedLock(lock()); #if ELPP_COMPILER_INTEL // We cant specify template types here, Intel C++ throws compilation error // "error: type name is not allowed" return RegistryWithPred::get(level, configurationType) != nullptr; #else return RegistryWithPred<Configuration, Configuration::Predicate>::get(level, configurationType) != nullptr; #endif // ELPP_COMPILER_INTEL } void Configurations::set(Level level, ConfigurationType configurationType, const std::string& value) { base::threading::ScopedLock scopedLock(lock()); unsafeSet(level, configurationType, value); // This is not unsafe anymore as we have locked mutex if (level == Level::Global) { unsafeSetGlobally(configurationType, value, false); // Again this is not unsafe either } } void Configurations::set(Configuration* conf) { if (conf == nullptr) { return; } set(conf->level(), conf->configurationType(), conf->value()); } void Configurations::setToDefault(void) { setGlobally(ConfigurationType::Enabled, std::string("true"), true); #if !defined(ELPP_NO_DEFAULT_LOG_FILE) setGlobally(ConfigurationType::Filename, std::string(base::consts::kDefaultLogFile), true); #else ELPP_UNUSED(base::consts::kDefaultLogFile); #endif // !defined(ELPP_NO_DEFAULT_LOG_FILE) #if defined(ELPP_NO_LOG_TO_FILE) 
setGlobally(ConfigurationType::ToFile, std::string("false"), true); #else setGlobally(ConfigurationType::ToFile, std::string("true"), true); #endif // defined(ELPP_NO_LOG_TO_FILE) setGlobally(ConfigurationType::ToStandardOutput, std::string("true"), true); setGlobally(ConfigurationType::SubsecondPrecision, std::string("3"), true); setGlobally(ConfigurationType::PerformanceTracking, std::string("true"), true); setGlobally(ConfigurationType::MaxLogFileSize, std::string("0"), true); setGlobally(ConfigurationType::LogFlushThreshold, std::string("0"), true); setGlobally(ConfigurationType::Format, std::string("%datetime %level [%logger] %msg"), true); set(Level::Debug, ConfigurationType::Format, std::string("%datetime %level [%logger] [%user@%host] [%func] [%loc] %msg")); // INFO and WARNING are set to default by Level::Global set(Level::Error, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); set(Level::Fatal, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); set(Level::Verbose, ConfigurationType::Format, std::string("%datetime %level-%vlevel [%logger] %msg")); set(Level::Trace, ConfigurationType::Format, std::string("%datetime %level [%logger] [%func] [%loc] %msg")); } void Configurations::setRemainingToDefault(void) { base::threading::ScopedLock scopedLock(lock()); #if defined(ELPP_NO_LOG_TO_FILE) unsafeSetIfNotExist(Level::Global, ConfigurationType::Enabled, std::string("false")); #else unsafeSetIfNotExist(Level::Global, ConfigurationType::Enabled, std::string("true")); #endif // defined(ELPP_NO_LOG_TO_FILE) #if !defined(ELPP_NO_DEFAULT_LOG_FILE) unsafeSetIfNotExist(Level::Global, ConfigurationType::Filename, std::string(base::consts::kDefaultLogFile)); #endif // !defined(ELPP_NO_DEFAULT_LOG_FILE) unsafeSetIfNotExist(Level::Global, ConfigurationType::ToStandardOutput, std::string("true")); unsafeSetIfNotExist(Level::Global, ConfigurationType::SubsecondPrecision, std::string("3")); unsafeSetIfNotExist(Level::Global, ConfigurationType::PerformanceTracking, std::string("true")); unsafeSetIfNotExist(Level::Global, ConfigurationType::MaxLogFileSize, std::string("0")); unsafeSetIfNotExist(Level::Global, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); unsafeSetIfNotExist(Level::Debug, ConfigurationType::Format, std::string("%datetime %level [%logger] [%user@%host] [%func] [%loc] %msg")); // INFO and WARNING are set to default by Level::Global unsafeSetIfNotExist(Level::Error, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); unsafeSetIfNotExist(Level::Fatal, ConfigurationType::Format, std::string("%datetime %level [%logger] %msg")); unsafeSetIfNotExist(Level::Verbose, ConfigurationType::Format, std::string("%datetime %level-%vlevel [%logger] %msg")); unsafeSetIfNotExist(Level::Trace, ConfigurationType::Format, std::string("%datetime %level [%logger] [%func] [%loc] %msg")); } bool Configurations::Parser::parseFromFile(const std::string& configurationFile, Configurations* sender, Configurations* base) { sender->setFromBase(base); std::ifstream fileStream_(configurationFile.c_str(), std::ifstream::in); ELPP_ASSERT(fileStream_.is_open(), "Unable to open configuration file [" << configurationFile << "] for parsing."); bool parsedSuccessfully = false; std::string line = std::string(); Level currLevel = Level::Unknown; std::string currConfigStr = std::string(); std::string currLevelStr = std::string(); while (fileStream_.good()) { std::getline(fileStream_, line); parsedSuccessfully = parseLine(&line, 
&currConfigStr, &currLevelStr, &currLevel, sender); ELPP_ASSERT(parsedSuccessfully, "Unable to parse configuration line: " << line); } return parsedSuccessfully; } bool Configurations::Parser::parseFromText(const std::string& configurationsString, Configurations* sender, Configurations* base) { sender->setFromBase(base); bool parsedSuccessfully = false; std::stringstream ss(configurationsString); std::string line = std::string(); Level currLevel = Level::Unknown; std::string currConfigStr = std::string(); std::string currLevelStr = std::string(); while (std::getline(ss, line)) { parsedSuccessfully = parseLine(&line, &currConfigStr, &currLevelStr, &currLevel, sender); ELPP_ASSERT(parsedSuccessfully, "Unable to parse configuration line: " << line); } return parsedSuccessfully; } void Configurations::Parser::ignoreComments(std::string* line) { std::size_t foundAt = 0; std::size_t quotesStart = line->find("\""); std::size_t quotesEnd = std::string::npos; if (quotesStart != std::string::npos) { quotesEnd = line->find("\"", quotesStart + 1); while (quotesEnd != std::string::npos && line->at(quotesEnd - 1) == '\\') { // Do not erase slash yet - we will erase it in parseLine(..) while loop quotesEnd = line->find("\"", quotesEnd + 2); } } if ((foundAt = line->find(base::consts::kConfigurationComment)) != std::string::npos) { if (foundAt < quotesEnd) { foundAt = line->find(base::consts::kConfigurationComment, quotesEnd + 1); } *line = line->substr(0, foundAt); } } bool Configurations::Parser::isLevel(const std::string& line) { return base::utils::Str::startsWith(line, std::string(base::consts::kConfigurationLevel)); } bool Configurations::Parser::isComment(const std::string& line) { return base::utils::Str::startsWith(line, std::string(base::consts::kConfigurationComment)); } bool Configurations::Parser::isConfig(const std::string& line) { std::size_t assignment = line.find('='); return line != "" && ((line[0] >= 'A' && line[0] <= 'Z') || (line[0] >= 'a' && line[0] <= 'z')) && (assignment != std::string::npos) && (line.size() > assignment); } bool Configurations::Parser::parseLine(std::string* line, std::string* currConfigStr, std::string* currLevelStr, Level* currLevel, Configurations* conf) { ConfigurationType currConfig = ConfigurationType::Unknown; std::string currValue = std::string(); *line = base::utils::Str::trim(*line); if (isComment(*line)) return true; ignoreComments(line); *line = base::utils::Str::trim(*line); if (line->empty()) { // Comment ignored return true; } if (isLevel(*line)) { if (line->size() <= 2) { return true; } *currLevelStr = line->substr(1, line->size() - 2); *currLevelStr = base::utils::Str::toUpper(*currLevelStr); *currLevelStr = base::utils::Str::trim(*currLevelStr); *currLevel = LevelHelper::convertFromString(currLevelStr->c_str()); return true; } if (isConfig(*line)) { std::size_t assignment = line->find('='); *currConfigStr = line->substr(0, assignment); *currConfigStr = base::utils::Str::toUpper(*currConfigStr); *currConfigStr = base::utils::Str::trim(*currConfigStr); currConfig = ConfigurationTypeHelper::convertFromString(currConfigStr->c_str()); currValue = line->substr(assignment + 1); currValue = base::utils::Str::trim(currValue); std::size_t quotesStart = currValue.find("\"", 0); std::size_t quotesEnd = std::string::npos; if (quotesStart != std::string::npos) { quotesEnd = currValue.find("\"", quotesStart + 1); while (quotesEnd != std::string::npos && currValue.at(quotesEnd - 1) == '\\') { currValue = currValue.erase(quotesEnd - 1, 1); quotesEnd = 
currValue.find("\"", quotesEnd + 2); } } if (quotesStart != std::string::npos && quotesEnd != std::string::npos) { // Quote provided - check and strip if valid ELPP_ASSERT((quotesStart < quotesEnd), "Configuration error - No ending quote found in [" << currConfigStr << "]"); ELPP_ASSERT((quotesStart + 1 != quotesEnd), "Empty configuration value for [" << currConfigStr << "]"); if ((quotesStart != quotesEnd) && (quotesStart + 1 != quotesEnd)) { // Explicit check in case if assertion is disabled currValue = currValue.substr(quotesStart + 1, quotesEnd - 1); } } } ELPP_ASSERT(*currLevel != Level::Unknown, "Unrecognized severity level [" << *currLevelStr << "]"); ELPP_ASSERT(currConfig != ConfigurationType::Unknown, "Unrecognized configuration [" << *currConfigStr << "]"); if (*currLevel == Level::Unknown || currConfig == ConfigurationType::Unknown) { return false; // unrecognizable level or config } conf->set(*currLevel, currConfig, currValue); return true; } void Configurations::unsafeSetIfNotExist(Level level, ConfigurationType configurationType, const std::string& value) { Configuration* conf = RegistryWithPred<Configuration, Configuration::Predicate>::get(level, configurationType); if (conf == nullptr) { unsafeSet(level, configurationType, value); } } void Configurations::unsafeSet(Level level, ConfigurationType configurationType, const std::string& value) { Configuration* conf = RegistryWithPred<Configuration, Configuration::Predicate>::get(level, configurationType); if (conf == nullptr) { registerNew(new Configuration(level, configurationType, value)); } else { conf->setValue(value); } if (level == Level::Global) { unsafeSetGlobally(configurationType, value, false); } } void Configurations::setGlobally(ConfigurationType configurationType, const std::string& value, bool includeGlobalLevel) { if (includeGlobalLevel) { set(Level::Global, configurationType, value); } base::type::EnumType lIndex = LevelHelper::kMinValid; LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { set(LevelHelper::castFromInt(lIndex), configurationType, value); return false; // Do not break lambda function yet as we need to set all levels regardless }); } void Configurations::unsafeSetGlobally(ConfigurationType configurationType, const std::string& value, bool includeGlobalLevel) { if (includeGlobalLevel) { unsafeSet(Level::Global, configurationType, value); } base::type::EnumType lIndex = LevelHelper::kMinValid; LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { unsafeSet(LevelHelper::castFromInt(lIndex), configurationType, value); return false; // Do not break lambda function yet as we need to set all levels regardless }); } // LogBuilder void LogBuilder::convertToColoredOutput(base::type::string_t* logLine, Level level) { if (!m_termSupportsColor) return; const base::type::char_t* resetColor = ELPP_LITERAL("\x1b[0m"); if (level == Level::Error || level == Level::Fatal) *logLine = ELPP_LITERAL("\x1b[31m") + *logLine + resetColor; else if (level == Level::Warning) *logLine = ELPP_LITERAL("\x1b[33m") + *logLine + resetColor; else if (level == Level::Debug) *logLine = ELPP_LITERAL("\x1b[32m") + *logLine + resetColor; else if (level == Level::Info) *logLine = ELPP_LITERAL("\x1b[36m") + *logLine + resetColor; else if (level == Level::Trace) *logLine = ELPP_LITERAL("\x1b[35m") + *logLine + resetColor; } // Logger Logger::Logger(const std::string& id, base::LogStreamsReferenceMap* logStreamsReference) : m_id(id), m_typedConfigurations(nullptr), m_parentApplicationName(std::string()), m_isConfigured(false), 
m_logStreamsReference(logStreamsReference) { initUnflushedCount(); } Logger::Logger(const std::string& id, const Configurations& configurations, base::LogStreamsReferenceMap* logStreamsReference) : m_id(id), m_typedConfigurations(nullptr), m_parentApplicationName(std::string()), m_isConfigured(false), m_logStreamsReference(logStreamsReference) { initUnflushedCount(); configure(configurations); } Logger::Logger(const Logger& logger) { base::utils::safeDelete(m_typedConfigurations); m_id = logger.m_id; m_typedConfigurations = logger.m_typedConfigurations; m_parentApplicationName = logger.m_parentApplicationName; m_isConfigured = logger.m_isConfigured; m_configurations = logger.m_configurations; m_unflushedCount = logger.m_unflushedCount; m_logStreamsReference = logger.m_logStreamsReference; } Logger& Logger::operator=(const Logger& logger) { if (&logger != this) { base::utils::safeDelete(m_typedConfigurations); m_id = logger.m_id; m_typedConfigurations = logger.m_typedConfigurations; m_parentApplicationName = logger.m_parentApplicationName; m_isConfigured = logger.m_isConfigured; m_configurations = logger.m_configurations; m_unflushedCount = logger.m_unflushedCount; m_logStreamsReference = logger.m_logStreamsReference; } return *this; } void Logger::configure(const Configurations& configurations) { m_isConfigured = false; // we set it to false in case if we fail initUnflushedCount(); if (m_typedConfigurations != nullptr) { Configurations* c = const_cast<Configurations*>(m_typedConfigurations->configurations()); if (c->hasConfiguration(Level::Global, ConfigurationType::Filename)) { // This check is definitely needed for cases like ELPP_NO_DEFAULT_LOG_FILE flush(); } } base::threading::ScopedLock scopedLock(lock()); if (m_configurations != configurations) { m_configurations.setFromBase(const_cast<Configurations*>(&configurations)); } base::utils::safeDelete(m_typedConfigurations); m_typedConfigurations = new base::TypedConfigurations(&m_configurations, m_logStreamsReference); resolveLoggerFormatSpec(); m_isConfigured = true; } void Logger::reconfigure(void) { ELPP_INTERNAL_INFO(1, "Reconfiguring logger [" << m_id << "]"); configure(m_configurations); } bool Logger::isValidId(const std::string& id) { for (std::string::const_iterator it = id.begin(); it != id.end(); ++it) { if (!base::utils::Str::contains(base::consts::kValidLoggerIdSymbols, *it)) { return false; } } return true; } void Logger::flush(void) { ELPP_INTERNAL_INFO(3, "Flushing logger [" << m_id << "] all levels"); base::threading::ScopedLock scopedLock(lock()); base::type::EnumType lIndex = LevelHelper::kMinValid; LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { flush(LevelHelper::castFromInt(lIndex), nullptr); return false; }); } void Logger::flush(Level level, base::type::fstream_t* fs) { if (fs == nullptr && m_typedConfigurations->toFile(level)) { fs = m_typedConfigurations->fileStream(level); } if (fs != nullptr) { fs->flush(); std::map<Level, unsigned int>::iterator iter = m_unflushedCount.find(level); if (iter != m_unflushedCount.end()) { iter->second = 0; } } } void Logger::initUnflushedCount(void) { m_unflushedCount.clear(); base::type::EnumType lIndex = LevelHelper::kMinValid; LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { m_unflushedCount.insert(std::make_pair(LevelHelper::castFromInt(lIndex), 0)); return false; }); } void Logger::resolveLoggerFormatSpec(void) const { base::type::EnumType lIndex = LevelHelper::kMinValid; LevelHelper::forEachLevel(&lIndex, [&](void) -> bool { base::LogFormat* logFormat = 
const_cast<base::LogFormat*>(&m_typedConfigurations->logFormat(LevelHelper::castFromInt(lIndex))); base::utils::Str::replaceFirstWithEscape(logFormat->m_format, base::consts::kLoggerIdFormatSpecifier, m_id); return false; }); } // el::base namespace base { // el::base::utils namespace utils { // File base::type::fstream_t* File::newFileStream(const std::string& filename) { base::type::fstream_t *fs = new base::type::fstream_t(filename.c_str(), base::type::fstream_t::out #if !defined(ELPP_FRESH_LOG_FILE) | base::type::fstream_t::app #endif ); #if defined(ELPP_UNICODE) std::locale elppUnicodeLocale(""); # if ELPP_OS_WINDOWS std::locale elppUnicodeLocaleWindows(elppUnicodeLocale, new std::codecvt_utf8_utf16<wchar_t>); elppUnicodeLocale = elppUnicodeLocaleWindows; # endif // ELPP_OS_WINDOWS fs->imbue(elppUnicodeLocale); #endif // defined(ELPP_UNICODE) if (fs->is_open()) { fs->flush(); } else { base::utils::safeDelete(fs); ELPP_INTERNAL_ERROR("Bad file [" << filename << "]", true); } return fs; } std::size_t File::getSizeOfFile(base::type::fstream_t* fs) { if (fs == nullptr) { return 0; } std::streampos currPos = fs->tellg(); fs->seekg(0, fs->end); std::size_t size = static_cast<std::size_t>(fs->tellg()); fs->seekg(currPos); return size; } bool File::pathExists(const char* path, bool considerFile) { if (path == nullptr) { return false; } #if ELPP_OS_UNIX ELPP_UNUSED(considerFile); struct stat st; return (stat(path, &st) == 0); #elif ELPP_OS_WINDOWS DWORD fileType = GetFileAttributesA(path); if (fileType == INVALID_FILE_ATTRIBUTES) { return false; } return considerFile ? true : ((fileType & FILE_ATTRIBUTE_DIRECTORY) == 0 ? false : true); #endif // ELPP_OS_UNIX } bool File::createPath(const std::string& path) { if (path.empty()) { return false; } if (base::utils::File::pathExists(path.c_str())) { return true; } int status = -1; char* currPath = const_cast<char*>(path.c_str()); std::string builtPath = std::string(); #if ELPP_OS_UNIX if (path[0] == '/') { builtPath = "/"; } currPath = STRTOK(currPath, base::consts::kFilePathSeperator, 0); #elif ELPP_OS_WINDOWS // Use secure functions API char* nextTok_ = nullptr; currPath = STRTOK(currPath, base::consts::kFilePathSeperator, &nextTok_); ELPP_UNUSED(nextTok_); #endif // ELPP_OS_UNIX while (currPath != nullptr) { builtPath.append(currPath); builtPath.append(base::consts::kFilePathSeperator); #if ELPP_OS_UNIX status = mkdir(builtPath.c_str(), ELPP_LOG_PERMS); currPath = STRTOK(nullptr, base::consts::kFilePathSeperator, 0); #elif ELPP_OS_WINDOWS status = _mkdir(builtPath.c_str()); currPath = STRTOK(nullptr, base::consts::kFilePathSeperator, &nextTok_); #endif // ELPP_OS_UNIX } if (status == -1) { ELPP_INTERNAL_ERROR("Error while creating path [" << path << "]", true); return false; } return true; } std::string File::extractPathFromFilename(const std::string& fullPath, const char* separator) { if ((fullPath == "") || (fullPath.find(separator) == std::string::npos)) { return fullPath; } std::size_t lastSlashAt = fullPath.find_last_of(separator); if (lastSlashAt == 0) { return std::string(separator); } return fullPath.substr(0, lastSlashAt + 1); } void File::buildStrippedFilename(const char* filename, char buff[], std::size_t limit) { std::size_t sizeOfFilename = strlen(filename); if (sizeOfFilename >= limit) { filename += (sizeOfFilename - limit); if (filename[0] != '.' && filename[1] != '.') { // prepend if not already filename += 3; // 3 = '..' 
void File::buildStrippedFilename(const char* filename, char buff[], std::size_t limit) {
  std::size_t sizeOfFilename = strlen(filename);
  if (sizeOfFilename >= limit) {
    filename += (sizeOfFilename - limit);
    if (filename[0] != '.' && filename[1] != '.') {  // prepend if not already
      filename += 3;  // 3 = '..'
      STRCAT(buff, "..", limit);
    }
  }
  STRCAT(buff, filename, limit);
}

void File::buildBaseFilename(const std::string& fullPath, char buff[], std::size_t limit, const char* separator) {
  const char *filename = fullPath.c_str();
  std::size_t lastSlashAt = fullPath.find_last_of(separator);
  filename += lastSlashAt ? lastSlashAt + 1 : 0;
  std::size_t sizeOfFilename = strlen(filename);
  if (sizeOfFilename >= limit) {
    filename += (sizeOfFilename - limit);
    if (filename[0] != '.' && filename[1] != '.') {  // prepend if not already
      filename += 3;  // 3 = '..'
      STRCAT(buff, "..", limit);
    }
  }
  STRCAT(buff, filename, limit);
}

// Str

bool Str::wildCardMatch(const char* str, const char* pattern) {
  while (*pattern) {
    switch (*pattern) {
    case '?':
      if (!*str)
        return false;
      ++str;
      ++pattern;
      break;
    case '*':
      if (wildCardMatch(str, pattern + 1))
        return true;
      if (*str && wildCardMatch(str + 1, pattern))
        return true;
      return false;
    default:
      if (*str++ != *pattern++)
        return false;
      break;
    }
  }
  return !*str && !*pattern;
}

std::string& Str::ltrim(std::string& str) {
  // Cast to unsigned char before calling std::isspace - passing a negative
  // char value would be undefined behavior.
  str.erase(str.begin(), std::find_if(str.begin(), str.end(), [](char c) {
    return !std::isspace(static_cast<unsigned char>(c));
  }));
  return str;
}

std::string& Str::rtrim(std::string& str) {
  str.erase(std::find_if(str.rbegin(), str.rend(), [](char c) {
    return !std::isspace(static_cast<unsigned char>(c));
  }).base(), str.end());
  return str;
}

std::string& Str::trim(std::string& str) {
  return ltrim(rtrim(str));
}

bool Str::startsWith(const std::string& str, const std::string& start) {
  return (str.length() >= start.length()) && (str.compare(0, start.length(), start) == 0);
}

bool Str::endsWith(const std::string& str, const std::string& end) {
  return (str.length() >= end.length()) && (str.compare(str.length() - end.length(), end.length(), end) == 0);
}

std::string& Str::replaceAll(std::string& str, char replaceWhat, char replaceWith) {
  std::replace(str.begin(), str.end(), replaceWhat, replaceWith);
  return str;
}

std::string& Str::replaceAll(std::string& str, const std::string& replaceWhat, const std::string& replaceWith) {
  if (replaceWhat == replaceWith)
    return str;
  std::size_t foundAt = std::string::npos;
  while ((foundAt = str.find(replaceWhat, foundAt + 1)) != std::string::npos) {
    str.replace(foundAt, replaceWhat.length(), replaceWith);
  }
  return str;
}
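// A few illustrative cases for the recursive matcher above (the inputs are
// examples, not part of the library):
//   wildCardMatch("main.cpp", "*.cpp")  -> true
//   wildCardMatch("main.cpp", "ma?n.*") -> true
//   wildCardMatch("main.cpp", "*.h")    -> false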
void Str::replaceFirstWithEscape(base::type::string_t& str, const base::type::string_t& replaceWhat,
                                 const base::type::string_t& replaceWith) {
  std::size_t foundAt = base::type::string_t::npos;
  while ((foundAt = str.find(replaceWhat, foundAt + 1)) != base::type::string_t::npos) {
    if (foundAt > 0 && str[foundAt - 1] == base::consts::kFormatSpecifierChar) {
      str.erase(foundAt > 0 ? foundAt - 1 : 0, 1);
      ++foundAt;
    } else {
      str.replace(foundAt, replaceWhat.length(), replaceWith);
      return;
    }
  }
}

#if defined(ELPP_UNICODE)
void Str::replaceFirstWithEscape(base::type::string_t& str, const base::type::string_t& replaceWhat,
                                 const std::string& replaceWith) {
  replaceFirstWithEscape(str, replaceWhat, base::type::string_t(replaceWith.begin(), replaceWith.end()));
}
#endif  // defined(ELPP_UNICODE)

std::string& Str::toUpper(std::string& str) {
  std::transform(str.begin(), str.end(), str.begin(), [](char c) {
    return static_cast<char>(::toupper(c));
  });
  return str;
}

bool Str::cStringEq(const char* s1, const char* s2) {
  if (s1 == nullptr && s2 == nullptr) return true;
  if (s1 == nullptr || s2 == nullptr) return false;
  return strcmp(s1, s2) == 0;
}

bool Str::cStringCaseEq(const char* s1, const char* s2) {
  if (s1 == nullptr && s2 == nullptr) return true;
  if (s1 == nullptr || s2 == nullptr) return false;
  // With thanks to cygwin for this code
  int d = 0;
  while (true) {
    const int c1 = toupper(*s1++);
    const int c2 = toupper(*s2++);
    if (((d = c1 - c2) != 0) || (c2 == '\0')) {
      break;
    }
  }
  return d == 0;
}

bool Str::contains(const char* str, char c) {
  for (; *str; ++str) {
    if (*str == c)
      return true;
  }
  return false;
}

char* Str::convertAndAddToBuff(std::size_t n, int len, char* buf, const char* bufLim, bool zeroPadded) {
  char localBuff[10] = "";
  char* p = localBuff + sizeof(localBuff) - 2;
  if (n > 0) {
    for (; n > 0 && p > localBuff && len > 0; n /= 10, --len)
      *--p = static_cast<char>(n % 10 + '0');
  } else {
    *--p = '0';
    --len;
  }
  if (zeroPadded)
    while (p > localBuff && len-- > 0) *--p = static_cast<char>('0');
  return addToBuff(p, buf, bufLim);
}

char* Str::addToBuff(const char* str, char* buf, const char* bufLim) {
  while ((buf < bufLim) && ((*buf = *str++) != '\0'))
    ++buf;
  return buf;
}

char* Str::clearBuff(char buff[], std::size_t lim) {
  STRCPY(buff, "", lim);
  ELPP_UNUSED(lim);  // For *nix we don't have anything using lim in the STRCPY macro above
  return buff;
}

/// @brief Converts wchar* to char*
/// NOTE: Need to free return value after use!
char* Str::wcharPtrToCharPtr(const wchar_t* line) {
  std::size_t len_ = wcslen(line) + 1;
  char* buff_ = static_cast<char*>(malloc(len_ + 1));
#  if ELPP_OS_UNIX || (ELPP_OS_WINDOWS && !ELPP_CRT_DBG_WARNINGS)
  std::wcstombs(buff_, line, len_);
#  elif ELPP_OS_WINDOWS
  std::size_t convCount_ = 0;
  mbstate_t mbState_;
  ::memset(static_cast<void*>(&mbState_), 0, sizeof(mbState_));
  wcsrtombs_s(&convCount_, buff_, len_, &line, len_, &mbState_);
#  endif  // ELPP_OS_UNIX || (ELPP_OS_WINDOWS && !ELPP_CRT_DBG_WARNINGS)
  return buff_;
}

// OS

#if ELPP_OS_WINDOWS
/// @brief Gets environment variables for Windows based OS.
///        We are not using <code>getenv(const char*)</code> because of CRT deprecation
/// @param varname Variable name to get environment variable value for
/// @return If variable exist the value of it otherwise nullptr
const char* OS::getWindowsEnvironmentVariable(const char* varname) {
  const DWORD bufferLen = 50;
  static char buffer[bufferLen];
  if (GetEnvironmentVariableA(varname, buffer, bufferLen)) {
    return buffer;
  }
  return nullptr;
}
#endif  // ELPP_OS_WINDOWS
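// Escape semantics of replaceFirstWithEscape, shown on example inputs (the
// strings are illustrative only): a specifier preceded by the format-specifier
// character is treated as escaped and rendered literally instead of replaced.
//   "%level %msg"  -> the first "%level" is substituted with the level value
//   "%%level %msg" -> "%level" is kept literally; only the escape char is removed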
#if ELPP_OS_ANDROID
std::string OS::getProperty(const char* prop) {
  char propVal[PROP_VALUE_MAX + 1];
  int ret = __system_property_get(prop, propVal);
  return ret == 0 ? std::string() : std::string(propVal);
}

std::string OS::getDeviceName(void) {
  std::stringstream ss;
  std::string manufacturer = getProperty("ro.product.manufacturer");
  std::string model = getProperty("ro.product.model");
  if (manufacturer.empty() || model.empty()) {
    return std::string();
  }
  ss << manufacturer << "-" << model;
  return ss.str();
}
#endif  // ELPP_OS_ANDROID

const std::string OS::getBashOutput(const char* command) {
#if (ELPP_OS_UNIX && !ELPP_OS_ANDROID && !ELPP_CYGWIN)
  if (command == nullptr) {
    return std::string();
  }
  FILE* proc = nullptr;
  if ((proc = popen(command, "r")) == nullptr) {
    ELPP_INTERNAL_ERROR("\nUnable to run command [" << command << "]", true);
    return std::string();
  }
  char hBuff[4096];
  if (fgets(hBuff, sizeof(hBuff), proc) != nullptr) {
    pclose(proc);
    if (hBuff[strlen(hBuff) - 1] == '\n') {
      hBuff[strlen(hBuff) - 1] = '\0';
    }
    return std::string(hBuff);
  } else {
    pclose(proc);
  }
  return std::string();
#else
  ELPP_UNUSED(command);
  return std::string();
#endif  // (ELPP_OS_UNIX && !ELPP_OS_ANDROID && !ELPP_CYGWIN)
}

std::string OS::getEnvironmentVariable(const char* variableName, const char* defaultVal,
                                       const char* alternativeBashCommand) {
#if ELPP_OS_UNIX
  const char* val = getenv(variableName);
#elif ELPP_OS_WINDOWS
  const char* val = getWindowsEnvironmentVariable(variableName);
#endif  // ELPP_OS_UNIX
  if ((val == nullptr) || ((strcmp(val, "") == 0))) {
#if ELPP_OS_UNIX && defined(ELPP_FORCE_ENV_VAR_FROM_BASH)
    // Try harder on unix-based systems
    std::string valBash = base::utils::OS::getBashOutput(alternativeBashCommand);
    if (valBash.empty()) {
      return std::string(defaultVal);
    } else {
      return valBash;
    }
#elif ELPP_OS_WINDOWS || ELPP_OS_UNIX
    ELPP_UNUSED(alternativeBashCommand);
    return std::string(defaultVal);
#endif  // ELPP_OS_UNIX && defined(ELPP_FORCE_ENV_VAR_FROM_BASH)
  }
  return std::string(val);
}

std::string OS::currentUser(void) {
#if ELPP_OS_UNIX && !ELPP_OS_ANDROID
  return getEnvironmentVariable("USER", base::consts::kUnknownUser, "whoami");
#elif ELPP_OS_WINDOWS
  return getEnvironmentVariable("USERNAME", base::consts::kUnknownUser);
#elif ELPP_OS_ANDROID
  ELPP_UNUSED(base::consts::kUnknownUser);
  return std::string("android");
#else
  return std::string();
#endif  // ELPP_OS_UNIX && !ELPP_OS_ANDROID
}

std::string OS::currentHost(void) {
#if ELPP_OS_UNIX && !ELPP_OS_ANDROID
  return getEnvironmentVariable("HOSTNAME", base::consts::kUnknownHost, "hostname");
#elif ELPP_OS_WINDOWS
  return getEnvironmentVariable("COMPUTERNAME", base::consts::kUnknownHost);
#elif ELPP_OS_ANDROID
  ELPP_UNUSED(base::consts::kUnknownHost);
  return getDeviceName();
#else
  return std::string();
#endif  // ELPP_OS_UNIX && !ELPP_OS_ANDROID
}

bool OS::termSupportsColor(void) {
  std::string term = getEnvironmentVariable("TERM", "");
  return term == "xterm" || term == "xterm-color" || term == "xterm-256color"
         || term == "screen" || term == "linux" || term == "cygwin"
         || term == "screen-256color";
}
// DateTime

void DateTime::gettimeofday(struct timeval* tv) {
#if ELPP_OS_WINDOWS
  if (tv != nullptr) {
#  if ELPP_COMPILER_MSVC || defined(_MSC_EXTENSIONS)
    const unsigned __int64 delta_ = 11644473600000000Ui64;
#  else
    const unsigned __int64 delta_ = 11644473600000000ULL;
#  endif  // ELPP_COMPILER_MSVC || defined(_MSC_EXTENSIONS)
    const double secOffSet = 0.000001;
    const unsigned long usecOffSet = 1000000;
    FILETIME fileTime;
    GetSystemTimeAsFileTime(&fileTime);
    unsigned __int64 present = 0;
    present |= fileTime.dwHighDateTime;
    present = present << 32;
    present |= fileTime.dwLowDateTime;
    present /= 10;  // 100-ns intervals to microseconds
    // Subtract the difference between the FILETIME epoch (1601) and the Unix epoch (1970)
    present -= delta_;
    tv->tv_sec = static_cast<long>(present * secOffSet);
    tv->tv_usec = static_cast<long>(present % usecOffSet);
  }
#else
  ::gettimeofday(tv, nullptr);
#endif  // ELPP_OS_WINDOWS
}

std::string DateTime::getDateTime(const char* format, const base::SubsecondPrecision* ssPrec) {
  struct timeval currTime;
  gettimeofday(&currTime);
  return timevalToString(currTime, format, ssPrec);
}

std::string DateTime::timevalToString(struct timeval tval, const char* format,
                                      const el::base::SubsecondPrecision* ssPrec) {
  struct ::tm timeInfo;
  buildTimeInfo(&tval, &timeInfo);
  const int kBuffSize = 30;
  char buff_[kBuffSize] = "";
  parseFormat(buff_, kBuffSize, format, &timeInfo,
              static_cast<std::size_t>(tval.tv_usec / ssPrec->m_offset), ssPrec);
  return std::string(buff_);
}

base::type::string_t DateTime::formatTime(unsigned long long time, base::TimestampUnit timestampUnit) {
  base::type::EnumType start = static_cast<base::type::EnumType>(timestampUnit);
  const base::type::char_t* unit = base::consts::kTimeFormats[start].unit;
  for (base::type::EnumType i = start; i < base::consts::kTimeFormatsCount - 1; ++i) {
    if (time <= base::consts::kTimeFormats[i].value) {
      break;
    }
    if (base::consts::kTimeFormats[i].value == 1000.0f && time / 1000.0f < 1.9f) {
      break;
    }
    time /= static_cast<decltype(time)>(base::consts::kTimeFormats[i].value);
    unit = base::consts::kTimeFormats[i + 1].unit;
  }
  base::type::stringstream_t ss;
  ss << time << " " << unit;
  return ss.str();
}

unsigned long long DateTime::getTimeDifference(const struct timeval& endTime, const struct timeval& startTime,
    base::TimestampUnit timestampUnit) {
  if (timestampUnit == base::TimestampUnit::Microsecond) {
    return static_cast<unsigned long long>(static_cast<unsigned long long>(1000000 * endTime.tv_sec + endTime.tv_usec) -
                                           static_cast<unsigned long long>(1000000 * startTime.tv_sec + startTime.tv_usec));
  }
  // milliseconds
  auto conv = [](const struct timeval& tim) {
    return static_cast<unsigned long long>((tim.tv_sec * 1000) + (tim.tv_usec / 1000));
  };
  return static_cast<unsigned long long>(conv(endTime) - conv(startTime));
}

struct ::tm* DateTime::buildTimeInfo(struct timeval* currTime, struct ::tm* timeInfo) {
#if ELPP_OS_UNIX
  time_t rawTime = currTime->tv_sec;
  ::elpptime_r(&rawTime, timeInfo);
  return timeInfo;
#else
#  if ELPP_COMPILER_MSVC
  ELPP_UNUSED(currTime);
  time_t t;
#    if defined(_USE_32BIT_TIME_T)
  _time32(&t);
#    else
  _time64(&t);
#    endif
  elpptime_s(timeInfo, &t);
  return timeInfo;
#  else
  // For any other compilers that don't have the CRT warnings issue, e.g. MinGW or TDM GCC, we use a different method
  time_t rawTime = currTime->tv_sec;
  struct tm* tmInf = elpptime(&rawTime);
  *timeInfo = *tmInf;
  return timeInfo;
#  endif  // ELPP_COMPILER_MSVC
#endif  // ELPP_OS_UNIX
}
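// Example of the specifiers handled by parseFormat() below (output values are
// illustrative): with 3-digit subsecond precision,
//   getDateTime("%Y-%M-%d %H:%m:%s,%g", &ssPrec)
// produces something like "2017-09-05 13:47:09,123" (%M = month, %m = minute,
// %s = second, %g/%z = subsecond part, %F = AM/PM).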
char* DateTime::parseFormat(char* buf, std::size_t bufSz, const char* format, const struct tm* tInfo,
                            std::size_t msec, const base::SubsecondPrecision* ssPrec) {
  const char* bufLim = buf + bufSz;
  for (; *format; ++format) {
    if (*format == base::consts::kFormatSpecifierChar) {
      switch (*++format) {
      case base::consts::kFormatSpecifierChar:  // Escape
        break;
      case '\0':  // End
        --format;
        break;
      case 'd':  // Day
        buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_mday, 2, buf, bufLim);
        continue;
      case 'a':  // Day of week (short)
        buf = base::utils::Str::addToBuff(base::consts::kDaysAbbrev[tInfo->tm_wday], buf, bufLim);
        continue;
      case 'A':  // Day of week (long)
        buf = base::utils::Str::addToBuff(base::consts::kDays[tInfo->tm_wday], buf, bufLim);
        continue;
      case 'M':  // month
        buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_mon + 1, 2, buf, bufLim);
        continue;
      case 'b':  // month (short)
        buf = base::utils::Str::addToBuff(base::consts::kMonthsAbbrev[tInfo->tm_mon], buf, bufLim);
        continue;
      case 'B':  // month (long)
        buf = base::utils::Str::addToBuff(base::consts::kMonths[tInfo->tm_mon], buf, bufLim);
        continue;
      case 'y':  // year (two digits)
        buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_year + base::consts::kYearBase, 2, buf, bufLim);
        continue;
      case 'Y':  // year (four digits)
        buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_year + base::consts::kYearBase, 4, buf, bufLim);
        continue;
      case 'h':  // hour (12-hour)
        buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_hour % 12, 2, buf, bufLim);
        continue;
      case 'H':  // hour (24-hour)
        buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_hour, 2, buf, bufLim);
        continue;
      case 'm':  // minute
        buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_min, 2, buf, bufLim);
        continue;
      case 's':  // second
        buf = base::utils::Str::convertAndAddToBuff(tInfo->tm_sec, 2, buf, bufLim);
        continue;
      case 'z':  // subsecond part
      case 'g':
        buf = base::utils::Str::convertAndAddToBuff(msec, ssPrec->m_width, buf, bufLim);
        continue;
      case 'F':  // AM/PM
        buf = base::utils::Str::addToBuff((tInfo->tm_hour >= 12) ? base::consts::kPm : base::consts::kAm, buf, bufLim);
        continue;
      default:
        continue;
      }
    }
    if (buf == bufLim) break;
    *buf++ = *format;
  }
  return buf;
}

// CommandLineArgs

void CommandLineArgs::setArgs(int argc, char** argv) {
  m_params.clear();
  m_paramsWithValue.clear();
  if (argc == 0 || argv == nullptr) {
    return;
  }
  m_argc = argc;
  m_argv = argv;
  for (int i = 1; i < m_argc; ++i) {
    const char* v = (strstr(m_argv[i], "="));
    if (v != nullptr && strlen(v) > 0) {
      std::string key = std::string(m_argv[i]);
      key = key.substr(0, key.find_first_of('='));
      if (hasParamWithValue(key.c_str())) {
        ELPP_INTERNAL_INFO(1, "Skipping [" << key << "] arg since it already has value ["
                           << getParamValue(key.c_str()) << "]");
      } else {
        m_paramsWithValue.insert(std::make_pair(key, std::string(v + 1)));
      }
    }
    if (v == nullptr) {
      if (hasParam(m_argv[i])) {
        ELPP_INTERNAL_INFO(1, "Skipping [" << m_argv[i] << "] arg since it already exists");
      } else {
        m_params.push_back(std::string(m_argv[i]));
      }
    }
  }
}

bool CommandLineArgs::hasParamWithValue(const char* paramKey) const {
  return m_paramsWithValue.find(std::string(paramKey)) != m_paramsWithValue.end();
}
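// How setArgs() splits a command line, on example arguments (illustrative):
//   "--v=9"     -> stored in m_paramsWithValue as key "--v" with value "9"
//   "--verbose" -> stored in m_params as a plain flag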
const char* CommandLineArgs::getParamValue(const char* paramKey) const {
  std::map<std::string, std::string>::const_iterator iter = m_paramsWithValue.find(std::string(paramKey));
  return iter != m_paramsWithValue.end() ? iter->second.c_str() : "";
}

bool CommandLineArgs::hasParam(const char* paramKey) const {
  return std::find(m_params.begin(), m_params.end(), std::string(paramKey)) != m_params.end();
}

bool CommandLineArgs::empty(void) const {
  return m_params.empty() && m_paramsWithValue.empty();
}

std::size_t CommandLineArgs::size(void) const {
  return m_params.size() + m_paramsWithValue.size();
}

base::type::ostream_t& operator<<(base::type::ostream_t& os, const CommandLineArgs& c) {
  for (int i = 1; i < c.m_argc; ++i) {
    os << ELPP_LITERAL("[") << c.m_argv[i] << ELPP_LITERAL("]");
    if (i < c.m_argc - 1) {
      os << ELPP_LITERAL(" ");
    }
  }
  return os;
}

}  // namespace utils

// el::base::threading
namespace threading {
#if ELPP_THREADING_ENABLED
#  if ELPP_USE_STD_THREADING
#    if ELPP_ASYNC_LOGGING
static void msleep(int ms) {
  // Only when async logging is enabled - this is because async is strict on compiler
#      if defined(ELPP_NO_SLEEP_FOR)
  usleep(ms * 1000);
#      else
  std::this_thread::sleep_for(std::chrono::milliseconds(ms));
#      endif  // defined(ELPP_NO_SLEEP_FOR)
}
#    endif  // ELPP_ASYNC_LOGGING
#  endif  // ELPP_USE_STD_THREADING
#endif  // ELPP_THREADING_ENABLED
}  // namespace threading

// el::base

// SubsecondPrecision

void SubsecondPrecision::init(int width) {
  if (width < 1 || width > 6) {
    width = base::consts::kDefaultSubsecondPrecision;
  }
  m_width = width;
  switch (m_width) {
  case 3:
    m_offset = 1000;
    break;
  case 4:
    m_offset = 100;
    break;
  case 5:
    m_offset = 10;
    break;
  case 6:
    m_offset = 1;
    break;
  default:
    m_offset = 1000;
    break;
  }
}

// LogFormat

LogFormat::LogFormat(void) :
  m_level(Level::Unknown),
  m_userFormat(base::type::string_t()),
  m_format(base::type::string_t()),
  m_dateTimeFormat(std::string()),
  m_flags(0x0),
  m_currentUser(base::utils::OS::currentUser()),
  m_currentHost(base::utils::OS::currentHost()) {
}

LogFormat::LogFormat(Level level, const base::type::string_t& format)
  : m_level(level), m_userFormat(format), m_currentUser(base::utils::OS::currentUser()),
    m_currentHost(base::utils::OS::currentHost()) {
  parseFromFormat(m_userFormat);
}

LogFormat::LogFormat(const LogFormat& logFormat):
  m_level(logFormat.m_level),
  m_userFormat(logFormat.m_userFormat),
  m_format(logFormat.m_format),
  m_dateTimeFormat(logFormat.m_dateTimeFormat),
  m_flags(logFormat.m_flags),
  m_currentUser(logFormat.m_currentUser),
  m_currentHost(logFormat.m_currentHost) {
}

LogFormat::LogFormat(LogFormat&& logFormat) {
  m_level = std::move(logFormat.m_level);
  m_userFormat = std::move(logFormat.m_userFormat);
  m_format = std::move(logFormat.m_format);
  m_dateTimeFormat = std::move(logFormat.m_dateTimeFormat);
  m_flags = std::move(logFormat.m_flags);
  m_currentUser = std::move(logFormat.m_currentUser);
  m_currentHost = std::move(logFormat.m_currentHost);
}

LogFormat& LogFormat::operator=(const LogFormat& logFormat) {
  if (&logFormat != this) {
    m_level = logFormat.m_level;
    m_userFormat = logFormat.m_userFormat;
    m_format = logFormat.m_format;  // the resolved format must be copied along with the user format
    m_dateTimeFormat = logFormat.m_dateTimeFormat;
    m_flags = logFormat.m_flags;
    m_currentUser = logFormat.m_currentUser;
    m_currentHost = logFormat.m_currentHost;
  }
  return *this;
}

bool LogFormat::operator==(const LogFormat& other) {
  return m_level == other.m_level && m_userFormat == other.m_userFormat && m_format == other.m_format &&
         m_dateTimeFormat == other.m_dateTimeFormat && m_flags == other.m_flags;
}
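// Note on SubsecondPrecision::init() above (values illustrative): tv_usec is
// divided by m_offset before formatting, so width 3 keeps milliseconds
// (123456us / 1000 -> 123) while width 6 keeps full microseconds.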
/// @brief Updates format to be used while logging.
/// @param userFormat User provided format
void LogFormat::parseFromFormat(const base::type::string_t& userFormat) {
  // We make a copy because we will be changing the format,
  // i.e., removing the user-provided date format from the original format
  // and then storing it.
  base::type::string_t formatCopy = userFormat;
  m_flags = 0x0;
  auto conditionalAddFlag = [&](const base::type::char_t* specifier, base::FormatFlags flag) {
    std::size_t foundAt = base::type::string_t::npos;
    while ((foundAt = formatCopy.find(specifier, foundAt + 1)) != base::type::string_t::npos) {
      if (foundAt > 0 && formatCopy[foundAt - 1] == base::consts::kFormatSpecifierChar) {
        if (hasFlag(flag)) {
          // If we already have the flag we remove the escape chars so that '%%' is turned to '%'
          // even after specifier resolution - this is because we only replaceFirst specifier
          formatCopy.erase(foundAt > 0 ? foundAt - 1 : 0, 1);
          ++foundAt;
        }
      } else {
        if (!hasFlag(flag)) addFlag(flag);
      }
    }
  };
  conditionalAddFlag(base::consts::kAppNameFormatSpecifier, base::FormatFlags::AppName);
  conditionalAddFlag(base::consts::kSeverityLevelFormatSpecifier, base::FormatFlags::Level);
  conditionalAddFlag(base::consts::kSeverityLevelShortFormatSpecifier, base::FormatFlags::LevelShort);
  conditionalAddFlag(base::consts::kLoggerIdFormatSpecifier, base::FormatFlags::LoggerId);
  conditionalAddFlag(base::consts::kThreadIdFormatSpecifier, base::FormatFlags::ThreadId);
  conditionalAddFlag(base::consts::kLogFileFormatSpecifier, base::FormatFlags::File);
  conditionalAddFlag(base::consts::kLogFileBaseFormatSpecifier, base::FormatFlags::FileBase);
  conditionalAddFlag(base::consts::kLogLineFormatSpecifier, base::FormatFlags::Line);
  conditionalAddFlag(base::consts::kLogLocationFormatSpecifier, base::FormatFlags::Location);
  conditionalAddFlag(base::consts::kLogFunctionFormatSpecifier, base::FormatFlags::Function);
  conditionalAddFlag(base::consts::kCurrentUserFormatSpecifier, base::FormatFlags::User);
  conditionalAddFlag(base::consts::kCurrentHostFormatSpecifier, base::FormatFlags::Host);
  conditionalAddFlag(base::consts::kMessageFormatSpecifier, base::FormatFlags::LogMessage);
  conditionalAddFlag(base::consts::kVerboseLevelFormatSpecifier, base::FormatFlags::VerboseLevel);
  // For date/time we need to extract the user's date format first
  std::size_t dateIndex = std::string::npos;
  if ((dateIndex = formatCopy.find(base::consts::kDateTimeFormatSpecifier)) != std::string::npos) {
    while (dateIndex > 0 && formatCopy[dateIndex - 1] == base::consts::kFormatSpecifierChar) {
      dateIndex = formatCopy.find(base::consts::kDateTimeFormatSpecifier, dateIndex + 1);
    }
    if (dateIndex != std::string::npos) {
      addFlag(base::FormatFlags::DateTime);
      updateDateFormat(dateIndex, formatCopy);
    }
  }
  m_format = formatCopy;
  updateFormatSpec();
}

void LogFormat::updateDateFormat(std::size_t index, base::type::string_t& currFormat) {
  if (hasFlag(base::FormatFlags::DateTime)) {
    index += ELPP_STRLEN(base::consts::kDateTimeFormatSpecifier);
  }
  const base::type::char_t* ptr = currFormat.c_str() + index;
  if ((currFormat.size() > index) && (ptr[0] == '{')) {
    // User has provided format for date/time
    ++ptr;
    int count = 1;  // Start by 1 in order to remove starting brace
    std::stringstream ss;
    for (; *ptr; ++ptr, ++count) {
      if (*ptr == '}') {
        ++count;  // In order to remove ending brace
        break;
      }
      ss << static_cast<char>(*ptr);
    }
    currFormat.erase(index, count);
    m_dateTimeFormat = ss.str();
  } else {
    // No format provided, use default
    if (hasFlag(base::FormatFlags::DateTime)) {
      m_dateTimeFormat = std::string(base::consts::kDefaultDateTimeFormat);
    }
  }
}
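// Example of what parseFromFormat()/updateDateFormat() extract, on an
// illustrative format string:
//   "%datetime{%H:%m:%s} %level %msg"
// sets m_dateTimeFormat to "%H:%m:%s", strips the "{...}" block from the
// stored format, and raises the DateTime, Level and LogMessage flags.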
void LogFormat::updateFormatSpec(void) {
  // Do not use switch over strongly typed enums because Intel C++ compilers don't support them yet.
  if (m_level == Level::Debug) {
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier,
        base::consts::kDebugLevelLogValue);
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier,
        base::consts::kDebugLevelShortLogValue);
  } else if (m_level == Level::Info) {
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier,
        base::consts::kInfoLevelLogValue);
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier,
        base::consts::kInfoLevelShortLogValue);
  } else if (m_level == Level::Warning) {
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier,
        base::consts::kWarningLevelLogValue);
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier,
        base::consts::kWarningLevelShortLogValue);
  } else if (m_level == Level::Error) {
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier,
        base::consts::kErrorLevelLogValue);
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier,
        base::consts::kErrorLevelShortLogValue);
  } else if (m_level == Level::Fatal) {
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier,
        base::consts::kFatalLevelLogValue);
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier,
        base::consts::kFatalLevelShortLogValue);
  } else if (m_level == Level::Verbose) {
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier,
        base::consts::kVerboseLevelLogValue);
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier,
        base::consts::kVerboseLevelShortLogValue);
  } else if (m_level == Level::Trace) {
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelFormatSpecifier,
        base::consts::kTraceLevelLogValue);
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kSeverityLevelShortFormatSpecifier,
        base::consts::kTraceLevelShortLogValue);
  }
  if (hasFlag(base::FormatFlags::User)) {
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kCurrentUserFormatSpecifier,
        m_currentUser);
  }
  if (hasFlag(base::FormatFlags::Host)) {
    base::utils::Str::replaceFirstWithEscape(m_format, base::consts::kCurrentHostFormatSpecifier,
        m_currentHost);
  }
  // Ignore Level::Global and Level::Unknown
}

// TypedConfigurations

TypedConfigurations::TypedConfigurations(Configurations* configurations,
    base::LogStreamsReferenceMap* logStreamsReference) {
  m_configurations = configurations;
  m_logStreamsReference = logStreamsReference;
  build(m_configurations);
}

TypedConfigurations::TypedConfigurations(const TypedConfigurations& other) {
  this->m_configurations = other.m_configurations;
  this->m_logStreamsReference = other.m_logStreamsReference;
  build(m_configurations);
}

bool TypedConfigurations::enabled(Level level) {
  return getConfigByVal<bool>(level, &m_enabledMap, "enabled");
}

bool TypedConfigurations::toFile(Level level) {
  return getConfigByVal<bool>(level, &m_toFileMap, "toFile");
}

const std::string& TypedConfigurations::filename(Level level) {
  return getConfigByRef<std::string>(level, &m_filenameMap, "filename");
}
bool TypedConfigurations::toStandardOutput(Level level) {
  return getConfigByVal<bool>(level, &m_toStandardOutputMap, "toStandardOutput");
}

const base::LogFormat& TypedConfigurations::logFormat(Level level) {
  return getConfigByRef<base::LogFormat>(level, &m_logFormatMap, "logFormat");
}

const base::SubsecondPrecision& TypedConfigurations::subsecondPrecision(Level level) {
  return getConfigByRef<base::SubsecondPrecision>(level, &m_subsecondPrecisionMap, "subsecondPrecision");
}

const base::MillisecondsWidth& TypedConfigurations::millisecondsWidth(Level level) {
  return getConfigByRef<base::MillisecondsWidth>(level, &m_subsecondPrecisionMap, "millisecondsWidth");
}

bool TypedConfigurations::performanceTracking(Level level) {
  return getConfigByVal<bool>(level, &m_performanceTrackingMap, "performanceTracking");
}

base::type::fstream_t* TypedConfigurations::fileStream(Level level) {
  return getConfigByRef<base::FileStreamPtr>(level, &m_fileStreamMap, "fileStream").get();
}

std::size_t TypedConfigurations::maxLogFileSize(Level level) {
  return getConfigByVal<std::size_t>(level, &m_maxLogFileSizeMap, "maxLogFileSize");
}

std::size_t TypedConfigurations::logFlushThreshold(Level level) {
  return getConfigByVal<std::size_t>(level, &m_logFlushThresholdMap, "logFlushThreshold");
}
void TypedConfigurations::build(Configurations* configurations) {
  base::threading::ScopedLock scopedLock(lock());
  auto getBool = [] (std::string boolStr) -> bool {  // Pass by value for trimming
    base::utils::Str::trim(boolStr);
    return (boolStr == "TRUE" || boolStr == "true" || boolStr == "1");
  };
  std::vector<Configuration*> withFileSizeLimit;
  for (Configurations::const_iterator it = configurations->begin(); it != configurations->end(); ++it) {
    Configuration* conf = *it;
    // We cannot use switch on strong enums because Intel C++ compilers don't support them yet
    if (conf->configurationType() == ConfigurationType::Enabled) {
      setValue(conf->level(), getBool(conf->value()), &m_enabledMap);
    } else if (conf->configurationType() == ConfigurationType::ToFile) {
      setValue(conf->level(), getBool(conf->value()), &m_toFileMap);
    } else if (conf->configurationType() == ConfigurationType::ToStandardOutput) {
      setValue(conf->level(), getBool(conf->value()), &m_toStandardOutputMap);
    } else if (conf->configurationType() == ConfigurationType::Filename) {
      // We do not yet configure the filename, but we will configure it in another
      // loop. This is because if the file cannot be created, we will force ToFile
      // to be false. Because configuring a logger is not necessarily a
      // performance-sensitive operation, we can live with another loop (by the
      // way, this loop is not very heavy either)
    } else if (conf->configurationType() == ConfigurationType::Format) {
      setValue(conf->level(), base::LogFormat(conf->level(),
                                              base::type::string_t(conf->value().begin(), conf->value().end())), &m_logFormatMap);
    } else if (conf->configurationType() == ConfigurationType::SubsecondPrecision) {
      setValue(Level::Global,
               base::SubsecondPrecision(static_cast<int>(getULong(conf->value()))), &m_subsecondPrecisionMap);
    } else if (conf->configurationType() == ConfigurationType::PerformanceTracking) {
      setValue(Level::Global, getBool(conf->value()), &m_performanceTrackingMap);
    } else if (conf->configurationType() == ConfigurationType::MaxLogFileSize) {
      setValue(conf->level(), static_cast<std::size_t>(getULong(conf->value())), &m_maxLogFileSizeMap);
#if !defined(ELPP_NO_DEFAULT_LOG_FILE)
      withFileSizeLimit.push_back(conf);
#endif  // !defined(ELPP_NO_DEFAULT_LOG_FILE)
    } else if (conf->configurationType() == ConfigurationType::LogFlushThreshold) {
      setValue(conf->level(), static_cast<std::size_t>(getULong(conf->value())), &m_logFlushThresholdMap);
    }
  }
  // As mentioned earlier, we will now set the filename configuration in a separate loop to deal with non-existent files
  for (Configurations::const_iterator it = configurations->begin(); it != configurations->end(); ++it) {
    Configuration* conf = *it;
    if (conf->configurationType() == ConfigurationType::Filename) {
      insertFile(conf->level(), conf->value());
    }
  }
  for (std::vector<Configuration*>::iterator conf = withFileSizeLimit.begin();
       conf != withFileSizeLimit.end(); ++conf) {
    // This is not unsafe as the mutex is locked in the current scope
    unsafeValidateFileRolling((*conf)->level(), base::defaultPreRollOutCallback);
  }
}

unsigned long TypedConfigurations::getULong(std::string confVal) {
  bool valid = true;
  base::utils::Str::trim(confVal);
  valid = !confVal.empty() && std::find_if(confVal.begin(), confVal.end(),
  [](char c) {
    return !base::utils::Str::isDigit(c);
  }) == confVal.end();
  if (!valid) {
    ELPP_ASSERT(valid, "Configuration value not a valid integer [" << confVal << "]");
    return 0;
  }
  return atol(confVal.c_str());
}
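// For reference, a configuration block that exercises the branches above could
// look like this (the values are illustrative):
//   * GLOBAL:
//      ENABLED             = true
//      TO_FILE             = true
//      MAX_LOG_FILE_SIZE   = 2097152
//      LOG_FLUSH_THRESHOLD = 100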
std::string TypedConfigurations::resolveFilename(const std::string& filename) {
  std::string resultingFilename = filename;
  std::size_t dateIndex = std::string::npos;
  std::string dateTimeFormatSpecifierStr = std::string(base::consts::kDateTimeFormatSpecifierForFilename);
  if ((dateIndex = resultingFilename.find(dateTimeFormatSpecifierStr.c_str())) != std::string::npos) {
    while (dateIndex > 0 && resultingFilename[dateIndex - 1] == base::consts::kFormatSpecifierChar) {
      dateIndex = resultingFilename.find(dateTimeFormatSpecifierStr.c_str(), dateIndex + 1);
    }
    if (dateIndex != std::string::npos) {
      const char* ptr = resultingFilename.c_str() + dateIndex;
      // Go to end of specifier
      ptr += dateTimeFormatSpecifierStr.size();
      std::string fmt;
      if ((resultingFilename.size() > dateIndex) && (ptr[0] == '{')) {
        // User has provided format for date/time
        ++ptr;
        int count = 1;  // Start by 1 in order to remove starting brace
        std::stringstream ss;
        for (; *ptr; ++ptr, ++count) {
          if (*ptr == '}') {
            ++count;  // In order to remove ending brace
            break;
          }
          ss << *ptr;
        }
        resultingFilename.erase(dateIndex + dateTimeFormatSpecifierStr.size(), count);
        fmt = ss.str();
      } else {
        fmt = std::string(base::consts::kDefaultDateTimeFormatInFilename);
      }
      base::SubsecondPrecision ssPrec(3);
      std::string now = base::utils::DateTime::getDateTime(fmt.c_str(), &ssPrec);
      base::utils::Str::replaceAll(now, '/', '-');  // Replace path element since we are dealing with a filename
      base::utils::Str::replaceAll(resultingFilename, dateTimeFormatSpecifierStr, now);
    }
  }
  return resultingFilename;
}
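// Example of the filename resolution performed above (the date value is
// illustrative, assuming the "%datetime" filename specifier):
//   "logs/app-%datetime{%Y%M%d}.log" -> "logs/app-20170905.log"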
void TypedConfigurations::insertFile(Level level, const std::string& fullFilename) {
#if defined(ELPP_NO_LOG_TO_FILE)
  setValue(level, false, &m_toFileMap);
  ELPP_UNUSED(fullFilename);
  m_fileStreamMap.insert(std::make_pair(level, base::FileStreamPtr(nullptr)));
  return;
#endif
  std::string resolvedFilename = resolveFilename(fullFilename);
  if (resolvedFilename.empty()) {
    std::cerr << "Could not load empty file for logging, please re-check your configurations for level ["
              << LevelHelper::convertToString(level) << "]";
  }
  std::string filePath = base::utils::File::extractPathFromFilename(resolvedFilename, base::consts::kFilePathSeperator);
  if (filePath.size() < resolvedFilename.size()) {
    base::utils::File::createPath(filePath);
  }
  auto create = [&](Level level) {
    base::LogStreamsReferenceMap::iterator filestreamIter = m_logStreamsReference->find(resolvedFilename);
    base::type::fstream_t* fs = nullptr;
    if (filestreamIter == m_logStreamsReference->end()) {
      // We need a completely new stream, nothing to share with
      fs = base::utils::File::newFileStream(resolvedFilename);
      m_filenameMap.insert(std::make_pair(level, resolvedFilename));
      m_fileStreamMap.insert(std::make_pair(level, base::FileStreamPtr(fs)));
      m_logStreamsReference->insert(std::make_pair(resolvedFilename, base::FileStreamPtr(m_fileStreamMap.at(level))));
    } else {
      // Whoops! We have an existing one, share it!
      m_filenameMap.insert(std::make_pair(level, filestreamIter->first));
      m_fileStreamMap.insert(std::make_pair(level, base::FileStreamPtr(filestreamIter->second)));
      fs = filestreamIter->second.get();
    }
    if (fs == nullptr) {
      // We display the bad file error from newFileStream()
      ELPP_INTERNAL_ERROR("Setting [TO_FILE] of ["
                          << LevelHelper::convertToString(level) << "] to FALSE", false);
      setValue(level, false, &m_toFileMap);
    }
  };
  // If we don't have a file conf for any level, create it for Level::Global first,
  // otherwise create it for the specified level
  create(m_filenameMap.empty() && m_fileStreamMap.empty() ? Level::Global : level);
}

bool TypedConfigurations::unsafeValidateFileRolling(Level level, const PreRollOutCallback& preRollOutCallback) {
  base::type::fstream_t* fs = unsafeGetConfigByRef(level, &m_fileStreamMap, "fileStream").get();
  if (fs == nullptr) {
    return true;
  }
  std::size_t maxLogFileSize = unsafeGetConfigByVal(level, &m_maxLogFileSizeMap, "maxLogFileSize");
  std::size_t currFileSize = base::utils::File::getSizeOfFile(fs);
  if (maxLogFileSize != 0 && currFileSize >= maxLogFileSize) {
    std::string fname = unsafeGetConfigByRef(level, &m_filenameMap, "filename");
    ELPP_INTERNAL_INFO(1, "Truncating log file [" << fname << "] as a result of configurations for level ["
                       << LevelHelper::convertToString(level) << "]");
    fs->close();
    preRollOutCallback(fname.c_str(), currFileSize);
    fs->open(fname, std::fstream::out | std::fstream::trunc);
    return true;
  }
  return false;
}

// RegisteredHitCounters

bool RegisteredHitCounters::validateEveryN(const char* filename, base::type::LineNumber lineNumber, std::size_t n) {
  base::threading::ScopedLock scopedLock(lock());
  base::HitCounter* counter = get(filename, lineNumber);
  if (counter == nullptr) {
    registerNew(counter = new base::HitCounter(filename, lineNumber));
  }
  counter->validateHitCounts(n);
  bool result = (n >= 1 && counter->hitCounts() != 0 && counter->hitCounts() % n == 0);
  return result;
}

/// @brief Validates counter for hits >= N, i.e., registers new if it does not exist, otherwise updates the original one
/// @return True if validation resulted in a hit. Meaning logs should be written every time true is returned
bool RegisteredHitCounters::validateAfterN(const char* filename, base::type::LineNumber lineNumber, std::size_t n) {
  base::threading::ScopedLock scopedLock(lock());
  base::HitCounter* counter = get(filename, lineNumber);
  if (counter == nullptr) {
    registerNew(counter = new base::HitCounter(filename, lineNumber));
  }
  // Do not use validateHitCounts here since we do not want to reset the counter here
  // Note the >= instead of > because we are incrementing
  // after this check
  if (counter->hitCounts() >= n)
    return true;
  counter->increment();
  return false;
}

/// @brief Validates counter for hits <= n, i.e., registers new if it does not exist, otherwise updates the original one
/// @return True if validation resulted in a hit. Meaning logs should be written every time true is returned
bool RegisteredHitCounters::validateNTimes(const char* filename, base::type::LineNumber lineNumber, std::size_t n) {
  base::threading::ScopedLock scopedLock(lock());
  base::HitCounter* counter = get(filename, lineNumber);
  if (counter == nullptr) {
    registerNew(counter = new base::HitCounter(filename, lineNumber));
  }
  counter->increment();
  // Do not use validateHitCounts here since we do not want to reset the counter here
  if (counter->hitCounts() <= n)
    return true;
  return false;
}
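// These validators back the library's occasional-logging macros; a quick usage
// sketch (the counts are illustrative):
//   LOG_EVERY_N(10, INFO) << "every 10th hit";    // -> validateEveryN
//   LOG_AFTER_N(2, INFO)  << "from 3rd hit on";   // -> validateAfterN
//   LOG_N_TIMES(3, INFO)  << "first 3 hits only"; // -> validateNTimes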
// RegisteredLoggers

RegisteredLoggers::RegisteredLoggers(const LogBuilderPtr& defaultLogBuilder) :
  m_defaultLogBuilder(defaultLogBuilder) {
  m_defaultConfigurations.setToDefault();
}

Logger* RegisteredLoggers::get(const std::string& id, bool forceCreation) {
  base::threading::ScopedLock scopedLock(lock());
  Logger* logger_ = base::utils::Registry<Logger, std::string>::get(id);
  if (logger_ == nullptr && forceCreation) {
    bool validId = Logger::isValidId(id);
    if (!validId) {
      ELPP_ASSERT(validId, "Invalid logger ID [" << id << "]. Not registering this logger.");
      return nullptr;
    }
    logger_ = new Logger(id, m_defaultConfigurations, &m_logStreamsReference);
    logger_->m_logBuilder = m_defaultLogBuilder;
    registerNew(id, logger_);
    LoggerRegistrationCallback* callback = nullptr;
    for (const std::pair<std::string, base::type::LoggerRegistrationCallbackPtr>& h
         : m_loggerRegistrationCallbacks) {
      callback = h.second.get();
      if (callback != nullptr && callback->enabled()) {
        callback->handle(logger_);
      }
    }
  }
  return logger_;
}

bool RegisteredLoggers::remove(const std::string& id) {
  if (id == base::consts::kDefaultLoggerId) {
    return false;
  }
  Logger* logger = base::utils::Registry<Logger, std::string>::get(id);
  if (logger != nullptr) {
    unregister(logger);
  }
  return true;
}

void RegisteredLoggers::unsafeFlushAll(void) {
  ELPP_INTERNAL_INFO(1, "Flushing all log files");
  for (base::LogStreamsReferenceMap::iterator it = m_logStreamsReference.begin();
       it != m_logStreamsReference.end(); ++it) {
    if (it->second.get() == nullptr) continue;
    it->second->flush();
  }
}

// VRegistry

VRegistry::VRegistry(base::type::VerboseLevel level, base::type::EnumType* pFlags) : m_level(level),
  m_pFlags(pFlags) {
}

/// @brief Sets verbose level. Accepted range is 0-9
void VRegistry::setLevel(base::type::VerboseLevel level) {
  base::threading::ScopedLock scopedLock(lock());
  if (level > 9)
    m_level = base::consts::kMaxVerboseLevel;
  else
    m_level = level;
}

void VRegistry::setModules(const char* modules) {
  base::threading::ScopedLock scopedLock(lock());
  auto addSuffix = [](std::stringstream& ss, const char* sfx, const char* prev) {
    if (prev != nullptr && base::utils::Str::endsWith(ss.str(), std::string(prev))) {
      std::string chr(ss.str().substr(0, ss.str().size() - strlen(prev)));
      ss.str(std::string(""));
      ss << chr;
    }
    if (base::utils::Str::endsWith(ss.str(), std::string(sfx))) {
      std::string chr(ss.str().substr(0, ss.str().size() - strlen(sfx)));
      ss.str(std::string(""));
      ss << chr;
    }
    ss << sfx;
  };
  auto insert = [&](std::stringstream& ss, base::type::VerboseLevel level) {
    if (!base::utils::hasFlag(LoggingFlag::DisableVModulesExtensions, *m_pFlags)) {
      addSuffix(ss, ".h", nullptr);
      m_modules.insert(std::make_pair(ss.str(), level));
      addSuffix(ss, ".c", ".h");
      m_modules.insert(std::make_pair(ss.str(), level));
      addSuffix(ss, ".cpp", ".c");
      m_modules.insert(std::make_pair(ss.str(), level));
      addSuffix(ss, ".cc", ".cpp");
      m_modules.insert(std::make_pair(ss.str(), level));
      addSuffix(ss, ".cxx", ".cc");
      m_modules.insert(std::make_pair(ss.str(), level));
      addSuffix(ss, ".-inl.h", ".cxx");
      m_modules.insert(std::make_pair(ss.str(), level));
      addSuffix(ss, ".hxx", ".-inl.h");
      m_modules.insert(std::make_pair(ss.str(), level));
      addSuffix(ss, ".hpp", ".hxx");
      m_modules.insert(std::make_pair(ss.str(), level));
      addSuffix(ss, ".hh", ".hpp");
    }
    m_modules.insert(std::make_pair(ss.str(), level));
  };
  bool isMod = true;
  bool isLevel = false;
  std::stringstream ss;
  int level = -1;
  for (; *modules; ++modules) {
    switch (*modules) {
    case '=':
      isLevel = true;
      isMod = false;
      break;
    case ',':
      isLevel = false;
      isMod = true;
      if (!ss.str().empty() && level != -1) {
        insert(ss, static_cast<base::type::VerboseLevel>(level));
        ss.str(std::string(""));
        level = -1;
      }
      break;
    default:
      if (isMod) {
        ss << *modules;
      } else if (isLevel) {
        if (isdigit(*modules)) {
          level = static_cast<base::type::VerboseLevel>(*modules) - 48;
        }
      }
      break;
    }
  }
  if (!ss.str().empty() && level != -1) {
    insert(ss, static_cast<base::type::VerboseLevel>(level));
  }
}
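// Example of the -vmodule syntax parsed above (module names illustrative):
//   -vmodule=parser*=3,scanner*=4
// enables VLOG(3) for files matching "parser*" and VLOG(4) for "scanner*";
// unless DisableVModulesExtensions is set, common source extensions
// (.h, .c, .cpp, ...) are registered for each module pattern automatically.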
bool VRegistry::allowed(base::type::VerboseLevel vlevel, const char* file) {
  base::threading::ScopedLock scopedLock(lock());
  if (m_modules.empty() || file == nullptr) {
    return vlevel <= m_level;
  } else {
    char baseFilename[base::consts::kSourceFilenameMaxLength] = "";
    base::utils::File::buildBaseFilename(file, baseFilename);
    std::map<std::string, base::type::VerboseLevel>::iterator it = m_modules.begin();
    for (; it != m_modules.end(); ++it) {
      if (base::utils::Str::wildCardMatch(baseFilename, it->first.c_str())) {
        return vlevel <= it->second;
      }
    }
    if (base::utils::hasFlag(LoggingFlag::AllowVerboseIfModuleNotSpecified, *m_pFlags)) {
      return true;
    }
    return false;
  }
}

void VRegistry::setFromArgs(const base::utils::CommandLineArgs* commandLineArgs) {
  if (commandLineArgs->hasParam("-v") || commandLineArgs->hasParam("--verbose") ||
      commandLineArgs->hasParam("-V") || commandLineArgs->hasParam("--VERBOSE")) {
    setLevel(base::consts::kMaxVerboseLevel);
  } else if (commandLineArgs->hasParamWithValue("--v")) {
    setLevel(static_cast<base::type::VerboseLevel>(atoi(commandLineArgs->getParamValue("--v"))));
  } else if (commandLineArgs->hasParamWithValue("--V")) {
    setLevel(static_cast<base::type::VerboseLevel>(atoi(commandLineArgs->getParamValue("--V"))));
  } else if ((commandLineArgs->hasParamWithValue("-vmodule")) && vModulesEnabled()) {
    setModules(commandLineArgs->getParamValue("-vmodule"));
  } else if (commandLineArgs->hasParamWithValue("-VMODULE") && vModulesEnabled()) {
    setModules(commandLineArgs->getParamValue("-VMODULE"));
  }
}

#if !defined(ELPP_DEFAULT_LOGGING_FLAGS)
#  define ELPP_DEFAULT_LOGGING_FLAGS 0x0
#endif  // !defined(ELPP_DEFAULT_LOGGING_FLAGS)

// Storage

#if ELPP_ASYNC_LOGGING
Storage::Storage(const LogBuilderPtr& defaultLogBuilder, base::IWorker* asyncDispatchWorker) :
#else
Storage::Storage(const LogBuilderPtr& defaultLogBuilder) :
#endif  // ELPP_ASYNC_LOGGING
  m_registeredHitCounters(new base::RegisteredHitCounters()),
  m_registeredLoggers(new base::RegisteredLoggers(defaultLogBuilder)),
  m_flags(ELPP_DEFAULT_LOGGING_FLAGS),
  m_vRegistry(new base::VRegistry(0, &m_flags)),
#if ELPP_ASYNC_LOGGING
  m_asyncLogQueue(new base::AsyncLogQueue()),
  m_asyncDispatchWorker(asyncDispatchWorker),
#endif  // ELPP_ASYNC_LOGGING
  m_preRollOutCallback(base::defaultPreRollOutCallback) {
  // Register default logger
  m_registeredLoggers->get(std::string(base::consts::kDefaultLoggerId));
  // We register the default logger anyway (worst case it's not going to register) just in case
  m_registeredLoggers->get("default");
  // Register performance logger and reconfigure format
  Logger* performanceLogger = m_registeredLoggers->get(std::string(base::consts::kPerformanceLoggerId));
  m_registeredLoggers->get("performance");
  performanceLogger->configurations()->setGlobally(ConfigurationType::Format, std::string("%datetime %level %msg"));
  performanceLogger->reconfigure();
#if defined(ELPP_SYSLOG)
  // Register syslog logger and reconfigure format
  Logger* sysLogLogger = m_registeredLoggers->get(std::string(base::consts::kSysLogLoggerId));
  sysLogLogger->configurations()->setGlobally(ConfigurationType::Format, std::string("%level: %msg"));
  sysLogLogger->reconfigure();
#endif  // defined(ELPP_SYSLOG)
  addFlag(LoggingFlag::AllowVerboseIfModuleNotSpecified);
#if ELPP_ASYNC_LOGGING
  installLogDispatchCallback<base::AsyncLogDispatchCallback>(std::string("AsyncLogDispatchCallback"));
#else
  installLogDispatchCallback<base::DefaultLogDispatchCallback>(std::string("DefaultLogDispatchCallback"));
#endif  // ELPP_ASYNC_LOGGING
#if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
  installPerformanceTrackingCallback<base::DefaultPerformanceTrackingCallback>
  (std::string("DefaultPerformanceTrackingCallback"));
#endif  // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING)
  ELPP_INTERNAL_INFO(1, "Easylogging++ has been initialized");
#if ELPP_ASYNC_LOGGING
  m_asyncDispatchWorker->start();
#endif  // ELPP_ASYNC_LOGGING
}

Storage::~Storage(void) {
  ELPP_INTERNAL_INFO(4, "Destroying storage");
#if ELPP_ASYNC_LOGGING
  ELPP_INTERNAL_INFO(5, "Replacing log dispatch callback to synchronous");
  uninstallLogDispatchCallback<base::AsyncLogDispatchCallback>(std::string("AsyncLogDispatchCallback"));
  installLogDispatchCallback<base::DefaultLogDispatchCallback>(std::string("DefaultLogDispatchCallback"));
  ELPP_INTERNAL_INFO(5, "Destroying asyncDispatchWorker");
  base::utils::safeDelete(m_asyncDispatchWorker);
  ELPP_INTERNAL_INFO(5, "Destroying asyncLogQueue");
  base::utils::safeDelete(m_asyncLogQueue);
#endif  // ELPP_ASYNC_LOGGING
  ELPP_INTERNAL_INFO(5, "Destroying registeredHitCounters");
  base::utils::safeDelete(m_registeredHitCounters);
  ELPP_INTERNAL_INFO(5, "Destroying registeredLoggers");
  base::utils::safeDelete(m_registeredLoggers);
  ELPP_INTERNAL_INFO(5, "Destroying vRegistry");
  base::utils::safeDelete(m_vRegistry);
}

bool Storage::hasCustomFormatSpecifier(const char* formatSpecifier) {
  base::threading::ScopedLock scopedLock(lock());
  return std::find(m_customFormatSpecifiers.begin(), m_customFormatSpecifiers.end(),
                   formatSpecifier) != m_customFormatSpecifiers.end();
}

void Storage::installCustomFormatSpecifier(const CustomFormatSpecifier& customFormatSpecifier) {
  if (hasCustomFormatSpecifier(customFormatSpecifier.formatSpecifier())) {
    return;
  }
  base::threading::ScopedLock scopedLock(lock());
  m_customFormatSpecifiers.push_back(customFormatSpecifier);
}

bool Storage::uninstallCustomFormatSpecifier(const char* formatSpecifier) {
  base::threading::ScopedLock scopedLock(lock());
  std::vector<CustomFormatSpecifier>::iterator it = std::find(m_customFormatSpecifiers.begin(),
      m_customFormatSpecifiers.end(), formatSpecifier);
  if (it != m_customFormatSpecifiers.end() && strcmp(formatSpecifier, it->formatSpecifier()) == 0) {
    m_customFormatSpecifiers.erase(it);
    return true;
  }
  return false;
}

void Storage::setApplicationArguments(int argc, char** argv) {
  m_commandLineArgs.setArgs(argc, argv);
  m_vRegistry->setFromArgs(commandLineArgs());
  // default log file
#if !defined(ELPP_DISABLE_LOG_FILE_FROM_ARG)
  if (m_commandLineArgs.hasParamWithValue(base::consts::kDefaultLogFileParam)) {
    Configurations c;
    c.setGlobally(ConfigurationType::Filename,
                  std::string(m_commandLineArgs.getParamValue(base::consts::kDefaultLogFileParam)));
    registeredLoggers()->setDefaultConfigurations(c);
    for (base::RegisteredLoggers::iterator it = registeredLoggers()->begin();
         it != registeredLoggers()->end(); ++it) {
      it->second->configure(c);
    }
  }
#endif  // !defined(ELPP_DISABLE_LOG_FILE_FROM_ARG)
#if defined(ELPP_LOGGING_FLAGS_FROM_ARG)
  if (m_commandLineArgs.hasParamWithValue(base::consts::kLoggingFlagsParam)) {
    int userInput = atoi(m_commandLineArgs.getParamValue(base::consts::kLoggingFlagsParam));
    if (ELPP_DEFAULT_LOGGING_FLAGS == 0x0) {
      m_flags = userInput;
    } else {
      base::utils::addFlag<base::type::EnumType>(userInput, &m_flags);
    }
  }
#endif  // defined(ELPP_LOGGING_FLAGS_FROM_ARG)
}
// DefaultLogDispatchCallback

void DefaultLogDispatchCallback::handle(const LogDispatchData* data) {
  m_data = data;
  dispatch(m_data->logMessage()->logger()->logBuilder()->build(m_data->logMessage(),
           m_data->dispatchAction() == base::DispatchAction::NormalLog));
}

void DefaultLogDispatchCallback::dispatch(base::type::string_t&& logLine) {
  if (m_data->dispatchAction() == base::DispatchAction::NormalLog) {
    if (m_data->logMessage()->logger()->m_typedConfigurations->toFile(m_data->logMessage()->level())) {
      base::type::fstream_t* fs = m_data->logMessage()->logger()->m_typedConfigurations->fileStream(
                                    m_data->logMessage()->level());
      if (fs != nullptr) {
        fs->write(logLine.c_str(), logLine.size());
        if (fs->fail()) {
          ELPP_INTERNAL_ERROR("Unable to write log to file ["
                              << m_data->logMessage()->logger()->m_typedConfigurations->filename(m_data->logMessage()->level()) << "].\n"
                              << "A few possible reasons (could be something else):\n"
                              << " * Permission denied\n"
                              << " * Disk full\n"
                              << " * Disk is not writable", true);
        } else {
          if (ELPP->hasFlag(LoggingFlag::ImmediateFlush)
              || (m_data->logMessage()->logger()->isFlushNeeded(m_data->logMessage()->level()))) {
            m_data->logMessage()->logger()->flush(m_data->logMessage()->level(), fs);
          }
        }
      } else {
        ELPP_INTERNAL_ERROR("Log file for [" << LevelHelper::convertToString(m_data->logMessage()->level()) << "] "
                            << "has not been configured but [TO_FILE] is configured to TRUE. [Logger ID: "
                            << m_data->logMessage()->logger()->id() << "]", false);
      }
    }
    if (m_data->logMessage()->logger()->m_typedConfigurations->toStandardOutput(m_data->logMessage()->level())) {
      if (ELPP->hasFlag(LoggingFlag::ColoredTerminalOutput))
        m_data->logMessage()->logger()->logBuilder()->convertToColoredOutput(&logLine, m_data->logMessage()->level());
      ELPP_COUT << ELPP_COUT_LINE(logLine);
    }
  }
#if defined(ELPP_SYSLOG)
  else if (m_data->dispatchAction() == base::DispatchAction::SysLog) {
    // Determine syslog priority
    int sysLogPriority = 0;
    if (m_data->logMessage()->level() == Level::Fatal)
      sysLogPriority = LOG_EMERG;
    else if (m_data->logMessage()->level() == Level::Error)
      sysLogPriority = LOG_ERR;
    else if (m_data->logMessage()->level() == Level::Warning)
      sysLogPriority = LOG_WARNING;
    else if (m_data->logMessage()->level() == Level::Info)
      sysLogPriority = LOG_INFO;
    else if (m_data->logMessage()->level() == Level::Debug)
      sysLogPriority = LOG_DEBUG;
    else
      sysLogPriority = LOG_NOTICE;
#  if defined(ELPP_UNICODE)
    char* line = base::utils::Str::wcharPtrToCharPtr(logLine.c_str());
    syslog(sysLogPriority, "%s", line);
    free(line);
#  else
    syslog(sysLogPriority, "%s", logLine.c_str());
#  endif
  }
#endif  // defined(ELPP_SYSLOG)
}

#if ELPP_ASYNC_LOGGING

// AsyncLogDispatchCallback

void AsyncLogDispatchCallback::handle(const LogDispatchData* data) {
  base::type::string_t logLine = data->logMessage()->logger()->logBuilder()->build(data->logMessage(),
                                 data->dispatchAction() == base::DispatchAction::NormalLog);
  if (data->dispatchAction() == base::DispatchAction::NormalLog
      && data->logMessage()->logger()->typedConfigurations()->toStandardOutput(data->logMessage()->level())) {
    if (ELPP->hasFlag(LoggingFlag::ColoredTerminalOutput))
      data->logMessage()->logger()->logBuilder()->convertToColoredOutput(&logLine, data->logMessage()->level());
    ELPP_COUT << ELPP_COUT_LINE(logLine);
  }
  // Save resources and only queue if we want to write to file, otherwise just ignore the handler
  if (data->logMessage()->logger()->typedConfigurations()->toFile(data->logMessage()->level())) {
    ELPP->asyncLogQueue()->push(AsyncLogItem(*(data->logMessage()), *data, logLine));
  }
}
worker - Cleaning log queue"); clean(); ELPP_INTERNAL_INFO(6, "Log queue cleaned"); } bool AsyncDispatchWorker::clean(void) { std::mutex m; std::unique_lock<std::mutex> lk(m); cv.wait(lk, [] { return !ELPP->asyncLogQueue()->empty(); }); emptyQueue(); lk.unlock(); cv.notify_one(); return ELPP->asyncLogQueue()->empty(); } void AsyncDispatchWorker::emptyQueue(void) { while (!ELPP->asyncLogQueue()->empty()) { AsyncLogItem data = ELPP->asyncLogQueue()->next(); handle(&data); base::threading::msleep(100); } } void AsyncDispatchWorker::start(void) { base::threading::msleep(5000); // 5s (why?) setContinueRunning(true); std::thread t1(&AsyncDispatchWorker::run, this); t1.join(); } void AsyncDispatchWorker::handle(AsyncLogItem* logItem) { LogDispatchData* data = logItem->data(); LogMessage* logMessage = logItem->logMessage(); Logger* logger = logMessage->logger(); base::TypedConfigurations* conf = logger->typedConfigurations(); base::type::string_t logLine = logItem->logLine(); if (data->dispatchAction() == base::DispatchAction::NormalLog) { if (conf->toFile(logMessage->level())) { base::type::fstream_t* fs = conf->fileStream(logMessage->level()); if (fs != nullptr) { fs->write(logLine.c_str(), logLine.size()); if (fs->fail()) { ELPP_INTERNAL_ERROR("Unable to write log to file [" << conf->filename(logMessage->level()) << "].\n" << "Few possible reasons (could be something else):\n" << " * Permission denied\n" << " * Disk full\n" << " * Disk is not writable", true); } else { if (ELPP->hasFlag(LoggingFlag::ImmediateFlush) || (logger->isFlushNeeded(logMessage->level()))) { logger->flush(logMessage->level(), fs); } } } else { ELPP_INTERNAL_ERROR("Log file for [" << LevelHelper::convertToString(logMessage->level()) << "] " << "has not been configured but [TO_FILE] is configured to TRUE. 
[Logger ID: " << logger->id() << "]", false); } } } # if defined(ELPP_SYSLOG) else if (data->dispatchAction() == base::DispatchAction::SysLog) { // Determine syslog priority int sysLogPriority = 0; if (logMessage->level() == Level::Fatal) sysLogPriority = LOG_EMERG; else if (logMessage->level() == Level::Error) sysLogPriority = LOG_ERR; else if (logMessage->level() == Level::Warning) sysLogPriority = LOG_WARNING; else if (logMessage->level() == Level::Info) sysLogPriority = LOG_INFO; else if (logMessage->level() == Level::Debug) sysLogPriority = LOG_DEBUG; else sysLogPriority = LOG_NOTICE; # if defined(ELPP_UNICODE) char* line = base::utils::Str::wcharPtrToCharPtr(logLine.c_str()); syslog(sysLogPriority, "%s", line); free(line); # else syslog(sysLogPriority, "%s", logLine.c_str()); # endif } # endif // defined(ELPP_SYSLOG) } void AsyncDispatchWorker::run(void) { while (continueRunning()) { emptyQueue(); base::threading::msleep(10); // 10ms } } #endif // ELPP_ASYNC_LOGGING // DefaultLogBuilder base::type::string_t DefaultLogBuilder::build(const LogMessage* logMessage, bool appendNewLine) const { base::TypedConfigurations* tc = logMessage->logger()->typedConfigurations(); const base::LogFormat* logFormat = &tc->logFormat(logMessage->level()); base::type::string_t logLine = logFormat->format(); char buff[base::consts::kSourceFilenameMaxLength + base::consts::kSourceLineMaxLength] = ""; const char* bufLim = buff + sizeof(buff); if (logFormat->hasFlag(base::FormatFlags::AppName)) { // App name base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kAppNameFormatSpecifier, logMessage->logger()->parentApplicationName()); } if (logFormat->hasFlag(base::FormatFlags::ThreadId)) { // Thread ID base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kThreadIdFormatSpecifier, ELPP->getThreadName(base::threading::getCurrentThreadId())); } if (logFormat->hasFlag(base::FormatFlags::DateTime)) { // DateTime base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kDateTimeFormatSpecifier, base::utils::DateTime::getDateTime(logFormat->dateTimeFormat().c_str(), &tc->subsecondPrecision(logMessage->level()))); } if (logFormat->hasFlag(base::FormatFlags::Function)) { // Function base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogFunctionFormatSpecifier, logMessage->func()); } if (logFormat->hasFlag(base::FormatFlags::File)) { // File base::utils::Str::clearBuff(buff, base::consts::kSourceFilenameMaxLength); base::utils::File::buildStrippedFilename(logMessage->file().c_str(), buff); base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogFileFormatSpecifier, std::string(buff)); } if (logFormat->hasFlag(base::FormatFlags::FileBase)) { // FileBase base::utils::Str::clearBuff(buff, base::consts::kSourceFilenameMaxLength); base::utils::File::buildBaseFilename(logMessage->file(), buff); base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogFileBaseFormatSpecifier, std::string(buff)); } if (logFormat->hasFlag(base::FormatFlags::Line)) { // Line char* buf = base::utils::Str::clearBuff(buff, base::consts::kSourceLineMaxLength); buf = base::utils::Str::convertAndAddToBuff(logMessage->line(), base::consts::kSourceLineMaxLength, buf, bufLim, false); base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogLineFormatSpecifier, std::string(buff)); } if (logFormat->hasFlag(base::FormatFlags::Location)) { // Location char* buf = base::utils::Str::clearBuff(buff, base::consts::kSourceFilenameMaxLength + base::consts::kSourceLineMaxLength); 
base::utils::File::buildStrippedFilename(logMessage->file().c_str(), buff); buf = base::utils::Str::addToBuff(buff, buf, bufLim); buf = base::utils::Str::addToBuff(":", buf, bufLim); buf = base::utils::Str::convertAndAddToBuff(logMessage->line(), base::consts::kSourceLineMaxLength, buf, bufLim, false); base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kLogLocationFormatSpecifier, std::string(buff)); } if (logMessage->level() == Level::Verbose && logFormat->hasFlag(base::FormatFlags::VerboseLevel)) { // Verbose level char* buf = base::utils::Str::clearBuff(buff, 1); buf = base::utils::Str::convertAndAddToBuff(logMessage->verboseLevel(), 1, buf, bufLim, false); base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kVerboseLevelFormatSpecifier, std::string(buff)); } if (logFormat->hasFlag(base::FormatFlags::LogMessage)) { // Log message base::utils::Str::replaceFirstWithEscape(logLine, base::consts::kMessageFormatSpecifier, logMessage->message()); } #if !defined(ELPP_DISABLE_CUSTOM_FORMAT_SPECIFIERS) for (std::vector<CustomFormatSpecifier>::const_iterator it = ELPP->customFormatSpecifiers()->begin(); it != ELPP->customFormatSpecifiers()->end(); ++it) { std::string fs(it->formatSpecifier()); base::type::string_t wcsFormatSpecifier(fs.begin(), fs.end()); base::utils::Str::replaceFirstWithEscape(logLine, wcsFormatSpecifier, it->resolver()(logMessage)); } #endif // !defined(ELPP_DISABLE_CUSTOM_FORMAT_SPECIFIERS) if (appendNewLine) logLine += ELPP_LITERAL("\n"); return logLine; } // LogDispatcher void LogDispatcher::dispatch(void) { if (m_proceed && m_dispatchAction == base::DispatchAction::None) { m_proceed = false; } if (!m_proceed) { return; } base::threading::ScopedLock scopedLock(ELPP->lock()); base::TypedConfigurations* tc = m_logMessage.logger()->m_typedConfigurations; if (ELPP->hasFlag(LoggingFlag::StrictLogFileSizeCheck)) { tc->validateFileRolling(m_logMessage.level(), ELPP->preRollOutCallback()); } LogDispatchCallback* callback = nullptr; LogDispatchData data; for (const std::pair<std::string, base::type::LogDispatchCallbackPtr>& h : ELPP->m_logDispatchCallbacks) { callback = h.second.get(); if (callback != nullptr && callback->enabled()) { data.setLogMessage(&m_logMessage); data.setDispatchAction(m_dispatchAction); callback->handle(&data); } } } // MessageBuilder void MessageBuilder::initialize(Logger* logger) { m_logger = logger; m_containerLogSeperator = ELPP->hasFlag(LoggingFlag::NewLineForContainer) ? ELPP_LITERAL("\n ") : ELPP_LITERAL(", "); } MessageBuilder& MessageBuilder::operator<<(const wchar_t* msg) { if (msg == nullptr) { m_logger->stream() << base::consts::kNullPointer; return *this; } # if defined(ELPP_UNICODE) m_logger->stream() << msg; # else char* buff_ = base::utils::Str::wcharPtrToCharPtr(msg); m_logger->stream() << buff_; free(buff_); # endif if (ELPP->hasFlag(LoggingFlag::AutoSpacing)) { m_logger->stream() << " "; } return *this; } // Writer Writer& Writer::construct(Logger* logger, bool needLock) { m_logger = logger; initializeLogger(logger->id(), false, needLock); m_messageBuilder.initialize(m_logger); return *this; } Writer& Writer::construct(int count, const char* loggerIds, ...) 
{ if (ELPP->hasFlag(LoggingFlag::MultiLoggerSupport)) { va_list loggersList; va_start(loggersList, loggerIds); const char* id = loggerIds; for (int i = 0; i < count; ++i) { m_loggerIds.push_back(std::string(id)); id = va_arg(loggersList, const char*); } va_end(loggersList); initializeLogger(m_loggerIds.at(0)); } else { initializeLogger(std::string(loggerIds)); } m_messageBuilder.initialize(m_logger); return *this; } void Writer::initializeLogger(const std::string& loggerId, bool lookup, bool needLock) { if (lookup) { m_logger = ELPP->registeredLoggers()->get(loggerId, ELPP->hasFlag(LoggingFlag::CreateLoggerAutomatically)); } if (m_logger == nullptr) { ELPP->acquireLock(); if (!ELPP->registeredLoggers()->has(std::string(base::consts::kDefaultLoggerId))) { // Somehow default logger has been unregistered. Not good! Register again ELPP->registeredLoggers()->get(std::string(base::consts::kDefaultLoggerId)); } ELPP->releaseLock(); // Need to unlock it for next writer Writer(Level::Debug, m_file, m_line, m_func).construct(1, base::consts::kDefaultLoggerId) << "Logger [" << loggerId << "] is not registered yet!"; m_proceed = false; } else { if (needLock) { m_logger->acquireLock(); // This should not be unlocked by checking m_proceed because // m_proceed can be changed by lines below } if (ELPP->hasFlag(LoggingFlag::HierarchicalLogging)) { m_proceed = m_level == Level::Verbose ? m_logger->enabled(m_level) : LevelHelper::castToInt(m_level) >= LevelHelper::castToInt(ELPP->m_loggingLevel); } else { m_proceed = m_logger->enabled(m_level); } } } void Writer::processDispatch() { #if ELPP_LOGGING_ENABLED if (ELPP->hasFlag(LoggingFlag::MultiLoggerSupport)) { bool firstDispatched = false; base::type::string_t logMessage; std::size_t i = 0; do { if (m_proceed) { if (firstDispatched) { m_logger->stream() << logMessage; } else { firstDispatched = true; if (m_loggerIds.size() > 1) { logMessage = m_logger->stream().str(); } } triggerDispatch(); } else if (m_logger != nullptr) { m_logger->stream().str(ELPP_LITERAL("")); m_logger->releaseLock(); } if (i + 1 < m_loggerIds.size()) { initializeLogger(m_loggerIds.at(i + 1)); } } while (++i < m_loggerIds.size()); } else { if (m_proceed) { triggerDispatch(); } else if (m_logger != nullptr) { m_logger->stream().str(ELPP_LITERAL("")); m_logger->releaseLock(); } } #else if (m_logger != nullptr) { m_logger->stream().str(ELPP_LITERAL("")); m_logger->releaseLock(); } #endif // ELPP_LOGGING_ENABLED } void Writer::triggerDispatch(void) { if (m_proceed) { base::LogDispatcher(m_proceed, LogMessage(m_level, m_file, m_line, m_func, m_verboseLevel, m_logger), m_dispatchAction).dispatch(); } if (m_logger != nullptr) { m_logger->stream().str(ELPP_LITERAL("")); m_logger->releaseLock(); } if (m_proceed && m_level == Level::Fatal && !ELPP->hasFlag(LoggingFlag::DisableApplicationAbortOnFatalLog)) { base::Writer(Level::Warning, m_file, m_line, m_func).construct(1, base::consts::kDefaultLoggerId) << "Aborting application. 
Reason: Fatal log at [" << m_file << ":" << m_line << "]"; std::stringstream reasonStream; reasonStream << "Fatal log at [" << m_file << ":" << m_line << "]" << " If you wish to disable 'abort on fatal log' please use " << "el::Helpers::addFlag(el::LoggingFlag::DisableApplicationAbortOnFatalLog)"; base::utils::abort(1, reasonStream.str()); } m_proceed = false; } // PErrorWriter PErrorWriter::~PErrorWriter(void) { if (m_proceed) { #if ELPP_COMPILER_MSVC char buff[256]; strerror_s(buff, 256, errno); m_logger->stream() << ": " << buff << " [" << errno << "]"; #else m_logger->stream() << ": " << strerror(errno) << " [" << errno << "]"; #endif } } // PerformanceTracker #if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING) PerformanceTracker::PerformanceTracker(const std::string& blockName, base::TimestampUnit timestampUnit, const std::string& loggerId, bool scopedLog, Level level) : m_blockName(blockName), m_timestampUnit(timestampUnit), m_loggerId(loggerId), m_scopedLog(scopedLog), m_level(level), m_hasChecked(false), m_lastCheckpointId(std::string()), m_enabled(false) { #if !defined(ELPP_DISABLE_PERFORMANCE_TRACKING) && ELPP_LOGGING_ENABLED // We store it locally so that if user happen to change configuration by the end of scope // or before calling checkpoint, we still depend on state of configuraton at time of construction el::Logger* loggerPtr = ELPP->registeredLoggers()->get(loggerId, false); m_enabled = loggerPtr != nullptr && loggerPtr->m_typedConfigurations->performanceTracking(m_level); if (m_enabled) { base::utils::DateTime::gettimeofday(&m_startTime); } #endif // !defined(ELPP_DISABLE_PERFORMANCE_TRACKING) && ELPP_LOGGING_ENABLED } PerformanceTracker::~PerformanceTracker(void) { #if !defined(ELPP_DISABLE_PERFORMANCE_TRACKING) && ELPP_LOGGING_ENABLED if (m_enabled) { base::threading::ScopedLock scopedLock(lock()); if (m_scopedLog) { base::utils::DateTime::gettimeofday(&m_endTime); base::type::string_t formattedTime = getFormattedTimeTaken(); PerformanceTrackingData data(PerformanceTrackingData::DataType::Complete); data.init(this); data.m_formattedTimeTaken = formattedTime; PerformanceTrackingCallback* callback = nullptr; for (const std::pair<std::string, base::type::PerformanceTrackingCallbackPtr>& h : ELPP->m_performanceTrackingCallbacks) { callback = h.second.get(); if (callback != nullptr && callback->enabled()) { callback->handle(&data); } } } } #endif // !defined(ELPP_DISABLE_PERFORMANCE_TRACKING) } void PerformanceTracker::checkpoint(const std::string& id, const char* file, base::type::LineNumber line, const char* func) { #if !defined(ELPP_DISABLE_PERFORMANCE_TRACKING) && ELPP_LOGGING_ENABLED if (m_enabled) { base::threading::ScopedLock scopedLock(lock()); base::utils::DateTime::gettimeofday(&m_endTime); base::type::string_t formattedTime = m_hasChecked ? 
getFormattedTimeTaken(m_lastCheckpointTime) : ELPP_LITERAL(""); PerformanceTrackingData data(PerformanceTrackingData::DataType::Checkpoint); data.init(this); data.m_checkpointId = id; data.m_file = file; data.m_line = line; data.m_func = func; data.m_formattedTimeTaken = formattedTime; PerformanceTrackingCallback* callback = nullptr; for (const std::pair<std::string, base::type::PerformanceTrackingCallbackPtr>& h : ELPP->m_performanceTrackingCallbacks) { callback = h.second.get(); if (callback != nullptr && callback->enabled()) { callback->handle(&data); } } base::utils::DateTime::gettimeofday(&m_lastCheckpointTime); m_hasChecked = true; m_lastCheckpointId = id; } #endif // !defined(ELPP_DISABLE_PERFORMANCE_TRACKING) && ELPP_LOGGING_ENABLED ELPP_UNUSED(id); ELPP_UNUSED(file); ELPP_UNUSED(line); ELPP_UNUSED(func); } const base::type::string_t PerformanceTracker::getFormattedTimeTaken(struct timeval startTime) const { if (ELPP->hasFlag(LoggingFlag::FixedTimeFormat)) { base::type::stringstream_t ss; ss << base::utils::DateTime::getTimeDifference(m_endTime, startTime, m_timestampUnit) << " " << base::consts::kTimeFormats[static_cast<base::type::EnumType> (m_timestampUnit)].unit; return ss.str(); } return base::utils::DateTime::formatTime(base::utils::DateTime::getTimeDifference(m_endTime, startTime, m_timestampUnit), m_timestampUnit); } #endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_PERFORMANCE_TRACKING) namespace debug { #if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) // StackTrace StackTrace::StackTraceEntry::StackTraceEntry(std::size_t index, const char* loc, const char* demang, const char* hex, const char* addr) { m_index = index; m_location = std::string(loc); m_demangled = std::string(demang); m_hex = std::string(hex); m_addr = std::string(addr); } std::ostream& operator<<(std::ostream& ss, const StackTrace::StackTraceEntry& si) { ss << "[" << si.m_index << "] " << si.m_location << (si.m_demangled.empty() ? "" : ":") << si.m_demangled << (si.m_hex.empty() ? 
"" : "+") << si.m_hex << si.m_addr; return ss; } std::ostream& operator<<(std::ostream& os, const StackTrace& st) { std::vector<StackTrace::StackTraceEntry>::const_iterator it = st.m_stack.begin(); while (it != st.m_stack.end()) { os << " " << *it++ << "\n"; } return os; } void StackTrace::generateNew(void) { #if ELPP_STACKTRACE m_stack.clear(); void* stack[kMaxStack]; unsigned int size = backtrace(stack, kMaxStack); char** strings = backtrace_symbols(stack, size); if (size > kStackStart) { // Skip StackTrace c'tor and generateNew for (std::size_t i = kStackStart; i < size; ++i) { char* mangName = nullptr; char* hex = nullptr; char* addr = nullptr; for (char* c = strings[i]; *c; ++c) { switch (*c) { case '(': mangName = c; break; case '+': hex = c; break; case ')': addr = c; break; default: break; } } // Perform demangling if parsed properly if (mangName != nullptr && hex != nullptr && addr != nullptr && mangName < hex) { *mangName++ = '\0'; *hex++ = '\0'; *addr++ = '\0'; int status = 0; char* demangName = abi::__cxa_demangle(mangName, 0, 0, &status); // if demangling is successful, output the demangled function name if (status == 0) { // Success (see http://gcc.gnu.org/onlinedocs/libstdc++/libstdc++-html-USERS-4.3/a01696.html) StackTraceEntry entry(i - 1, strings[i], demangName, hex, addr); m_stack.push_back(entry); } else { // Not successful - we will use mangled name StackTraceEntry entry(i - 1, strings[i], mangName, hex, addr); m_stack.push_back(entry); } free(demangName); } else { StackTraceEntry entry(i - 1, strings[i]); m_stack.push_back(entry); } } } free(strings); #else ELPP_INTERNAL_INFO(1, "Stacktrace generation not supported for selected compiler"); #endif // ELPP_STACKTRACE } // Static helper functions static std::string crashReason(int sig) { std::stringstream ss; bool foundReason = false; for (int i = 0; i < base::consts::kCrashSignalsCount; ++i) { if (base::consts::kCrashSignals[i].numb == sig) { ss << "Application has crashed due to [" << base::consts::kCrashSignals[i].name << "] signal"; if (ELPP->hasFlag(el::LoggingFlag::LogDetailedCrashReason)) { ss << std::endl << " " << base::consts::kCrashSignals[i].brief << std::endl << " " << base::consts::kCrashSignals[i].detail; } foundReason = true; } } if (!foundReason) { ss << "Application has crashed due to unknown signal [" << sig << "]"; } return ss.str(); } /// @brief Logs reason of crash from sig static void logCrashReason(int sig, bool stackTraceIfAvailable, Level level, const char* logger) { std::stringstream ss; ss << "CRASH HANDLED; "; ss << crashReason(sig); #if ELPP_STACKTRACE if (stackTraceIfAvailable) { ss << std::endl << " ======= Backtrace: =========" << std::endl << base::debug::StackTrace(); } #else ELPP_UNUSED(stackTraceIfAvailable); #endif // ELPP_STACKTRACE ELPP_WRITE_LOG(el::base::Writer, level, base::DispatchAction::NormalLog, logger) << ss.str(); } static inline void crashAbort(int sig) { base::utils::abort(sig, std::string()); } /// @brief Default application crash handler /// /// @detail This function writes log using 'default' logger, prints stack trace for GCC based compilers and aborts program. 
static inline void defaultCrashHandler(int sig) { base::debug::logCrashReason(sig, true, Level::Fatal, base::consts::kDefaultLoggerId); base::debug::crashAbort(sig); } // CrashHandler CrashHandler::CrashHandler(bool useDefault) { if (useDefault) { setHandler(defaultCrashHandler); } } void CrashHandler::setHandler(const Handler& cHandler) { m_handler = cHandler; #if defined(ELPP_HANDLE_SIGABRT) int i = 0; // SIGABRT is at base::consts::kCrashSignals[0] #else int i = 1; #endif // defined(ELPP_HANDLE_SIGABRT) for (; i < base::consts::kCrashSignalsCount; ++i) { m_handler = signal(base::consts::kCrashSignals[i].numb, cHandler); } } #endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) } // namespace debug } // namespace base // el // Helpers #if defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) void Helpers::crashAbort(int sig, const char* sourceFile, unsigned int long line) { std::stringstream ss; ss << base::debug::crashReason(sig).c_str(); ss << " - [Called el::Helpers::crashAbort(" << sig << ")]"; if (sourceFile != nullptr && strlen(sourceFile) > 0) { ss << " - Source: " << sourceFile; if (line > 0) ss << ":" << line; else ss << " (line number not specified)"; } base::utils::abort(sig, ss.str()); } void Helpers::logCrashReason(int sig, bool stackTraceIfAvailable, Level level, const char* logger) { el::base::debug::logCrashReason(sig, stackTraceIfAvailable, level, logger); } #endif // defined(ELPP_FEATURE_ALL) || defined(ELPP_FEATURE_CRASH_LOG) // Loggers Logger* Loggers::getLogger(const std::string& identity, bool registerIfNotAvailable) { base::threading::ScopedLock scopedLock(ELPP->lock()); return ELPP->registeredLoggers()->get(identity, registerIfNotAvailable); } void Loggers::setDefaultLogBuilder(el::LogBuilderPtr& logBuilderPtr) { ELPP->registeredLoggers()->setDefaultLogBuilder(logBuilderPtr); } bool Loggers::unregisterLogger(const std::string& identity) { base::threading::ScopedLock scopedLock(ELPP->lock()); return ELPP->registeredLoggers()->remove(identity); } bool Loggers::hasLogger(const std::string& identity) { base::threading::ScopedLock scopedLock(ELPP->lock()); return ELPP->registeredLoggers()->has(identity); } Logger* Loggers::reconfigureLogger(Logger* logger, const Configurations& configurations) { if (!logger) return nullptr; logger->configure(configurations); return logger; } Logger* Loggers::reconfigureLogger(const std::string& identity, const Configurations& configurations) { return Loggers::reconfigureLogger(Loggers::getLogger(identity), configurations); } Logger* Loggers::reconfigureLogger(const std::string& identity, ConfigurationType configurationType, const std::string& value) { Logger* logger = Loggers::getLogger(identity); if (logger == nullptr) { return nullptr; } logger->configurations()->set(Level::Global, configurationType, value); logger->reconfigure(); return logger; } void Loggers::reconfigureAllLoggers(const Configurations& configurations) { for (base::RegisteredLoggers::iterator it = ELPP->registeredLoggers()->begin(); it != ELPP->registeredLoggers()->end(); ++it) { Loggers::reconfigureLogger(it->second, configurations); } } void Loggers::reconfigureAllLoggers(Level level, ConfigurationType configurationType, const std::string& value) { for (base::RegisteredLoggers::iterator it = ELPP->registeredLoggers()->begin(); it != ELPP->registeredLoggers()->end(); ++it) { Logger* logger = it->second; logger->configurations()->set(level, configurationType, value); logger->reconfigure(); } } void Loggers::setDefaultConfigurations(const 
Configurations& configurations, bool reconfigureExistingLoggers) { ELPP->registeredLoggers()->setDefaultConfigurations(configurations); if (reconfigureExistingLoggers) { Loggers::reconfigureAllLoggers(configurations); } } const Configurations* Loggers::defaultConfigurations(void) { return ELPP->registeredLoggers()->defaultConfigurations(); } const base::LogStreamsReferenceMap* Loggers::logStreamsReference(void) { return ELPP->registeredLoggers()->logStreamsReference(); } base::TypedConfigurations Loggers::defaultTypedConfigurations(void) { return base::TypedConfigurations( ELPP->registeredLoggers()->defaultConfigurations(), ELPP->registeredLoggers()->logStreamsReference()); } std::vector<std::string>* Loggers::populateAllLoggerIds(std::vector<std::string>* targetList) { targetList->clear(); for (base::RegisteredLoggers::iterator it = ELPP->registeredLoggers()->list().begin(); it != ELPP->registeredLoggers()->list().end(); ++it) { targetList->push_back(it->first); } return targetList; } void Loggers::configureFromGlobal(const char* globalConfigurationFilePath) { std::ifstream gcfStream(globalConfigurationFilePath, std::ifstream::in); ELPP_ASSERT(gcfStream.is_open(), "Unable to open global configuration file [" << globalConfigurationFilePath << "] for parsing."); std::string line = std::string(); std::stringstream ss; Logger* logger = nullptr; auto configure = [&](void) { ELPP_INTERNAL_INFO(8, "Configuring logger: '" << logger->id() << "' with configurations \n" << ss.str() << "\n--------------"); Configurations c; c.parseFromText(ss.str()); logger->configure(c); }; while (gcfStream.good()) { std::getline(gcfStream, line); ELPP_INTERNAL_INFO(1, "Parsing line: " << line); base::utils::Str::trim(line); if (Configurations::Parser::isComment(line)) continue; Configurations::Parser::ignoreComments(&line); base::utils::Str::trim(line); if (line.size() > 2 && base::utils::Str::startsWith(line, std::string(base::consts::kConfigurationLoggerId))) { if (!ss.str().empty() && logger != nullptr) { configure(); } ss.str(std::string("")); line = line.substr(2); base::utils::Str::trim(line); if (line.size() > 1) { ELPP_INTERNAL_INFO(1, "Getting logger: '" << line << "'"); logger = getLogger(line); } } else { ss << line << "\n"; } } if (!ss.str().empty() && logger != nullptr) { configure(); } } bool Loggers::configureFromArg(const char* argKey) { #if defined(ELPP_DISABLE_CONFIGURATION_FROM_PROGRAM_ARGS) ELPP_UNUSED(argKey); #else if (!Helpers::commandLineArgs()->hasParamWithValue(argKey)) { return false; } configureFromGlobal(Helpers::commandLineArgs()->getParamValue(argKey)); #endif // defined(ELPP_DISABLE_CONFIGURATION_FROM_PROGRAM_ARGS) return true; } void Loggers::flushAll(void) { ELPP->registeredLoggers()->flushAll(); } void Loggers::setVerboseLevel(base::type::VerboseLevel level) { ELPP->vRegistry()->setLevel(level); } base::type::VerboseLevel Loggers::verboseLevel(void) { return ELPP->vRegistry()->level(); } void Loggers::setVModules(const char* modules) { if (ELPP->vRegistry()->vModulesEnabled()) { ELPP->vRegistry()->setModules(modules); } } void Loggers::clearVModules(void) { ELPP->vRegistry()->clearModules(); } // VersionInfo const std::string VersionInfo::version(void) { return std::string("9.95.0"); } /// @brief Release date of current version const std::string VersionInfo::releaseDate(void) { return std::string("02-08-2017 2312hrs"); } } // namespace el
1
12,128
Maybe let's take this opportunity to convert easylogging to a git submodule?
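If the suggestion is taken up, the change is mechanical; here is a sketch of the workflow, assuming the easylogging++ copy currently lives under a vendored directory (the path and upstream URL below are illustrative assumptions, not taken from this repository):

```sh
# Remove the vendored copy, then track upstream as a submodule.
# Both the path and the URL are assumptions for illustration.
git rm -r lib/easylogging
git submodule add https://github.com/muflihun/easyloggingpp lib/easylogging
git commit -m "Replace vendored easylogging with a git submodule"

# Consumers would then clone with:
git clone --recurse-submodules <repo-url>
```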
stellar-stellar-core
c
@@ -30,6 +30,9 @@ func extractFrontMatter(input string) (map[string]interface{}, string, error) { } firstLine := input[firstLineStart:firstLineEnd] + // ensure residue windows newline is removed + firstLine = strings.Trim(firstLine, "\r") + // see what kind of front matter there is, if any var closingFence string var fmParser func([]byte) (map[string]interface{}, error)
1
package templates import ( "encoding/json" "fmt" "strings" "unicode" "github.com/naoina/toml" "gopkg.in/yaml.v2" ) func extractFrontMatter(input string) (map[string]interface{}, string, error) { // get the bounds of the first non-empty line var firstLineStart, firstLineEnd int lineEmpty := true for i, b := range input { if b == '\n' { firstLineStart = firstLineEnd if firstLineStart > 0 { firstLineStart++ // skip newline character } firstLineEnd = i if !lineEmpty { break } continue } lineEmpty = lineEmpty && unicode.IsSpace(b) } firstLine := input[firstLineStart:firstLineEnd] // see what kind of front matter there is, if any var closingFence string var fmParser func([]byte) (map[string]interface{}, error) switch firstLine { case yamlFrontMatterFenceOpen: fmParser = yamlFrontMatter closingFence = yamlFrontMatterFenceClose case tomlFrontMatterFenceOpen: fmParser = tomlFrontMatter closingFence = tomlFrontMatterFenceClose case jsonFrontMatterFenceOpen: fmParser = jsonFrontMatter closingFence = jsonFrontMatterFenceClose default: // no recognized front matter; whole document is body return nil, input, nil } // find end of front matter fmEndFenceStart := strings.Index(input[firstLineEnd:], "\n"+closingFence) if fmEndFenceStart < 0 { return nil, "", fmt.Errorf("unterminated front matter") } fmEndFenceStart += firstLineEnd + 1 // add 1 to account for newline // extract and parse front matter frontMatter := input[firstLineEnd:fmEndFenceStart] fm, err := fmParser([]byte(frontMatter)) if err != nil { return nil, "", err } // the rest is the body body := input[fmEndFenceStart+len(closingFence):] return fm, body, nil } func yamlFrontMatter(input []byte) (map[string]interface{}, error) { m := make(map[string]interface{}) err := yaml.Unmarshal(input, &m) return m, err } func tomlFrontMatter(input []byte) (map[string]interface{}, error) { m := make(map[string]interface{}) err := toml.Unmarshal(input, &m) return m, err } func jsonFrontMatter(input []byte) (map[string]interface{}, error) { input = append([]byte{'{'}, input...) input = append(input, '}') m := make(map[string]interface{}) err := json.Unmarshal(input, &m) return m, err } type parsedMarkdownDoc struct { Meta map[string]interface{} `json:"meta,omitempty"` Body string `json:"body,omitempty"` } const ( yamlFrontMatterFenceOpen, yamlFrontMatterFenceClose = "---", "---" tomlFrontMatterFenceOpen, tomlFrontMatterFenceClose = "+++", "+++" jsonFrontMatterFenceOpen, jsonFrontMatterFenceClose = "{", "}" )
1
15,018
Should we just be generous and elide all extra whitespace? `strings.TrimSpace`
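A minimal, self-contained sketch of what the reviewer is proposing — trimming all surrounding whitespace from the fence line rather than only the carriage return:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A front matter fence line as it might arrive from a file saved with
	// Windows line endings: the "\r" survives the "\n"-based line split.
	firstLine := "---\r"

	// The patch under review strips only "\r"; strings.TrimSpace is the more
	// generous option, eliding trailing carriage returns, spaces, and tabs alike.
	fmt.Printf("%q\n", strings.TrimSpace(firstLine)) // prints "---"
}
```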
caddyserver-caddy
go
@@ -900,18 +900,6 @@ public class SalesforceSDKManager { return context.getString(getSalesforceR().stringAccountType()); } - /** - * Indicates whether the app is running on a tablet. - * - * @return True if the application is running on a tablet. - */ - public static boolean isTablet() { - if ((INSTANCE.context.getResources().getConfiguration().screenLayout & Configuration.SCREENLAYOUT_SIZE_MASK) == Configuration.SCREENLAYOUT_SIZE_XLARGE) { - return true; - } - return false; - } - @Override public String toString() { final StringBuilder sb = new StringBuilder();
1
/* * Copyright (c) 2014, salesforce.com, inc. * All rights reserved. * Redistribution and use of this software in source and binary forms, with or * without modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of salesforce.com, inc. nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission of salesforce.com, inc. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package com.salesforce.androidsdk.app; import android.accounts.Account; import android.accounts.AccountManager; import android.accounts.AccountManagerCallback; import android.accounts.AccountManagerFuture; import android.annotation.TargetApi; import android.app.Activity; import android.content.BroadcastReceiver; import android.content.Context; import android.content.Intent; import android.content.IntentFilter; import android.content.pm.PackageInfo; import android.content.pm.PackageManager.NameNotFoundException; import android.content.res.Configuration; import android.content.res.Resources; import android.os.AsyncTask; import android.os.Build; import android.os.SystemClock; import android.provider.Settings; import android.util.Log; import android.webkit.CookieManager; import android.webkit.CookieSyncManager; import com.salesforce.androidsdk.accounts.UserAccount; import com.salesforce.androidsdk.accounts.UserAccountManager; import com.salesforce.androidsdk.auth.AuthenticatorService; import com.salesforce.androidsdk.auth.HttpAccess; import com.salesforce.androidsdk.auth.OAuth2; import com.salesforce.androidsdk.config.AdminPermsManager; import com.salesforce.androidsdk.config.AdminSettingsManager; import com.salesforce.androidsdk.config.BootConfig; import com.salesforce.androidsdk.config.LoginServerManager; import com.salesforce.androidsdk.push.PushMessaging; import com.salesforce.androidsdk.push.PushNotificationInterface; import com.salesforce.androidsdk.rest.ClientManager; import com.salesforce.androidsdk.rest.ClientManager.LoginOptions; import com.salesforce.androidsdk.security.Encryptor; import com.salesforce.androidsdk.security.PRNGFixes; import com.salesforce.androidsdk.security.PasscodeManager; import com.salesforce.androidsdk.ui.AccountSwitcherActivity; import com.salesforce.androidsdk.ui.LoginActivity; import com.salesforce.androidsdk.ui.PasscodeActivity; import com.salesforce.androidsdk.ui.SalesforceR; import 
com.salesforce.androidsdk.util.EventsObservable; import com.salesforce.androidsdk.util.EventsObservable.EventType; import java.net.URI; import java.util.List; /** * This class serves as an interface to the various * functions of the Salesforce SDK. In order to use the SDK, * your app must first instantiate the singleton SalesforceSDKManager * object by calling the static init() method. After calling init(), * use the static getInstance() method to access the * singleton SalesforceSDKManager object. */ @SuppressWarnings("deprecation") public class SalesforceSDKManager { /** * Current version of this SDK. */ public static final String SDK_VERSION = "4.1.0.unstable"; /** * Default app name. */ private static final String DEFAULT_APP_DISPLAY_NAME = "Salesforce"; /** * Instance of the SalesforceSDKManager to use for this process. */ protected static SalesforceSDKManager INSTANCE; /** * Timeout value for push un-registration. */ private static final int PUSH_UNREGISTER_TIMEOUT_MILLIS = 30000; protected Context context; protected KeyInterface keyImpl; protected LoginOptions loginOptions; protected Class<? extends Activity> mainActivityClass; protected Class<? extends Activity> loginActivityClass = LoginActivity.class; protected Class<? extends PasscodeActivity> passcodeActivityClass = PasscodeActivity.class; protected Class<? extends AccountSwitcherActivity> switcherActivityClass = AccountSwitcherActivity.class; private String encryptionKey; private SalesforceR salesforceR = new SalesforceR(); private PasscodeManager passcodeManager; private LoginServerManager loginServerManager; private boolean isTestRun = false; private boolean isLoggingOut = false; private AdminSettingsManager adminSettingsManager; private AdminPermsManager adminPermsManager; private PushNotificationInterface pushNotificationInterface; private String uid; // device id private volatile boolean loggedOut = false; /** * PasscodeManager object lock. */ private Object passcodeManagerLock = new Object(); /** * Returns a singleton instance of this class. * * @return Singleton instance of SalesforceSDKManager. */ public static SalesforceSDKManager getInstance() { if (INSTANCE != null) { return INSTANCE; } else { throw new RuntimeException("Applications need to call SalesforceSDKManager.init() first."); } } /** * Protected constructor. * @param context Application context. * @param keyImpl Implementation for KeyInterface. * @param mainActivity Activity that should be launched after the login flow. * @param loginActivity Login activity. */ protected SalesforceSDKManager(Context context, KeyInterface keyImpl, Class<? extends Activity> mainActivity, Class<? extends Activity> loginActivity) { this.uid = Settings.Secure.getString(context.getContentResolver(), Settings.Secure.ANDROID_ID); this.context = context; this.keyImpl = keyImpl; this.mainActivityClass = mainActivity; if (loginActivity != null) { this.loginActivityClass = loginActivity; } } /** * Returns the class for the main activity. * * @return The class for the main activity. */ public Class<? extends Activity> getMainActivityClass() { return mainActivityClass; } /** * Returns the class for the account switcher activity. * * @return The class for the account switcher activity. */ public Class<? extends AccountSwitcherActivity> getAccountSwitcherActivityClass() { return switcherActivityClass; } /** * Returns the class for the account switcher activity. * * @return The class for the account switcher activity. */ public void setAccountSwitcherActivityClass(Class<? 
extends AccountSwitcherActivity> activity) { if (activity != null) { switcherActivityClass = activity; } } public interface KeyInterface { /** * Defines a single function for retrieving the key * associated with a given name. * * For the given name, this function must return the same key * even when the application is restarted. The value this * function returns must be Base64 encoded. * * {@link Encryptor#isBase64Encoded(String)} can be used to * determine whether the generated key is Base64 encoded. * * {@link Encryptor#hash(String, String)} can be used to * generate a Base64 encoded string. * * For example: * <code> * Encryptor.hash(name + "12s9adfgret=6235inkasd=012", name + "12kl0dsakj4-cuygsdf625wkjasdol8"); * </code> * * @param name The name associated with the key. * @return The key used for encrypting salts and keys. */ public String getKey(String name); } /** * For the given name, this function must return the same key * even when the application is restarted. The value this * function returns must be Base64 encoded. * * {@link Encryptor#isBase64Encoded(String)} can be used to * determine whether the generated key is Base64 encoded. * * {@link Encryptor#hash(String, String)} can be used to * generate a Base64 encoded string. * * For example: * <code> * Encryptor.hash(name + "12s9adfgret=6235inkasd=012", name + "12kl0dsakj4-cuygsdf625wkjasdol8"); * </code> * * @param name The name associated with the key. * @return The key used for encrypting salts and keys. */ public String getKey(String name) { String key = null; if (keyImpl != null) { key = keyImpl.getKey(name); } return key; } /** * Before Mobile SDK 1.3, SalesforceSDK was packaged as a jar, and each project had to provide * a subclass of SalesforceR. * * Since 1.3, SalesforceSDK is packaged as a library project, so the SalesforceR subclass is no longer needed. * @return SalesforceR object which allows reference to resources living outside the SDK. */ public SalesforceR getSalesforceR() { return salesforceR; } /** * Returns the class of the activity used to perform the login process and create the account. * * @return the class of the activity used to perform the login process and create the account. */ public Class<? extends Activity> getLoginActivityClass() { return loginActivityClass; } /** * Returns login options associated with the app. * * @return LoginOptions instance. */ public LoginOptions getLoginOptions() { if (loginOptions == null) { final BootConfig config = BootConfig.getBootConfig(context); loginOptions = new LoginOptions(null, getPasscodeHash(), config.getOauthRedirectURI(), config.getRemoteAccessConsumerKey(), config.getOauthScopes()); } return loginOptions; } /** * For internal use only. Initializes required components. * @param context Application context. * @param keyImpl Implementation of KeyInterface. * @param mainActivity Activity to be launched after the login flow. * @param loginActivity Login activity. */ private static void init(Context context, KeyInterface keyImpl, Class<? extends Activity> mainActivity, Class<? extends Activity> loginActivity) { if (INSTANCE == null) { INSTANCE = new SalesforceSDKManager(context, keyImpl, mainActivity, loginActivity); } initInternal(context); } /** * For internal use by Salesforce Mobile SDK or by subclasses * of SalesforceSDKManager. Initializes required components. * * @param context Application context. */ public static void initInternal(Context context) { // Applies PRNG fixes for certain older versions of Android. 
PRNGFixes.apply(); // Initializes the encryption module. Encryptor.init(context); // Initializes the HTTP client. HttpAccess.init(context, INSTANCE.getUserAgent()); // Upgrades to the latest version. SalesforceSDKUpgradeManager.getInstance().upgrade(); EventsObservable.get().notifyEvent(EventType.AppCreateComplete); } /** * Initializes required components. Native apps must call one overload of * this method before using the Salesforce Mobile SDK. * * @param context Application context. * @param keyImpl Implementation of KeyInterface. * @param mainActivity Activity that should be launched after the login flow. */ public static void initNative(Context context, KeyInterface keyImpl, Class<? extends Activity> mainActivity) { SalesforceSDKManager.init(context, keyImpl, mainActivity, LoginActivity.class); } /** * Initializes required components. Native apps must call one overload of * this method before using the Salesforce Mobile SDK. * * @param context Application context. * @param keyImpl Implementation of KeyInterface. * @param mainActivity Activity that should be launched after the login flow. * @param loginActivity Login activity. */ public static void initNative(Context context, KeyInterface keyImpl, Class<? extends Activity> mainActivity, Class<? extends Activity> loginActivity) { SalesforceSDKManager.init(context, keyImpl, mainActivity, loginActivity); } /** * Sets a custom passcode activity class to be used instead of the default class. * The custom class must subclass PasscodeActivity. * * @param activity Subclass of PasscodeActivity. */ public void setPasscodeActivity(Class<? extends PasscodeActivity> activity) { if (activity != null) { passcodeActivityClass = activity; } } /** * Returns the descriptor of the passcode activity class that's currently in use. * * @return Passcode activity class descriptor. */ public Class<? extends PasscodeActivity> getPasscodeActivity() { return passcodeActivityClass; } /** * Indicates whether the SDK should automatically log out when the * access token is revoked. If you override this method to return * false, your app is responsible for handling its own cleanup when the * access token is revoked. * * @return True if the SDK should automatically logout. */ public boolean shouldLogoutWhenTokenRevoked() { return true; } /** * Returns the application context. * * @return Application context. */ public Context getAppContext() { return context; } /** * Returns the login server manager associated with SalesforceSDKManager. * * @return LoginServerManager instance. */ public synchronized LoginServerManager getLoginServerManager() { if (loginServerManager == null) { loginServerManager = new LoginServerManager(context); } return loginServerManager; } /** * Sets a receiver that handles received push notifications. * * @param pnInterface Implementation of PushNotificationInterface. */ public synchronized void setPushNotificationReceiver(PushNotificationInterface pnInterface) { pushNotificationInterface = pnInterface; } /** * Returns the receiver that's configured to handle incoming push notifications. * * @return Configured implementation of PushNotificationInterface. */ public synchronized PushNotificationInterface getPushNotificationReceiver() { return pushNotificationInterface; } /** * Returns the passcode manager that's associated with SalesforceSDKManager. * * @return PasscodeManager instance. 
*/ public PasscodeManager getPasscodeManager() { synchronized (passcodeManagerLock) { if (passcodeManager == null) { passcodeManager = new PasscodeManager(context); } return passcodeManager; } } /** * Returns the user account manager that's associated with SalesforceSDKManager. * * @return UserAccountManager instance. */ public UserAccountManager getUserAccountManager() { return UserAccountManager.getInstance(); } /** * Returns the administrator settings manager that's associated with SalesforceSDKManager. * * @return AdminSettingsManager instance. */ public synchronized AdminSettingsManager getAdminSettingsManager() { if (adminSettingsManager == null) { adminSettingsManager = new AdminSettingsManager(); } return adminSettingsManager; } /** * Returns the administrator permissions manager that's associated with SalesforceSDKManager. * * @return AdminPermsManager instance. */ public synchronized AdminPermsManager getAdminPermsManager() { if (adminPermsManager == null) { adminPermsManager = new AdminPermsManager(); } return adminPermsManager; } /** * Changes the passcode to a new value. * * @param oldPass Old passcode. * @param newPass New passcode. */ public synchronized void changePasscode(String oldPass, String newPass) { if (!isNewPasscode(oldPass, newPass)) { return; } // Resets the cached encryption key, since the passcode has changed. encryptionKey = null; ClientManager.changePasscode(oldPass, newPass); } /** * Indicates whether the new passcode is different from the old passcode. * * @param oldPass Old passcode. * @param newPass New passcode. * @return True if the new passcode is different from the old passcode. */ protected boolean isNewPasscode(String oldPass, String newPass) { return !((oldPass == null && newPass == null) || (oldPass != null && newPass != null && oldPass.trim().equals(newPass.trim()))); } /** * Returns the encryption key being used. * * @param actualPass Passcode. * @return Encryption key for passcode. */ public synchronized String getEncryptionKeyForPasscode(String actualPass) { if (actualPass != null && !actualPass.trim().equals("")) { return actualPass; } if (encryptionKey == null) { encryptionKey = getPasscodeManager().hashForEncryption(""); } return encryptionKey; } /** * Returns the app display name used by the passcode dialog. * * @return App display string. */ public String getAppDisplayString() { return DEFAULT_APP_DISPLAY_NAME; } /** * Returns the passcode hash being used. * * @return The hashed passcode, or null if it's not required. */ public String getPasscodeHash() { return getPasscodeManager().getPasscodeHash(); } /** * Returns the name of the application (as defined in AndroidManifest.xml). * * @return The name of the application. */ public String getApplicationName() { return context.getPackageManager().getApplicationLabel(context.getApplicationInfo()).toString(); } /** * Checks if network connectivity exists. * * @return True if a network connection is available. */ public boolean hasNetwork() { return HttpAccess.DEFAULT.hasNetwork(); } /** * Cleans up cached credentials and data. * * @param frontActivity Front activity. * @param account Account. */ protected void cleanUp(Activity frontActivity, Account account) { final List<UserAccount> users = getUserAccountManager().getAuthenticatedUsers(); // Finishes front activity if specified, and if this is the last account. if (frontActivity != null && (users == null || users.size() <= 1)) { frontActivity.finish(); } /* * Checks how many accounts are left that are authenticated. 
If only one * account is left, this is the account that is being removed. In this * case, we can safely reset passcode manager, admin prefs, and encryption keys. * Otherwise, we don't reset passcode manager and admin prefs since * there might be other accounts on that same org, and these policies * are stored at the org level. */ if (users == null || users.size() <= 1) { getAdminSettingsManager().resetAll(); getAdminPermsManager().resetAll(); adminSettingsManager = null; adminPermsManager = null; getPasscodeManager().reset(context); passcodeManager = null; encryptionKey = null; UUIDManager.resetUuids(); } } /** * Starts login flow if user account has been removed. */ protected void startLoginPage() { // Clears cookies. removeAllCookies(); // Restarts the application. final Intent i = new Intent(context, getMainActivityClass()); i.setPackage(getAppContext().getPackageName()); i.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK); context.startActivity(i); } /** * Starts account switcher activity if an account has been removed. */ public void startSwitcherActivityIfRequired() { // Clears cookies. removeAllCookies(); /* * If the number of accounts remaining is 0, shows the login page. * If the number of accounts remaining is 1, switches to that user * automatically. If there is more than 1 account logged in, shows * the account switcher screen, so that the user can pick which * account to switch to. */ final UserAccountManager userAccMgr = getUserAccountManager(); final List<UserAccount> accounts = userAccMgr.getAuthenticatedUsers(); if (accounts == null || accounts.size() == 0) { startLoginPage(); } else if (accounts.size() == 1) { userAccMgr.switchToUser(accounts.get(0)); } else { final Intent i = new Intent(context, switcherActivityClass); i.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK); context.startActivity(i); } } /** * Unregisters from push notifications for both GCM (Android) and SFDC, and waits either for * unregistration to complete or for the operation to time out. The timeout period is defined * in PUSH_UNREGISTER_TIMEOUT_MILLIS. * * If timeout occurs while the user is logged in, this method attempts to unregister the push * unregistration receiver, and then removes the user's account. * * @param clientMgr ClientManager instance. * @param showLoginPage True - if the login page should be shown, False - otherwise. * @param refreshToken Refresh token. * @param clientId Client ID. * @param loginServer Login server. * @param account Account instance. * @param frontActivity Front activity. */ private void unregisterPush(final ClientManager clientMgr, final boolean showLoginPage, final String refreshToken, final String clientId, final String loginServer, final Account account, final Activity frontActivity) { final IntentFilter intentFilter = new IntentFilter(PushMessaging.UNREGISTERED_ATTEMPT_COMPLETE_EVENT); final BroadcastReceiver pushUnregisterReceiver = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { if (intent.getAction().equals(PushMessaging.UNREGISTERED_ATTEMPT_COMPLETE_EVENT)) { postPushUnregister(this, clientMgr, showLoginPage, refreshToken, clientId, loginServer, account, frontActivity); } } }; getAppContext().registerReceiver(pushUnregisterReceiver, intentFilter); // Unregisters from notifications on logout. final UserAccount userAcc = getUserAccountManager().buildUserAccount(account); PushMessaging.unregister(context, userAcc); /* * Starts a background thread to wait up to the timeout period. 
If * another thread has already performed logout, we exit immediately. */ (new Thread() { public void run() { long startTime = System.currentTimeMillis(); while ((System.currentTimeMillis() - startTime) < PUSH_UNREGISTER_TIMEOUT_MILLIS && !loggedOut) { // Waits for half a second at a time. SystemClock.sleep(500); } postPushUnregister(pushUnregisterReceiver, clientMgr, showLoginPage, refreshToken, clientId, loginServer, account, frontActivity); }; }).start(); } /** * This method is called either when unregistration for push notifications * is complete and the user has logged out, or when a timeout occurs while waiting. * If the user has not logged out, this method attempts to unregister the push * notification unregistration receiver, and then removes the user's account. * * @param pushReceiver Broadcast receiver. * @param clientMgr ClientManager instance. * @param showLoginPage True - if the login page should be shown, False - otherwise. * @param refreshToken Refresh token. * @param clientId Client ID. * @param loginServer Login server. * @param account Account instance. * @param frontActivity Front activity. */ private synchronized void postPushUnregister(BroadcastReceiver pushReceiver, final ClientManager clientMgr, final boolean showLoginPage, final String refreshToken, final String clientId, final String loginServer, final Account account, Activity frontActivity) { if (!loggedOut) { try { context.unregisterReceiver(pushReceiver); } catch (Exception e) { Log.e("SalesforceSDKManager:postPushUnregister", "Exception occurred while un-registering.", e); } removeAccount(clientMgr, showLoginPage, refreshToken, clientId, loginServer, account, frontActivity); } } /** * Destroys the stored authentication credentials (removes the account). * * @param frontActivity Front activity. */ public void logout(Activity frontActivity) { logout(frontActivity, true); } /** * Destroys the stored authentication credentials (removes the account). * * @param account Account. * @param frontActivity Front activity. */ public void logout(Account account, Activity frontActivity) { logout(account, frontActivity, true); } /** * Destroys the stored authentication credentials (removes the account) * and, if requested, restarts the app. * * @param frontActivity Front activity. * @param showLoginPage If true, displays the login page after removing the account. */ public void logout(Activity frontActivity, final boolean showLoginPage) { final ClientManager clientMgr = new ClientManager(context, getAccountType(), null, shouldLogoutWhenTokenRevoked()); final Account account = clientMgr.getAccount(); logout(account, frontActivity, showLoginPage); } /** * Destroys the stored authentication credentials (removes the account) * and, if requested, restarts the app. * * @param account Account. * @param frontActivity Front activity. * @param showLoginPage If true, displays the login page after removing the account. 
*/ public void logout(Account account, Activity frontActivity, final boolean showLoginPage) { final ClientManager clientMgr = new ClientManager(context, getAccountType(), null, shouldLogoutWhenTokenRevoked()); isLoggingOut = true; final AccountManager mgr = AccountManager.get(context); String refreshToken = null; String clientId = null; String loginServer = null; if (account != null) { String passcodeHash = getPasscodeHash(); refreshToken = SalesforceSDKManager.decryptWithPasscode(mgr.getPassword(account), passcodeHash); clientId = SalesforceSDKManager.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_CLIENT_ID), passcodeHash); loginServer = SalesforceSDKManager.decryptWithPasscode(mgr.getUserData(account, AuthenticatorService.KEY_INSTANCE_URL), passcodeHash); } /* * Makes a call to un-register from push notifications, only * if the refresh token is available. */ final UserAccount userAcc = getUserAccountManager().buildUserAccount(account); if (PushMessaging.isRegistered(context, userAcc) && refreshToken != null) { loggedOut = false; unregisterPush(clientMgr, showLoginPage, refreshToken, clientId, loginServer, account, frontActivity); } else { removeAccount(clientMgr, showLoginPage, refreshToken, clientId, loginServer, account, frontActivity); } } /** * Removes the account upon logout. * * @param clientMgr ClientManager instance. * @param showLoginPage If true, displays the login page after removing the account. * @param refreshToken Refresh token. * @param clientId Client ID. * @param loginServer Login server. * @param account Account instance. * @param frontActivity Front activity. */ private void removeAccount(ClientManager clientMgr, final boolean showLoginPage, String refreshToken, String clientId, String loginServer, Account account, Activity frontActivity) { loggedOut = true; cleanUp(frontActivity, account); /* * Removes the existing account, if any. 'account == null' does not * guarantee that there are no accounts to remove. In the 'Forgot Passcode' * flow there could be accounts to remove, but we don't have them, since * we don't have the passcode hash to decrypt them. Hence, we query * AccountManager directly here and remove the accounts for the case * where 'account == null'. If AccountManager doesn't have accounts * either, then there's nothing to do. */ if (account == null) { final AccountManager accMgr = AccountManager.get(context); if (accMgr != null) { final Account[] accounts = accMgr.getAccountsByType(getAccountType()); if (accounts.length > 0) { for (int i = 0; i < accounts.length - 1; i++) { clientMgr.removeAccounts(accounts); } clientMgr.removeAccountAsync(accounts[accounts.length - 1], new AccountManagerCallback<Boolean>() { @Override public void run(AccountManagerFuture<Boolean> arg0) { notifyLogoutComplete(showLoginPage); } }); } else { notifyLogoutComplete(showLoginPage); } } else { notifyLogoutComplete(showLoginPage); } } else { clientMgr.removeAccountAsync(account, new AccountManagerCallback<Boolean>() { @Override public void run(AccountManagerFuture<Boolean> arg0) { notifyLogoutComplete(showLoginPage); } }); } isLoggingOut = false; // Revokes the existing refresh token. 
if (shouldLogoutWhenTokenRevoked() && account != null && refreshToken != null) { new RevokeTokenTask(refreshToken, clientId, loginServer).execute(); } } private void notifyLogoutComplete(boolean showLoginPage) { EventsObservable.get().notifyEvent(EventType.LogoutComplete); if (showLoginPage) { startSwitcherActivityIfRequired(); } } /** * Returns a user agent string based on the Mobile SDK version. The user agent takes the following form: * SalesforceMobileSDK/{salesforceSDK version} android/{android OS version} appName/appVersion {Native|Hybrid} uid_{device id} * * @return The user agent string to use for all requests. */ public final String getUserAgent() { return getUserAgent(""); } public final String getUserAgent(String qualifier) { String appName = ""; String appVersion = ""; try { PackageInfo packageInfo = context.getPackageManager().getPackageInfo(context.getPackageName(), 0); appName = context.getString(packageInfo.applicationInfo.labelRes); appVersion = packageInfo.versionName; } catch (NameNotFoundException e) { Log.w("SalesforceSDKManager:getUserAgent", e); } catch (Resources.NotFoundException nfe) { // A test harness such as Gradle does NOT have an application name. Log.w("SalesforceSDKManager:getUserAgent", nfe); } String appTypeWithQualifier = getAppType() + qualifier; return String.format("SalesforceMobileSDK/%s android mobile/%s (%s) %s/%s %s uid_%s", SDK_VERSION, Build.VERSION.RELEASE, Build.MODEL, appName, appVersion, appTypeWithQualifier, uid); } /** * @return app type as String */ public String getAppType() { return "Native"; } /** * Indicates whether the application is a hybrid application. * * @return True if this is a hybrid application. */ public boolean isHybrid() { return false; } /** * Returns the authentication account type (which should match authenticator.xml). * * @return Account type string. */ public String getAccountType() { return context.getString(getSalesforceR().stringAccountType()); } /** * Indicates whether the app is running on a tablet. * * @return True if the application is running on a tablet. */ public static boolean isTablet() { if ((INSTANCE.context.getResources().getConfiguration().screenLayout & Configuration.SCREENLAYOUT_SIZE_MASK) == Configuration.SCREENLAYOUT_SIZE_XLARGE) { return true; } return false; } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append(this.getClass()).append(": {\n") .append(" accountType: ").append(getAccountType()).append("\n") .append(" userAgent: ").append(getUserAgent()).append("\n") .append(" mainActivityClass: ").append(getMainActivityClass()).append("\n") .append(" isFileSystemEncrypted: ").append(Encryptor.isFileSystemEncrypted()).append("\n"); if (passcodeManager != null) { // passcodeManager may be null at startup if the app is running in debug mode. sb.append(" hasStoredPasscode: ").append(passcodeManager.hasStoredPasscode(context)).append("\n"); } sb.append("}\n"); return sb.toString(); } /** * Encrypts the given data using the given passcode as the encryption key. * * @param data Data to be encrypted. * @param passcode Encryption key. * @return Encrypted data. */ public static String encryptWithPasscode(String data, String passcode) { return Encryptor.encrypt(data, SalesforceSDKManager.INSTANCE.getEncryptionKeyForPasscode(passcode)); } /** * Decrypts the given data using the given passcode as the decryption key. * * @param data Data to be decrypted. * @param passcode Decryption key. * @return Decrypted data. 
*/ public static String decryptWithPasscode(String data, String passcode) { return Encryptor.decrypt(data, SalesforceSDKManager.INSTANCE.getEncryptionKeyForPasscode(passcode)); } /** * Asynchronous task for revoking the refresh token on logout. * * @author bhariharan */ private class RevokeTokenTask extends AsyncTask<Void, Void, Void> { private String refreshToken; private String clientId; private String loginServer; public RevokeTokenTask(String refreshToken, String clientId, String loginServer) { this.refreshToken = refreshToken; this.clientId = clientId; this.loginServer = loginServer; } @Override protected Void doInBackground(Void... nothings) { try { OAuth2.revokeRefreshToken(HttpAccess.DEFAULT, new URI(loginServer), clientId, refreshToken); } catch (Exception e) { Log.w("SalesforceSDKManager:revokeToken", e); } return null; } } /** * Retrieves a property value that indicates whether the current run is a test run. * * @return True if the current run is a test run. */ public boolean getIsTestRun() { return INSTANCE.isTestRun; } /** * Sets a property that indicates whether the current run is a test run. * * @param isTestRun True if the current run is a test run. */ public void setIsTestRun(boolean isTestRun) { INSTANCE.isTestRun = isTestRun; } /** * Retrieves a property value that indicates whether logout is in progress. * * @return True if logout is in progress. */ public boolean isLoggingOut() { return isLoggingOut; } /** * @return ClientManager */ public ClientManager getClientManager() { return new ClientManager(getAppContext(), getAccountType(), getLoginOptions(), true); } @TargetApi(Build.VERSION_CODES.LOLLIPOP) public void removeAllCookies() { /* * TODO: Remove this conditional once 'minApi >= 21'. */ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) { CookieManager.getInstance().removeAllCookies(null); } else { CookieSyncManager.createInstance(context); CookieManager.getInstance().removeAllCookie(); } } @TargetApi(Build.VERSION_CODES.LOLLIPOP) public void removeSessionCookies() { /* * TODO: Remove this conditional once 'minApi >= 21'. */ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) { CookieManager.getInstance().removeSessionCookies(null); } else { CookieSyncManager.createInstance(context); CookieManager.getInstance().removeSessionCookie(); } } @TargetApi(Build.VERSION_CODES.LOLLIPOP) public void syncCookies() { /* * TODO: Remove this conditional once 'minApi >= 21'. */ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) { CookieManager.getInstance().flush(); } else { CookieSyncManager.createInstance(context); CookieSyncManager.getInstance().sync(); } } }
1
15,025
This is an inaccurate API that has outlived its purpose. We can't reliably make the phone vs. tablet determination with 7" screens in the mix. Also, with the advent of fragments, this API means very little now. It's not being used anywhere since we switched to `ActionBar`.
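A minimal sketch (not part of the Mobile SDK) of the resource-qualifier alternative this comment alludes to: rather than inspecting SCREENLAYOUT_SIZE_MASK at runtime, the resource system can pick the value per screen width. The `is_tablet` bool resource here is hypothetical.

// res/values/bools.xml:         <bool name="is_tablet">false</bool>
// res/values-sw600dp/bools.xml: <bool name="is_tablet">true</bool>
import android.content.Context;

public final class DeviceClass {
    // Delegates the phone-vs-tablet decision to the resource system, which
    // handles 7" devices via the sw600dp qualifier instead of the coarse
    // SCREENLAYOUT_SIZE_XLARGE check used by the removed API.
    public static boolean isTablet(Context context) {
        return context.getResources().getBoolean(R.bool.is_tablet);
    }
}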
forcedotcom-SalesforceMobileSDK-Android
java
@@ -230,7 +230,7 @@ public class GridLauncherV3 { } configureLogging(common.getLog(), common.getDebug()); - log.info(version()); + log.finest(version()); return true; }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.grid.selenium; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableMap; import com.beust.jcommander.JCommander; import org.openqa.grid.common.GridRole; import org.openqa.grid.internal.cli.CommonCliOptions; import org.openqa.grid.internal.cli.GridHubCliOptions; import org.openqa.grid.internal.cli.GridNodeCliOptions; import org.openqa.grid.internal.cli.StandaloneCliOptions; import org.openqa.grid.internal.utils.SelfRegisteringRemote; import org.openqa.grid.internal.utils.configuration.GridHubConfiguration; import org.openqa.grid.internal.utils.configuration.GridNodeConfiguration; import org.openqa.grid.internal.utils.configuration.StandaloneConfiguration; import org.openqa.grid.shared.Stoppable; import org.openqa.grid.web.Hub; import org.openqa.selenium.BuildInfo; import org.openqa.selenium.grid.log.TerseFormatter; import org.openqa.selenium.net.PortProber; import org.openqa.selenium.remote.server.SeleniumServer; import org.openqa.selenium.remote.server.log.LoggingOptions; import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.util.Arrays; import java.util.Map; import java.util.Optional; import java.util.logging.ConsoleHandler; import java.util.logging.FileHandler; import java.util.logging.Handler; import java.util.logging.Level; import java.util.logging.Logger; public class GridLauncherV3 { private static final Logger log = Logger.getLogger(GridLauncherV3.class.getName()); private static final BuildInfo buildInfo = new BuildInfo(); private PrintStream out; @FunctionalInterface private interface GridItemLauncher { Stoppable launch(String[] args); } private Map<GridRole, GridItemLauncher> LAUNCHERS = buildLaunchers(); public static void main(String[] args) { new GridLauncherV3().launch(args); } public GridLauncherV3() { this(System.out); } @VisibleForTesting public GridLauncherV3(PrintStream out) { this.out = out; System.setProperty("org.seleniumhq.jetty9.LEVEL", "WARN"); } public Stoppable launch(String[] args) { return Optional.ofNullable(buildLauncher(args)) .map(l -> l.launch(args)) .orElse(()->{}); } /** * From the {@code args}, builds a new {@link GridItemLauncher} and populates it properly. * * @return null if no role is found, or a properly populated {@link GridItemLauncher}. 
*/ private GridItemLauncher buildLauncher(String[] args) { if (Arrays.asList(args).contains("-htmlSuite")) { out.println(Joiner.on("\n").join( "Download the Selenium HTML Runner from http://www.seleniumhq.org/download/ and", "use that to run your HTML suite.")); return null; } String role = "standalone"; for (int i = 0; i < args.length; i++) { if (args[i].startsWith("-role=")) { role = args[i].substring("-role=".length()); } else if (args[i].equals("-role")) { i++; // Increment, because we're going to need this. if (i < args.length) { role = args[i]; } else { role = null; // Will cause us to print the usage information. } } } GridRole gridRole = GridRole.get(role); if (gridRole == null || LAUNCHERS.get(gridRole) == null) { printInfoAboutRoles(role); return null; } return LAUNCHERS.get(gridRole); } private void printInfoAboutRoles(String roleCommandLineArg) { if (roleCommandLineArg != null) { printWrappedLine( "", "Error: the role '" + roleCommandLineArg + "' does not match a recognized server role: node/hub/standalone\n"); } else { printWrappedLine( "", "Error: -role option needs to be followed by the value that defines role of this " + "component in the grid\n"); } out.println( "Selenium server can run in one of the following roles:\n" + " hub as a hub of a Selenium grid\n" + " node as a node of a Selenium grid\n" + " standalone as a standalone server not being a part of a grid\n" + "\n" + "If -role option is omitted the server runs standalone\n"); printWrappedLine( "", "To get help on the options available for a specific role run the server" + " with -help option and the corresponding -role option value"); } private void printWrappedLine(String prefix, String msg) { printWrappedLine(prefix, msg, true); } private void printWrappedLine(String prefix, String msg, boolean first) { out.print(prefix); if (!first) { out.print(" "); } int defaultWrap = 70; int wrap = defaultWrap - prefix.length(); if (wrap > msg.length()) { out.println(msg); return; } String lineRaw = msg.substring(0, wrap); int spaceIndex = lineRaw.lastIndexOf(' '); if (spaceIndex == -1) { spaceIndex = lineRaw.length(); } String line = lineRaw.substring(0, spaceIndex); out.println(line); printWrappedLine(prefix, msg.substring(spaceIndex + 1), false); } private static void configureLogging(String log, boolean debug) { Level logLevel = debug ? Level.FINE : LoggingOptions.getDefaultLogLevel(); if (logLevel == null) { logLevel = Level.INFO; } Logger.getLogger("").setLevel(logLevel); String logFilename = log != null ? 
log : LoggingOptions.getDefaultLogOutFile(); if (logFilename != null) { for (Handler handler : Logger.getLogger("").getHandlers()) { if (handler instanceof ConsoleHandler) { Logger.getLogger("").removeHandler(handler); } } try { Handler logFile = new FileHandler(new File(logFilename).getAbsolutePath(), true); logFile.setFormatter(new TerseFormatter()); logFile.setLevel(logLevel); Logger.getLogger("").addHandler(logFile); } catch (IOException e) { throw new RuntimeException(e); } } else { for (Handler handler : Logger.getLogger("").getHandlers()) { if (handler instanceof ConsoleHandler) { handler.setLevel(logLevel); handler.setFormatter(new TerseFormatter()); } } } } private String version() { return String.format( "Selenium server version: %s, revision: %s", buildInfo.getReleaseLabel(), buildInfo.getBuildRevision()); } private boolean parse(String[] args, Object options, CommonCliOptions common) { JCommander commander = JCommander.newBuilder().addObject(options).build(); commander.parse(args); if (common.getVersion()) { out.println(version()); return false; } if (common.getHelp()) { StringBuilder toPrint = new StringBuilder(); commander.usage(toPrint); out.append(toPrint); return false; } configureLogging(common.getLog(), common.getDebug()); log.info(version()); return true; } private Map<GridRole, GridItemLauncher> buildLaunchers() { return ImmutableMap.<GridRole, GridItemLauncher>builder() .put(GridRole.NOT_GRID, (args) -> { StandaloneCliOptions options = new StandaloneCliOptions(); if (!parse(args, options, options.getCommonOptions())) { return ()->{}; } StandaloneConfiguration configuration = new StandaloneConfiguration(options); log.info(String.format( "Launching a standalone Selenium Server on port %s", configuration.port)); SeleniumServer server = new SeleniumServer(configuration); server.boot(); return server; }) .put(GridRole.HUB, (args) -> { GridHubCliOptions options = new GridHubCliOptions(); if (!parse(args, options, options.getCommonGridOptions().getCommonOptions())) { return ()->{}; } GridHubConfiguration configuration = new GridHubConfiguration(options); configuration.setRawArgs(args); // for grid console log.info(String.format( "Launching Selenium Grid hub on port %s", configuration.port)); Hub hub = new Hub(configuration); hub.start(); return hub; }) .put(GridRole.NODE, (args) -> { GridNodeCliOptions options = new GridNodeCliOptions(); if (!parse(args, options, options.getCommonGridOptions().getCommonOptions())) { return ()->{}; } GridNodeConfiguration configuration = new GridNodeConfiguration(options); if (configuration.port == null || configuration.port == -1) { configuration.port = PortProber.findFreePort(); } log.info(String.format( "Launching a Selenium Grid node on port %s", configuration.port)); SelfRegisteringRemote remote = new SelfRegisteringRemote(configuration); SeleniumServer server = new SeleniumServer(remote.getConfiguration()); remote.setRemoteServer(server); if (remote.startRemoteServer()) { log.info("Selenium Grid node is up and ready to register to the hub"); remote.startRegistrationProcess(); } return server; }) .build(); } }
1
16,456
This change means that users can't easily see which version of the selenium server they're using. This is `info` level information.
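A minimal java.util.logging sketch of the objection: the console handler defaults to INFO, and even the launcher's -debug flag only lowers the level to FINE (see configureLogging in the file above), so a FINEST record never reaches the console and the version line effectively disappears from normal runs.

import java.util.logging.Logger;

public class LogLevelDemo {
    private static final Logger log = Logger.getLogger(LogLevelDemo.class.getName());

    public static void main(String[] args) {
        log.info("Selenium server version: ...");   // printed under the default INFO level
        log.finest("Selenium server version: ..."); // dropped: below both INFO and the FINE debug level
    }
}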
SeleniumHQ-selenium
java
@@ -455,8 +455,8 @@ class AuthTestCase(QuiltTestCase): def testCodeExpires(self): self.code_immediate_expire_mock = mock.patch('quilt_server.auth.CODE_TTL_DEFAULT', {'minutes': 0}) - self.code_immediate_expire_mock.start() token = self.getToken() + self.code_immediate_expire_mock.start() code_request = self.app.get( '/api/code', headers={
1
import itsdangerous import json import jwt import time import requests import unittest from unittest import mock from unittest.mock import patch from .utils import QuiltTestCase from quilt_server import app, db from quilt_server.models import Code, User from quilt_server.auth import (_create_user, _delete_user, issue_token, encode_code, decode_code, generate_uuid, verify_token_string, generate_activation_link, generate_reset_link, verify_activation_link, verify_reset_link, verify_hash ) CATALOG_URL = app.config['CATALOG_URL'] class AuthTestCase(QuiltTestCase): """ unit tests for Flask-based auth """ def setUp(self): super(AuthTestCase, self).setUp() self.TEST_USER_ID = User.query.filter_by(name=self.TEST_USER).one_or_none().id self.token_verify_mock.stop() # disable auth mock def getToken(self, username=None, password=None): username = username or self.TEST_USER password = password or self.TEST_USER_PASSWORD response = self.app.post( '/api/login', headers={'content-type': 'application/json'}, data=json.dumps({'username': username, 'password': password}) ) try: token = json.loads(response.data.decode('utf8')).get('token') except Exception as e: raise Exception(response.data.decode('utf8')) return token def useToken(self, token): return self.app.get( '/api/me', headers={ 'content-type': 'application/json', 'Authorization': token } ) def decodeToken(self, token): return jwt.decode(token, verify=False) def testCodeRoundtrips(self): code = {'id': generate_uuid(), 'code': generate_uuid()} assert code == decode_code(encode_code(code)) def testIssueToken(self): assert issue_token(User.query.filter_by(name=self.TEST_USER).one_or_none()) def testDeleteUser(self): assert User.query.filter_by(name=self.OTHER_USER).one_or_none() _delete_user(User.query.filter_by(name=self.OTHER_USER).one_or_none()) db.session.commit() assert not User.query.filter_by(name=self.OTHER_USER).one_or_none() def testUserExists(self): assert User.query.filter_by(name=self.TEST_USER).one_or_none() def testDuplicateUserFails(self): try: _create_user(self.TEST_USER, pasword=self.TEST_PASSWORD, email=self.TEST_USER_EMAIL, requires_activation=False) except: return True raise Exception('Creating duplicate user failed to raise') def testLoginUserPass(self): token = self.getToken() assert token def testVerifyTokenAsString(self): token = self.getToken() assert verify_token_string(token) def testRefreshToken(self): # try to exchange a token for a new one that expires later token = self.getToken() t = self.decodeToken(token) exp = t.get('exp') auth_headers = { 'Authorization': token, 'content-type': 'application/json' } api_root_request = self.app.get( '/api/me', headers=auth_headers ) assert api_root_request.status_code == 200 time.sleep(2) auth_headers = { 'Authorization': token, 'content-type': 'application/json' } new_token_request = self.app.post( '/api/refresh', headers=auth_headers ) new_token = json.loads(new_token_request.data.decode('utf8')).get('token') new_exp = self.decodeToken(new_token).get('exp') assert new_exp > exp # test re-creating user doesn't invalidate tokens try: _create_user(self.TEST_USER, password=self.TEST_PASSWORD, email='{user}{suf}'.format(user=self.TEST_USER, suf=self.email_suffix), requires_activation=False) except: pass auth_headers = { 'Authorization': new_token, 'content-type': 'application/json' } api_root_request = self.app.get( '/api/me', headers=auth_headers ) assert api_root_request.status_code == 200 def testActivationLink(self): link = generate_activation_link(self.TEST_USER_ID) assert 
verify_activation_link(link) def testResetLink(self): link = generate_reset_link(self.TEST_USER_ID) payload = verify_reset_link(link) assert payload assert payload['id'] def testLinksExpire(self): activate_link = generate_activation_link(self.TEST_USER_ID) reset_link = generate_reset_link(self.TEST_USER_ID) time.sleep(1) assert not verify_activation_link(activate_link, 0) assert not verify_reset_link(reset_link, 0) def testWrongLinksShouldFail(self): activate_link = generate_activation_link(self.TEST_USER_ID) reset_link = generate_reset_link(self.TEST_USER_ID) assert not verify_reset_link(activate_link) assert not verify_activation_link(reset_link) @patch('quilt_server.auth.send_activation_email') def testRegister(self, send_activation_email): user = 'new_user' email = '[email protected]' password = 'example_password' response = self.app.post( '/api/register', headers={'content-type': 'application/json'}, data=json.dumps( {'username': user, 'email': email, 'password': password} ) ) assert response.status_code == 200 assert send_activation_email.called user = send_activation_email.call_args[0][0] link = send_activation_email.call_args[0][1] activate_response = self.app.get( '/activate/{link}'.format(link=link) ) assert activate_response.status_code == 302 assert activate_response.location[-6:] == 'signin' def testLoginRedirectsToCode(self): response = self.app.get( '/login' ) assert response.status_code == 302 assert response.location == '{CATALOG_URL}/code'.format(CATALOG_URL=CATALOG_URL) @patch('quilt_server.auth.send_reset_email') def testReset(self, send_reset_email): user = self.TEST_USER email = '{user}{suf}'.format(user=user, suf=self.email_suffix) new_password = 'new_password' bad_password = 'bad' response = self.app.post( '/api/reset_password', headers={'content-type': 'application/json'}, data=json.dumps({'email': 'user-that-definitely-does-not-exist{suf}' .format(suf=self.email_suffix)}) ) assert response.status_code == 200 assert not send_reset_email.called token = self.getToken() assert token assert self.useToken(token).status_code == 200 response = self.app.post( '/api/reset_password', headers={'content-type': 'application/json'}, data=json.dumps({'email': email}) ) assert response.status_code == 200 assert send_reset_email.called assert self.getToken() # old password still works called_user = send_reset_email.call_args[0][0] assert called_user.name == user assert called_user.email == email reset_link = send_reset_email.call_args[0][1] reset_response = self.app.post( '/api/change_password', headers={'content-type': 'application/json'}, data=json.dumps({'link': reset_link, 'password': new_password}) ) assert reset_response.status_code == 200 assert not self.getToken() assert self.useToken(token).status_code == 401 new_password_request = self.app.post( '/api/login', headers={'content-type': 'application/json'}, data=json.dumps({'username': self.TEST_USER, 'password': new_password}) ) assert new_password_request.status_code == 200 assert json.loads(new_password_request.data.decode('utf8')).get('token') # test link doesn't work twice new_reset_response = self.app.post( '/api/change_password', headers={'content-type': 'application/json'}, data=json.dumps({'link': reset_link, 'password': bad_password}) ) assert new_reset_response.status_code != 200 assert not self.getToken(user, bad_password) @patch('quilt_server.auth.send_activation_email') def testActivate(self, send_activation_email): payload = { 'username' : 'new_user', 'password' : 'password', 'email' : '[email protected]' } 
new_user_request = self.app.post( '/api/register', headers={'content-type': 'application/json'}, data=json.dumps(payload) ) assert new_user_request.status_code == 200 assert send_activation_email.called assert not self.getToken(payload['username'], payload['password']) called_user = send_activation_email.call_args[0][0] activate_link = send_activation_email.call_args[0][1] activate_request = self.app.get( '/activate/{link}'.format(link=activate_link) ) assert activate_request.status_code == 302 assert activate_request.location[-6:] == 'signin' assert self.getToken(payload['username'], payload['password']) def testGetCode(self): token = self.getToken() code_request = self.app.get( '/api/code', headers={ 'Authorization': token, 'content-type': 'application/json' } ) assert code_request.status_code == 200 def testCompilerLogin(self): # get initial token token = self.getToken() # request code code_request = self.app.get( '/api/code', headers={ 'Authorization': token, 'content-type': 'application/json' } ) assert code_request.status_code == 200 code = json.loads(code_request.data.decode('utf8')).get('code') # exchange code for token token_request = self.app.post( '/api/token', data={'refresh_token': code} ) assert token_request.status_code == 200 # check code doesn't work twice token_request2 = self.app.post( '/api/token', data={'refresh_token': code} ) assert token_request2.status_code == 401 payload = json.loads(token_request.data.decode('utf8')) assert payload['access_token'] == payload['refresh_token'] assert 'expires_at' in payload old_exp = payload['expires_at'] new_token = payload['refresh_token'] # refresh token refresh_request = self.app.post( '/api/token', data={'refresh_token': new_token} ) assert refresh_request.status_code == 200 refreshed_token_payload = json.loads(refresh_request.data.decode('utf8')) assert 'expires_at' in refreshed_token_payload assert refreshed_token_payload['access_token'] == refreshed_token_payload['refresh_token'] def testDisabledandDeletedUsersCodesAndTokensAreRevoked(self): admin_token_request = self.app.post( '/api/login', data=json.dumps( {'username': self.TEST_ADMIN, 'password': self.TEST_ADMIN_PASSWORD}), headers={'content-type': 'application/json'} ) assert admin_token_request.status_code == 200 admin_token = json.loads(admin_token_request.data.decode('utf8'))['token'] def disable_user(username): request = self.app.post( '/api/users/disable', data=json.dumps({'username': username}), headers={ 'content-type': 'application/json', 'Authorization': admin_token } ) assert request.status_code == 200 def enable_user(username): request = self.app.post( '/api/users/enable', data=json.dumps({'username': username}), headers={ 'content-type': 'application/json', 'Authorization': admin_token } ) assert request.status_code == 200 def api_root(token): request = self.app.get( '/api/me', headers={ 'content-type': 'application/json', 'Authorization': token } ) return request def logout(token): request = self.app.post( '/api/logout', headers={ 'content-type': 'application/json', 'Authorization': 'Bearer ' + token } ) return request first_token = self.getToken('user1', 'user1') assert api_root(first_token).status_code == 200 disable_user('user1') assert not self.getToken('user1', 'user1') assert api_root(first_token).status_code == 401 enable_user('user1') new_token = self.getToken('user1', 'user1') assert api_root(new_token).status_code == 200 assert logout(new_token).status_code == 200 assert api_root(new_token).status_code == 401 def 
testMigratePasswordsWillWork(self): old_pw = 'quilt' old_pw_hash = 'pbkdf2_sha256$20000$PEZ6yGDDm4LK$Jx9/lOYmgbELXywYYrySjTkc1yBcpZM4fUjRtI8ajRA=' verify_hash(old_pw, old_pw_hash) def testBadLoginAttempt(self): username = 'asdf' password = 'jkl;asdf' response = self.app.post( '/api/login', # headers={'content-type': 'application/json'}, data=json.dumps({'username': username, 'password': password}) ) assert response.status_code == 400 def testMultipleCodes(self): token = self.getToken() def code_request(): code_request = self.app.get( '/api/code', headers={ 'Authorization': token, 'content-type': 'application/json' } ) return code_request def exchange_code_for_token(code): token_request = self.app.post( '/api/token', data={'refresh_token': code} ) return token_request def api_root(token_request): token = json.loads(token_request.data.decode('utf8')).get('access_token') request = self.app.get( '/api/me', headers={ 'content-type': 'application/json', 'Authorization': token } ) return request code1 = code_request() code1unpacked = json.loads(code1.data.decode('utf8')).get('code') code2 = code_request() code2unpacked = json.loads(code2.data.decode('utf8')).get('code') token1 = exchange_code_for_token(code1unpacked) assert token1.status_code == 200 token2 = exchange_code_for_token(code2unpacked) assert token2.status_code == 200 assert api_root(token1).status_code == 200 assert api_root(token2).status_code == 200 def testCodeExpires(self): self.code_immediate_expire_mock = mock.patch('quilt_server.auth.CODE_TTL_DEFAULT', {'minutes': 0}) self.code_immediate_expire_mock.start() token = self.getToken() code_request = self.app.get( '/api/code', headers={ 'Authorization': token, 'content-type': 'application/json' } ) assert code_request.status_code == 200 time.sleep(1) code = json.loads(code_request.data.decode('utf8')).get('code') token_request = self.app.post( '/api/token', data={'refresh_token': code} ) assert token_request.status_code == 401 self.code_immediate_expire_mock.stop()
1
16,908
You should just use `with patch(...):`, so it unpatches it automatically. Also, much simpler.
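A hedged sketch of the suggested shape: the with-block scopes the TTL override to just the code request and removes the start()/stop() bookkeeping entirely (imports and helpers as in the test module above).

    def testCodeExpires(self):
        token = self.getToken()  # issue the login token before patching the TTL
        with mock.patch('quilt_server.auth.CODE_TTL_DEFAULT', {'minutes': 0}):
            code_request = self.app.get(
                '/api/code',
                headers={
                    'Authorization': token,
                    'content-type': 'application/json',
                },
            )
            assert code_request.status_code == 200
            time.sleep(1)
            code = json.loads(code_request.data.decode('utf8')).get('code')
            token_request = self.app.post(
                '/api/token',
                data={'refresh_token': code},
            )
            assert token_request.status_code == 401
        # No explicit stop(): leaving the with-block unpatches automatically.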
quiltdata-quilt
py
@@ -28,14 +28,15 @@ namespace MvvmCross.Navigation private IMvxViewDispatcher _viewDispatcher; public IMvxViewDispatcher ViewDispatcher { - get => _viewDispatcher ?? (IMvxViewDispatcher)MvxMainThreadDispatcher.Instance; + get => _viewDispatcher ?? (_viewDispatcher = Mvx.Resolve<IMvxViewDispatcher>()); set => _viewDispatcher = value; } private IMvxViewsContainer _viewsContainer; protected IMvxViewsContainer ViewsContainer { - get { + get + { if (_viewsContainer == null) _viewsContainer = Mvx.Resolve<IMvxViewsContainer>(); return _viewsContainer;
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MS-PL license. // See the LICENSE file in the project root for more information. using System; using System.Collections.Generic; using System.Linq; using System.Reflection; using System.Runtime.CompilerServices; using System.Text.RegularExpressions; using System.Threading; using System.Threading.Tasks; using MvvmCross.Base; using MvvmCross.Core; using MvvmCross.Exceptions; using MvvmCross.Logging; using MvvmCross.Navigation.EventArguments; using MvvmCross.Presenters.Hints; using MvvmCross.ViewModels; using MvvmCross.Views; namespace MvvmCross.Navigation { public class MvxNavigationService : IMvxNavigationService { protected readonly IMvxLog Log = Mvx.Resolve<IMvxLogProvider>().GetLogFor<MvxNavigationService>(); private IMvxViewDispatcher _viewDispatcher; public IMvxViewDispatcher ViewDispatcher { get => _viewDispatcher ?? (IMvxViewDispatcher)MvxMainThreadDispatcher.Instance; set => _viewDispatcher = value; } private IMvxViewsContainer _viewsContainer; protected IMvxViewsContainer ViewsContainer { get { if (_viewsContainer == null) _viewsContainer = Mvx.Resolve<IMvxViewsContainer>(); return _viewsContainer; } set => _viewsContainer = value; } protected static readonly Dictionary<Regex, Type> Routes = new Dictionary<Regex, Type>(); protected virtual IMvxNavigationCache NavigationCache { get; private set; } protected IMvxViewModelLoader ViewModelLoader { get; set; } protected ConditionalWeakTable<IMvxViewModel, TaskCompletionSource<object>> _tcsResults = new ConditionalWeakTable<IMvxViewModel, TaskCompletionSource<object>>(); public event BeforeNavigateEventHandler BeforeNavigate; public event AfterNavigateEventHandler AfterNavigate; public event BeforeCloseEventHandler BeforeClose; public event AfterCloseEventHandler AfterClose; public event BeforeChangePresentationEventHandler BeforeChangePresentation; public event AfterChangePresentationEventHandler AfterChangePresentation; public MvxNavigationService(IMvxNavigationCache navigationCache, IMvxViewModelLoader viewModelLoader) { NavigationCache = navigationCache; ViewModelLoader = viewModelLoader; } public static void LoadRoutes(IEnumerable<Assembly> assemblies) { Routes.Clear(); foreach(var routeAttr in assemblies.SelectMany(a => a.GetCustomAttributes<MvxNavigationAttribute>())) { Routes.Add(new Regex(routeAttr.UriRegex, RegexOptions.CultureInvariant | RegexOptions.IgnoreCase | RegexOptions.Singleline), routeAttr.ViewModelOrFacade); } } protected virtual bool TryGetRoute(string url, out KeyValuePair<Regex, Type> entry) { try { var matches = Routes.Where(t => t.Key.IsMatch(url)).ToList(); switch(matches.Count) { case 0: entry = default(KeyValuePair<Regex, Type>); Log.Trace("Unable to find routing for {0}", url); return false; case 1: entry = matches[0]; return true; } var directMatch = matches.Where(t => t.Key.Match(url).Groups.Count == 1).ToList(); if(directMatch.Count == 1) { entry = directMatch[0]; return true; } Log.Warn("The following regular expressions match the provided url ({0}), each RegEx must be unique (otherwise try using IMvxRoutingFacade): {1}", matches.Count - 1, string.Join(", ", matches.Select(t => t.Key.ToString()))); // there is more than one match entry = default(KeyValuePair<Regex, Type>); return false; } catch(Exception ex) { Log.Error("MvxNavigationService", "Unable to determine routability: {0}", ex); entry = default(KeyValuePair<Regex, Type>); return false; } } protected virtual 
IDictionary<string, string> BuildParamDictionary(Regex regex, Match match) { var paramDict = new Dictionary<string, string>(); for(var i = 1 /* 0 == Match itself */; i < match.Groups.Count; i++) { var group = match.Groups[i]; var name = regex.GroupNameFromNumber(i); var value = group.Value; paramDict.Add(name, value); } return paramDict; } protected virtual async Task<MvxViewModelInstanceRequest> NavigationRouteRequest(string path, IMvxBundle presentationBundle = null) { KeyValuePair<Regex, Type> entry; if(!TryGetRoute(path, out entry)) { throw new MvxException($"Navigation route request could not be obtained for path: {path}"); } var regex = entry.Key; var match = regex.Match(path); var paramDict = BuildParamDictionary(regex, match); var parameterValues = new MvxBundle(paramDict); var viewModelType = entry.Value; var request = new MvxViewModelInstanceRequest(viewModelType) { PresentationValues = presentationBundle?.SafeGetData(), ParameterValues = parameterValues?.SafeGetData() }; if(viewModelType.GetInterfaces().Contains(typeof(IMvxNavigationFacade))) { var facade = (IMvxNavigationFacade)Mvx.IocConstruct(viewModelType); try { var facadeRequest = await facade.BuildViewModelRequest(path, paramDict).ConfigureAwait(false); if(facadeRequest == null) { throw new MvxException($"{nameof(MvxNavigationService)}: Facade did not return a valid {nameof(MvxViewModelRequest)}."); } request.ViewModelType = facadeRequest.ViewModelType; if(facadeRequest.ParameterValues != null) { request.ParameterValues = facadeRequest.ParameterValues; } request.ViewModelInstance = ViewModelLoader.LoadViewModel(request, null); } catch(Exception ex) { throw ex.MvxWrap($"{nameof(MvxNavigationService)}: Exception thrown while processing URL: {path} with RoutingFacade: {viewModelType}"); } } else { request.ViewModelInstance = ViewModelLoader.LoadViewModel(request, null); } return request; } protected async Task<MvxViewModelInstanceRequest> NavigationRouteRequest<TParameter>(string path, TParameter param, IMvxBundle presentationBundle = null) { KeyValuePair<Regex, Type> entry; if(!TryGetRoute(path, out entry)) { throw new MvxException($"Navigation route request could not be obtained for path: {path}"); } var regex = entry.Key; var match = regex.Match(path); var paramDict = BuildParamDictionary(regex, match); var parameterValues = new MvxBundle(paramDict); var viewModelType = entry.Value; var request = new MvxViewModelInstanceRequest(viewModelType) { PresentationValues = presentationBundle?.SafeGetData(), ParameterValues = parameterValues?.SafeGetData() }; if(viewModelType.GetInterfaces().Contains(typeof(IMvxNavigationFacade))) { var facade = (IMvxNavigationFacade)Mvx.IocConstruct(viewModelType); try { var facadeRequest = await facade.BuildViewModelRequest(path, paramDict).ConfigureAwait(false); if(facadeRequest == null) { throw new MvxException($"{nameof(MvxNavigationService)}: Facade did not return a valid {nameof(MvxViewModelRequest)}."); } request.ViewModelType = facadeRequest.ViewModelType; if(facadeRequest.ParameterValues != null) { request.ParameterValues = facadeRequest.ParameterValues; } request.ViewModelInstance = ViewModelLoader.LoadViewModel(request, param, null); } catch(Exception ex) { ex.MvxWrap($"{nameof(MvxNavigationService)}: Exception thrown while processing URL: {path} with RoutingFacade: {viewModelType}"); } } else { request.ViewModelInstance = ViewModelLoader.LoadViewModel(request, param, null); } return request; } public virtual Task<bool> CanNavigate(string path) { return 
Task.FromResult(TryGetRoute(path, out KeyValuePair<Regex, Type> entry)); } public virtual Task<bool> CanNavigate<TViewModel>() where TViewModel : IMvxViewModel { return Task.FromResult(ViewsContainer.GetViewType(typeof(TViewModel)) != null); } public virtual Task<bool> CanNavigate(Type viewModelType) { return Task.FromResult(ViewsContainer.GetViewType(viewModelType) != null); } protected virtual async Task Navigate(MvxViewModelRequest request, IMvxViewModel viewModel, IMvxBundle presentationBundle = null, CancellationToken cancellationToken = default(CancellationToken)) { var args = new NavigateEventArgs(viewModel); OnBeforeNavigate(this, args); ViewDispatcher.ShowViewModel(request); if(viewModel.InitializeTask?.Task != null) await viewModel.InitializeTask.Task.ConfigureAwait(false); OnAfterNavigate(this, args); } protected virtual async Task<TResult> Navigate<TResult>(MvxViewModelRequest request, IMvxViewModelResult<TResult> viewModel, IMvxBundle presentationBundle = null, CancellationToken cancellationToken = default(CancellationToken)) { var args = new NavigateEventArgs(viewModel); OnBeforeNavigate(this, args); if(cancellationToken != default(CancellationToken)) { cancellationToken.Register(async () => { await Close(viewModel, default(TResult)); }); } var tcs = new TaskCompletionSource<object>(); viewModel.CloseCompletionSource = tcs; _tcsResults.Add(viewModel, tcs); ViewDispatcher.ShowViewModel(request); if(viewModel.InitializeTask?.Task != null) await viewModel.InitializeTask.Task.ConfigureAwait(false); OnAfterNavigate(this, args); try { return (TResult)await tcs.Task; } catch(Exception) { return default(TResult); } } protected virtual async Task<TResult> Navigate<TParameter, TResult>(MvxViewModelRequest request, IMvxViewModel<TParameter, TResult> viewModel, TParameter param, IMvxBundle presentationBundle = null, CancellationToken cancellationToken = default(CancellationToken)) { var args = new NavigateEventArgs(viewModel); OnBeforeNavigate(this, args); if(cancellationToken != default(CancellationToken)) { cancellationToken.Register(async () => { await Close(viewModel, default(TResult)); }); } var tcs = new TaskCompletionSource<object>(); viewModel.CloseCompletionSource = tcs; _tcsResults.Add(viewModel, tcs); ViewDispatcher.ShowViewModel(request); if(viewModel.InitializeTask?.Task != null) await viewModel.InitializeTask.Task.ConfigureAwait(false); OnAfterNavigate(this, args); try { return (TResult)await tcs.Task; } catch(Exception) { return default(TResult); } } public virtual async Task Navigate(string path, IMvxBundle presentationBundle = null) { var request = await NavigationRouteRequest(path, presentationBundle).ConfigureAwait(false); await Navigate(request, request.ViewModelInstance, presentationBundle).ConfigureAwait(false); } public virtual async Task Navigate<TParameter>(string path, TParameter param, IMvxBundle presentationBundle = null) { var request = await NavigationRouteRequest(path, param, presentationBundle).ConfigureAwait(false); await Navigate(request, request.ViewModelInstance, presentationBundle).ConfigureAwait(false); } public virtual async Task<TResult> Navigate<TResult>(string path, IMvxBundle presentationBundle = null, CancellationToken cancellationToken = default(CancellationToken)) { var request = await NavigationRouteRequest(path, presentationBundle).ConfigureAwait(false); return await Navigate<TResult>(request, (IMvxViewModelResult<TResult>)request.ViewModelInstance, presentationBundle, cancellationToken).ConfigureAwait(false); } public virtual async 
Task<TResult> Navigate<TParameter, TResult>(string path, TParameter param, IMvxBundle presentationBundle = null, CancellationToken cancellationToken = default(CancellationToken)) { var request = await NavigationRouteRequest(path, param, presentationBundle).ConfigureAwait(false); return await Navigate<TParameter, TResult>(request, (IMvxViewModel<TParameter, TResult>)request.ViewModelInstance, param, presentationBundle, cancellationToken).ConfigureAwait(false); } public virtual async Task Navigate(Type viewModelType, IMvxBundle presentationBundle = null) { var request = new MvxViewModelInstanceRequest(viewModelType) { PresentationValues = presentationBundle?.SafeGetData() }; request.ViewModelInstance = ViewModelLoader.LoadViewModel(request, null); await Navigate(request, request.ViewModelInstance, presentationBundle).ConfigureAwait(false); } public virtual async Task Navigate<TParameter>(Type viewModelType, TParameter param, IMvxBundle presentationBundle = null) { var request = new MvxViewModelInstanceRequest(viewModelType) { PresentationValues = presentationBundle?.SafeGetData() }; request.ViewModelInstance = ViewModelLoader.LoadViewModel(request, param, null); await Navigate(request, request.ViewModelInstance, presentationBundle).ConfigureAwait(false); } public virtual async Task<TResult> Navigate<TResult>(Type viewModelType, IMvxBundle presentationBundle = null, CancellationToken cancellationToken = default(CancellationToken)) { var request = new MvxViewModelInstanceRequest(viewModelType) { PresentationValues = presentationBundle?.SafeGetData() }; request.ViewModelInstance = (IMvxViewModelResult<TResult>)ViewModelLoader.LoadViewModel(request, null); return await Navigate<TResult>(request, (IMvxViewModelResult<TResult>)request.ViewModelInstance, presentationBundle, cancellationToken).ConfigureAwait(false); } public virtual async Task<TResult> Navigate<TParameter, TResult>(Type viewModelType, TParameter param, IMvxBundle presentationBundle = null, CancellationToken cancellationToken = default(CancellationToken)) { var request = new MvxViewModelInstanceRequest(viewModelType) { PresentationValues = presentationBundle?.SafeGetData() }; request.ViewModelInstance = (IMvxViewModel<TParameter, TResult>)ViewModelLoader.LoadViewModel(request, param, null); return await Navigate<TParameter, TResult>(request, (IMvxViewModel<TParameter, TResult>)request.ViewModelInstance, param, presentationBundle, cancellationToken).ConfigureAwait(false); } public virtual Task Navigate<TViewModel>(IMvxBundle presentationBundle = null) where TViewModel : IMvxViewModel { return Navigate(typeof(TViewModel), presentationBundle); } public virtual Task Navigate<TViewModel, TParameter>(TParameter param, IMvxBundle presentationBundle = null) where TViewModel : IMvxViewModel<TParameter> { return Navigate(typeof(TViewModel), param, presentationBundle); } public virtual Task<TResult> Navigate<TViewModel, TResult>(IMvxBundle presentationBundle = null, CancellationToken cancellationToken = default(CancellationToken)) where TViewModel : IMvxViewModelResult<TResult> { return Navigate<TResult>(typeof(TViewModel), presentationBundle, cancellationToken); } public virtual Task<TResult> Navigate<TViewModel, TParameter, TResult>(TParameter param, IMvxBundle presentationBundle = null, CancellationToken cancellationToken = default(CancellationToken)) where TViewModel : IMvxViewModel<TParameter, TResult> { return Navigate<TParameter, TResult>(typeof(TViewModel), param, presentationBundle, cancellationToken); } public virtual async Task 
Navigate(IMvxViewModel viewModel, IMvxBundle presentationBundle = null) { var request = new MvxViewModelInstanceRequest(viewModel) { PresentationValues = presentationBundle?.SafeGetData() }; ViewModelLoader.ReloadViewModel(viewModel, request, null); await Navigate(request, viewModel, presentationBundle).ConfigureAwait(false); } public virtual async Task Navigate<TParameter>(IMvxViewModel<TParameter> viewModel, TParameter param, IMvxBundle presentationBundle = null) { var request = new MvxViewModelInstanceRequest(viewModel) { PresentationValues = presentationBundle?.SafeGetData() }; ViewModelLoader.ReloadViewModel(viewModel, param, request, null); await Navigate(request, viewModel, presentationBundle).ConfigureAwait(false); } public virtual async Task<TResult> Navigate<TResult>(IMvxViewModelResult<TResult> viewModel, IMvxBundle presentationBundle = null, CancellationToken cancellationToken = default(CancellationToken)) { var request = new MvxViewModelInstanceRequest(viewModel) { PresentationValues = presentationBundle?.SafeGetData() }; ViewModelLoader.ReloadViewModel(viewModel, request, null); return await Navigate<TResult>(request, viewModel, presentationBundle, cancellationToken).ConfigureAwait(false); } public virtual async Task<TResult> Navigate<TParameter, TResult>(IMvxViewModel<TParameter, TResult> viewModel, TParameter param, IMvxBundle presentationBundle = null, CancellationToken cancellationToken = default(CancellationToken)) { var request = new MvxViewModelInstanceRequest(viewModel) { PresentationValues = presentationBundle?.SafeGetData() }; ViewModelLoader.ReloadViewModel(viewModel, param, request, null); return await Navigate<TParameter, TResult>(request, viewModel, param, presentationBundle, cancellationToken).ConfigureAwait(false); } public virtual Task<bool> ChangePresentation(MvxPresentationHint hint) { MvxLog.Instance.Trace("Requesting presentation change"); var args = new ChangePresentationEventArgs(hint); OnBeforeChangePresentation(this, args); var result = ViewDispatcher.ChangePresentation(hint); args.Result = result; OnAfterChangePresentation(this, args); return Task.FromResult(result); } public virtual Task<bool> Close(IMvxViewModel viewModel) { var args = new NavigateEventArgs(viewModel); OnBeforeClose(this, args); var close = ViewDispatcher.ChangePresentation(new MvxClosePresentationHint(viewModel)); OnAfterClose(this, args); return Task.FromResult(close); } public virtual async Task<bool> Close<TResult>(IMvxViewModelResult<TResult> viewModel, TResult result) { _tcsResults.TryGetValue(viewModel, out TaskCompletionSource<object> _tcs); //Disable cancelation of the Task when closing ViewModel through the service viewModel.CloseCompletionSource = null; try { var closeResult = await Close(viewModel); if(closeResult) { _tcs?.TrySetResult(result); _tcsResults.Remove(viewModel); } else viewModel.CloseCompletionSource = _tcs; return closeResult; } catch(Exception ex) { _tcs?.TrySetException(ex); return false; } } protected virtual void OnBeforeNavigate(object sender, NavigateEventArgs e) { BeforeNavigate?.Invoke(sender, e); } protected virtual void OnAfterNavigate(object sender, NavigateEventArgs e) { AfterNavigate?.Invoke(sender, e); } protected virtual void OnBeforeClose(object sender, NavigateEventArgs e) { BeforeClose?.Invoke(sender, e); } protected virtual void OnAfterClose(object sender, NavigateEventArgs e) { AfterClose?.Invoke(sender, e); } protected virtual void OnBeforeChangePresentation(object sender, ChangePresentationEventArgs e) { 
BeforeChangePresentation?.Invoke(sender, e); } protected virtual void OnAfterChangePresentation(object sender, ChangePresentationEventArgs e) { AfterChangePresentation?.Invoke(sender, e); } } }
1
13,990
This didn't use to work for me. Are you sure that returns the correct instance from startup?
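A minimal C# sketch of the ordering hazard behind this question: the new getter resolves once from the IoC container and caches the result, so whatever IMvxViewDispatcher is registered at the moment of first access wins, and a dispatcher (re)registered later during startup is never observed.

private IMvxViewDispatcher _viewDispatcher;

// Old: no caching; every access falls back to the current singleton.
public IMvxViewDispatcher ViewDispatcherOld
    => _viewDispatcher ?? (IMvxViewDispatcher)MvxMainThreadDispatcher.Instance;

// New: the first access asks the IoC container and caches the answer, so
// the result depends on what was registered *before* that first call.
public IMvxViewDispatcher ViewDispatcherNew
    => _viewDispatcher ?? (_viewDispatcher = Mvx.Resolve<IMvxViewDispatcher>());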
MvvmCross-MvvmCross
.cs
@@ -65,6 +65,14 @@ public final class MethodCallExpr extends Expression implements NodeWithTypeArgu this(null, scope, new NodeList<>(), new SimpleName(name), new NodeList<>()); } + public MethodCallExpr(final Expression scope, final SimpleName name) { + this(null, scope, new NodeList<>(), name, new NodeList<>()); + } + + public MethodCallExpr(final Expression scope, final String name, final NodeList<Expression> arguments) { + this(null, scope, new NodeList<>(), new SimpleName(name), arguments); + } + public MethodCallExpr(final Expression scope, final SimpleName name, final NodeList<Expression> arguments) { this(null, scope, new NodeList<>(), name, arguments); }
1
/* * Copyright (C) 2007-2010 Júlio Vilmar Gesser. * Copyright (C) 2011, 2013-2016 The JavaParser Team. * * This file is part of JavaParser. * * JavaParser can be used either under the terms of * a) the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * b) the terms of the Apache License * * You should have received a copy of both licenses in LICENCE.LGPL and * LICENCE.APACHE. Please refer to those files for details. * * JavaParser is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. */ package com.github.javaparser.ast.expr; import com.github.javaparser.Range; import com.github.javaparser.ast.AllFieldsConstructor; import com.github.javaparser.ast.NodeList; import com.github.javaparser.ast.nodeTypes.NodeWithArguments; import com.github.javaparser.ast.nodeTypes.NodeWithOptionalScope; import com.github.javaparser.ast.nodeTypes.NodeWithSimpleName; import com.github.javaparser.ast.nodeTypes.NodeWithTypeArguments; import com.github.javaparser.ast.observer.ObservableProperty; import com.github.javaparser.ast.type.Type; import com.github.javaparser.ast.visitor.GenericVisitor; import com.github.javaparser.ast.visitor.VoidVisitor; import java.util.Arrays; import java.util.List; import java.util.Optional; import static com.github.javaparser.utils.Utils.assertNotNull; import com.github.javaparser.ast.Node; import com.github.javaparser.ast.visitor.CloneVisitor; import com.github.javaparser.metamodel.MethodCallExprMetaModel; import com.github.javaparser.metamodel.JavaParserMetaModel; import javax.annotation.Generated; /** * A method call on an object. <br/><code>circle.circumference()</code> <br/>In <code>a.&lt;String&gt;bb(15);</code> a * is the scope, String is a type argument, bb is the name and 15 is an argument. 
* * @author Julio Vilmar Gesser */ public final class MethodCallExpr extends Expression implements NodeWithTypeArguments<MethodCallExpr>, NodeWithArguments<MethodCallExpr>, NodeWithSimpleName<MethodCallExpr>, NodeWithOptionalScope<MethodCallExpr> { private Expression scope; private NodeList<Type> typeArguments; private SimpleName name; private NodeList<Expression> arguments; public MethodCallExpr() { this(null, null, new NodeList<>(), new SimpleName(), new NodeList<>()); } public MethodCallExpr(final Expression scope, final String name) { this(null, scope, new NodeList<>(), new SimpleName(name), new NodeList<>()); } public MethodCallExpr(final Expression scope, final SimpleName name, final NodeList<Expression> arguments) { this(null, scope, new NodeList<>(), name, arguments); } @AllFieldsConstructor public MethodCallExpr(final Expression scope, final NodeList<Type> typeArguments, final SimpleName name, final NodeList<Expression> arguments) { this(null, scope, typeArguments, name, arguments); } /**This constructor is used by the parser and is considered private.*/ @Generated("com.github.javaparser.generator.core.node.MainConstructorGenerator") public MethodCallExpr(Range range, Expression scope, NodeList<Type> typeArguments, SimpleName name, NodeList<Expression> arguments) { super(range); setScope(scope); setTypeArguments(typeArguments); setName(name); setArguments(arguments); customInitialization(); } @Override public <R, A> R accept(final GenericVisitor<R, A> v, final A arg) { return v.visit(this, arg); } @Override public <A> void accept(final VoidVisitor<A> v, final A arg) { v.visit(this, arg); } @Generated("com.github.javaparser.generator.core.node.PropertyGenerator") public NodeList<Expression> getArguments() { return arguments; } @Generated("com.github.javaparser.generator.core.node.PropertyGenerator") public SimpleName getName() { return name; } @Generated("com.github.javaparser.generator.core.node.PropertyGenerator") public Optional<Expression> getScope() { return Optional.ofNullable(scope); } @Generated("com.github.javaparser.generator.core.node.PropertyGenerator") public MethodCallExpr setArguments(final NodeList<Expression> arguments) { assertNotNull(arguments); if (arguments == this.arguments) { return (MethodCallExpr) this; } notifyPropertyChange(ObservableProperty.ARGUMENTS, this.arguments, arguments); if (this.arguments != null) this.arguments.setParentNode(null); this.arguments = arguments; setAsParentNodeOf(arguments); return this; } @Generated("com.github.javaparser.generator.core.node.PropertyGenerator") public MethodCallExpr setName(final SimpleName name) { assertNotNull(name); if (name == this.name) { return (MethodCallExpr) this; } notifyPropertyChange(ObservableProperty.NAME, this.name, name); if (this.name != null) this.name.setParentNode(null); this.name = name; setAsParentNodeOf(name); return this; } @Generated("com.github.javaparser.generator.core.node.PropertyGenerator") public MethodCallExpr setScope(final Expression scope) { if (scope == this.scope) { return (MethodCallExpr) this; } notifyPropertyChange(ObservableProperty.SCOPE, this.scope, scope); if (this.scope != null) this.scope.setParentNode(null); this.scope = scope; setAsParentNodeOf(scope); return this; } @Generated("com.github.javaparser.generator.core.node.PropertyGenerator") public Optional<NodeList<Type>> getTypeArguments() { return Optional.ofNullable(typeArguments); } /** * Sets the typeArguments * * @param typeArguments the typeArguments, can be null * @return this, the MethodCallExpr */ 
@Generated("com.github.javaparser.generator.core.node.PropertyGenerator") public MethodCallExpr setTypeArguments(final NodeList<Type> typeArguments) { if (typeArguments == this.typeArguments) { return (MethodCallExpr) this; } notifyPropertyChange(ObservableProperty.TYPE_ARGUMENTS, this.typeArguments, typeArguments); if (this.typeArguments != null) this.typeArguments.setParentNode(null); this.typeArguments = typeArguments; setAsParentNodeOf(typeArguments); return this; } @Override @Generated("com.github.javaparser.generator.core.node.GetNodeListsGenerator") public List<NodeList<?>> getNodeLists() { return Arrays.asList(getArguments(), getTypeArguments().orElse(null)); } @Override @Generated("com.github.javaparser.generator.core.node.RemoveMethodGenerator") public boolean remove(Node node) { if (node == null) return false; for (int i = 0; i < arguments.size(); i++) { if (arguments.get(i) == node) { arguments.remove(i); return true; } } if (scope != null) { if (node == scope) { removeScope(); return true; } } if (typeArguments != null) { for (int i = 0; i < typeArguments.size(); i++) { if (typeArguments.get(i) == node) { typeArguments.remove(i); return true; } } } return super.remove(node); } @Generated("com.github.javaparser.generator.core.node.RemoveMethodGenerator") public MethodCallExpr removeScope() { return setScope((Expression) null); } @Override @Generated("com.github.javaparser.generator.core.node.CloneGenerator") public MethodCallExpr clone() { return (MethodCallExpr) accept(new CloneVisitor(), null); } @Override @Generated("com.github.javaparser.generator.core.node.GetMetaModelGenerator") public MethodCallExprMetaModel getMetaModel() { return JavaParserMetaModel.methodCallExprMetaModel; } }
1
11,013
Looks good. Eventually we could remove some of these constructors, but for now adding these two seems like the way to go.
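A short usage sketch of the two added overloads, reusing the circle example from the class javadoc (plain JavaParser AST types; the output in the comments is what the default pretty printer produces):

import com.github.javaparser.ast.NodeList;
import com.github.javaparser.ast.expr.Expression;
import com.github.javaparser.ast.expr.IntegerLiteralExpr;
import com.github.javaparser.ast.expr.MethodCallExpr;
import com.github.javaparser.ast.expr.NameExpr;
import com.github.javaparser.ast.expr.SimpleName;

public class MethodCallExprDemo {
    public static void main(String[] args) {
        // New (Expression, SimpleName) overload: a no-argument call on a scope.
        MethodCallExpr circumference =
                new MethodCallExpr(new NameExpr("circle"), new SimpleName("circumference"));

        // New (Expression, String, NodeList) overload: a call with arguments,
        // without wrapping the name in a SimpleName by hand.
        NodeList<Expression> arguments = new NodeList<>();
        arguments.add(new IntegerLiteralExpr("2"));
        MethodCallExpr scale = new MethodCallExpr(new NameExpr("circle"), "scale", arguments);

        System.out.println(circumference); // circle.circumference()
        System.out.println(scale);         // circle.scale(2)
    }
}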
javaparser-javaparser
java
@@ -180,7 +180,7 @@ def parse_bwids(bwolist): return list(ast.literal_eval(bwolist)) -def get_holdingpen_objects(ptags=[]): +def get_holdingpen_objects(ptags=["Need action"]): """Get BibWorkflowObject's for display in Holding Pen. Uses DataTable naming for filtering/sorting. Work in progress.
1
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2012, 2013, 2014 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Various utility functions for use across the workflows module.""" import msgpack from invenio.ext.cache import cache from .registry import workflows def convert_marcxml_to_bibfield(marcxml): """Return a SmartJson representation of MARC XML string. This function converts a MARCXML string to a JSON-like dictionary using the the jsonalchemy (aka. BibField) config. :param marcxml: MARCXML string to parse :type marcxml: string :return: SmartJson object. """ from invenio.modules.jsonalchemy.reader import Reader from invenio.modules.jsonalchemy.wrappers import SmartJson if isinstance(marcxml, unicode): marcxml = marcxml.encode(errors='ignore') return Reader.translate(marcxml, SmartJson, master_format='marc', namespace='recordext') class BibWorkflowObjectIdContainer(object): """Mapping from an ID to BibWorkflowObject. This class is only used to be able to store a workflow ID and to retrieve easily the workflow from this ID from another process, such as a Celery worker process. It is used mainly to avoid problems with SQLAlchemy sessions when we use different processes. """ def __init__(self, bibworkflowobject=None): """Initialize the object, optionally passing a BibWorkflowObject.""" if bibworkflowobject is not None: self.id = bibworkflowobject.id else: self.id = None def get_object(self): """Get the BibWorkflowObject from self.id.""" from .models import BibWorkflowObject if self.id is not None: return BibWorkflowObject.query.filter( BibWorkflowObject.id == self.id ).one() else: return None def from_dict(self, dict_to_process): """Take a dict with special keys and set the current id. :param dict_to_process: dict created before with to_dict() :type dict_to_process: dict :return: self, BibWorkflowObjectIdContainer. """ self.id = dict_to_process[str(self.__class__)]["id"] return self def to_dict(self): """Create a dict with special keys for later retrieval.""" return {str(self.__class__): self.__dict__} def get_workflow_definition(name): """Try to load the given workflow from the system.""" if name in workflows: return getattr(workflows[name], "workflow", None) else: from .definitions import WorkflowMissing return WorkflowMissing.workflow class dictproperty(object): """Use a dict attribute as a @property. This is a minimal descriptor class that creates a proxy object, which implements __getitem__, __setitem__ and __delitem__, passing requests through to the functions that the user provided to the dictproperty constructor. 
""" class _proxy(object): """The proxy object.""" def __init__(self, obj, fget, fset, fdel): """Init the proxy object.""" self._obj = obj self._fget = fget self._fset = fset self._fdel = fdel def __getitem__(self, key): """Get value from key.""" return self._fget(self._obj, key) def __setitem__(self, key, value): """Set value for key.""" self._fset(self._obj, key, value) def __delitem__(self, key): """Delete value for key.""" self._fdel(self._obj, key) def __init__(self, fget=None, fset=None, fdel=None, doc=None): """Init descriptor class.""" self._fget = fget self._fset = fset self._fdel = fdel self.__doc__ = doc def __get__(self, obj, objtype=None): """Return proxy or self.""" if obj is None: return self return self._proxy(obj, self._fget, self._fset, self._fdel) def sort_bwolist(bwolist, iSortCol_0, sSortDir_0): """Sort a list of BibWorkflowObjects for DataTables.""" should_we_reverse = False if sSortDir_0 == 'desc': should_we_reverse = True if iSortCol_0 == 0: bwolist.sort(key=lambda x: x.id, reverse=should_we_reverse) elif iSortCol_0 == 1: bwolist.sort(key=lambda x: x.id, reverse=should_we_reverse) elif iSortCol_0 == 2: bwolist.sort(key=lambda x: msgpack.loads(cache.get("workflows_holdingpen_{0}".format(x.id)))["title"], reverse=should_we_reverse) elif iSortCol_0 == 3: bwolist.sort(key=lambda x: msgpack.loads(cache.get("workflows_holdingpen_{0}".format(x.id)))["description"], reverse=should_we_reverse) elif iSortCol_0 == 4: bwolist.sort(key=lambda x: x.created, reverse=should_we_reverse) elif iSortCol_0 == 5: bwolist.sort(key=lambda x: x.version, reverse=should_we_reverse) elif iSortCol_0 == 6: bwolist.sort(key=lambda x: x.data_type, reverse=should_we_reverse) elif iSortCol_0 == 7: bwolist.sort(key=lambda x: x.version, reverse=should_we_reverse) elif iSortCol_0 == 8: bwolist.sort(key=lambda x: x.version, reverse=should_we_reverse) return bwolist def parse_bwids(bwolist): """Use ast to eval a string representing a list.""" import ast return list(ast.literal_eval(bwolist)) def get_holdingpen_objects(ptags=[]): """Get BibWorkflowObject's for display in Holding Pen. Uses DataTable naming for filtering/sorting. Work in progress. """ from .models import (BibWorkflowObject, ObjectVersion) tags_copy = ptags[:] version_showing = [] for tag in ptags: if tag in ObjectVersion.MAPPING: version_showing.append(ObjectVersion.MAPPING[tag]) tags_copy.remove(tag) ssearch = tags_copy bwobject_list = BibWorkflowObject.query.filter( BibWorkflowObject.id_parent == None # noqa E711 ).filter(not version_showing or BibWorkflowObject.version.in_( version_showing)).all() if ssearch and ssearch[0]: if not isinstance(ssearch, list): if "," in ssearch: ssearch = ssearch.split(",") else: ssearch = [ssearch] bwobject_list_tmp = [] for bwo in bwobject_list: results = { "created": get_pretty_date(bwo), "type": get_type(bwo), "title": None, "description": None } results.update(get_formatted_holdingpen_object(bwo)) if check_ssearch_over_data(ssearch, results): bwobject_list_tmp.append(bwo) bwobject_list = bwobject_list_tmp return bwobject_list def get_versions_from_tags(tags): """Return a tuple with versions from tags. 
:param tags: list of tags :return: tuple of (versions to show, cleaned tags list) """ from .models import ObjectVersion tags_copy = tags[:] version_showing = [] for i in range(len(tags_copy) - 1, -1, -1): if tags_copy[i] in ObjectVersion.MAPPING: version_showing.append(ObjectVersion.MAPPING[tags_copy[i]]) del tags_copy[i] return version_showing, tags_copy def get_formatted_holdingpen_object(bwo, date_format='%Y-%m-%d %H:%M:%S.%f'): """Return the formatted output, from cache if available.""" results = cache.get("workflows_holdingpen_{0}".format(bwo.id)) if results: results = msgpack.loads(cache.get("workflows_holdingpen_{0}".format(bwo.id))) if results["date"] == bwo.modified.strftime(date_format): return results results = generate_formatted_holdingpen_object(bwo) cache.set("workflows_holdingpen_{0}".format(bwo.id), msgpack.dumps(results)) return results def generate_formatted_holdingpen_object(bwo, date_format='%Y-%m-%d %H:%M:%S.%f'): """Generate a dict with formatted column data from Holding Pen object.""" from .definitions import WorkflowBase workflows_name = bwo.get_workflow_name() if workflows_name and workflows_name in workflows and \ hasattr(workflows[workflows_name], 'get_description'): workflow_definition = workflows[workflows_name] else: workflow_definition = WorkflowBase results = { "name": workflows_name, "description": workflow_definition.get_description(bwo), "title": workflow_definition.get_title(bwo), "date": bwo.modified.strftime(date_format) } return results def check_ssearch_over_data(ssearch, data): """Check for DataTables search request. Checks if the data match with one of the search tags in data. :param ssearch: list of tags used for filtering. :param data: data to check. :return: True if present, False otherwise. """ total = 0 for terms in ssearch: for datum in data: if data[datum] and terms.lower() in data[datum].lower(): total += 1 break return total == len(ssearch) def get_pretty_date(bwo): """Get the pretty date from bwo.created.""" from invenio.utils.date import pretty_date return pretty_date(bwo.created) def get_type(bwo): """Get the type of the Object.""" return bwo.data_type def get_info(bwobject): """Parse the hpobject and extracts its info to a dictionary.""" info = {} if bwobject.get_extra_data()['owner'] != {}: info['owner'] = bwobject.get_extra_data()['owner'] else: info['owner'] = 'None' info['parent id'] = bwobject.id_parent info['workflow id'] = bwobject.id_workflow info['object id'] = bwobject.id info['action'] = bwobject.get_action() return info def extract_data(bwobject): """Extract needed metadata from BibWorkflowObject. Used for rendering the Record's holdingpen table row and details and action page. 
""" from .models import (BibWorkflowObject, Workflow) extracted_data = {} if bwobject.id_parent is not None: extracted_data['bwparent'] = \ BibWorkflowObject.query.get(bwobject.id_parent) else: extracted_data['bwparent'] = None # TODO: read the logstuff from the db extracted_data['loginfo'] = "" extracted_data['logtext'] = {} for log in extracted_data['loginfo']: extracted_data['logtext'][log.get_extra_data()['_last_task_name']] = \ log.message extracted_data['info'] = get_info(bwobject) try: extracted_data['info']['action'] = bwobject.get_action() except (KeyError, AttributeError): pass extracted_data['w_metadata'] = \ Workflow.query.filter(Workflow.uuid == bwobject.id_workflow).first() if extracted_data['w_metadata']: workflow_def = get_workflow_definition(extracted_data['w_metadata'].name) extracted_data['workflow_func'] = workflow_def else: extracted_data['workflow_func'] = [] return extracted_data def get_action_list(object_list): """Return a dict of action names mapped to halted objects. Get a dictionary mapping from action name to number of Pending actions (i.e. halted objects). Used in the holdingpen.index page. """ from .registry import actions action_dict = {} found_actions = [] # First get a list of all to count up later for bwo in object_list: action_name = bwo.get_action() if action_name is not None: found_actions.append(action_name) # Get "real" action name only once per action for action_name in set(found_actions): if action_name not in actions: # Perhaps some old action? Use stored name. action_nicename = action_name else: action = actions[action_name] action_nicename = getattr(action, "name", action_name) action_dict[action_nicename] = found_actions.count(action_name) return action_dict def get_rendered_task_results(obj): """Return a list of rendered results from BibWorkflowObject task results.""" from flask import render_template results = [] for res in obj.get_tasks_results().values(): for result in res: results.append(render_template( result.get("template", "workflows/results/default.html"), results=result, obj=obj )) return results def get_previous_next_objects(object_list, current_object_id): """Return tuple of (previous, next) object for given Holding Pen object.""" if not object_list: return None, None try: current_index = object_list.index(current_object_id) except ValueError: # current_object_id not in object_list: return None, None try: next_object_id = object_list[current_index + 1] except IndexError: next_object_id = None try: if current_index == 0: previous_object_id = None else: previous_object_id = object_list[current_index - 1] except IndexError: previous_object_id = None return previous_object_id, next_object_id def get_func_info(func): """Retrieve a function's information.""" name = func.func_name doc = func.func_doc try: nicename = func.description except AttributeError: if doc: nicename = doc.split('\n')[0] if len(nicename) > 80: nicename = name else: nicename = name parameters = [] closure = func.func_closure varnames = func.func_code.co_freevars if closure: for index, arg in enumerate(closure): parameters.append((str(varnames[index]), str(arg.cell_contents))) return { "nicename": nicename, "doc": doc, "parameters": parameters, "name": name } def get_workflow_info(func_list): """Return function info, go through lists recursively.""" funcs = [] for item in func_list: if isinstance(item, list): funcs.append(get_workflow_info(item)) else: funcs.append(get_func_info(item)) return funcs
1
13,985
@jalavik should we have an enum/list/registry of the possible tags somewhere? (A hedged sketch of one possible shape follows this record.)
inveniosoftware-invenio
py
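The reviewer's question above invites a concrete shape for such a registry. A minimal sketch follows; the class name ObjectVersion and the tag strings here are hypothetical stand-ins (the real mapping lives in ObjectVersion.MAPPING in .models), not the project's actual API.

class ObjectVersion(object):
    """Hypothetical enum-style registry of Holding Pen version tags."""

    INITIAL, HALTED, COMPLETED = range(3)

    # Single source of truth mapping display tags to version constants.
    MAPPING = {
        "New": INITIAL,
        "Need action": HALTED,
        "Done": COMPLETED,
    }

    @classmethod
    def known_tags(cls):
        """Return every tag name the Holding Pen understands."""
        return list(cls.MAPPING.keys())


def split_tags(tags):
    """Split tags into (versions to show, remaining free-text tags)."""
    versions = [ObjectVersion.MAPPING[t] for t in tags
                if t in ObjectVersion.MAPPING]
    rest = [t for t in tags if t not in ObjectVersion.MAPPING]
    return versions, rest

Routing every tag lookup through one MAPPING would keep helpers like get_versions_from_tags() and get_holdingpen_objects() from drifting apart.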
@@ -155,6 +155,7 @@ class SecurityCenterClient(object): finding.get('source_properties').get('violation_data')) raise api_errors.ApiExecutionError(violation_data, e) + # pylint: disable=logging-too-many-args def list_findings(self, source_id): """Lists all the findings in CSCC.
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Wrapper for Cloud Security Command Center API client.""" from builtins import object import json from googleapiclient import errors from httplib2 import HttpLib2Error from google.cloud.forseti.common.gcp_api import _base_repository from google.cloud.forseti.common.gcp_api import api_helpers from google.cloud.forseti.common.gcp_api import errors as api_errors from google.cloud.forseti.common.gcp_api import repository_mixins from google.cloud.forseti.common.util import logger LOGGER = logger.get_logger(__name__) API_NAME = 'securitycenter' class SecurityCenterRepositoryClient(_base_repository.BaseRepositoryClient): """SecurityCenter API Respository.""" def __init__(self, quota_max_calls=None, quota_period=1.0, use_rate_limiter=True): """Constructor. Args: quota_max_calls (int): Allowed requests per <quota_period> for the API. quota_period (float): The time period to track requests over. use_rate_limiter (bool): Set to false to disable the use of a rate limiter for this service. """ LOGGER.debug('Initializing SecurityCenterRepositoryClient') if not quota_max_calls: use_rate_limiter = False self._findings = None use_versioned_discovery_doc = True super(SecurityCenterRepositoryClient, self).__init__( API_NAME, versions=['v1'], quota_max_calls=quota_max_calls, quota_period=quota_period, use_rate_limiter=use_rate_limiter, use_versioned_discovery_doc=use_versioned_discovery_doc) # Turn off docstrings for properties. # pylint: disable=missing-return-doc, missing-return-type-doc @property def findings(self): """Returns _SecurityCenterOrganizationsFindingsRepository instance.""" if not self._findings: self._findings = self._init_repository( _SecurityCenterOrganizationsFindingsRepository, version='v1') return self._findings # pylint: enable=missing-return-doc, missing-return-type-doc class _SecurityCenterOrganizationsFindingsRepository( repository_mixins.CreateQueryMixin, repository_mixins.ListQueryMixin, repository_mixins.PatchResourceMixin, _base_repository.GCPRepository): """Implementation of CSCC Organizations Findings repository.""" def __init__(self, **kwargs): """Constructor. Args: **kwargs (dict): The args to pass into GCPRepository.__init__() """ LOGGER.debug( 'Creating _SecurityCenterOrganizationsFindingsRepositoryClient') component = 'organizations.sources.findings' super(_SecurityCenterOrganizationsFindingsRepository, self).__init__( key_field='name', component=component, max_results_field='pageSize', **kwargs) class SecurityCenterClient(object): """Cloud Security Command Center Client. https://cloud.google.com/security-command-center/docs/reference/rest """ def __init__(self, api_quota): """Initialize. 
Args: api_quota (dict): API quota configs """ max_calls, quota_period = api_helpers.get_ratelimiter_config( api_quota, API_NAME) self.repository = SecurityCenterRepositoryClient( quota_max_calls=max_calls, quota_period=quota_period) def create_finding(self, finding, source_id=None, finding_id=None): """Creates a finding in CSCC. Args: finding (dict): Forseti violation in CSCC format. source_id (str): Unique ID assigned by CSCC, to the organization that the violations are originating from. finding_id (str): id hash of the CSCC finding Returns: dict: An API response containing one page of results. """ try: LOGGER.debug('Creating finding.') # patch() will also create findings for new violations. response = self.repository.findings.patch( '{}/findings/{}'.format(source_id, finding_id), finding ) LOGGER.debug('Successfully created finding response: %s', response) return response except (errors.HttpError, HttpLib2Error) as e: raw_error = e.args[1] error = raw_error.decode('utf-8') formatted_error = json.loads(error) error_code = formatted_error['error']['code'] if error_code == 409: LOGGER.debug('Unable to create finding. Finding already exists ' 'in CSCC. %s', finding) else: LOGGER.exception('Unable to create CSCC finding: Resource: %s', finding) violation_data = ( finding.get('source_properties').get('violation_data')) raise api_errors.ApiExecutionError(violation_data, e) def list_findings(self, source_id): """Lists all the findings in CSCC. Args: source_id (str): Unique ID assigned by CSCC, to the organization that the violations are originating from. Returns: object: An API response containing all the CSCC findings. """ response = self.repository.findings.list(parent=source_id) return response def update_finding(self, finding, finding_id, source_id=None): """Updates a finding in CSCC. Args: finding (dict): Forseti violation in CSCC format. finding_id (str): id hash of the CSCC finding. source_id (str): Unique ID assigned by CSCC, to the organization that the violations are originating from. Returns: dict: An API response containing one page of results. """ try: LOGGER.debug('Updating finding.') # patch() will set the state of outdated findings to INACTIVE response = self.repository.findings.patch( '{}/findings/{}'.format(source_id, finding_id), finding, updateMask='state,event_time') LOGGER.debug('Successfully updated finding in CSCC:\n%s', finding) return response except (errors.HttpError, HttpLib2Error) as e: LOGGER.exception('Unable to update CSCC finding: Resource: %s', finding) violation_data = ( finding.get('source_properties').get('violation_data')) raise api_errors.ApiExecutionError(violation_data, e)
1
35,485
Why is this pylint disable needed? (An illustrative sketch of when logging-too-many-args fires follows this record.)
forseti-security-forseti-security
py
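To make the question above concrete, here is a standalone illustration of when pylint's logging-too-many-args check fires; the variable values are hypothetical and this is not the module's actual code.

import logging

LOGGER = logging.getLogger(__name__)

source_id = 'organizations/123/sources/456'  # hypothetical value
response = {'findings': []}                  # hypothetical value

# One %s placeholder but two extra arguments: pylint flags this call as
# logging-too-many-args. At runtime the mismatch only surfaces if the
# record is actually emitted, so the bug can hide behind the log level.
LOGGER.debug('Listing findings for %s', source_id, response)

# The usual fix is to match placeholders to arguments instead of
# suppressing the check:
LOGGER.debug('Listing findings for %s: %s', source_id, response)

If the disable is papering over a false positive rather than a genuine mismatch, a comment naming the call that trips the check would answer the reviewer in-line.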
@@ -44,6 +44,7 @@ def GenerateConfig(context): FORSETI_HOME = '$USER_HOME/forseti-security' POLICY_LIBRARY_HOME = '$USER_HOME/policy-library' + POLICY_LIBRARY_SYNC_ENABLED = 'false' DOWNLOAD_FORSETI = ( "git clone {src_path}.git".format(
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Creates a GCE instance template for Forseti Security.""" def get_patch_search_expression(forseti_version): """Returns a glob expression matching all patches of the given version. TODO: Update in client/forseti-instance-client if update here. Args: forseti_version (str): Installed forseti version. Should start with 'tags/v' if patches are to be updated automatically. Returns: str: Glob expression matching all patches of given forseti_version. None: Returns None if forseti_version is not in 'tags/vX.Y.Z' format. """ if forseti_version[:6] != 'tags/v': return None segments = forseti_version.replace('tags/v', '').split('.') for segment in segments: if not segment.isdigit(): return None return 'v{}.{}.{{[0-9],[0-9][0-9]}}'.format(segments[0], segments[1]) def GenerateConfig(context): """Generate configuration.""" FORSETI_HOME = '$USER_HOME/forseti-security' POLICY_LIBRARY_HOME = '$USER_HOME/policy-library' DOWNLOAD_FORSETI = ( "git clone {src_path}.git".format( src_path=context.properties['src-path'])) patch_search_expression = get_patch_search_expression(context.properties['forseti-version']) if patch_search_expression: CHECKOUT_FORSETI_VERSION = ( """versions=$(git tag -l {patch_search_expression}) versions=(${{versions//;/ }}) for version in "${{versions[@]}}" do segments=(${{version//./ }}) patch=${{segments[2]}} patch=${{patch: 0: 2}} patch=$(echo $patch | sed 's/[^0-9]*//g') # latest_version is an array [full_version, patch_number] if !((${{#latest_version[@]}})) || ((patch > ${{latest_version[1]}})); then latest_version=($version $patch) fi done git checkout ${{latest_version[0]}}""" .format(patch_search_expression=patch_search_expression)) else: CHECKOUT_FORSETI_VERSION = ( "git checkout {forseti_version}".format( forseti_version=context.properties['forseti-version'])) CLOUDSQL_CONN_STRING = '{}:{}:{}'.format( context.env['project'], '$(ref.cloudsql-instance.region)', '$(ref.cloudsql-instance.name)') SCANNER_BUCKET = context.properties['scanner-bucket'] FORSETI_DB_NAME = context.properties['database-name'] SERVICE_ACCOUNT_SCOPES = context.properties['service-account-scopes'] FORSETI_SERVER_CONF = '{}/configs/forseti_conf_server.yaml'.format(FORSETI_HOME) EXPORT_INITIALIZE_VARS = ( 'export SQL_PORT={0}\n' 'export SQL_INSTANCE_CONN_STRING="{1}"\n' 'export FORSETI_DB_NAME="{2}"\n') EXPORT_INITIALIZE_VARS = EXPORT_INITIALIZE_VARS.format( context.properties['db-port'], CLOUDSQL_CONN_STRING, FORSETI_DB_NAME) EXPORT_FORSETI_VARS = ( 'export FORSETI_HOME={forseti_home}\n' 'export FORSETI_SERVER_CONF={forseti_server_conf}\n' 'export POLICY_LIBRARY_HOME={policy_library_home}\n' ).format(forseti_home=FORSETI_HOME, forseti_server_conf=FORSETI_SERVER_CONF, policy_library_home=POLICY_LIBRARY_HOME) RUN_FREQUENCY = context.properties['run-frequency'] resources = [] deployment_name_splitted = context.env['deployment'].split('-') deployment_name_splitted.insert(len(deployment_name_splitted)-1, 'vm') 
instance_name = '-'.join(deployment_name_splitted) resources.append({ 'name': instance_name, 'type': 'compute.v1.instance', 'properties': { 'zone': context.properties['zone'], 'machineType': ( 'https://www.googleapis.com/compute/v1/projects/{}' '/zones/{}/machineTypes/{}'.format( context.env['project'], context.properties['zone'], context.properties['instance-type'])), 'disks': [{ 'deviceName': 'boot', 'type': 'PERSISTENT', 'boot': True, 'autoDelete': True, 'initializeParams': { 'sourceImage': ( 'https://www.googleapis.com/compute/v1' '/projects/{}/global/images/family/{}'.format( context.properties['image-project'], context.properties['image-family'] ) ) } }], 'networkInterfaces': [{ 'network': ( 'https://www.googleapis.com/compute/v1/' 'projects/{}/global/networks/{}'.format( context.properties['vpc-host-project-id'], context.properties['vpc-host-network'])), 'accessConfigs': [{ 'name': 'External NAT', 'type': 'ONE_TO_ONE_NAT' }], 'subnetwork': ( 'https://www.googleapis.com/compute/v1/' 'projects/{}/regions/{}/subnetworks/{}'.format( context.properties['vpc-host-project-id'], context.properties['region'], context.properties['vpc-host-subnetwork'])) }], 'serviceAccounts': [{ 'email': context.properties['service-account'], 'scopes': SERVICE_ACCOUNT_SCOPES, }], 'metadata': { 'items': [{ 'key': 'startup-script', 'value': """#!/bin/bash exec > /tmp/deployment.log exec 2>&1 # Ubuntu available packages refresh. sudo apt-get update -y # Install Google Cloud SDK sudo apt-get --assume-yes install google-cloud-sdk USER_HOME=/home/ubuntu # Install fluentd if necessary. FLUENTD=$(ls /usr/sbin/google-fluentd) if [ -z "$FLUENTD" ]; then cd $USER_HOME curl -sSO https://dl.google.com/cloudagents/install-logging-agent.sh bash install-logging-agent.sh fi # Install collectd if necessary. COLLECTD=$(ls /opt/stackdriver/collectd/sbin/stackdriver-collectd) if [ -z "$COLLECTD" ]; then cd $USER_HOME curl -sSO https://dl.google.com/cloudagents/install-monitoring-agent.sh bash install-monitoring-agent.sh fi # Check whether Cloud SQL proxy is installed. CLOUD_SQL_PROXY=$(which cloud_sql_proxy) if [ -z "$CLOUD_SQL_PROXY" ]; then cd $USER_HOME wget https://dl.google.com/cloudsql/cloud_sql_proxy.{cloudsql_arch} sudo mv cloud_sql_proxy.{cloudsql_arch} /usr/local/bin/cloud_sql_proxy chmod +x /usr/local/bin/cloud_sql_proxy fi # Install Forseti Security. cd $USER_HOME rm -rf *forseti* # Download Forseti source code {download_forseti} cd forseti-security # Fetch tags updates tag changes which fetch all doesn't do git fetch --tags git fetch --all {checkout_forseti_version} # Forseti Host Setup sudo apt-get install -y git unzip # Forseti host dependencies sudo apt-get install -y $(cat install/dependencies/apt_packages.txt | grep -v "#" | xargs) # Forseti dependencies python3 -m pip install -q --upgrade setuptools wheel python3 -m pip install -q --upgrade -r requirements.txt # Setup Forseti logging touch /var/log/forseti.log chown ubuntu:root /var/log/forseti.log cp {forseti_home}/configs/logging/fluentd/forseti.conf /etc/google-fluentd/config.d/forseti.conf cp {forseti_home}/configs/logging/logrotate/forseti /etc/logrotate.d/forseti chmod 644 /etc/logrotate.d/forseti service google-fluentd restart logrotate /etc/logrotate.conf # Change the access level of configs/ rules/ and run_forseti.sh chmod -R ug+rwx {forseti_home}/configs {forseti_home}/rules {forseti_home}/install/gcp/scripts/run_forseti.sh # Install Forseti python3 setup.py install # Export variables required by initialize_forseti_services.sh. 
{export_initialize_vars} # Export variables required by run_forseti.sh {export_forseti_vars} # Store the variables in /etc/profile.d/forseti_environment.sh # so all the users will have access to them echo "echo '{export_forseti_vars}' >> /etc/profile.d/forseti_environment.sh" | sudo sh # Download server configuration from GCS gsutil cp gs://{scanner_bucket}/configs/forseti_conf_server.yaml {forseti_server_conf} gsutil cp -r gs://{scanner_bucket}/rules {forseti_home}/ # Download the Newest Config Validator constraints from GCS rm -rf {policy_library_home} gsutil cp -r gs://{scanner_bucket}/policy-library {policy_library_home}/ # Start Forseti service depends on vars defined above. bash ./install/gcp/scripts/initialize_forseti_services.sh echo "Starting services." systemctl start cloudsqlproxy systemctl start config-validator sleep 5 echo "Attempting to update database schema, if necessary." python3 $USER_HOME/forseti-security/install/gcp/upgrade_tools/db_migrator.py systemctl start forseti echo "Success! The Forseti API server has been started." # Create a Forseti env script FORSETI_ENV="$(cat <<EOF #!/bin/bash export PATH=$PATH:/usr/local/bin # Forseti environment variables export FORSETI_HOME=/home/ubuntu/forseti-security export FORSETI_SERVER_CONF=$FORSETI_HOME/configs/forseti_conf_server.yaml export SCANNER_BUCKET={scanner_bucket} EOF )" echo "$FORSETI_ENV" > $USER_HOME/forseti_env.sh USER=ubuntu # Use flock to prevent rerun of the same cron job when the previous job is still running. # If the lock file does not exist under the tmp directory, it will create the file and put a lock on top of the file. # When the previous cron job is not finished and the new one is trying to run, it will attempt to acquire the lock # to the lock file and fail because the file is already locked by the previous process. # The -n flag in flock will fail the process right away when the process is not able to acquire the lock so we won't # queue up the jobs. # If the cron job failed the acquire lock on the process, it will log a warning message to syslog. (echo "{run_frequency} (/usr/bin/flock -n /home/ubuntu/forseti-security/forseti_cron_runner.lock $FORSETI_HOME/install/gcp/scripts/run_forseti.sh || echo '[forseti-security] Warning: New Forseti cron job will not be started, because previous Forseti job is still running.') 2>&1 | logger") | crontab -u $USER - echo "Added the run_forseti.sh to crontab under user $USER" echo "Execution of startup script finished" """.format( # Cloud SQL properties cloudsql_arch = context.properties['cloudsqlproxy-os-arch'], # Install Forseti. download_forseti=DOWNLOAD_FORSETI, # If installed on a version tag, checkout latest patch. # Otherwise checkout originally installed version. checkout_forseti_version=CHECKOUT_FORSETI_VERSION, # Set ownership for Forseti conf and rules dirs forseti_home=FORSETI_HOME, policy_library_home=POLICY_LIBRARY_HOME, # Download the Forseti conf and rules. scanner_bucket=SCANNER_BUCKET, forseti_server_conf=FORSETI_SERVER_CONF, # Env variables for Explain export_initialize_vars=EXPORT_INITIALIZE_VARS, # Env variables for Forseti export_forseti_vars=EXPORT_FORSETI_VARS, # Forseti run frequency run_frequency=RUN_FREQUENCY, ) }] } } }) return {'resources': resources}
1
34,670
The new sync feature is supported only through Terraform; installations using the deprecated Deployment Manager method will have it disabled. (A sketch of the flag's wiring follows this record.)
forseti-security-forseti-security
py
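The comment above implies the Deployment Manager path pins the sync feature off while Terraform installs control it. A minimal sketch of that pattern, using a hypothetical generate_exports helper (not the template's real code) and assuming the flag is consumed by run_forseti.sh as an environment variable:

POLICY_LIBRARY_SYNC_ENABLED = 'false'  # pinned off for deprecated DM installs


def generate_exports(sync_enabled=POLICY_LIBRARY_SYNC_ENABLED):
    """Render the export line consumed by the startup script."""
    return 'export POLICY_LIBRARY_SYNC_ENABLED={}\n'.format(sync_enabled)


# Deprecated Deployment Manager path: always disabled.
print(generate_exports())
# A Terraform-managed install could render the same line with 'true'.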
@@ -79,3 +79,7 @@ class DropBlock(nn.Module): factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt / self.warmup_iters) return gamma * factor + + def extra_repr(self): + return (f'drop_prob={self.drop_prob}, block_size={self.block_size}, ' + f'warmup_iters={self.warmup_iters}')
1
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import PLUGIN_LAYERS eps = 1e-6 @PLUGIN_LAYERS.register_module() class DropBlock(nn.Module): """Randomly drop some regions of feature maps. Please refer to the method proposed in `DropBlock <https://arxiv.org/abs/1810.12890>`_ for details. Args: drop_prob (float): The probability of dropping each block. block_size (int): The size of dropped blocks. warmup_iters (int): The drop probability will linearly increase from `0` to `drop_prob` during the first `warmup_iters` iterations. Default: 2000. """ def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs): super(DropBlock, self).__init__() assert block_size % 2 == 1 assert 0 < drop_prob <= 1 assert warmup_iters >= 0 self.drop_prob = drop_prob self.block_size = block_size self.warmup_iters = warmup_iters self.iter_cnt = 0 def forward(self, x): """ Args: x (Tensor): Input feature map on which some areas will be randomly dropped. Returns: Tensor: The tensor after DropBlock layer. """ if not self.training: return x self.iter_cnt += 1 N, C, H, W = list(x.shape) gamma = self._compute_gamma((H, W)) mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1) mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device)) mask = F.pad(mask, [self.block_size // 2] * 4, value=0) mask = F.max_pool2d( input=mask, stride=(1, 1), kernel_size=(self.block_size, self.block_size), padding=self.block_size // 2) mask = 1 - mask x = x * mask * mask.numel() / (eps + mask.sum()) return x def _compute_gamma(self, feat_size): """Compute the value of gamma according to paper. gamma is the parameter of bernoulli distribution, which controls the number of features to drop. gamma = (drop_prob * fm_area) / (drop_area * keep_area) Args: feat_size (tuple[int, int]): The height and width of feature map. Returns: float: The value of gamma. """ gamma = (self.drop_prob * feat_size[0] * feat_size[1]) gamma /= ((feat_size[0] - self.block_size + 1) * (feat_size[1] - self.block_size + 1)) gamma /= (self.block_size**2) factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt / self.warmup_iters) return gamma * factor
1
25,876
Returning an f-string like f'xxx' is not recommended here; prefer returning a plain formatted string, e.g. built with str.format(). (See the sketch after this record.)
open-mmlab-mmdetection
py
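A sketch of the suggested style: the same extra_repr built with str.format() instead of returning an f-string. torch is assumed to be installed, and the class is trimmed to just what the repr needs.

import torch.nn as nn


class DropBlock(nn.Module):

    def __init__(self, drop_prob, block_size, warmup_iters=2000):
        super(DropBlock, self).__init__()
        self.drop_prob = drop_prob
        self.block_size = block_size
        self.warmup_iters = warmup_iters

    def extra_repr(self):
        # Plain str.format() rather than an f-string return.
        return ('drop_prob={}, block_size={}, '
                'warmup_iters={}').format(self.drop_prob, self.block_size,
                                          self.warmup_iters)


print(DropBlock(0.1, 3))
# -> DropBlock(drop_prob=0.1, block_size=3, warmup_iters=2000)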
@@ -2836,8 +2836,11 @@ client_process_bb(dcontext_t *dcontext, build_bb_t *bb) # ifdef X86 if (!d_r_is_avx512_code_in_use()) { if (ZMM_ENABLED()) { - if (instr_may_write_zmm_register(inst)) + if (instr_may_write_zmm_register(inst)) { + LOG(THREAD, LOG_INTERP, 3, "Detected AVX-512 code in use\n"); d_r_set_avx512_code_in_use(true); + proc_set_num_simd_saved(MCXT_NUM_SIMD_SLOTS); + } } } # endif
1
/* ********************************************************** * Copyright (c) 2011-2019 Google, Inc. All rights reserved. * Copyright (c) 2001-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2001 Hewlett-Packard Company */ /* * interp.c - interpreter used for native trace selection */ #include "../globals.h" #include "../link.h" #include "../fragment.h" #include "../emit.h" #include "../dispatch.h" #include "../fcache.h" #include "../monitor.h" /* for trace_abort and monitor_data_t */ #include "arch.h" #include "instr.h" #include "instr_create.h" #include "instrlist.h" #include "decode.h" #include "decode_fast.h" #include "disassemble.h" #include "instrument.h" #include "../hotpatch.h" #ifdef RETURN_AFTER_CALL # include "../rct.h" #endif #ifdef WINDOWS # include "ntdll.h" /* for EXCEPTION_REGISTRATION */ # include "../nudge.h" /* for generic_nudge_target() address */ #endif #include "../perscache.h" #include "../native_exec.h" #include "../jit_opt.h" #ifdef CHECK_RETURNS_SSE2 # include <setjmp.h> /* for warning when see libc setjmp */ #endif #ifdef VMX86_SERVER # include "vmkuw.h" /* VMKUW_SYSCALL_GATEWAY */ #endif #ifdef ANNOTATIONS # include "../annotations.h" #endif #ifdef AARCH64 # include "build_ldstex.h" #endif enum { DIRECT_XFER_LENGTH = 5 }; /* forward declarations */ static void process_nops_for_trace(dcontext_t *dcontext, instrlist_t *ilist, uint flags _IF_DEBUG(bool recreating)); static int fixup_last_cti(dcontext_t *dcontext, instrlist_t *trace, app_pc next_tag, uint next_flags, uint trace_flags, fragment_t *prev_f, linkstub_t *prev_l, bool record_translation, uint *num_exits_deleted /*OUT*/, /* If non-NULL, only looks inside trace between these two */ instr_t *start_instr, instr_t *end_instr); bool mangle_trace(dcontext_t *dcontext, instrlist_t *ilist, monitor_data_t *md); /* we use a branch limit of 1 to make it easier for the trace * creation mechanism to stitch 
basic blocks together */ #define BRANCH_LIMIT 1 /* we limit total bb size to handle cases like infinite loop or sequence * of calls. * also, we have a limit on fragment body sizes, which should be impossible * to break since x86 instrs are max 17 bytes and we only modify ctis. * Although...selfmod mangling does really expand fragments! * -selfmod_max_writes helps for selfmod bbs (case 7893/7909). * System call mangling is also large, for degenerate cases like tests/linux/infinite. * PR 215217: also client additions: we document and assert. * FIXME: need better way to know how big will get, b/c we can construct * cases that will trigger the size assertion! */ /* define replaced by -max_bb_instrs option */ /* exported so micro routines can assert whether held */ DECLARE_CXTSWPROT_VAR(mutex_t bb_building_lock, INIT_LOCK_FREE(bb_building_lock)); /* i#1111: we do not use the lock until the 2nd thread is created */ volatile bool bb_lock_start; #if defined(INTERNAL) || defined(DEBUG) || defined(CLIENT_INTERFACE) static file_t bbdump_file = INVALID_FILE; #endif #ifdef DEBUG DECLARE_NEVERPROT_VAR(uint debug_bb_count, 0); #endif /* initialization */ void interp_init() { #if defined(INTERNAL) || defined(DEBUG) || defined(CLIENT_INTERFACE) if (INTERNAL_OPTION(bbdump_tags)) { bbdump_file = open_log_file("bbs", NULL, 0); ASSERT(bbdump_file != INVALID_FILE); } #endif } #ifdef CUSTOM_TRACES_RET_REMOVAL # ifdef DEBUG /* don't bother with adding lock */ static int num_rets_removed; # endif #endif /* cleanup */ void interp_exit() { #if defined(INTERNAL) || defined(DEBUG) || defined(CLIENT_INTERFACE) if (INTERNAL_OPTION(bbdump_tags)) { close_log_file(bbdump_file); } #endif DELETE_LOCK(bb_building_lock); LOG(GLOBAL, LOG_INTERP | LOG_STATS, 1, "Total application code seen: %d KB\n", GLOBAL_STAT(app_code_seen) / 1024); #ifdef CUSTOM_TRACES_RET_REMOVAL # ifdef DEBUG LOG(GLOBAL, LOG_INTERP | LOG_STATS, 1, "Total rets removed: %d\n", num_rets_removed); # endif #endif } /**************************************************************************** **************************************************************************** * * B A S I C B L O C K B U I L D I N G */ /* we have a lot of data to pass around so we package it in this struct * so we can have separate routines for readability */ typedef struct { /* in */ app_pc start_pc; bool app_interp; /* building bb to interp app, as opposed to for pc * translation or figuring out what pages a bb touches? */ bool for_cache; /* normal to-be-executed build? */ bool record_vmlist; /* should vmareas be updated? */ bool mangle_ilist; /* should bb ilist be mangled? */ bool record_translation; /* store translation info for each instr_t? */ bool has_bb_building_lock; /* usually ==for_cache; used for aborting bb building */ bool checked_start_vmarea; /* caller called check_new_page_start() on start_pc */ file_t outf; /* send disassembly and notes to a file? * we use this mainly for dumping trace origins */ app_pc stop_pc; /* Optional: NULL for normal termination rules. * Only checked for full_decode. */ #ifdef CLIENT_INTERFACE bool pass_to_client; /* pass to client, if a bb hook exists; * we store this up front to avoid race conditions * between full_decode setting and hook calling time. */ bool post_client; /* has the client already processed the bb? 
*/ bool for_trace; /* PR 299808: we tell client if building a trace */ #endif /* in and out */ overlap_info_t *overlap_info; /* if non-null, records overlap information here; * caller must initialize region_start and region_end */ /* out */ instrlist_t *ilist; uint flags; void *vmlist; app_pc end_pc; bool native_exec; /* replace cur ilist with a native_exec version */ bool native_call; /* the gateway is a call */ #ifdef CLIENT_INTERFACE instrlist_t **unmangled_ilist; /* PR 299808: clone ilist pre-mangling */ #endif /* internal usage only */ bool full_decode; /* decode every instruction into a separate instr_t? */ bool follow_direct; /* elide unconditional branches? */ bool check_vm_area; /* whether to call check_thread_vm_area() */ uint num_elide_jmp; uint num_elide_call; app_pc last_page; app_pc cur_pc; app_pc instr_start; app_pc checked_end; /* end of current vmarea checked */ cache_pc exit_target; /* fall-through target of final instr */ uint exit_type; /* indirect branch type */ ibl_branch_type_t ibl_branch_type; /* indirect branch type as an IBL selector */ #ifdef UNIX bool invalid_instr_hack; #endif instr_t *instr; /* the current instr */ int eflags; app_pc pretend_pc; /* selfmod only: decode from separate pc */ #ifdef ARM dr_pred_type_t svc_pred; /* predicate for conditional svc */ #endif DEBUG_DECLARE(bool initialized;) } build_bb_t; /* forward decl */ static inline bool bb_process_syscall(dcontext_t *dcontext, build_bb_t *bb); static void init_build_bb(build_bb_t *bb, app_pc start_pc, bool app_interp, bool for_cache, bool mangle_ilist, bool record_translation, file_t outf, uint known_flags, overlap_info_t *overlap_info) { memset(bb, 0, sizeof(*bb)); #if defined(LINUX) && defined(X86_32) /* With SA_RESTART (i#2659) we end up interpreting the int 0x80 in vsyscall, * whose fall-through hits our hook. We avoid interpreting our own hook * by shifting it to the displaced pc. */ if (DYNAMO_OPTION(hook_vsyscall) && start_pc == vsyscall_sysenter_return_pc) start_pc = vsyscall_sysenter_displaced_pc; #endif bb->check_vm_area = true; bb->start_pc = start_pc; bb->app_interp = app_interp; bb->for_cache = for_cache; if (bb->for_cache) bb->record_vmlist = true; bb->mangle_ilist = mangle_ilist; bb->record_translation = record_translation; bb->outf = outf; bb->overlap_info = overlap_info; bb->follow_direct = !TEST(FRAG_SELFMOD_SANDBOXED, known_flags); bb->flags = known_flags; bb->ibl_branch_type = IBL_GENERIC; /* initialization only */ #ifdef ARM bb->svc_pred = DR_PRED_NONE; #endif DODEBUG(bb->initialized = true;); } static void reset_overlap_info(dcontext_t *dcontext, build_bb_t *bb) { bb->overlap_info->start_pc = bb->start_pc; bb->overlap_info->min_pc = bb->start_pc; bb->overlap_info->max_pc = bb->start_pc; bb->overlap_info->contiguous = true; bb->overlap_info->overlap = false; } static void update_overlap_info(dcontext_t *dcontext, build_bb_t *bb, app_pc new_pc, bool jmp) { if (new_pc < bb->overlap_info->min_pc) bb->overlap_info->min_pc = new_pc; if (new_pc > bb->overlap_info->max_pc) bb->overlap_info->max_pc = new_pc; /* we get called at end of all contiguous intervals, so ignore jmps */ LOG(THREAD, LOG_ALL, 5, "\t app_bb_overlaps " PFX ".." PFX " %s\n", bb->last_page, new_pc, jmp ? 
"jmp" : ""); if (!bb->overlap_info->overlap && !jmp) { /* contiguous interval: prev_pc..new_pc (open-ended) */ if (bb->last_page < bb->overlap_info->region_end && new_pc > bb->overlap_info->region_start) { LOG(THREAD_GET, LOG_ALL, 5, "\t it overlaps!\n"); bb->overlap_info->overlap = true; } } if (bb->overlap_info->contiguous && jmp) bb->overlap_info->contiguous = false; } #ifdef DEBUG # define BBPRINT(bb, level, ...) \ do { \ LOG(THREAD, LOG_INTERP, level, __VA_ARGS__); \ if (bb->outf != INVALID_FILE && bb->outf != (THREAD)) \ print_file(bb->outf, __VA_ARGS__); \ } while (0); #else # ifdef INTERNAL # define BBPRINT(bb, level, ...) \ do { \ if (bb->outf != INVALID_FILE) \ print_file(bb->outf, __VA_ARGS__); \ } while (0); # else # define BBPRINT(bb, level, ...) /* nothing */ # endif #endif #ifdef WINDOWS extern void intercept_load_dll(void); extern void intercept_unload_dll(void); # ifdef INTERNAL extern void DllMainThreadAttach(void); # endif #endif /* forward declarations */ static bool mangle_bb_ilist(dcontext_t *dcontext, build_bb_t *bb); static void build_native_exec_bb(dcontext_t *dcontext, build_bb_t *bb); static bool at_native_exec_gateway(dcontext_t *dcontext, app_pc start, bool *is_call _IF_DEBUG(bool xfer_target)); #ifdef DEBUG static void report_native_module(dcontext_t *dcontext, app_pc modpc); #endif /*************************************************************************** * Image entry */ static bool reached_image_entry = false; static INLINE_FORCED bool check_for_image_entry(app_pc bb_start) { if (!reached_image_entry && bb_start == get_image_entry()) { LOG(THREAD_GET, LOG_ALL, 1, "Reached image entry point " PFX "\n", bb_start); set_reached_image_entry(); return true; } return false; } void set_reached_image_entry() { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); reached_image_entry = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } bool reached_image_entry_yet() { return reached_image_entry; } /*************************************************************************** * Whether to inline or elide callees */ /* Return true if pc is a call target that should NOT be entered but should * still be mangled. */ static inline bool must_not_be_entered(app_pc pc) { return false #ifdef DR_APP_EXPORTS /* i#1237: DR will change dr_app_running_under_dynamorio return value * on seeing a bb starting at dr_app_running_under_dynamorio. */ || pc == (app_pc)dr_app_running_under_dynamorio #endif ; } /* Return true if pc is a call target that should NOT be inlined and left native. */ static inline bool leave_call_native(app_pc pc) { return ( #ifdef INTERNAL !dynamo_options.inline_calls #else 0 #endif #ifdef WINDOWS || pc == (app_pc)intercept_load_dll || pc == (app_pc)intercept_unload_dll /* we're guaranteed to have direct calls to the next routine since our * own DllMain calls it! */ # ifdef INTERNAL || pc == (app_pc)DllMainThreadAttach # endif /* check for nudge handling escape from cache */ || (pc == (app_pc)generic_nudge_handler) #else /* PR 200203: long-term we want to control loading of client * libs, but for now we have to let the loader call _fini() * in the client, which may end up calling __wrap_free(). 
* It's simpler to let those be interpreted and make a native * call to the real heap routine here as this is a direct * call whereas we'd need native_exec for the others: */ || pc == (app_pc)global_heap_free #endif ); } /* return true if pc is a direct jmp target that should NOT be elided and followed */ static inline bool must_not_be_elided(app_pc pc) { #ifdef WINDOWS /* Allow only the return jump in the landing pad to be elided, as we * interpret the return path from trampolines. The forward jump leads to * the trampoline and shouldn't be elided. */ if (is_on_interception_initial_route(pc)) return true; #endif return (0 #ifdef WINDOWS /* we insert trampolines by adding direct jmps to our interception code buffer * we don't want to interpret the code in that buffer, as it may swap to the * dstack and mess up a return-from-fcache. * N.B.: if use this routine anywhere else, pay attention to the * hack for is_syscall_trampoline() in the use here! */ || (is_in_interception_buffer(pc)) #else /* UNIX */ #endif ); } #ifdef DR_APP_EXPORTS /* This function allows automatically injected dynamo to ignore * dynamo API routines that would really mess things up */ static inline bool must_escape_from(app_pc pc) { /* if ever find ourselves at top of one of these, immediately issue * a ret instruction...haven't set up frame yet so stack fine, only * problem is return value, go ahead and overwrite xax, it's caller-saved * FIXME: is this ok? */ /* Note that we can't just look for direct calls to these functions * because of stubs, etc. that end up doing indirect jumps to them! */ bool res = false # ifdef DR_APP_EXPORTS || (automatic_startup && (pc == (app_pc)dynamorio_app_init || pc == (app_pc)dr_app_start || pc == (app_pc)dynamo_thread_init || pc == (app_pc)dynamorio_app_exit || /* dr_app_stop is a nop already */ pc == (app_pc)dynamo_thread_exit)) # endif ; # ifdef DEBUG if (res) { # ifdef DR_APP_EXPORTS LOG(THREAD_GET, LOG_INTERP, 3, "must_escape_from: found "); if (pc == (app_pc)dynamorio_app_init) LOG(THREAD_GET, LOG_INTERP, 3, "dynamorio_app_init\n"); else if (pc == (app_pc)dr_app_start) LOG(THREAD_GET, LOG_INTERP, 3, "dr_app_start\n"); /* FIXME: are dynamo_thread_* still needed hered? */ else if (pc == (app_pc)dynamo_thread_init) LOG(THREAD_GET, LOG_INTERP, 3, "dynamo_thread_init\n"); else if (pc == (app_pc)dynamorio_app_exit) LOG(THREAD_GET, LOG_INTERP, 3, "dynamorio_app_exit\n"); else if (pc == (app_pc)dynamo_thread_exit) LOG(THREAD_GET, LOG_INTERP, 3, "dynamo_thread_exit\n"); # endif } # endif return res; } #endif /* DR_APP_EXPORTS */ /* Adds bb->instr, which must be a direct call or jmp, to bb->ilist for native * execution. Makes sure its target is reachable from the code cache, which * is critical for jmps b/c they're native for our hooks of app code which may * not be reachable from the code cache. Also needed for calls b/c in the future * (i#774) the DR lib (and thus our leave_call_native() calls) won't be reachable * from the cache. */ static void bb_add_native_direct_xfer(dcontext_t *dcontext, build_bb_t *bb, bool appended) { #if defined(X86) && defined(X64) /* i#922: we're going to run this jmp from our code cache so we have to * make sure it still reaches its target. We could try to check * reachability from the likely code cache slot, but these should be * rare enough that making them indirect won't matter and then we have * fewer reachability dependences. 
* We do this here rather than in d_r_mangle() b/c we'd have a hard time * distinguishing native jmp/call due to DR's own operations from a * client's inserted meta jmp/call. */ /* Strategy: write target into xax (DR-reserved) slot and jmp through it. * Alternative would be to embed the target into the code stream. * We don't need to set translation b/c these are meta instrs and they * won't fault. */ ptr_uint_t tgt = (ptr_uint_t)opnd_get_pc(instr_get_target(bb->instr)); opnd_t tls_slot = opnd_create_sized_tls_slot(os_tls_offset(TLS_XAX_SLOT), OPSZ_4); instrlist_meta_append( bb->ilist, INSTR_CREATE_mov_imm(dcontext, tls_slot, OPND_CREATE_INT32((int)tgt))); opnd_set_disp(&tls_slot, opnd_get_disp(tls_slot) + 4); instrlist_meta_append( bb->ilist, INSTR_CREATE_mov_imm(dcontext, tls_slot, OPND_CREATE_INT32((int)(tgt >> 32)))); if (instr_is_ubr(bb->instr)) { instrlist_meta_append( bb->ilist, INSTR_CREATE_jmp_ind(dcontext, opnd_create_tls_slot(os_tls_offset(TLS_XAX_SLOT)))); bb->exit_type |= instr_branch_type(bb->instr); } else { ASSERT(instr_is_call_direct(bb->instr)); instrlist_meta_append( bb->ilist, INSTR_CREATE_call_ind(dcontext, opnd_create_tls_slot(os_tls_offset(TLS_XAX_SLOT)))); } if (appended) instrlist_remove(bb->ilist, bb->instr); instr_destroy(dcontext, bb->instr); bb->instr = NULL; #elif defined(ARM) ASSERT_NOT_IMPLEMENTED(false); /* i#1582 */ #else if (appended) { /* avoid assert about meta w/ translation but no restore_state callback */ instr_set_translation(bb->instr, NULL); } else instrlist_append(bb->ilist, bb->instr); /* Indicate that relative target must be * re-encoded, and that it is not an exit cti. * However, we must mangle this to ensure it reaches (i#992) * which we special-case in d_r_mangle(). */ instr_set_meta(bb->instr); instr_set_raw_bits_valid(bb->instr, false); #endif } /* Perform checks such as looking for dynamo stopping points and bad places * to be. We assume we only have to check after control transfer instructions, * i.e., we assume that all of these conditions are procedures that are only * entered by calling or jumping, never falling through. */ static inline bool check_for_stopping_point(dcontext_t *dcontext, build_bb_t *bb) { #ifdef DR_APP_EXPORTS if (must_escape_from(bb->cur_pc)) { /* x64 will zero-extend to rax, so we use eax here */ reg_id_t reg = IF_X86_ELSE(REG_EAX, DR_REG_R0); BBPRINT(bb, 3, "interp: emergency exit from " PFX "\n", bb->cur_pc); /* if ever find ourselves at top of one of these, immediately issue * a ret instruction...haven't set up frame yet so stack fine, only * problem is return value, go ahead and overwrite xax, it's * caller-saved. * FIXME: is this ok? */ /* move 0 into xax/r0 -- our functions return 0 to indicate success */ instrlist_append( bb->ilist, XINST_CREATE_load_int(dcontext, opnd_create_reg(reg), OPND_CREATE_INT32(0))); /* insert a ret instruction */ instrlist_append(bb->ilist, XINST_CREATE_return(dcontext)); /* should this be treated as a real return? */ bb->exit_type |= LINK_INDIRECT | LINK_RETURN; bb->exit_target = get_ibl_routine(dcontext, IBL_LINKED, DEFAULT_IBL_BB(), IBL_RETURN); return true; } #endif /* DR_APP_EXPORTS */ #ifdef CHECK_RETURNS_SSE2 if (bb->cur_pc == (app_pc)longjmp) { SYSLOG_INTERNAL_WARNING("encountered longjmp, which will cause ret mismatch!"); } #endif return is_stopping_point(dcontext, bb->cur_pc); } /* Arithmetic eflags analysis to see if sequence of instrs reads an * arithmetic flag prior to writing it. * Usage: first initialize status to 0 and eflags_6 to 0. 
* Then call this routine for each instr in sequence, assigning result to status. * eflags_6 holds flags written and read so far. * Uses these flags, defined in instr.h, as status values: * EFLAGS_WRITE_ARITH = writes all arith flags before reading any * EFLAGS_WRITE_OF = writes OF before reading it (x86-onlY) * EFLAGS_READ_ARITH = reads some of arith flags before writing * EFLAGS_READ_OF = reads OF before writing OF (x86-only) * 0 = no information yet * On ARM, Q and GE flags are ignored. */ static inline int eflags_analysis(instr_t *instr, int status, uint *eflags_6) { uint e6 = *eflags_6; /* local copy */ uint e6_w2r = EFLAGS_WRITE_TO_READ(e6); uint instr_eflags = instr_get_arith_flags(instr, DR_QUERY_DEFAULT); /* Keep going until result is non-zero, also keep going if * result is writes to OF to see if later writes to rest of flags * before reading any, and keep going if reads one of the 6 to see * if later writes to OF before reading it. */ if (instr_eflags == 0 || status == EFLAGS_WRITE_ARITH IF_X86(|| status == EFLAGS_READ_OF)) return status; /* we ignore interrupts */ if ((instr_eflags & EFLAGS_READ_ARITH) != 0 && (!instr_opcode_valid(instr) || !instr_is_interrupt(instr))) { /* store the flags we're reading */ e6 |= (instr_eflags & EFLAGS_READ_ARITH); *eflags_6 = e6; if ((e6_w2r | (instr_eflags & EFLAGS_READ_ARITH)) != e6_w2r) { /* we're reading a flag that has not been written yet */ status = EFLAGS_READ_ARITH; /* some read before all written */ LOG(THREAD_GET, LOG_INTERP, 4, "\treads flag before writing it!\n"); #ifdef X86 if ((instr_eflags & EFLAGS_READ_OF) != 0 && (e6 & EFLAGS_WRITE_OF) == 0) { status = EFLAGS_READ_OF; /* reads OF before writing! */ LOG(THREAD_GET, LOG_INTERP, 4, "\t reads OF prior to writing it!\n"); } #endif } } else if ((instr_eflags & EFLAGS_WRITE_ARITH) != 0) { /* store the flags we're writing */ e6 |= (instr_eflags & EFLAGS_WRITE_ARITH); *eflags_6 = e6; /* check if all written but none read yet */ if ((e6 & EFLAGS_WRITE_ARITH) == EFLAGS_WRITE_ARITH && (e6 & EFLAGS_READ_ARITH) == 0) { status = EFLAGS_WRITE_ARITH; /* all written before read */ LOG(THREAD_GET, LOG_INTERP, 4, "\twrote all 6 flags now!\n"); } #ifdef X86 /* check if at least OF was written but not read */ else if ((e6 & EFLAGS_WRITE_OF) != 0 && (e6 & EFLAGS_READ_OF) == 0) { status = EFLAGS_WRITE_OF; /* OF written before read */ LOG(THREAD_GET, LOG_INTERP, 4, "\twrote overflow flag before reading it!\n"); } #endif } return status; } /* check origins of code for several purposes: * 1) we need list of areas where this thread's fragments come * from, for faster flushing on munmaps * 2) also for faster flushing, each vmarea has a list of fragments * 3) we need to mark as read-only any writable region that * has a fragment come from it, to handle self-modifying code * 4) for PROGRAM_SHEPHERDING restricted code origins for security * 5) for restricted execution environments: not letting bb cross regions */ /* FIXME CASE 7380: since report security violation before execute off bad page, can be false positive due to: - a faulting instruction in middle of bb would have prevented getting there - ignorable syscall in middle - self-mod code would have ended bb sooner than bad page One solution is to have check_thread_vm_area() return false and have bb building stop at checked_end if a violation will occur when we get there. Then we only raise the violation once building a bb starting there. 
*/ static inline void check_new_page_start(dcontext_t *dcontext, build_bb_t *bb) { DEBUG_DECLARE(bool ok;) if (!bb->check_vm_area) return; DEBUG_DECLARE(ok =) check_thread_vm_area(dcontext, bb->start_pc, bb->start_pc, (bb->record_vmlist ? &bb->vmlist : NULL), &bb->flags, &bb->checked_end, false /*!xfer*/); ASSERT(ok); /* cannot return false on non-xfer */ bb->last_page = bb->start_pc; if (bb->overlap_info != NULL) reset_overlap_info(dcontext, bb); } /* Walk forward in straight line from prev_pc to new_pc. * FIXME: with checked_end we don't need to call this on every contig end * while bb building like we used to. Should revisit the overlap info and * walk_app_bb reasons for keeping those contig() calls and see if we can * optimize them away for bb building at least. * i#993: new_pc points to the last byte of the current instruction and is not * an open-ended endpoint. */ static inline bool check_new_page_contig(dcontext_t *dcontext, build_bb_t *bb, app_pc new_pc) { bool is_first_instr = (bb->instr_start == bb->start_pc); if (!bb->check_vm_area) return true; if (bb->checked_end == NULL) { ASSERT(new_pc == bb->start_pc); } else if (new_pc >= bb->checked_end) { if (!check_thread_vm_area(dcontext, new_pc, bb->start_pc, (bb->record_vmlist ? &bb->vmlist : NULL), &bb->flags, &bb->checked_end, /* i#989: We don't want to fall through to an * incompatible vmarea, so we treat fall * through like a transfer. We can't end the * bb before the first instruction, so we pass * false to forcibly merge in the vmarea * flags. */ !is_first_instr /*xfer*/)) { return false; } } if (bb->overlap_info != NULL) update_overlap_info(dcontext, bb, new_pc, false /*not jmp*/); DOLOG(4, LOG_INTERP, { if (PAGE_START(bb->last_page) != PAGE_START(new_pc)) LOG(THREAD, LOG_INTERP, 4, "page boundary crossed\n"); }); bb->last_page = new_pc; /* update even if not new page, for walk_app_bb */ return true; } /* Direct cti from prev_pc to new_pc */ static bool check_new_page_jmp(dcontext_t *dcontext, build_bb_t *bb, app_pc new_pc) { /* For tracking purposes, check the last byte of the cti. */ bool ok = check_new_page_contig(dcontext, bb, bb->cur_pc - 1); ASSERT(ok && "should have checked cur_pc-1 in decode loop"); if (!ok) /* Don't follow the jmp in release build. */ return false; /* cur sandboxing doesn't handle direct cti * not good enough to only check this at top of interp -- could walk contig * from non-selfmod to selfmod page, and then do a direct cti, which * check_thread_vm_area would allow (no flag changes on direct cti)! * also not good enough to put this check in check_thread_vm_area, as that * only checks across pages. */ if ((bb->flags & FRAG_SELFMOD_SANDBOXED) != 0) return false; if (PAGE_START(bb->last_page) != PAGE_START(new_pc)) LOG(THREAD, LOG_INTERP, 4, "page boundary crossed\n"); /* do not walk into a native exec dll (we assume not currently there, * though could happen if bypass a gateway -- even then this is a feature * to allow getting back to native ASAP) * FIXME: we could assume that such direct calls only * occur from DGC, and rely on check_thread_vm_area to disallow, * as an (unsafe) optimization */ if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_dircalls) && !vmvector_empty(native_exec_areas) && is_native_pc(new_pc)) return false; #ifdef CLIENT_INTERFACE /* i#805: If we're crossing a module boundary between two modules that are * and aren't on null_instrument_list, don't elide the jmp. 
* XXX i#884: if we haven't yet executed from the 2nd module, the client * won't receive the module load event yet and we might include code * from it here. It would be tricky to solve that, and it should only happen * if the client turns on elision, so we leave it. */ if ((!!os_module_get_flag(bb->cur_pc, MODULE_NULL_INSTRUMENT)) != (!!os_module_get_flag(new_pc, MODULE_NULL_INSTRUMENT))) return false; #endif if (!bb->check_vm_area) return true; /* need to check this even if an intra-page jmp b/c we allow sub-page vm regions */ if (!check_thread_vm_area(dcontext, new_pc, bb->start_pc, (bb->record_vmlist ? &bb->vmlist : NULL), &bb->flags, &bb->checked_end, true /*xfer*/)) return false; if (bb->overlap_info != NULL) update_overlap_info(dcontext, bb, new_pc, true /*jmp*/); bb->flags |= FRAG_HAS_DIRECT_CTI; bb->last_page = new_pc; /* update even if not new page, for walk_app_bb */ return true; } static inline void bb_process_single_step(dcontext_t *dcontext, build_bb_t *bb) { LOG(THREAD, LOG_INTERP, 2, "interp: single step exception bb at " PFX "\n", bb->instr_start); /* FIXME i#2144 : handling a rep string operation. * In this case, we should test if only one iteration is done * before the single step exception. */ instrlist_append(bb->ilist, bb->instr); instr_set_translation(bb->instr, bb->instr_start); /* Mark instruction as special exit. */ instr_branch_set_special_exit(bb->instr, true); bb->exit_type |= LINK_SPECIAL_EXIT; /* Make this bb thread-private and a trace barrier. */ bb->flags &= ~FRAG_SHARED; bb->flags |= FRAG_CANNOT_BE_TRACE; } static inline void bb_process_invalid_instr(dcontext_t *dcontext, build_bb_t *bb) { /* invalid instr: end bb BEFORE the instr, we'll throw exception if we * reach the instr itself */ LOG(THREAD, LOG_INTERP, 2, "interp: invalid instr at " PFX "\n", bb->instr_start); /* This routine is called by more than just bb builder, also used * for recreating state, so check bb->app_interp parameter to find out * if building a real app bb to be executed */ if (bb->app_interp && bb->instr_start == bb->start_pc) { /* This is first instr in bb so it will be executed for sure and * we need to generate an invalid instruction exception. * A benefit of being first instr is that the state is easy * to translate. */ #ifdef WINDOWS /* Copying the invalid bytes and having the processor generate * the exception would be cleaner in every way except our fear * of a new processor making those bytes valid and us inadvertently * executing the unexamined instructions afterward, since we do not * know the proper amount of bytes to copy. Copying is cleaner * since Windows splits invalid instructions into different cases, * an invalid lock prefix and maybe some other distinctions * (it's all interrupt 6 to the processor), and it is hard to * duplicate Windows' behavior in our forged exception. */ /* FIXME case 10672: provide a runtime option to specify new * instruction formats to avoid this app exception */ ASSERT(dcontext->bb_build_info == bb); bb_build_abort(dcontext, true /*clean vm area*/, true /*unlock*/); /* FIXME : we use illegal instruction here, even though we * know windows uses different exception codes for different * types of invalid instructions (for ex. 
STATUS_INVALID_LOCK * _SEQUENCE for lock prefix on a jmp instruction) */ if (TEST(DUMPCORE_FORGE_ILLEGAL_INST, DYNAMO_OPTION(dumpcore_mask))) os_dump_core("Warning: Encountered Illegal Instruction"); os_forge_exception(bb->instr_start, ILLEGAL_INSTRUCTION_EXCEPTION); ASSERT_NOT_REACHED(); #else /* FIXME: Linux hack until we have a real os_forge_exception implementation: * copy the bytes and have the process generate the exception. * Once remove this, also disable check at top of insert_selfmod_sandbox * FIXME PR 307880: we now have a preliminary * os_forge_exception impl, but I'm leaving this hack until * we're more comfortable w/ our forging. */ uint sz; instrlist_append(bb->ilist, bb->instr); /* pretend raw bits valid to get it encoded * For now we just do 17 bytes, being wary of unreadable pages. * FIXME: better solution is to have decoder guess at length (if * ok opcode just bad lock prefix or something know length, if * bad opcode just bytes up until know it's bad). */ if (!is_readable_without_exception(bb->instr_start, MAX_INSTR_LENGTH)) { app_pc nxt_page = (app_pc)ALIGN_FORWARD(bb->instr_start, PAGE_SIZE); sz = nxt_page - bb->instr_start; } else { sz = MAX_INSTR_LENGTH; } bb->cur_pc += sz; /* just in case, should have a non-self target */ ASSERT(bb->cur_pc > bb->instr_start); /* else still a self target */ instr_set_raw_bits(bb->instr, bb->instr_start, sz); bb->invalid_instr_hack = true; #endif } else { instr_destroy(dcontext, bb->instr); bb->instr = NULL; } } /* FIXME i#1668, i#2974: NYI on ARM/AArch64 */ #ifdef X86 /* returns true to indicate "elide and continue" and false to indicate "end bb now" * should be used both for converted indirect jumps and * FIXME: for direct jumps by bb_process_ubr */ static inline bool follow_direct_jump(dcontext_t *dcontext, build_bb_t *bb, app_pc target) { if (bb->follow_direct && !must_not_be_entered(target) && bb->num_elide_jmp < DYNAMO_OPTION(max_elide_jmp) && (DYNAMO_OPTION(elide_back_jmps) || bb->cur_pc <= target)) { if (check_new_page_jmp(dcontext, bb, target)) { /* Elide unconditional branch and follow target */ bb->num_elide_jmp++; STATS_INC(total_elided_jmps); STATS_TRACK_MAX(max_elided_jmps, bb->num_elide_jmp); bb->cur_pc = target; BBPRINT(bb, 4, " continuing at target " PFX "\n", bb->cur_pc); return true; /* keep bb going */ } else { BBPRINT(bb, 3, " NOT following jmp from " PFX " to " PFX "\n", bb->instr_start, target); } } else { BBPRINT(bb, 3, " NOT attempting to follow jump from " PFX " to " PFX "\n", bb->instr_start, target); } return false; /* stop bb */ } #endif /* X86 */ /* returns true to indicate "elide and continue" and false to indicate "end bb now" */ static inline bool bb_process_ubr(dcontext_t *dcontext, build_bb_t *bb) { app_pc tgt = (byte *)opnd_get_pc(instr_get_target(bb->instr)); BBPRINT(bb, 4, "interp: direct jump at " PFX "\n", bb->instr_start); if (must_not_be_elided(tgt)) { #ifdef WINDOWS byte *wrapper_start; if (is_syscall_trampoline(tgt, &wrapper_start)) { /* HACK to avoid entering the syscall trampoline that is meant * only for native syscalls -- we replace the jmp with the * original app mov immed that it replaced */ BBPRINT(bb, 3, "interp: replacing syscall trampoline @" PFX " w/ orig mov @" PFX "\n", bb->instr_start, wrapper_start); instr_reset(dcontext, bb->instr); /* leave bb->cur_pc unchanged */ decode(dcontext, wrapper_start, bb->instr); /* ASSUMPTION: syscall trampoline puts hooked instruction * (usually mov_imm but can be lea if hooked_deeper) here */ ASSERT(instr_get_opcode(bb->instr) == OP_mov_imm || 
(instr_get_opcode(bb->instr) == OP_lea && DYNAMO_OPTION(native_exec_hook_conflict) == HOOKED_TRAMPOLINE_HOOK_DEEPER)); instrlist_append(bb->ilist, bb->instr); /* translation should point to the trampoline at the * original application address */ if (bb->record_translation) instr_set_translation(bb->instr, bb->instr_start); if (instr_get_opcode(bb->instr) == OP_lea) { app_pc translation = bb->instr_start + instr_length(dcontext, bb->instr); ASSERT_CURIOSITY(instr_length(dcontext, bb->instr) == 4); /* we hooked deep need to add the int 2e instruction */ /* can't use create_syscall_instr because of case 5217 hack */ ASSERT(get_syscall_method() == SYSCALL_METHOD_INT); bb->instr = INSTR_CREATE_int(dcontext, opnd_create_immed_int((char)0x2e, OPSZ_1)); if (bb->record_translation) instr_set_translation(bb->instr, translation); ASSERT(instr_is_syscall(bb->instr) && instr_get_opcode(bb->instr) == OP_int); instrlist_append(bb->ilist, bb->instr); return bb_process_syscall(dcontext, bb); } return true; /* keep bb going */ } #endif BBPRINT(bb, 3, "interp: NOT following jmp to " PFX "\n", tgt); /* add instruction to instruction list */ bb_add_native_direct_xfer(dcontext, bb, false /*!appended*/); /* Case 8711: coarse-grain can't handle non-exit cti */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); return false; /* end bb now */ } else { if (bb->follow_direct && !must_not_be_entered(tgt) && bb->num_elide_jmp < DYNAMO_OPTION(max_elide_jmp) && (DYNAMO_OPTION(elide_back_jmps) || bb->cur_pc <= tgt)) { if (check_new_page_jmp(dcontext, bb, tgt)) { /* Elide unconditional branch and follow target */ bb->num_elide_jmp++; STATS_INC(total_elided_jmps); STATS_TRACK_MAX(max_elided_jmps, bb->num_elide_jmp); bb->cur_pc = tgt; BBPRINT(bb, 4, " continuing at target " PFX "\n", bb->cur_pc); /* pretend never saw this ubr: delete instr, then continue */ instr_destroy(dcontext, bb->instr); bb->instr = NULL; return true; /* keep bb going */ } else { BBPRINT(bb, 3, " NOT following direct jmp from " PFX " to " PFX "\n", bb->instr_start, tgt); } } /* End this bb now */ bb->exit_target = opnd_get_pc(instr_get_target(bb->instr)); instrlist_append(bb->ilist, bb->instr); return false; /* end bb */ } return true; /* keep bb going */ } #ifdef X86 /* returns true if call is elided, * and false if not following due to hitting a limit or other reason */ static bool follow_direct_call(dcontext_t *dcontext, build_bb_t *bb, app_pc callee) { /* FIXME: This code should be reused in bb_process_convertible_indcall() * and in bb_process_call_direct() */ if (bb->follow_direct && !must_not_be_entered(callee) && bb->num_elide_call < DYNAMO_OPTION(max_elide_call) && (DYNAMO_OPTION(elide_back_calls) || bb->cur_pc <= callee)) { if (check_new_page_jmp(dcontext, bb, callee)) { bb->num_elide_call++; STATS_INC(total_elided_calls); STATS_TRACK_MAX(max_elided_calls, bb->num_elide_call); bb->cur_pc = callee; BBPRINT(bb, 4, " continuing in callee at " PFX "\n", bb->cur_pc); return true; /* keep bb going in callee */ } else { BBPRINT(bb, 3, " NOT following direct (or converted) call from " PFX " to " PFX "\n", bb->instr_start, callee); } } else { BBPRINT(bb, 3, " NOT attempting to follow call from " PFX " to " PFX "\n", bb->instr_start, callee); } return false; /* stop bb */ } #endif /* X86 */ static inline void bb_stop_prior_to_instr(dcontext_t *dcontext, build_bb_t *bb, bool appended) { if (appended) instrlist_remove(bb->ilist, bb->instr); instr_destroy(dcontext, bb->instr); bb->instr = NULL; bb->cur_pc = bb->instr_start; } /* returns true to 
indicate "elide and continue" and false to indicate "end bb now" */ static inline bool bb_process_call_direct(dcontext_t *dcontext, build_bb_t *bb) { byte *callee = (byte *)opnd_get_pc(instr_get_target(bb->instr)); #ifdef CUSTOM_TRACES_RET_REMOVAL if (callee == bb->instr_start + 5) { LOG(THREAD, LOG_INTERP, 4, "found call to next instruction\n"); } else dcontext->num_calls++; #endif STATS_INC(num_all_calls); BBPRINT(bb, 4, "interp: direct call at " PFX "\n", bb->instr_start); if (leave_call_native(callee)) { BBPRINT(bb, 3, "interp: NOT inlining or mangling call to " PFX "\n", callee); /* Case 8711: coarse-grain can't handle non-exit cti. * If we allow this fragment to be coarse we must kill the freeze * nudge thread! */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); bb_add_native_direct_xfer(dcontext, bb, true /*appended*/); return true; /* keep bb going, w/o inlining call */ } else { if (DYNAMO_OPTION(coarse_split_calls) && DYNAMO_OPTION(coarse_units) && TEST(FRAG_COARSE_GRAIN, bb->flags)) { if (instrlist_first(bb->ilist) != bb->instr) { /* have call be in its own bb */ bb_stop_prior_to_instr(dcontext, bb, true /*appended already*/); return false; /* stop bb */ } else { /* single-call fine-grained bb */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); } } /* FIXME: use follow_direct_call() */ if (bb->follow_direct && !must_not_be_entered(callee) && bb->num_elide_call < DYNAMO_OPTION(max_elide_call) && (DYNAMO_OPTION(elide_back_calls) || bb->cur_pc <= callee)) { if (check_new_page_jmp(dcontext, bb, callee)) { bb->num_elide_call++; STATS_INC(total_elided_calls); STATS_TRACK_MAX(max_elided_calls, bb->num_elide_call); bb->cur_pc = callee; BBPRINT(bb, 4, " continuing in callee at " PFX "\n", bb->cur_pc); return true; /* keep bb going */ } } BBPRINT(bb, 3, " NOT following direct call from " PFX " to " PFX "\n", bb->instr_start, callee); /* End this bb now */ if (instr_is_cbr(bb->instr)) { /* Treat as cbr, not call */ instr_exit_branch_set_type(bb->instr, instr_branch_type(bb->instr)); } else { bb->exit_target = callee; } return false; /* end bb now */ } return true; /* keep bb going */ } #ifdef WINDOWS /* We check if the instrs call, mov, and sysenter are * "call (%xdx); mov %xsp -> %xdx" or "call %xdx; mov %xsp -> %xdx" * and "sysenter". */ bool instr_is_call_sysenter_pattern(instr_t *call, instr_t *mov, instr_t *sysenter) { instr_t *instr; if (call == NULL || mov == NULL || sysenter == NULL) return false; if (instr_is_meta(call) || instr_is_meta(mov) || instr_is_meta(sysenter)) return false; if (instr_get_next(call) != mov || instr_get_next(mov) != sysenter) return false; /* check sysenter */ if (instr_get_opcode(sysenter) != OP_sysenter) return false; /* FIXME Relax the pattern matching on the "mov; call" pair so that small * changes in the register dataflow and call construct are tolerated. */ /* Did we find a "mov %xsp -> %xdx"? */ instr = mov; if (!(instr != NULL && instr_get_opcode(instr) == OP_mov_ld && instr_num_srcs(instr) == 1 && instr_num_dsts(instr) == 1 && opnd_is_reg(instr_get_dst(instr, 0)) && opnd_get_reg(instr_get_dst(instr, 0)) == REG_XDX && opnd_is_reg(instr_get_src(instr, 0)) && opnd_get_reg(instr_get_src(instr, 0)) == REG_XSP)) { return false; } /* Did we find a "call (%xdx) or "call %xdx" that's already marked * for ind->direct call conversion? */ instr = call; if (!(instr != NULL && TEST(INSTR_IND_CALL_DIRECT, instr->flags) && instr_is_call_indirect(instr) && /* The 2nd src operand should always be %xsp. 
          */
         opnd_is_reg(instr_get_src(instr, 1)) &&
         opnd_get_reg(instr_get_src(instr, 1)) == REG_XSP &&
         /* Match 'call (%xdx)' for post-SP2. */
         ((opnd_is_near_base_disp(instr_get_src(instr, 0)) &&
           opnd_get_base(instr_get_src(instr, 0)) == REG_XDX &&
           opnd_get_disp(instr_get_src(instr, 0)) == 0) ||
          /* Match 'call %xdx' for pre-SP2. */
          (opnd_is_reg(instr_get_src(instr, 0)) &&
           opnd_get_reg(instr_get_src(instr, 0)) == REG_XDX)))) {
        return false;
    }
    return true;
}

/* Walk up from the bb->instr and verify that the preceding instructions
 * match the pattern that we expect to precede a sysenter.
 */
static instr_t *
bb_verify_sysenter_pattern(dcontext_t *dcontext, build_bb_t *bb)
{
    /* Walk back up 2 instructions and verify that there's a
     * "call (%xdx); mov %xsp -> %xdx" or "call %xdx; mov %xsp -> %xdx"
     * just prior to the sysenter.
     * We use "xsp" and "xdx" to be ready for x64 sysenter though we don't
     * expect to see it.
     */
    instr_t *mov, *call;
    mov = instr_get_prev_expanded(dcontext, bb->ilist, bb->instr);
    if (mov == NULL)
        return NULL;
    call = instr_get_prev_expanded(dcontext, bb->ilist, mov);
    if (call == NULL)
        return NULL;
    if (!instr_is_call_sysenter_pattern(call, mov, bb->instr)) {
        BBPRINT(bb, 3, "bb_verify_sysenter_pattern -- pattern didn't match\n");
        return NULL;
    }
    return call;
}

/* Only used for the Borland SEH exemption. */
/* FIXME - we can't really tell a push from a pop since both are typically a
 * mov to fs:[0], but double processing doesn't hurt. */
/* NOTE we don't see dynamic SEH frame pushes, we only see the first SEH push
 * per mov -> fs:[0] instruction in the app. So we don't see modified in place
 * handler addresses (see at_Borland_SEH_rct_exemption()) or handler addresses
 * that are passed into a shared routine that sets up the frame (not yet seen,
 * note that MS dlls that have a _SEH_prolog hardcode the handler address in
 * the _SEH_prolog routine, only the data is passed in).
 */
static void
bb_process_SEH_push(dcontext_t *dcontext, build_bb_t *bb, void *value)
{
    if (value == NULL || value == (void *)PTR_UINT_MINUS_1) {
        /* could be popping off the last frame (leaving -1) of the SEH stack */
        STATS_INC(num_endlist_SEH_write);
        ASSERT_CURIOSITY(value != NULL);
        return;
    }
    LOG(THREAD, LOG_INTERP, 3, "App moving " PFX " to fs:[0]\n", value);
#    ifdef RETURN_AFTER_CALL
    if (DYNAMO_OPTION(borland_SEH_rct)) {
        /* xref case 5752, the Borland compiler SEH implementation uses a push
         * imm ret motif for fall through to the finally of a try finally block
         * (very similar to what the Microsoft NT at_SEH_rct_exception() is
         * doing). The layout will always look like this:
         *     push e: (imm32)  (e should be in the .E/.F table)
         *  a:
         *     ...
         *  b: ret
         *  c: jmp rel32        (c should be in the .E/.F table)
         *  d: jmp a:           (rel8/32)
         *     ... (usually nothing)
         *  e:
         * (where ret at b is targeting e, or a valid after call). The
         * exception dispatcher calls c (the SEH frame has c as the handler)
         * which jmps to the exception handler which, in turn, calls d to
         * execute the finally block. Fall through is as shown above. So,
         * we see a .E violation for the handler's call to d and a .C violation
         * for the fall-through case of the ret @ b targeting e. We may also
         * see a .E violation for a call to a as sometimes the handler computes
         * the target of the jmp @ d and passes that to a different exception
         * handler.
         *
         * For try-except we see the following layout:
         *    I've only seen jmp ind in the case that led to needing
         *    at_Borland_SEH_rct_exemption() to be added, not that
         *    it makes any difference.
         *    [ jmp z: (rel8/32) || (rarely) ret || (very rarely) jmp ind ]
         *  x: jmp rel32        (x should be in the .E/.F table)
         *  y:
         *     ...
         *     call rel32
         *    [ z: ... || ret ]
         * Though there may be other optimized layouts (the ret instead of the
         * jmp z: is one such) so we may not want to rely on anything other
         * than x y. The exception dispatcher calls x (the SEH frame has x as
         * the handler) which jmps to the exception handler which, in turn,
         * jmps to y to execute the except block. We see a .F violation from
         * the handler's jmp to y. at_Borland_SEH_rct_exemption() covers a
         * case where the address of x (and thus y) in an existing SEH frame
         * is changed in place instead of popping and pushing a new frame.
         *
         * All addresses (rel and otherwise) should be in the same module. So
         * we need to recognize the pattern and add d:/y: to the .E/.F table
         * as well as a: (sometimes the handler calculates the target of d and
         * passes that up to a higher level routine, though I don't see the
         * point) and add e: to the .C table.
         *
         * It would be preferable to handle these exemptions reactively at
         * the violation point, but unfortunately, by the time we get to the
         * violation the SEH frame information has been popped off the stack
         * and is lost, so we have to do it pre-emptively here (pattern
         * matching at violation time has proven too difficult in the face of
         * certain compiler optimizations). See at_Borland_SEH_rct_exemption()
         * in callback.c, that could handle all ind branches to y and ind calls
         * to d (see below) at an acceptable level of security if we desired.
         * Handling the ret @ b to e reactively would require the ability to
         * recreate the exact src cti (so we can use the addr of the ret to
         * pattern match) at the violation point (something that can't always
         * currently be done, reset flushing etc.). Handling the ind call to
         * a (which I've never actually seen, though I've seen the address
         * computed and it looks like it could likely be hit) reactively is
         * more tricky. Prob. the only way to handle that is to allow .E/.F
         * transitions to any address after a push imm32 of an address in the
         * same module, but that might be too permissive. FIXME - should still
         * revisit doing the exemptions reactively at some point, esp. once we
         * can reliably get the src cti.
         */
        extern bool seen_Borland_SEH; /* set for callback.c */
        /* First read in the SEH frame, this is the observed structure and
         * the first two fields (which are all that we use) are constrained by
         * the ntdll exception dispatcher (see the EXCEPTION_REGISTRATION
         * declaration in ntdll.h). */
        /* FIXME - could just use EXCEPTION_REGISTRATION period since all we
         * need is the handler address and it would allow simpler curiosity
         * [see 8181] below. If, as is expected, other options make use of
         * this routine we'll probably have one shared get of the SEH frame
         * anyways. */
        typedef struct _borland_seh_frame_t {
            EXCEPTION_REGISTRATION reg;
            reg_t xbp; /* not used by us */
        } borland_seh_frame_t;
        borland_seh_frame_t frame;
        /* will hold [b,e] or [x-1,y] */
        byte target_buf[RET_0_LENGTH + 2 * JMP_LONG_LENGTH];
        app_pc handler_jmp_target = NULL;
        if (!d_r_safe_read(value, sizeof(frame), &frame)) {
            /* We already checked for NULL and -1 above so this should be
             * a valid SEH frame. Xref 8181, the borland_seh_frame_t struct is
             * bigger than EXCEPTION_REGISTRATION (which is all that is
             * required) so verify the smaller size is readable.
*/ ASSERT_CURIOSITY( sizeof(EXCEPTION_REGISTRATION) < sizeof(frame) && d_r_safe_read(value, sizeof(EXCEPTION_REGISTRATION), &frame)); goto post_borland; } /* frame.reg.handler is c or y, read extra prior bytes to look for b */ if (!d_r_safe_read((app_pc)frame.reg.handler - RET_0_LENGTH, sizeof(target_buf), target_buf)) { goto post_borland; } if (is_jmp_rel32(&target_buf[RET_0_LENGTH], (app_pc)frame.reg.handler, &handler_jmp_target)) { /* we have a possible match, now do the more expensive checking */ app_pc base; LOG(THREAD, LOG_INTERP, 3, "Read possible borland SEH frame @" PFX "\n\t" "next=" PFX " handler=" PFX " xbp=" PFX "\n\t", value, frame.reg.prev, frame.reg.handler, frame.xbp); DOLOG(3, LOG_INTERP, { dump_buffer_as_bytes(THREAD, target_buf, sizeof(target_buf), 0); }); /* optimize check if we've already processed this frame once */ if ((DYNAMO_OPTION(rct_ind_jump) != OPTION_DISABLED || DYNAMO_OPTION(rct_ind_call) != OPTION_DISABLED) && rct_ind_branch_target_lookup( dcontext, (app_pc)frame.reg.handler + JMP_LONG_LENGTH)) { /* we already processed this SEH frame once, this is prob. a * frame pop, no need to continue */ STATS_INC(num_borland_SEH_dup_frame); LOG(THREAD, LOG_INTERP, 3, "Processing duplicate Borland SEH frame\n"); goto post_borland; } base = get_module_base((app_pc)frame.reg.handler); STATS_INC(num_borland_SEH_initial_match); /* Perf opt, we use the cheaper get_allocation_base() below instead * of get_module_base(). We are checking the result against a * known module base (base) so no need to duplicate the is module * check. FIXME - the checks prob. aren't even necessary given the * later is_in_code_section checks. Xref case 8171. */ /* FIXME - (perf) we could cache the region from the first * is_in_code_section() call and check against that before falling * back on is_in_code_section in case of multiple code sections. */ if (base != NULL && get_allocation_base(handler_jmp_target) == base && get_allocation_base(bb->instr_start) == base && /* FIXME - with -rct_analyze_at_load we should be able to * verify that frame->handler (x: c:) is on the .E/.F * table already. We could also try to match known pre x: * post y: patterns. 
*/ is_in_code_section(base, bb->instr_start, NULL, NULL) && is_in_code_section(base, handler_jmp_target, NULL, NULL) && is_range_in_code_section(base, (app_pc)frame.reg.handler, (app_pc)frame.reg.handler + JMP_LONG_LENGTH + 1, NULL, NULL)) { app_pc finally_target; byte push_imm_buf[PUSH_IMM32_LENGTH]; DEBUG_DECLARE(bool ok;) /* we have a match, add handler+JMP_LONG_LENGTH (y: d:) * to .E/.F table */ STATS_INC(num_borland_SEH_try_match); LOG(THREAD, LOG_INTERP, 2, "Found Borland SEH frame adding " PFX " to .E/.F table\n", (app_pc)frame.reg.handler + JMP_LONG_LENGTH); if ((DYNAMO_OPTION(rct_ind_jump) != OPTION_DISABLED || DYNAMO_OPTION(rct_ind_call) != OPTION_DISABLED)) { d_r_mutex_lock(&rct_module_lock); rct_add_valid_ind_branch_target( dcontext, (app_pc)frame.reg.handler + JMP_LONG_LENGTH); d_r_mutex_unlock(&rct_module_lock); } /* we set this as an enabler for another exemption in * callback .C, see notes there */ if (!seen_Borland_SEH) { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); seen_Borland_SEH = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } /* case 8648: used to decide which RCT entries to persist */ DEBUG_DECLARE(ok =) os_module_set_flag(base, MODULE_HAS_BORLAND_SEH); ASSERT(ok); /* look for .C addresses for try finally */ if (target_buf[0] == RAW_OPCODE_ret && (is_jmp_rel32(&target_buf[RET_0_LENGTH + JMP_LONG_LENGTH], (app_pc)frame.reg.handler + JMP_LONG_LENGTH, &finally_target) || is_jmp_rel8(&target_buf[RET_0_LENGTH + JMP_LONG_LENGTH], (app_pc)frame.reg.handler + JMP_LONG_LENGTH, &finally_target)) && d_r_safe_read(finally_target - sizeof(push_imm_buf), sizeof(push_imm_buf), push_imm_buf) && push_imm_buf[0] == RAW_OPCODE_push_imm32) { app_pc push_val = *(app_pc *)&push_imm_buf[1]; /* do a few more, expensive, sanity checks */ /* FIXME - (perf) see earlier note on get_allocation_base() * and is_in_code_section() usage. */ if (get_allocation_base(finally_target) == base && is_in_code_section(base, finally_target, NULL, NULL) && get_allocation_base(push_val) == base && /* FIXME - could also check that push_val is in * .E/.F table, at least for -rct_analyze_at_load */ is_in_code_section(base, push_val, NULL, NULL)) { /* Full match, add push_val (e:) to the .C table * and finally_target (a:) to the .E/.F table */ STATS_INC(num_borland_SEH_finally_match); LOG(THREAD, LOG_INTERP, 2, "Found Borland SEH finally frame adding " PFX " to" " .C table and " PFX " to .E/.F table\n", push_val, finally_target); if ((DYNAMO_OPTION(rct_ind_jump) != OPTION_DISABLED || DYNAMO_OPTION(rct_ind_call) != OPTION_DISABLED)) { d_r_mutex_lock(&rct_module_lock); rct_add_valid_ind_branch_target(dcontext, finally_target); d_r_mutex_unlock(&rct_module_lock); } if (DYNAMO_OPTION(ret_after_call)) { fragment_add_after_call(dcontext, push_val); } } else { ASSERT_CURIOSITY(false && "partial borland seh finally match"); } } } } } post_borland: # endif /* RETURN_AFTER_CALL */ return; } /* helper routine for bb_process_fs_ref * return true if bb should be continued, false if it shouldn't */ static bool bb_process_fs_ref_opnd(dcontext_t *dcontext, build_bb_t *bb, opnd_t dst, bool *is_to_fs0) { ASSERT(is_to_fs0 != NULL); *is_to_fs0 = false; if (opnd_is_far_base_disp(dst) && /* FIXME - check size? */ opnd_get_segment(dst) == SEG_FS) { /* is a write to fs:[*] */ if (bb->instr_start != bb->start_pc) { /* Not first instruction in the bb, end bb before this * instruction, so we can see it as the first instruction of a * new bb where we can use the register state. 
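             * E.g., for "mov %eax -> fs:[0]" the app's %eax value is only
             * available from the mcontext at bb entry, so the store must be
             * re-examined as the first instruction of its own bb.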
             */
            /* As is, always ending the bb here has a mixed effect on mem usage
             * with default options. We do end up with slightly more bb's
             * (and associated bookkeeping costs), but frequently with MS dlls
             * we reduce code cache duplication from jmp/call elision
             * (_SEH_[Pro,Epi]log otherwise ends up frequently duplicated for
             * instance). */
            /* FIXME - we must stop the bb here even if there's already
             * a bb built for the next instruction, as we have to have
             * reproducible bb building for recreating app state. We should
             * only get here through code duplication (typically jmp/call
             * inlining, though can also be through multiple entry points into
             * the same block of non cti instructions). */
            bb_stop_prior_to_instr(dcontext, bb, false /*not appended yet*/);
            return false; /* stop bb */
        }
        /* Only process the push if building a new bb for cache, can't check
         * this any earlier since have to preserve bb building/ending behavior
         * even when not for cache (for recreation etc.). */
        if (bb->app_interp) {
            /* check if this is a write to fs:[0] */
            /* XXX: this won't identify all memory references (need to switch to
             * instr_compute_address_ex_priv() in order to handle VSIB) but the
             * current usage is just to identify the Borland pattern so that's ok.
             */
            if (opnd_compute_address_priv(dst, get_mcontext(dcontext)) == NULL) {
                /* we have a new mov to fs:[0] */
                *is_to_fs0 = true;
            }
        }
    }
    return true;
}

/* While currently only used for Borland SEH exemptions, this analysis could
 * also be helpful for other SEH tasks (xref case 5824).
 */
static bool
bb_process_fs_ref(dcontext_t *dcontext, build_bb_t *bb)
{
    ASSERT(DYNAMO_OPTION(process_SEH_push) &&
           instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS));
    /* If this is the first instruction of a bb for the cache we
     * want to fully decode it, check if it's pushing an SEH frame
     * and, if so, pass it to the SEH checking routines (currently
     * just used for the Borland SEH rct handling). If this is not
     * the first instruction of the bb then we want to stop the bb
     * just before this instruction so that when we do process this
     * instruction it will be the first in the bb (allowing us to
     * use the register state).
     */
    if (!bb->full_decode) {
        instr_decode(dcontext, bb->instr);
        /* it is possible this is an invalid instr that made it through the
         * fast decode, FIXME is there a better way to handle this? */
        if (!instr_valid(bb->instr)) {
            ASSERT_NOT_TESTED();
            if (bb->cur_pc == NULL)
                bb->cur_pc = bb->instr_start;
            bb_process_invalid_instr(dcontext, bb);
            return false; /* stop bb */
        }
        ASSERT(instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS));
    }
    /* expect to see only simple mov's to fs:[0] for new SEH frames
     * FIXME - might we see other types we'd want to intercept?
     * do we want to process pop instructions (usually just for removing
     * a frame)?
     */
    if (instr_get_opcode(bb->instr) == OP_mov_st) {
        bool is_to_fs0;
        opnd_t dst = instr_get_dst(bb->instr, 0);
        if (!bb_process_fs_ref_opnd(dcontext, bb, dst, &is_to_fs0))
            return false; /* end bb */
        /* Only process the push if building a new bb for cache, can't check
         * this any earlier since have to preserve bb building/ending behavior
         * even when not for cache (for recreation etc.).
*/ if (bb->app_interp) { if (is_to_fs0) { ptr_int_t value = 0; opnd_t src = instr_get_src(bb->instr, 0); if (opnd_is_immed_int(src)) { value = opnd_get_immed_int(src); } else if (opnd_is_reg(src)) { value = reg_get_value_priv(opnd_get_reg(src), get_mcontext(dcontext)); } else { ASSERT_NOT_REACHED(); } STATS_INC(num_SEH_pushes_processed); LOG(THREAD, LOG_INTERP, 3, "found mov to fs:[0] @ " PFX "\n", bb->instr_start); bb_process_SEH_push(dcontext, bb, (void *)value); } else { STATS_INC(num_fs_movs_not_SEH); } } } # if defined(DEBUG) && defined(INTERNAL) else if (INTERNAL_OPTION(check_for_SEH_push)) { /* Debug build Sanity check that we aren't missing SEH frame pushes */ int i; int num_dsts = instr_num_dsts(bb->instr); for (i = 0; i < num_dsts; i++) { bool is_to_fs0; opnd_t dst = instr_get_dst(bb->instr, i); if (!bb_process_fs_ref_opnd(dcontext, bb, dst, &is_to_fs0)) { STATS_INC(num_process_SEH_bb_early_terminate_debug); return false; /* end bb */ } /* common case is pop instructions to fs:[0] when popping an * SEH frame stored on tos */ if (is_to_fs0) { if (instr_get_opcode(bb->instr) == OP_pop) { LOG(THREAD, LOG_INTERP, 4, "found pop to fs:[0] @ " PFX "\n", bb->instr_start); STATS_INC(num_process_SEH_pop_fs0); } else { /* an unexpected SEH frame push */ LOG(THREAD, LOG_INTERP, 1, "found unexpected write to fs:[0] @" PFX "\n", bb->instr_start); DOLOG(1, LOG_INTERP, { d_r_loginst(dcontext, 1, bb->instr, ""); }); ASSERT_CURIOSITY(!is_to_fs0); } } } } # endif return true; /* continue bb */ } #endif /* win32 */ #if defined(UNIX) && !defined(DGC_DIAGNOSTICS) && defined(X86) /* The basic strategy for mangling mov_seg instruction is: * For mov fs/gs => reg/[mem], simply mangle it to write * the app's fs/gs selector value into dst. * For mov reg/mem => fs/gs, we make it as the first instruction * of bb, and mark that bb not linked and has mov_seg instr, * and change that instruction to be a nop. * Then whenever before entering code cache, we check if that's the bb * has mov_seg. If yes, we will update the information we maintained * about the app's fs/gs. */ /* check if the basic block building should continue on a mov_seg instr. */ static bool bb_process_mov_seg(dcontext_t *dcontext, build_bb_t *bb) { reg_id_t seg; if (!INTERNAL_OPTION(mangle_app_seg)) return true; /* continue bb */ /* if it is a read, we only need mangle the instruction. */ ASSERT(instr_num_srcs(bb->instr) == 1); if (opnd_is_reg(instr_get_src(bb->instr, 0)) && reg_is_segment(opnd_get_reg(instr_get_src(bb->instr, 0)))) return true; /* continue bb */ /* it is an update, we need set to be the first instr of bb */ ASSERT(instr_num_dsts(bb->instr) == 1); ASSERT(opnd_is_reg(instr_get_dst(bb->instr, 0))); seg = opnd_get_reg(instr_get_dst(bb->instr, 0)); ASSERT(reg_is_segment(seg)); /* we only need handle fs/gs */ if (seg != SEG_GS && seg != SEG_FS) return true; /* continue bb */ /* if no private loader, we only need mangle the non-tls seg */ if (seg == IF_X64_ELSE(SEG_FS, SEG_FS) && IF_CLIENT_INTERFACE_ELSE(!INTERNAL_OPTION(private_loader), true)) return true; /* continue bb */ if (bb->instr_start == bb->start_pc) { /* the first instruction, we can continue build bb. */ /* this bb cannot be part of trace! */ bb->flags |= FRAG_CANNOT_BE_TRACE; bb->flags |= FRAG_HAS_MOV_SEG; return true; /* continue bb */ } LOG(THREAD, LOG_INTERP, 3, "ending bb before mov_seg\n"); /* Set cur_pc back to the start of this instruction and delete this * instruction from the bb ilist. 
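     * The deferred "mov -> fs/gs" then re-executes as the first instruction
     * of its own bb, where the first-instruction case above records the
     * app's new selector value before entering the code cache.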
*/ bb->cur_pc = instr_get_raw_bits(bb->instr); instrlist_remove(bb->ilist, bb->instr); instr_destroy(dcontext, bb->instr); /* Set instr to NULL in order to get translation of exit cti correct. */ bb->instr = NULL; /* this block must be the last one in a trace * breaking traces here shouldn't be a perf issue b/c this is so rare, * it should happen only once per thread on setting up tls. */ bb->flags |= FRAG_MUST_END_TRACE; return false; /* stop bb here */ } #endif /* UNIX && X86 */ /* Returns true to indicate that ignorable syscall processing is completed * with *continue_bb indicating if the bb should be continued or not. * When returning false, continue_bb isn't pertinent. */ static bool bb_process_ignorable_syscall(dcontext_t *dcontext, build_bb_t *bb, int sysnum, bool *continue_bb) { STATS_INC(ignorable_syscalls); BBPRINT(bb, 3, "found ignorable system call 0x%04x\n", sysnum); #ifdef WINDOWS if (get_syscall_method() != SYSCALL_METHOD_SYSENTER) { DOCHECK(1, { if (get_syscall_method() == SYSCALL_METHOD_WOW64) ASSERT_NOT_TESTED(); }); if (continue_bb != NULL) *continue_bb = true; return true; } else { /* Can we continue interp after the sysenter at the instruction * after the call to sysenter? */ instr_t *call = bb_verify_sysenter_pattern(dcontext, bb); if (call != NULL) { /* If we're continuing code discovery at the after-call address, * change the cur_pc to continue at the after-call addr. This is * safe since the preceding call is in the fragment and * %xsp/(%xsp) hasn't changed since the call. Obviously, we assume * that the sysenter breaks control flow in fashion such any * instruction that follows it isn't reached by DR. */ if (DYNAMO_OPTION(ignore_syscalls_follow_sysenter)) { bb->cur_pc = instr_get_raw_bits(call) + instr_length(dcontext, call); if (continue_bb != NULL) *continue_bb = true; return true; } else { /* End this bb now. We set the exit target so that control * skips the vsyscall 'ret' that's executed natively after the * syscall and ends up at the correct place. */ /* FIXME Assigning exit_target causes the fragment to end * with a direct exit stub to the after-call address, which * is fine. If bb->exit_target < bb->start_pc, the future * fragment for exit_target is marked as a trace head which * isn't intended. A potentially undesirable side effect * is that exit_target's fragment can't be included in * trace for start_pc. */ bb->exit_target = instr_get_raw_bits(call) + instr_length(dcontext, call); if (continue_bb != NULL) *continue_bb = false; return true; } } STATS_INC(ignorable_syscalls_failed_sysenter_pattern); /* Pattern match failed but the syscall is ignorable so maybe we * can try shared syscall? */ /* Decrement the stat to prevent double counting. We rarely expect to hit * this case. */ STATS_DEC(ignorable_syscalls); return false; } #elif defined(MACOS) if (instr_get_opcode(bb->instr) == OP_sysenter) { /* To continue after the sysenter we need to go to the ret ibl, as user-mode * sysenter wrappers put the retaddr into edx as the post-kernel continuation. 
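     * (A typical i386 wrapper looks roughly like "pop %edx; mov %esp -> %ecx;
     * sysenter", leaving the return address in %edx -- an illustrative
     * sketch, not a guaranteed layout.)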
*/ bb->exit_type |= LINK_INDIRECT | LINK_RETURN; bb->ibl_branch_type = IBL_RETURN; bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type); LOG(THREAD, LOG_INTERP, 4, "sysenter exit target = " PFX "\n", bb->exit_target); if (continue_bb != NULL) *continue_bb = false; } else if (continue_bb != NULL) *continue_bb = true; return true; #else if (continue_bb != NULL) *continue_bb = true; return true; #endif } #ifdef WINDOWS /* Process a syscall that is executed via shared syscall. */ static void bb_process_shared_syscall(dcontext_t *dcontext, build_bb_t *bb, int sysnum) { ASSERT(DYNAMO_OPTION(shared_syscalls)); DODEBUG({ if (ignorable_system_call(sysnum, bb->instr, NULL)) STATS_INC(ignorable_syscalls); else STATS_INC(optimizable_syscalls); }); BBPRINT(bb, 3, "found %soptimizable system call 0x%04x\n", INTERNAL_OPTION(shared_eq_ignore) ? "ignorable-" : "", sysnum); LOG(THREAD, LOG_INTERP, 3, "ending bb at syscall & NOT removing the interrupt itself\n"); /* Mark the instruction as pointing to shared syscall */ bb->instr->flags |= INSTR_SHARED_SYSCALL; /* this block must be the last one in a trace */ bb->flags |= FRAG_MUST_END_TRACE; /* we redirect all optimizable syscalls to a single shared piece of code. * Once a fragment reaches the shared syscall code, it can be safely * deleted, for example, if the thread is interrupted for a callback and * DR needs to delete fragments for cache management. * * Note that w/shared syscall, syscalls can be executed from TWO * places -- shared_syscall and do_syscall. */ bb->exit_target = shared_syscall_routine(dcontext); /* make sure translation for ending jmp ends up right, mangle will * remove this instruction, so set to NULL so translation does the * right thing */ bb->instr = NULL; } #endif /* WINDOWS */ #ifdef ARM /* This routine walks back to find the IT instr for the current IT block * and the position of instr in the current IT block, and returns whether * instr is the last instruction in the block. 
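 * E.g., an "ITTE NE" block predicates three instructions as NE, NE, EQ;
 * with positions counted from 0, pos 2 would be the last one.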
*/ static bool instr_is_last_in_it_block(instr_t *instr, instr_t **it_out, uint *pos_out) { instr_t *it; int num_instrs; ASSERT(instr != NULL && instr_get_isa_mode(instr) == DR_ISA_ARM_THUMB && instr_is_predicated(instr) && instr_is_app(instr)); /* walk backward to find the IT instruction */ for (it = instr_get_prev(instr), num_instrs = 1; /* meta and app instrs are treated identically here */ it != NULL && num_instrs <= 4 /* max 4 instr in an IT block */; it = instr_get_prev(it)) { if (instr_is_label(it)) continue; if (instr_get_opcode(it) == OP_it) break; num_instrs++; } ASSERT(it != NULL && instr_get_opcode(it) == OP_it); ASSERT(num_instrs <= instr_it_block_get_count(it)); if (it_out != NULL) *it_out = it; if (pos_out != NULL) *pos_out = num_instrs - 1; /* pos starts from 0 */ if (num_instrs == instr_it_block_get_count(it)) return true; return false; } static void adjust_it_instr_for_split(dcontext_t *dcontext, instr_t *it, uint pos) { dr_pred_type_t block_pred[IT_BLOCK_MAX_INSTRS]; uint i, block_count = instr_it_block_get_count(it); byte firstcond[2], mask[2]; DEBUG_DECLARE(bool ok;) ASSERT(pos < instr_it_block_get_count(it) - 1); for (i = 0; i < block_count; i++) block_pred[i] = instr_it_block_get_pred(it, i); DOCHECK(CHKLVL_ASSERTS, { instr_t *instr; for (instr = instr_get_next_app(it), i = 0; instr != NULL; instr = instr_get_next_app(instr)) { ASSERT(instr_is_predicated(instr) && i <= pos); ASSERT(block_pred[i++] == instr_get_predicate(instr)); } }); DEBUG_DECLARE(ok =) instr_it_block_compute_immediates( block_pred[0], (pos > 0) ? block_pred[1] : DR_PRED_NONE, (pos > 1) ? block_pred[2] : DR_PRED_NONE, DR_PRED_NONE, /* at most 3 preds */ &firstcond[0], &mask[0]); ASSERT(ok); DOCHECK(CHKLVL_ASSERTS, { DEBUG_DECLARE(ok =) instr_it_block_compute_immediates( block_pred[pos + 1], (block_count > pos + 2) ? block_pred[pos + 2] : DR_PRED_NONE, (block_count > pos + 3) ? block_pred[pos + 3] : DR_PRED_NONE, DR_PRED_NONE, /* at most 3 preds */ &firstcond[1], &mask[1]); ASSERT(ok); }); /* firstcond should be unchanged */ ASSERT(opnd_get_immed_int(instr_get_src(it, 0)) == firstcond[0]); instr_set_src(it, 1, OPND_CREATE_INT(mask[0])); LOG(THREAD, LOG_INTERP, 3, "ending bb in an IT block & adjusting the IT instruction\n"); /* FIXME i#1669: NYI on passing split it block info to next bb */ ASSERT_NOT_IMPLEMENTED(false); } #endif /* ARM */ static bool bb_process_non_ignorable_syscall(dcontext_t *dcontext, build_bb_t *bb, int sysnum) { BBPRINT(bb, 3, "found non-ignorable system call 0x%04x\n", sysnum); STATS_INC(non_ignorable_syscalls); bb->exit_type |= LINK_NI_SYSCALL; /* destroy the interrupt instruction */ LOG(THREAD, LOG_INTERP, 3, "ending bb at syscall & removing the interrupt itself\n"); /* Indicate that this is a non-ignorable syscall so mangle will remove */ /* FIXME i#1551: maybe we should union int80 and svc as both are inline syscall? 
*/ #ifdef UNIX if (instr_get_opcode(bb->instr) == IF_X86_ELSE(OP_int, OP_svc)) { # if defined(MACOS) && defined(X86) int num = instr_get_interrupt_number(bb->instr); if (num == 0x81 || num == 0x82) { bb->exit_type |= LINK_SPECIAL_EXIT; bb->instr->flags |= INSTR_BRANCH_SPECIAL_EXIT; } else { ASSERT(num == 0x80); # endif /* MACOS && X86 */ bb->exit_type |= LINK_NI_SYSCALL_INT; bb->instr->flags |= INSTR_NI_SYSCALL_INT; # ifdef MACOS } # endif } else #endif bb->instr->flags |= INSTR_NI_SYSCALL; #ifdef ARM /* we assume all conditional syscalls are treated as non-ignorable */ if (instr_is_predicated(bb->instr)) { instr_t *it; uint pos; ASSERT(instr_is_syscall(bb->instr)); bb->svc_pred = instr_get_predicate(bb->instr); if (instr_get_isa_mode(bb->instr) == DR_ISA_ARM_THUMB && !instr_is_last_in_it_block(bb->instr, &it, &pos)) { /* FIXME i#1669: we violate the transparency and clients will see * modified IT instr. We should adjust the IT instr at mangling * stage after client instrumentation, but that is complex. */ adjust_it_instr_for_split(dcontext, it, pos); } } #endif /* Set instr to NULL in order to get translation of exit cti correct. */ bb->instr = NULL; /* this block must be the last one in a trace */ bb->flags |= FRAG_MUST_END_TRACE; return false; /* end bb now */ } /* returns true to indicate "continue bb" and false to indicate "end bb now" */ static inline bool bb_process_syscall(dcontext_t *dcontext, build_bb_t *bb) { int sysnum; #ifdef CLIENT_INTERFACE /* PR 307284: for simplicity do syscall/int processing post-client. * We give up on inlining but we can still use ignorable/shared syscalls * and trace continuation. */ if (bb->pass_to_client && !bb->post_client) return false; #endif #ifdef DGC_DIAGNOSTICS if (TEST(FRAG_DYNGEN, bb->flags) && !is_dyngen_vsyscall(bb->instr_start)) { LOG(THREAD, LOG_INTERP, 1, "WARNING: syscall @ " PFX " in dyngen code!\n", bb->instr_start); } #endif BBPRINT(bb, 4, "interp: syscall @ " PFX "\n", bb->instr_start); check_syscall_method(dcontext, bb->instr); bb->flags |= FRAG_HAS_SYSCALL; /* if we can identify syscall number and it is an ignorable syscall, * we let bb keep going, else we end bb and flag it */ sysnum = find_syscall_num(dcontext, bb->ilist, bb->instr); #ifdef VMX86_SERVER DOSTATS({ if (instr_get_opcode(bb->instr) == OP_int && instr_get_interrupt_number(bb->instr) == VMKUW_SYSCALL_GATEWAY) { STATS_INC(vmkuw_syscall_sites); LOG(THREAD, LOG_SYSCALLS, 2, "vmkuw system call site: #=%d\n", sysnum); } }); #endif BBPRINT(bb, 3, "syscall # is %d\n", sysnum); #ifdef CLIENT_INTERFACE if (sysnum != -1 && instrument_filter_syscall(dcontext, sysnum)) { BBPRINT(bb, 3, "client asking to intercept => pretending syscall # %d is -1\n", sysnum); sysnum = -1; } #endif #ifdef ARM if (sysnum != -1 && instr_is_predicated(bb->instr)) { BBPRINT(bb, 3, "conditional system calls cannot be inlined => " "pretending syscall # %d is -1\n", sysnum); sysnum = -1; } #endif if (sysnum != -1 && DYNAMO_OPTION(ignore_syscalls) && ignorable_system_call(sysnum, bb->instr, NULL) #ifdef X86 /* PR 288101: On Linux we do not yet support inlined sysenter instrs as we * do not have in-cache support for the post-sysenter continuation: we rely * for now on very simple sysenter handling where d_r_dispatch uses asynch_target * to know where to go next. 
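 * (I.e., the sysenter ends its bb and control returns to DR, which picks
 * up the continuation from asynch_target instead of from in-cache code.)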
*/ IF_LINUX(&&instr_get_opcode(bb->instr) != OP_sysenter) #endif /* X86 */ ) { bool continue_bb; if (bb_process_ignorable_syscall(dcontext, bb, sysnum, &continue_bb)) { if (!DYNAMO_OPTION(inline_ignored_syscalls)) continue_bb = false; return continue_bb; } } #ifdef WINDOWS if (sysnum != -1 && DYNAMO_OPTION(shared_syscalls) && optimizable_system_call(sysnum)) { bb_process_shared_syscall(dcontext, bb, sysnum); return false; } #endif /* Fall thru and handle as a non-ignorable syscall. */ return bb_process_non_ignorable_syscall(dcontext, bb, sysnum); } /* Case 3922: for wow64 we treat "call *fs:0xc0" as a system call. * Only sets continue_bb if it returns true. */ static bool bb_process_indcall_syscall(dcontext_t *dcontext, build_bb_t *bb, bool *continue_bb) { ASSERT(continue_bb != NULL); #ifdef WINDOWS if (instr_is_wow64_syscall(bb->instr)) { /* we could check the preceding instrs but we don't bother */ *continue_bb = bb_process_syscall(dcontext, bb); return true; } #endif return false; } /* returns true to indicate "continue bb" and false to indicate "end bb now" */ static inline bool bb_process_interrupt(dcontext_t *dcontext, build_bb_t *bb) { #if defined(DEBUG) || defined(INTERNAL) || defined(WINDOWS) int num = instr_get_interrupt_number(bb->instr); #endif #ifdef CLIENT_INTERFACE /* PR 307284: for simplicity do syscall/int processing post-client. * We give up on inlining but we can still use ignorable/shared syscalls * and trace continuation. * PR 550752: we cannot end at int 0x2d: we live w/ client consequences */ if (bb->pass_to_client && !bb->post_client IF_WINDOWS(&&num != 0x2d)) return false; #endif BBPRINT(bb, 3, "int 0x%x @ " PFX "\n", num, bb->instr_start); #ifdef WINDOWS if (num == 0x2b) { /* interrupt 0x2B signals return from callback */ /* end block here and come back to dynamo to perform interrupt */ bb->exit_type |= LINK_CALLBACK_RETURN; BBPRINT(bb, 3, "ending bb at cb ret & removing the interrupt itself\n"); /* Set instr to NULL in order to get translation of exit cti * correct. mangle will destroy the instruction */ bb->instr = NULL; bb->flags |= FRAG_MUST_END_TRACE; STATS_INC(num_int2b); return false; } else { SYSLOG_INTERNAL_INFO_ONCE("non-syscall, non-int2b 0x%x @ " PFX " from " PFX, num, bb->instr_start, bb->start_pc); } #endif /* WINDOWS */ return true; } /* If the current instr in the BB is an indirect call that can be converted into a * direct call, process it and return true, else, return false. * FIXME PR 288327: put in linux call* to vsyscall page */ static bool bb_process_convertible_indcall(dcontext_t *dcontext, build_bb_t *bb) { #ifdef X86 /* We perform several levels of checking, each increasingly more stringent * and expensive, with a false return should any fail. */ instr_t *instr; opnd_t src0; instr_t *call_instr; int call_src_reg; app_pc callee; bool vsyscall = false; /* Check if this BB can be extended and the instr is a (near) indirect call */ if (instr_get_opcode(bb->instr) != OP_call_ind) return false; /* Check if we have a "mov <imm> -> %reg; call %reg" or a * "mov <imm> -> %reg; call (%reg)" pair. First check for the call. */ /* The 'if' conditions are broken up to make the code more readable * while #ifdef-ing the WINDOWS case. It's still ugly though. */ instr = bb->instr; if (!( # ifdef WINDOWS /* Match 'call (%xdx)' for a post-SP2 indirect call to sysenter. */ (opnd_is_near_base_disp(instr_get_src(instr, 0)) && opnd_get_base(instr_get_src(instr, 0)) == REG_XDX && opnd_get_disp(instr_get_src(instr, 0)) == 0) || # endif /* Match 'call %reg'. 
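          * (e.g., the raw bytes "ff d0" encode "call %eax")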
*/ opnd_is_reg(instr_get_src(instr, 0)))) return false; /* If there's no CTI in the BB, we can check if there are 5+ preceding * bytes and if they could hold a "mov" instruction. */ if (!TEST(FRAG_HAS_DIRECT_CTI, bb->flags) && bb->instr_start - 5 >= bb->start_pc) { byte opcode = *((byte *)bb->instr_start - 5); /* Check the opcode. Do we see a "mov ... -> %reg"? Valid opcodes are in * the 0xb8-0xbf range (Intel IA-32 ISA ref, v.2) and specify the * destination register, i.e., 0xb8 means that %xax is the destination. */ if (opcode < 0xb8 || opcode > 0xbf) return false; } /* Check the previous instruction -- is it really a "mov"? */ src0 = instr_get_src(instr, 0); call_instr = instr; instr = instr_get_prev_expanded(dcontext, bb->ilist, bb->instr); call_src_reg = opnd_is_near_base_disp(src0) ? opnd_get_base(src0) : opnd_get_reg(src0); if (instr == NULL || instr_get_opcode(instr) != OP_mov_imm || opnd_get_reg(instr_get_dst(instr, 0)) != call_src_reg) return false; /* For the general case, we don't try to optimize a call * thru memory -- just check that the call uses a register. */ callee = NULL; if (opnd_is_reg(src0)) { /* Extract the target address. */ callee = (app_pc)opnd_get_immed_int(instr_get_src(instr, 0)); # ifdef WINDOWS # ifdef PROGRAM_SHEPHERDING /* FIXME - is checking for on vsyscall page better or is checking == to * VSYSCALL_BOOTSTRAP_ADDR? Both are hacky. */ if (is_dyngen_vsyscall((app_pc)opnd_get_immed_int(instr_get_src(instr, 0)))) { LOG(THREAD, LOG_INTERP, 4, "Pre-SP2 style indirect call " "to sysenter found at " PFX "\n", bb->instr_start); STATS_INC(num_sysenter_indcalls); vsyscall = true; ASSERT(opnd_get_immed_int(instr_get_src(instr, 0)) == (ptr_int_t)VSYSCALL_BOOTSTRAP_ADDR); ASSERT(!use_ki_syscall_routines()); /* double check our determination */ } else # endif # endif STATS_INC(num_convertible_indcalls); } # ifdef WINDOWS /* Match the "call (%xdx)" to sysenter case for SP2-patched os's. Memory at * address VSYSCALL_BOOTSTRAP_ADDR (0x7ffe0300) holds the address of * KiFastSystemCall or (FIXME - not handled) on older platforms KiIntSystemCall. * FIXME It's unsavory to hard-code 0x7ffe0300, but the constant has little * context in an SP2 os. It's a hold-over from pre-SP2. */ else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER && call_src_reg == REG_XDX && opnd_get_immed_int(instr_get_src(instr, 0)) == (ptr_int_t)VSYSCALL_BOOTSTRAP_ADDR) { /* Extract the target address. We expect that the memory read using the * value in the immediate field is ok as it's the vsyscall page * which 1) cannot be made unreadable and 2) cannot be made writable so * the stored value will not change. Of course, it's possible that the * os could change the page contents. */ callee = (app_pc) * ((ptr_uint_t *)opnd_get_immed_int(instr_get_src(instr, 0))); if (get_app_sysenter_addr() == NULL) { /* For the first call* we've yet to decode an app syscall, yet we * cannot have later recreations have differing behavior, so we must * handle that case (even though it doesn't matter performance-wise * as the first call* is usually in runtime init code that's * executed once). So we do a raw byte compare to: * ntdll!KiFastSystemCall: * 7c82ed50 8bd4 mov xdx,xsp * 7c82ed52 0f34 sysenter */ uint raw; if (!d_r_safe_read(callee, sizeof(raw), &raw) || raw != 0x340fd48b) callee = NULL; } else { /* The callee should be a 2 byte "mov %xsp -> %xdx" followed by the * sysenter -- check the sysenter's address as 2 bytes past the callee. 
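             * (0x340fd48b above is the little-endian form of those same
             * bytes: 8b d4 = mov xdx,xsp and 0f 34 = sysenter.)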
*/ if (callee + 2 != get_app_sysenter_addr()) callee = NULL; } vsyscall = (callee != NULL); ASSERT(use_ki_syscall_routines()); /* double check our determination */ DODEBUG({ if (callee == NULL) ASSERT_CURIOSITY(false && "call* to vsyscall unexpected mismatch"); else { LOG(THREAD, LOG_INTERP, 4, "Post-SP2 style indirect call " "to sysenter found at " PFX "\n", bb->instr_start); STATS_INC(num_sysenter_indcalls); } }); } # endif /* Check if register dataflow matched and we were able to extract * the callee address. */ if (callee == NULL) return false; if (vsyscall) { /* Case 8917: abandon coarse-grainness in favor of performance */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_indcall); } LOG(THREAD, LOG_INTERP, 4, "interp: possible convertible" " indirect call from " PFX " to " PFX "\n", bb->instr_start, callee); if (leave_call_native(callee) || must_not_be_entered(callee)) { BBPRINT(bb, 3, " NOT inlining indirect call to " PFX "\n", callee); /* Case 8711: coarse-grain can't handle non-exit cti */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); ASSERT_CURIOSITY_ONCE(!vsyscall && "leaving call* to vsyscall"); /* no need for bb_add_native_direct_xfer() b/c it's already indirect */ return true; /* keep bb going, w/o inlining call */ } if (bb->follow_direct && !must_not_be_entered(callee) && bb->num_elide_call < DYNAMO_OPTION(max_elide_call) && (DYNAMO_OPTION(elide_back_calls) || bb->cur_pc <= callee)) { /* FIXME This is identical to the code for evaluating a * direct call's callee. If such code appears in another * (3rd) place, we should outline it. * FIXME: use follow_direct_call() */ if (vsyscall) { /* As a flag to allow our xfer from now-non-coarse to coarse * (for vsyscall-in-ntdll) we pre-emptively mark as has-syscall. */ ASSERT(!TEST(FRAG_HAS_SYSCALL, bb->flags)); bb->flags |= FRAG_HAS_SYSCALL; } if (check_new_page_jmp(dcontext, bb, callee)) { if (vsyscall) /* Restore */ bb->flags &= ~FRAG_HAS_SYSCALL; bb->num_elide_call++; STATS_INC(total_elided_calls); STATS_TRACK_MAX(max_elided_calls, bb->num_elide_call); bb->cur_pc = callee; /* FIXME: when using follow_direct_call don't forget to set this */ call_instr->flags |= INSTR_IND_CALL_DIRECT; BBPRINT(bb, 4, " continuing in callee at " PFX "\n", bb->cur_pc); return true; /* keep bb going */ } if (vsyscall) { /* Case 8917: Restore, just in case, though we certainly expect to have * this flag set as soon as we decode a few more instrs and hit the * syscall itself -- but for pre-sp2 we currently could be elsewhere on * the same page, so let's be safe here. */ bb->flags &= ~FRAG_HAS_SYSCALL; } } /* FIXME: we're also not converting to a direct call - was this intended? 
*/ BBPRINT(bb, 3, " NOT following indirect call from " PFX " to " PFX "\n", bb->instr_start, callee); DODEBUG({ if (vsyscall) { DO_ONCE({ /* Case 9095: don't complain so loudly if user asked for no elision */ if (DYNAMO_OPTION(max_elide_call) <= 2) SYSLOG_INTERNAL_WARNING("leaving call* to vsyscall"); else ASSERT_CURIOSITY(false && "leaving call* to vsyscall"); }); } }); ; #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif /* X86 */ return false; /* stop bb */ } /* FIXME i#1668, i#2974: NYI on ARM/AArch64 */ #ifdef X86 /* if we make the IAT sections unreadable we will need to map to proper location */ static inline app_pc read_from_IAT(app_pc iat_reference) { /* FIXME: we should have looked up where the real IAT should be at * the time of checking whether is_in_IAT */ return *(app_pc *)iat_reference; } /* returns whether target is an IAT of a module that we convert. Note * users still have to check the referred to value to verify targeting * a native module. */ static bool is_targeting_convertible_IAT(dcontext_t *dcontext, instr_t *instr, app_pc *iat_reference /* OUT */) { /* FIXME: we could give up on optimizing a particular module, * if too many writes to its IAT are found, * even 1 may be too much to handle! */ /* We only allow constant address, * any registers used for effective address calculation * can not be guaranteed to be constant dynamically. */ /* FIXME: yet a 'call %reg' if that value is an export would be a * good sign that we should go backwards and look for a possible * mov IAT[func] -> %reg and then optimize that as well - case 1948 */ app_pc memory_reference = NULL; opnd_t opnd = instr_get_target(instr); LOG(THREAD, LOG_INTERP, 4, "is_targeting_convertible_IAT: "); /* A typical example of a proper call * ff 15 8810807c call dword ptr [kernel32+0x1088 (7c801088)] * where * [7c801088] = 7c90f04c ntdll!RtlAnsiStringToUnicodeString * * The ModR/M byte for a displacement only with no SIB should be * 15 for CALL, 25 for JMP, (no far versions for IAT) */ if (opnd_is_near_base_disp(opnd)) { /* FIXME PR 253930: pattern-match x64 IAT calls */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); memory_reference = (app_pc)(ptr_uint_t)opnd_get_disp(opnd); /* now should check all other fields */ if (opnd_get_base(opnd) != REG_NULL || opnd_get_index(opnd) != REG_NULL) { /* this is not a pure memory reference, can't be IAT */ return false; } ASSERT(opnd_get_scale(opnd) == 0); } else { return false; } LOG(THREAD, LOG_INTERP, 3, "is_targeting_convertible_IAT: memory_reference " PFX "\n", memory_reference); /* FIXME: if we'd need some more additional structures those can * be looked up in a separate hashtable based on the IAT base, or * we'd have to extend the vmareas with custom fields */ ASSERT(DYNAMO_OPTION(IAT_convert)); if (vmvector_overlap(IAT_areas, memory_reference, memory_reference + 1)) { /* IAT has to be in the same module as current instruction, * but even in the unlikely reference by address from another * module there is really no problem, so not worth checking */ ASSERT_CURIOSITY(get_module_base(instr->bytes) == get_module_base(memory_reference)); /* FIXME: now that we know it is in IAT/GOT, * we have to READ the contents and return that * safely to the caller so they can convert accordingly */ /* FIXME: we would want to add the IAT section to the vmareas * of a region that has a converted block. 
Then on a write to * IAT we can flush efficiently only blocks affected by a * particular module, for a first hack though flushing * everything on a hooker will do. */ *iat_reference = memory_reference; return true; } else { /* plain global function * e.g. ntdll!RtlUnicodeStringToAnsiString+0x4c: * ff15c009917c call dword ptr [ntdll!RtlAllocateStringRoutine (7c9109c0)] */ return false; } } #endif /* X86 */ /* If the current instr in the BB is an indirect call through IAT that * can be converted into a direct call, process it and return true, * else, return false. */ static bool bb_process_IAT_convertible_indjmp(dcontext_t *dcontext, build_bb_t *bb, bool *elide_continue) { #ifdef X86 app_pc iat_reference; app_pc target; ASSERT(DYNAMO_OPTION(IAT_convert)); /* Check if the instr is a (near) indirect jump */ if (instr_get_opcode(bb->instr) != OP_jmp_ind) { ASSERT_CURIOSITY(false && "far ind jump"); return false; /* not matching, stop bb */ } if (!is_targeting_convertible_IAT(dcontext, bb->instr, &iat_reference)) { DOSTATS({ if (EXIT_IS_IND_JMP_PLT(bb->exit_type)) { /* see how often we mark as likely a PLT a JMP which in * fact is not going through IAT */ STATS_INC(num_indirect_jumps_PLT_not_IAT); LOG(THREAD, LOG_INTERP, 3, "bb_process_IAT_convertible_indjmp: indirect jmp not PLT instr=" PFX "\n", bb->instr->bytes); } }); return false; /* not matching, stop bb */ } target = read_from_IAT(iat_reference); DOLOG(4, LOG_INTERP, { char name[MAXIMUM_SYMBOL_LENGTH]; print_symbolic_address(target, name, sizeof(name), false); LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: target=" PFX " %s\n", target, name); }); STATS_INC(num_indirect_jumps_IAT); DOSTATS({ if (!EXIT_IS_IND_JMP_PLT(bb->exit_type)) { /* count any other known uses for an indirect jump to go * through the IAT other than PLT uses, although a block * reaching max_elide_call would prevent the above * match */ STATS_INC(num_indirect_jumps_IAT_not_PLT); /* FIXME: case 6459 for further inquiry */ LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: indirect jmp not PLT target=" PFX "\n", target); } }); if (must_not_be_elided(target)) { ASSERT_NOT_TESTED(); BBPRINT(bb, 3, " NOT inlining indirect jmp to must_not_be_elided " PFX "\n", target); return false; /* do not convert indirect jump, will stop bb */ } /* Verify not targeting native exec DLLs, note that the IATs of * any module may have imported a native DLL. Note it may be * possible to optimize with a range check on IAT subregions, but * this check isn't much slower. */ /* IAT_elide should definitely not touch native_exec modules. * * FIXME: we also prevent IAT_convert from optimizing imports in * native_exec_list DLLs, although we could let that convert to a * direct jump and require native_exec_dircalls to be always on to * intercept those jmps. 
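     * (If we did convert, the resulting direct jmp into a native module would
     * have to be caught by the native_exec_dircalls machinery rather than by
     * the indirect-branch checks, hence the always-on requirement above.)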
*/ if (DYNAMO_OPTION(native_exec) && is_native_pc(target)) { BBPRINT(bb, 3, " NOT inlining indirect jump to native exec module " PFX "\n", target); STATS_INC(num_indirect_jumps_IAT_native); return false; /* do not convert indirect jump, stop bb */ } /* mangle mostly as such as direct jumps would be mangled in * bb_process_ubr(dcontext, bb) but note bb->instr has already * been appended so has to reverse some of its actions */ /* pretend never saw an indirect JMP, we'll either add a new direct JMP or we'll just continue in target */ instrlist_remove(bb->ilist, bb->instr); /* bb->instr has been appended already */ instr_destroy(dcontext, bb->instr); bb->instr = NULL; if (DYNAMO_OPTION(IAT_elide)) { /* try to elide just as a direct jmp would have been elided */ /* We could have used follow_direct_call instead since * commonly this really is a disguised CALL*. Yet for PLT use * of the form of CALL PLT[foo]; JMP* IAT[foo] we would have * already counted the CALL. If we have tail call elimination * that converts a CALL* into a JMP* it is also OK to treat as * a JMP instead of a CALL just as if sharing tails. */ if (follow_direct_jump(dcontext, bb, target)) { LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: eliding jmp* target=" PFX "\n", target); STATS_INC(num_indirect_jumps_IAT_elided); *elide_continue = true; /* do not stop bb */ return true; /* converted indirect to direct */ } } /* otherwise convert to direct jump without eliding */ /* we set bb->instr to NULL so unlike bb_process_ubr * we get the final exit_target added by build_bb_ilist * FIXME: case 85: which will work only when we're using bb->mangle_ilist * FIXME: what are callers supposed to see when we do NOT mangle? */ LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: converting jmp* target=" PFX "\n", target); STATS_INC(num_indirect_jumps_IAT_converted); /* end basic block with a direct JMP to target */ bb->exit_target = target; *elide_continue = false; /* matching, but should stop bb */ return true; /* matching */ #elif defined(AARCHXX) /* FIXME i#1551, i#1569: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(false); return false; #endif /* X86/ARM */ } /* Returns true if the current instr in the BB is an indirect call * through IAT that can be converted into a direct call, process it * and sets elide_continue. Otherwise function return false. * OUT elide_continue is set when bb building should continue in target, * and not set when bb building should be stopped. */ static bool bb_process_IAT_convertible_indcall(dcontext_t *dcontext, build_bb_t *bb, bool *elide_continue) { #ifdef X86 app_pc iat_reference; app_pc target; ASSERT(DYNAMO_OPTION(IAT_convert)); /* FIXME: the code structure is the same as * bb_process_IAT_convertible_indjmp, could fuse the two */ /* We perform several levels of checking, each increasingly more stringent * and expensive, with a false return should any fail. 
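     * (Cheapest first: the opcode test, then the IAT range lookup inside
     * is_targeting_convertible_IAT(), and only then the actual read of the
     * IAT slot and the checks on the resolved target.)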
*/ /* Check if the instr is a (near) indirect call */ if (instr_get_opcode(bb->instr) != OP_call_ind) { ASSERT_CURIOSITY(false && "far call"); return false; /* not matching, stop bb */ } if (!is_targeting_convertible_IAT(dcontext, bb->instr, &iat_reference)) { return false; /* not matching, stop bb */ } target = read_from_IAT(iat_reference); DOLOG(4, LOG_INTERP, { char name[MAXIMUM_SYMBOL_LENGTH]; print_symbolic_address(target, name, sizeof(name), false); LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indcall: target=" PFX " %s\n", target, name); }); STATS_INC(num_indirect_calls_IAT); /* mangle mostly as such as direct calls are mangled with * bb_process_call_direct(dcontext, bb) */ if (leave_call_native(target) || must_not_be_entered(target)) { ASSERT_NOT_TESTED(); BBPRINT(bb, 3, " NOT inlining indirect call to leave_call_native " PFX "\n", target); return false; /* do not convert indirect call, stop bb */ } /* Verify not targeting native exec DLLs, note that the IATs of * any module may have imported a native DLL. Note it may be * possible to optimize with a range check on IAT subregions, but * this check isn't much slower. */ if (DYNAMO_OPTION(native_exec) && is_native_pc(target)) { BBPRINT(bb, 3, " NOT inlining indirect call to native exec module " PFX "\n", target); STATS_INC(num_indirect_calls_IAT_native); return false; /* do not convert indirect call, stop bb */ } /* mangle_indirect_call and calculate return address as of * bb->instr and will remove bb->instr * FIXME: it would have been * better to replace in instrlist with a direct call and have * mangle_{in,}direct_call use other than the raw bytes, but this for now does the * job. */ bb->instr->flags |= INSTR_IND_CALL_DIRECT; if (DYNAMO_OPTION(IAT_elide)) { /* try to elide just as a direct call would have been elided */ if (follow_direct_call(dcontext, bb, target)) { LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indcall: eliding call* flags=0x%08x " "target=" PFX "\n", bb->instr->flags, target); STATS_INC(num_indirect_calls_IAT_elided); *elide_continue = true; /* do not stop bb */ return true; /* converted indirect to direct */ } } /* otherwise convert to direct call without eliding */ LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indcall: converting call* flags=0x%08x target=" PFX "\n", bb->instr->flags, target); STATS_INC(num_indirect_calls_IAT_converted); /* bb->instr has been appended already, and will get removed by * mangle_indirect_call. We don't need to set to NULL, since this * instr is a CTI and the final jump's translation target should * still be the original indirect call. */ bb->exit_target = target; /* end basic block with a direct CALL to target. With default * options it should get mangled to a PUSH; JMP */ *elide_continue = false; /* matching, but should stop bb */ return true; /* converted indirect to direct */ #elif defined(AARCHXX) /* FIXME i#1551, i#1569: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(false); return false; #endif /* X86/ARM */ } /* Called on instructions that save the FPU state */ static void bb_process_float_pc(dcontext_t *dcontext, build_bb_t *bb) { /* i#698: for instructions that save the floating-point state * (e.g., fxsave), we go back to d_r_dispatch to translate the fp pc. * We rule out being in a trace (and thus a potential alternative * would be to use a FRAG_ flag). 
     * These are rare instructions, so this shouldn't have a significant
     * perf impact: except we've been hitting libm code that uses fnstenv
     * and is not rare, so we have non-inlined translation under an option
     * for now.
     */
    if (DYNAMO_OPTION(translate_fpu_pc)) {
        bb->exit_type |= LINK_SPECIAL_EXIT;
        bb->flags |= FRAG_CANNOT_BE_TRACE;
    }
    /* If we inline the pc update, we can't persist.  Simplest to keep fine-grained. */
    bb->flags &= ~FRAG_COARSE_GRAIN;
}

static bool
instr_will_be_exit_cti(instr_t *inst)
{
    /* can't use instr_is_exit_cti() on pre-mangled instrs */
    return (instr_is_app(inst) && instr_is_cti(inst) &&
            (!instr_is_near_call_direct(inst) ||
             !leave_call_native(instr_get_branch_target_pc(inst)))
            /* PR 239470: ignore wow64 syscall, which is an ind call */
            IF_WINDOWS(&&!instr_is_wow64_syscall(inst)));
}

#ifdef CLIENT_INTERFACE
/* PR 215217: check syscall restrictions */
static bool
client_check_syscall(instrlist_t *ilist, instr_t *inst, bool *found_syscall,
                     bool *found_int)
{
    int op_int = IF_X86_ELSE(OP_int, OP_svc);
    /* We do consider the wow64 call* a syscall here (it is both
     * a syscall and a call*: PR 240258).
     */
    if (instr_is_syscall(inst) || instr_get_opcode(inst) == op_int) {
        if (instr_is_syscall(inst) && found_syscall != NULL)
            *found_syscall = true;
        /* Xref PR 313869 - we should be ignoring int 3 here. */
        if (instr_get_opcode(inst) == op_int && found_int != NULL)
            *found_int = true;
        /* For linux an ignorable syscall is not a problem.  Our
         * pre-syscall-exit jmp is added post client mangling so should
         * be robust.
         * FIXME: now that we have -no_inline_ignored_syscalls should
         * we assert on ignorable also?  Probably we'd have to have
         * an exception for the middle of a trace?
         */
        if (IF_UNIX(TEST(INSTR_NI_SYSCALL, inst->flags))
                /* PR 243391: only block-ending interrupt 2b matters */
                IF_WINDOWS(instr_is_syscall(inst) ||
                           ((instr_get_opcode(inst) == OP_int &&
                             instr_get_interrupt_number(inst) == 0x2b)))) {
            /* This check means we shouldn't hit the exit_type flags
             * check below but we leave it in place in case we add
             * other flags in future
             */
            if (inst != instrlist_last(ilist)) {
                CLIENT_ASSERT(false, "a syscall or interrupt must terminate the block");
                return false;
            }
            /* should we forcibly delete the subsequent instrs?
             * or the client has to deal w/ bad behavior in release build?
             */
        }
    }
    return true;
}

/* Pass bb to client, and afterward check for criteria we require and rescan for
 * eflags and other flags that might have changed.
 * Returns true normally; returns false to indicate "go native".
 */
static bool
client_process_bb(dcontext_t *dcontext, build_bb_t *bb)
{
    dr_emit_flags_t emitflags = DR_EMIT_DEFAULT;
    instr_t *inst;
    bool found_exit_cti = false;
    bool found_syscall = false;
    bool found_int = false;
#    ifdef ANNOTATIONS
    app_pc trailing_annotation_pc = NULL, instrumentation_pc = NULL;
    bool found_instrumentation_pc = false;
    instr_t *annotation_label = NULL;
#    endif
    instr_t *last_app_instr = NULL;

    /* This routine is called by more than just bb builder, also used
     * for recreating state, so only call if caller requested it
     * (usually that coincides w/ bb->app_interp being set, but not
     * when recreating state on a fault (PR 214962)).
     * FIXME: hot patches shouldn't be injected during state recreations;
     * does predicating on bb->app_interp take care of this issue?
     */
    if (!bb->pass_to_client)
        return true;
    /* i#995: DR may build a bb with one invalid instruction, which won't be
     * passed to the client.
     * FIXME: i#1000, we should present the bb to the client.
     * i#1000-c#1: the bb->ilist could be empty.
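     * The two guards just below conservatively skip the client callback for
     * such degenerate blocks: a fully empty ilist, and a single
     * opcode-invalid (level-0) instr built only so the decode fault can be
     * raised.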
*/ if (instrlist_first(bb->ilist) == NULL) return true; if (!instr_opcode_valid(instrlist_first(bb->ilist)) && /* For -fast_client_decode we can have level 0 instrs so check * to ensure this is a single-instr bb that was built just to * raise the fault for us. * XXX i#1000: shouldn't we pass this to the client? It might not handle an * invalid instr properly though. */ instrlist_first(bb->ilist) == instrlist_last(bb->ilist)) { return true; } /* Call the bb creation callback(s) */ if (!instrument_basic_block(dcontext, /* DrMem#1735: pass app pc, not selfmod copy pc */ (bb->pretend_pc == NULL ? bb->start_pc : bb->pretend_pc), bb->ilist, bb->for_trace, !bb->app_interp, &emitflags)) { /* although no callback was called we must process syscalls/ints (PR 307284) */ } if (bb->for_cache && TEST(DR_EMIT_GO_NATIVE, emitflags)) { LOG(THREAD, LOG_INTERP, 2, "client requested that we go native\n"); SYSLOG_INTERNAL_INFO("thread " TIDFMT " is going native at client request", d_r_get_thread_id()); /* we leverage the existing native_exec mechanism */ dcontext->native_exec_postsyscall = bb->start_pc; dcontext->next_tag = BACK_TO_NATIVE_AFTER_SYSCALL; dynamo_thread_not_under_dynamo(dcontext); return false; } bb->post_client = true; /* FIXME: instrumentor may totally mess us up -- our flags * or syscall info might be wrong. xref PR 215217 */ /* PR 215217, PR 240265: * We need to check for client changes that require a new exit * target. We can't practically analyze the instrlist to decipher * the exit, so we'll search backwards and require that the last * cti is the exit cti. Typically, the last instruction in the * block should be the exit. Post-mbr and post-syscall positions * are particularly fragile, as our mangling code sets state up for * the exit that could be messed up by instrs inserted after the * mbr/syscall. We thus disallow such instrs (except for * dr_insert_mbr_instrumentation()). xref cases 10503, 10782, 10784 * * Here's what we support: * - more than one exit cti; all but the last must be a ubr * - an exit cbr or call must be the final instr in the block * - only one mbr; must be the final instr in the block and the exit target * - clients can't change the exit of blocks ending in a syscall * (or int), and the syscall must be the final instr in the block; * client can, however, remove the syscall and then add a different exit * - client can't add a translation target that's outside of the original * source code bounds, or else our cache consistency breaks down * (the one exception to this is that a jump can translate to its target) */ /* we set to NULL to have a default of fall-through */ bb->exit_target = NULL; bb->exit_type = 0; /* N.B.: we're walking backward */ for (inst = instrlist_last(bb->ilist); inst != NULL; inst = instr_get_prev(inst)) { if (!instr_opcode_valid(inst)) continue; # ifdef X86 if (!d_r_is_avx512_code_in_use()) { if (ZMM_ENABLED()) { if (instr_may_write_zmm_register(inst)) d_r_set_avx512_code_in_use(true); } } # endif if (instr_is_cti(inst) && inst != instrlist_last(bb->ilist)) { /* PR 213005: coarse_units can't handle added ctis (meta or not) * since decode_fragment(), used for state recreation, can't * distinguish from exit cti. 
* i#665: we now support intra-fragment meta ctis * to make persistence usable for clients */ if (!opnd_is_instr(instr_get_target(inst)) || instr_is_app(inst)) { bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_client); } } if (instr_is_meta(inst)) { # ifdef ANNOTATIONS /* Save the trailing_annotation_pc in case a client truncated the bb there. */ if (is_annotation_label(inst) && last_app_instr == NULL) { dr_instr_label_data_t *label_data = instr_get_label_data_area(inst); trailing_annotation_pc = GET_ANNOTATION_APP_PC(label_data); instrumentation_pc = GET_ANNOTATION_INSTRUMENTATION_PC(label_data); annotation_label = inst; } # endif continue; } # ifdef ANNOTATIONS if (instrumentation_pc != NULL && !found_instrumentation_pc && instr_get_translation(inst) == instrumentation_pc) found_instrumentation_pc = true; # endif /* in case bb was truncated, find last non-meta fall-through */ if (last_app_instr == NULL) last_app_instr = inst; /* PR 215217: client should not add new source code regions, else our * cache consistency (both page prot and selfmod) will fail */ ASSERT(!bb->for_cache || bb->vmlist != NULL); /* For selfmod recreation we don't check vmareas so we don't have vmlist. * We live w/o the checks there. */ CLIENT_ASSERT( !bb->for_cache || vm_list_overlaps(dcontext, bb->vmlist, instr_get_translation(inst), instr_get_translation(inst) + 1) || (instr_is_ubr(inst) && opnd_is_pc(instr_get_target(inst)) && instr_get_translation(inst) == opnd_get_pc(instr_get_target(inst))) /* the displaced code and jmp return from intercept buffer * has translation fields set to hooked app routine */ IF_WINDOWS(|| dr_fragment_app_pc(bb->start_pc) != bb->start_pc), "block's app sources (instr_set_translation() targets) " "must remain within original bounds"); # ifdef AARCH64 if (instr_get_opcode(inst) == OP_isb) { CLIENT_ASSERT(inst == instrlist_last(bb->ilist), "OP_isb must be last instruction in block"); } # endif /* PR 307284: we didn't process syscalls and ints pre-client * so do so now to get bb->flags and bb->exit_type set */ if (instr_is_syscall(inst) || instr_get_opcode(inst) == IF_X86_ELSE(OP_int, OP_svc)) { instr_t *tmp = bb->instr; bb->instr = inst; if (instr_is_syscall(bb->instr)) bb_process_syscall(dcontext, bb); else if (instr_get_opcode(bb->instr) == IF_X86_ELSE(OP_int, OP_svc)) { /* non-syscall int */ bb_process_interrupt(dcontext, bb); } if (inst != instrlist_last(bb->ilist)) bb->instr = tmp; } /* ensure syscall/int2b terminates block */ client_check_syscall(bb->ilist, inst, &found_syscall, &found_int); if (instr_will_be_exit_cti(inst)) { if (!found_exit_cti) { /* We're about to clobber the exit_type and could lose any * special flags set above, even if the client doesn't change * the exit target. We undo such flags after this ilist walk * to support client removal of syscalls/ints. * EXIT_IS_IND_JMP_PLT() is used for -IAT_{convert,elide}, which * is off by default for CI; it's also used for native_exec, * but we're not sure if we want to support that with CI. 
* xref case 10846 and i#198 */ CLIENT_ASSERT( !TEST(~(LINK_DIRECT | LINK_INDIRECT | LINK_CALL | LINK_RETURN | LINK_JMP | LINK_NI_SYSCALL_ALL | LINK_SPECIAL_EXIT IF_WINDOWS(| LINK_CALLBACK_RETURN)), bb->exit_type) && !EXIT_IS_IND_JMP_PLT(bb->exit_type), "client unsupported block exit type internal error"); found_exit_cti = true; bb->instr = inst; if ((instr_is_near_ubr(inst) || instr_is_near_call_direct(inst)) /* conditional OP_bl needs the cbr code below */ IF_ARM(&&!instr_is_cbr(inst))) { CLIENT_ASSERT(instr_is_near_ubr(inst) || inst == instrlist_last(bb->ilist) || /* for elision we assume calls are followed * by their callee target code */ DYNAMO_OPTION(max_elide_call) > 0, "an exit call must terminate the block"); /* a ubr need not be the final instr */ if (inst == last_app_instr) { bb->exit_target = instr_get_branch_target_pc(inst); bb->exit_type = instr_branch_type(inst); } } else if (instr_is_mbr(inst) || instr_is_far_cti(inst) IF_ARM(/* mode-switch direct is treated as indirect */ || instr_get_opcode(inst) == OP_blx)) { CLIENT_ASSERT(inst == instrlist_last(bb->ilist), "an exit mbr or far cti must terminate the block"); bb->exit_type = instr_branch_type(inst); # ifdef ARM if (instr_get_opcode(inst) == OP_blx) bb->ibl_branch_type = IBL_INDCALL; else # endif bb->ibl_branch_type = get_ibl_branch_type(inst); bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type); } else { ASSERT(instr_is_cbr(inst)); CLIENT_ASSERT(inst == instrlist_last(bb->ilist), "an exit cbr must terminate the block"); /* A null exit target specifies a cbr (see below). */ bb->exit_target = NULL; bb->exit_type = 0; instr_exit_branch_set_type(bb->instr, instr_branch_type(inst)); } /* since we're walking backward, at the first exit cti * we can check for post-cti code */ if (inst != instrlist_last(bb->ilist)) { if (TEST(FRAG_COARSE_GRAIN, bb->flags)) { /* PR 213005: coarse can't handle code beyond ctis */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_client); } /* decode_fragment can't handle code beyond ctis */ if (!instr_is_near_call_direct(inst) || DYNAMO_OPTION(max_elide_call) == 0) bb->flags |= FRAG_CANNOT_BE_TRACE; } } /* Case 10784: Clients can confound trace building when they * introduce more than one exit cti; we'll just disable traces * for these fragments. * PR 215179: we're currently later marking them no-trace for pad_jmps * reasons as well. */ else { CLIENT_ASSERT(instr_is_near_ubr(inst) || (instr_is_near_call_direct(inst) && /* for elision we assume calls are followed * by their callee target code */ DYNAMO_OPTION(max_elide_call) > 0), "a second exit cti must be a ubr"); if (!instr_is_near_call_direct(inst) || DYNAMO_OPTION(max_elide_call) == 0) bb->flags |= FRAG_CANNOT_BE_TRACE; /* our cti check above should have already turned off coarse */ ASSERT(!TEST(FRAG_COARSE_GRAIN, bb->flags)); } } } /* To handle the client modifying syscall numbers we cannot inline * syscalls in the middle of a bb. */ ASSERT(!DYNAMO_OPTION(inline_ignored_syscalls)); ASSERT((TEST(FRAG_HAS_SYSCALL, bb->flags) && found_syscall) || (!TEST(FRAG_HAS_SYSCALL, bb->flags) && !found_syscall)); IF_WINDOWS(ASSERT(!TEST(LINK_CALLBACK_RETURN, bb->exit_type) || found_int)); /* Note that we do NOT remove, or set, FRAG_HAS_DIRECT_CTI based on * client modifications: setting it for a selfmod fragment could * result in an infinite loop, and it is mainly used for elision, which we * are not doing for client ctis. 
     * Clients are not supposed to add new app source regions (PR 215217).
     */

    /* Client might have truncated: re-set fall-through, accounting for annotations. */
    if (last_app_instr != NULL) {
        bool adjusted_cur_pc = false;
        app_pc xl8 = instr_get_translation(last_app_instr);
#    ifdef ANNOTATIONS
        if (annotation_label != NULL) {
            if (found_instrumentation_pc) {
                /* i#1613: if the last app instruction precedes an annotation, extend the
                 * translation footprint of `bb` to include the annotation (such that
                 * the next bb starts after the annotation, avoiding duplication).
                 */
                bb->cur_pc = trailing_annotation_pc;
                adjusted_cur_pc = true;
                LOG(THREAD, LOG_INTERP, 3,
                    "BB ends immediately prior to an annotation. "
                    "Setting `bb->cur_pc` (for fall-through) to " PFX " so that the "
                    "annotation will be included.\n",
                    bb->cur_pc);
            } else {
                /* i#1613: the client removed the app instruction prior to an annotation.
                 * We infer that the client wants to skip the annotation.  Remove it now.
                 */
                instr_t *annotation_next = instr_get_next(annotation_label);
                instrlist_remove(bb->ilist, annotation_label);
                instr_destroy(dcontext, annotation_label);
                if (is_annotation_return_placeholder(annotation_next)) {
                    instrlist_remove(bb->ilist, annotation_next);
                    instr_destroy(dcontext, annotation_next);
                }
            }
        }
#    endif
#    if defined(WINDOWS) && !defined(STANDALONE_DECODER)
        /* i#1632: if the last app instruction was taken from an intercept because it was
         * occluded by the corresponding hook, `bb->cur_pc` should point to the original
         * app pc (where that instruction was copied from).  Cannot use `decode_next_pc()`
         * on the original app pc because it is now in the middle of the hook.
         */
        if (!adjusted_cur_pc && could_be_hook_occluded_pc(xl8)) {
            app_pc intercept_pc = get_intercept_pc_from_app_pc(
                xl8, true /* occlusions only */, false /* exclude start */);
            if (intercept_pc != NULL) {
                app_pc next_intercept_pc = decode_next_pc(dcontext, intercept_pc);
                bb->cur_pc = xl8 + (next_intercept_pc - intercept_pc);
                adjusted_cur_pc = true;
                LOG(THREAD, LOG_INTERP, 3,
                    "BB ends in the middle of an intercept. "
                    "Offsetting `bb->cur_pc` (for fall-through) to " PFX " in parallel "
                    "to intercept instr at " PFX "\n",
                    bb->cur_pc, intercept_pc);
            }
        }
#    endif
        /* We do not take instr_length of what the client put in, but rather
         * the length of the translation target
         */
        if (!adjusted_cur_pc) {
            bb->cur_pc = decode_next_pc(dcontext, xl8);
            LOG(THREAD, LOG_INTERP, 3, "setting cur_pc (for fall-through) to " PFX "\n",
                bb->cur_pc);
        }

        /* don't set bb->instr if last instr is still syscall/int.
         * FIXME: I'm not 100% convinced the logic here covers everything
         * build_bb_ilist does.
         * FIXME: what about if last instr was invalid, or if client adds
         * some invalid instrs: xref bb_process_invalid_instr()
         */
        if (bb->instr != NULL || (!found_int && !found_syscall))
            bb->instr = last_app_instr;
    } else
        bb->instr = NULL; /* no app instrs left */

    /* PR 215217: re-scan for accurate eflags.
     * FIXME: should we not do eflags tracking while decoding, then, and always
     * do it afterward?
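     * Note that the re-scan just below recomputes bb->eflags from scratch
     * over the (possibly client-modified) ilist via forward_eflags_analysis(),
     * rather than trying to incrementally patch the value computed during
     * decoding.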
     */
    /* for -fast_client_decode, we don't support the client changing the app code */
    if (!INTERNAL_OPTION(fast_client_decode)) {
        bb->eflags =
            forward_eflags_analysis(dcontext, bb->ilist, instrlist_first(bb->ilist));
    }

    if (TEST(DR_EMIT_STORE_TRANSLATIONS, emitflags)) {
        /* PR 214962: let client request storage instead of recreation */
        bb->flags |= FRAG_HAS_TRANSLATION_INFO;
        /* if we didn't have record on from start, can't store translation info */
        CLIENT_ASSERT(!INTERNAL_OPTION(fast_client_decode),
                      "-fast_client_decode not compatible with "
                      "DR_EMIT_STORE_TRANSLATIONS");
        ASSERT(bb->record_translation && bb->full_decode);
    }

    if (DYNAMO_OPTION(coarse_enable_freeze)) {
        /* If we're not persisting, ignore the presence or absence of the flag
         * so we avoid undoing savings from -opt_memory with a tool that
         * doesn't support persistence.
         */
        if (!TEST(DR_EMIT_PERSISTABLE, emitflags)) {
            bb->flags &= ~FRAG_COARSE_GRAIN;
            STATS_INC(coarse_prevent_client);
        }
    }

    if (TEST(DR_EMIT_MUST_END_TRACE, emitflags)) {
        /* i#848: let client terminate traces */
        bb->flags |= FRAG_MUST_END_TRACE;
    }
    return true;
}
#endif /* CLIENT_INTERFACE */

#ifdef DR_APP_EXPORTS
static void
mangle_pre_client(dcontext_t *dcontext, build_bb_t *bb)
{
    if (bb->start_pc == (app_pc)dr_app_running_under_dynamorio) {
        /* i#1237: set return value to be true in dr_app_running_under_dynamorio */
        instr_t *ret = instrlist_last(bb->ilist);
        instr_t *mov = instr_get_prev(ret);
        LOG(THREAD, LOG_INTERP, 3, "Found dr_app_running_under_dynamorio\n");
        ASSERT(ret != NULL && instr_is_return(ret) && mov != NULL &&
               IF_X86(instr_get_opcode(mov) == OP_mov_imm &&)
                   IF_ARM(instr_get_opcode(mov) == OP_mov &&
                              OPND_IS_IMMED_INT(instr_get_src(mov, 0)) &&)
                       IF_AARCH64(instr_get_opcode(mov) == OP_movz &&)(
                           bb->start_pc == instr_get_raw_bits(mov) ||
                           /* the translation field might be NULL */
                           bb->start_pc == instr_get_translation(mov)));
        /* i#1998: ensure the instr is Level 3+ */
        instr_decode(dcontext, mov);
        instr_set_src(mov, 0, OPND_CREATE_INT32(1));
    }
}
#endif /* DR_APP_EXPORTS */

/* This routine is called from build_bb_ilist when the number of instructions reaches or
 * exceeds max_bb_instrs.  It checks if the bb is safe to stop after instruction
 * stop_after.  On ARM, we do not stop bb building in the middle of an IT block unless
 * there is a conditional syscall.
 */
static bool
bb_safe_to_stop(dcontext_t *dcontext, instrlist_t *ilist, instr_t *stop_after)
{
#ifdef ARM
    ASSERT(ilist != NULL && instrlist_last(ilist) != NULL);
    /* only thumb mode could have IT blocks */
    if (dr_get_isa_mode(dcontext) != DR_ISA_ARM_THUMB)
        return true;
    if (stop_after == NULL)
        stop_after = instrlist_last_app(ilist);
    if (instr_get_opcode(stop_after) == OP_it)
        return false;
    if (!instr_is_predicated(stop_after))
        return true;
    if (instr_is_cti(stop_after) /* must be the last instr if in IT block */ ||
        /* we do not stop in the middle of an IT block unless it is a syscall */
        instr_is_syscall(stop_after) || instr_is_interrupt(stop_after))
        return true;
    return instr_is_last_in_it_block(stop_after, NULL, NULL);
#endif /* ARM */
    return true;
}

/* Interprets the application's instructions until the end of a basic
 * block is found, and prepares the resulting instrlist for creation of
 * a fragment, but does not create the fragment, just returns the instrlist.
 * Caller is responsible for freeing the list and its instrs!
 *
 * Input parameters in bb control aspects of creation:
 * If app_interp is true, this is considered real app code.
 * If pass_to_client is true,
 *   calls instrument routine on bb->ilist before mangling
 * If mangle_ilist is true, mangles the ilist, else leaves it in app form
 * If record_vmlist is true, updates the vmareas data structures
 * If for_cache is true, bb building lock is assumed to be held.
 *   record_vmlist should also be true.
 *   Caller must set and later clear dcontext->bb_build_info.
 *   For !for_cache, build_bb_ilist() sets and clears it, making the
 *   assumption that the caller is doing no other reading from the region.
 * If record_translation is true, records translation for inserted instrs
 * If outf != NULL, does full disassembly with comments to outf
 * If overlap_info != NULL, records overlap information for the block in
 *   the overlap_info (caller must fill in region_start and region_end).
 *
 * FIXME: now that we have better control over following direct ctis,
 * should we have an adaptive mechanism to decide whether to follow direct
 * ctis, since some bmarks are better doing so (gap, vortex, wupwise)
 * and others are worse (apsi, perlbmk)?
 */
static void
build_bb_ilist(dcontext_t *dcontext, build_bb_t *bb)
{
    /* Design decision: we will not try to identify branches that target
     * instructions in this basic block, when we take those branches we will
     * just make a new basic block and duplicate part of this one
     */
    int total_branches = 0;
    uint total_instrs = 0;
    /* maximum number of instructions for current basic block */
    uint cur_max_bb_instrs = DYNAMO_OPTION(max_bb_instrs);
    uint total_writes = 0;  /* only used for selfmod */
    instr_t *non_cti;       /* used if !full_decode */
    byte *non_cti_start_pc; /* used if !full_decode */
    uint eflags_6 = 0;      /* holds arith eflags written so far (in read slots) */
#ifdef HOT_PATCHING_INTERFACE
    bool hotp_should_inject = false, hotp_injected = false;
#endif
    app_pc page_start_pc = (app_pc)NULL;
    bool bb_build_nested = false;
    /* Caller will free objects allocated here so we must use the passed-in
     * dcontext for allocation; we need a separate var for the non-global dcontext.
     */
    dcontext_t *my_dcontext = get_thread_private_dcontext();
    DEBUG_DECLARE(bool regenerated = false;)
    bool stop_bb_on_fallthrough = false;

    ASSERT(bb->initialized);
    /* note that it's ok for bb->start_pc to be NULL as our check_new_page_start
     * will catch it
     */
    /* vmlist must start out empty (or N/A) */
    ASSERT(bb->vmlist == NULL || !bb->record_vmlist || bb->checked_start_vmarea);
    ASSERT(!bb->for_cache || bb->record_vmlist); /* for_cache assumes record_vmlist */

#ifdef CUSTOM_TRACES_RET_REMOVAL
    my_dcontext->num_calls = 0;
    my_dcontext->num_rets = 0;
#endif

    /* Support bb abort on decode fault */
    if (my_dcontext != NULL) {
        if (bb->for_cache) {
            /* Caller should have set! */
            ASSERT(bb == (build_bb_t *)my_dcontext->bb_build_info);
        } else if (my_dcontext->bb_build_info == NULL) {
            my_dcontext->bb_build_info = (void *)bb;
        } else {
            /* For nested we leave the original, which should be the only vmlist,
             * and we give up on freeing dangling instr_t and instrlist_t from this
             * decode.
             * We need the original's for_cache so we know to free the bb_building_lock.
             * FIXME: use TRY to handle decode exceptions locally?  Shouldn't have
             * violation remediations on a !for_cache build.
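             * So for a nested build we simply record bb_build_nested below and
             * leave my_dcontext->bb_build_info pointing at the outer (for_cache)
             * build.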
             */
            ASSERT(bb->vmlist == NULL && !bb->for_cache &&
                   ((build_bb_t *)my_dcontext->bb_build_info)->for_cache);
            /* FIXME: add nested as a field so we can have stat on nested faults */
            bb_build_nested = true;
        }
    } else
        ASSERT(dynamo_exited);

    if ((bb->record_translation IF_CLIENT_INTERFACE(
            &&!INTERNAL_OPTION(fast_client_decode))) ||
        !bb->for_cache
        /* to split riprel, need to decode every instr */
        /* in x86_to_x64, need to translate every x86 instr */
        IF_X64(|| DYNAMO_OPTION(coarse_split_riprel) || DYNAMO_OPTION(x86_to_x64))
            IF_CLIENT_INTERFACE(|| INTERNAL_OPTION(full_decode)))
        bb->full_decode = true;
    else {
#if defined(STEAL_REGISTER) || defined(CHECK_RETURNS_SSE2)
        bb->full_decode = true;
#endif
    }

    LOG(THREAD, LOG_INTERP, 3, "\ninterp%s: ",
        IF_X86_64_ELSE(X64_MODE_DC(dcontext) ? "" : " (x86 mode)", ""));
    BBPRINT(bb, 3, "start_pc = " PFX "\n", bb->start_pc);

    DOSTATS({
        if (bb->app_interp) {
            if (fragment_lookup_deleted(dcontext, bb->start_pc)) {
                /* this will look up private 1st, so yes we will get
                 * dup stats if multiple threads have regenerated the
                 * same private tag, or if a shared tag is deleted and
                 * multiple privates created
                 */
                regenerated = true;
                STATS_INC(num_fragments_deja_vu);
            }
        }
    });

    /* start converting instructions into IR */
    if (!bb->checked_start_vmarea)
        check_new_page_start(dcontext, bb);

#if defined(WINDOWS) && !defined(STANDALONE_DECODER) && defined(CLIENT_INTERFACE)
    /* i#1632: if `bb->start_pc` points into the middle of a DR intercept hook, change
     * it so instructions are taken from the intercept instead (note that
     * `instr_set_translation` will hide this adjustment from the client).  N.B.: this
     * must follow `check_new_page_start()` (above) or `bb.vmlist` will be wrong.
     */
    if (could_be_hook_occluded_pc(bb->start_pc)) {
        app_pc intercept_pc = get_intercept_pc_from_app_pc(
            bb->start_pc, true /* occlusions only */, true /* exclude start pc */);
        if (intercept_pc != NULL) {
            LOG(THREAD, LOG_INTERP, 3,
                "Changing start_pc from hook-occluded app pc " PFX " to intercept pc " PFX
                "\n",
                bb->start_pc, intercept_pc);
            bb->start_pc = intercept_pc;
        }
    }
#endif

    bb->cur_pc = bb->start_pc;
    /* for translation in case we break out of loop before decoding any
     * instructions, (i.e. check_for_stopping_point()) */
    bb->instr_start = bb->cur_pc;

    /* create instrlist after check_new_page_start to avoid memory leak
     * on unreadable memory -- though we now properly clean up and won't leak
     * on unreadable on any check_thread_vm_area call
     */
    bb->ilist = instrlist_create(dcontext);
    bb->instr = NULL;

    /* avoid discrepancy in finding invalid instructions between fast decode
     * and the full decode of sandboxing by doing full decode up front
     */
    if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) {
        bb->full_decode = true;
        bb->follow_direct = false;
    }
    if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags)) {
        bb->full_decode = true;
        bb->record_translation = true;
    }
    if (my_dcontext != NULL && my_dcontext->single_step_addr == bb->start_pc) {
        /* Decodes only one instruction because of single step exception.
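         * (The cap just below, cur_max_bb_instrs = 1, is what enforces this;
         * the rest of the decode loop is unchanged.)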
*/ cur_max_bb_instrs = 1; } KSTART(bb_decoding); while (true) { if (check_for_stopping_point(dcontext, bb)) { BBPRINT(bb, 3, "interp: found DynamoRIO stopping point at " PFX "\n", bb->cur_pc); break; } /* fill in a new instr structure and update bb->cur_pc */ bb->instr = instr_create(dcontext); /* if !full_decode: * All we need to decode are control-transfer instructions * For efficiency, put all non-cti into a single instr_t structure */ non_cti_start_pc = bb->cur_pc; do { /* If the thread's vmareas aren't being added to, indicate the * page that's being decoded. */ if (!bb->record_vmlist && page_start_pc != (app_pc)PAGE_START(bb->cur_pc)) { page_start_pc = (app_pc)PAGE_START(bb->cur_pc); set_thread_decode_page_start(my_dcontext == NULL ? dcontext : my_dcontext, page_start_pc); } bb->instr_start = bb->cur_pc; if (bb->full_decode) { /* only going through this do loop once! */ bb->cur_pc = IF_AARCH64_ELSE(decode_with_ldstex, decode)(dcontext, bb->cur_pc, bb->instr); if (bb->record_translation) instr_set_translation(bb->instr, bb->instr_start); } else { /* must reset, may go through loop multiple times */ instr_reset(dcontext, bb->instr); bb->cur_pc = IF_AARCH64_ELSE(decode_cti_with_ldstex, decode_cti)(dcontext, bb->cur_pc, bb->instr); #if defined(ANNOTATIONS) && !(defined(X64) && defined(WINDOWS)) /* Quickly check whether this may be a Valgrind annotation. */ if (is_encoded_valgrind_annotation_tail(bb->instr_start)) { /* Might be an annotation, so try the (slower) full check. */ if (is_encoded_valgrind_annotation(bb->instr_start, bb->start_pc, (app_pc)PAGE_START(bb->cur_pc))) { /* Valgrind annotation needs full decode; clean up and repeat. */ KSTOP(bb_decoding); instr_destroy(dcontext, bb->instr); instrlist_clear_and_destroy(dcontext, bb->ilist); if (bb->vmlist != NULL) { vm_area_destroy_list(dcontext, bb->vmlist); bb->vmlist = NULL; } bb->full_decode = true; build_bb_ilist(dcontext, bb); return; } } #endif } ASSERT(!bb->check_vm_area || bb->checked_end != NULL); if (bb->check_vm_area && bb->cur_pc != NULL && bb->cur_pc - 1 >= bb->checked_end) { /* We're beyond the vmarea allowed -- so check again. * Ideally we'd want to check BEFORE we decode from the * subsequent page, as it could be inaccessible, but not worth * the time estimating the size from a variable number of bytes * before the page boundary. Instead we rely on other * mechanisms to handle faults while decoding, which we need * anyway to handle racy unmaps by the app. */ uint old_flags = bb->flags; DEBUG_DECLARE(bool is_first_instr = (bb->instr_start == bb->start_pc)); if (!check_new_page_contig(dcontext, bb, bb->cur_pc - 1)) { /* i#989: Stop bb building before falling through to an * incompatible vmarea. */ ASSERT(!is_first_instr); bb->cur_pc = NULL; stop_bb_on_fallthrough = true; break; } if (!TEST(FRAG_SELFMOD_SANDBOXED, old_flags) && TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { /* Restart the decode loop with full_decode and * !follow_direct, which are needed for sandboxing. This * can't happen more than once because sandboxing is now on. 
*/ ASSERT(is_first_instr); bb->full_decode = true; bb->follow_direct = false; bb->cur_pc = bb->instr_start; instr_reset(dcontext, bb->instr); continue; } } total_instrs++; DOELOG(3, LOG_INTERP, { disassemble_with_bytes(dcontext, bb->instr_start, THREAD); }); #if defined(INTERNAL) || defined(CLIENT_INTERFACE) if (bb->outf != INVALID_FILE) disassemble_with_bytes(dcontext, bb->instr_start, bb->outf); #endif /* INTERNAL || CLIENT_INTERFACE */ if (!instr_valid(bb->instr)) break; /* before eflags analysis! */ #ifdef X86 /* If the next instruction at bb->cur_pc fires a debug register, * then we should stop this basic block before getting to it. */ if (my_dcontext != NULL && debug_register_fire_on_addr(bb->instr_start)) { stop_bb_on_fallthrough = true; break; } if (!d_r_is_avx512_code_in_use()) { if (ZMM_ENABLED()) { if (instr_get_prefix_flag(bb->instr, PREFIX_EVEX)) { /* For AVX-512 detection in bb builder, we're checking only for * the prefix flag, which for example can be set by decode_cti. In * client_process_bb, post-client instructions are checked with * instr_may_write_zmm_register. */ d_r_set_avx512_code_in_use(true); } } } #endif /* Eflags analysis: * We do this even if -unsafe_ignore_eflags_prefix b/c it doesn't cost that * much and we can use the analysis to detect any bb that reads a flag * prior to writing it. */ if (bb->eflags != EFLAGS_WRITE_ARITH IF_X86(&&bb->eflags != EFLAGS_READ_OF)) bb->eflags = eflags_analysis(bb->instr, bb->eflags, &eflags_6); /* stop decoding at an invalid instr (tested above) or a cti *(== opcode valid) or a possible SEH frame push (if * -process_SEH_push). */ #ifdef WINDOWS if (DYNAMO_OPTION(process_SEH_push) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS)) { STATS_INC(num_bb_build_fs); break; } #endif #ifdef X64 if (instr_has_rel_addr_reference(bb->instr)) { /* PR 215397: we need to split these out for re-relativization */ break; } #endif #if defined(UNIX) && defined(X86) if (INTERNAL_OPTION(mangle_app_seg) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS | PREFIX_SEG_GS)) { /* These segment prefix flags are not persistent and are * only used as hints just after decoding. * They are not accurate later and can be misleading. * This can only be used right after decoding for quick check, * and a walk of operands should be performed to look for * actual far mem refs. */ /* i#107, mangle reference with segment register */ /* we up-decode the instr when !full_decode to make sure it will * pass the instr_opcode_valid check in mangle and be mangled. */ instr_get_opcode(bb->instr); break; } #endif /* i#107, opcode mov_seg will be set in decode_cti, * so instr_opcode_valid(bb->instr) is true, and terminates the loop. */ } while (!instr_opcode_valid(bb->instr) && total_instrs <= cur_max_bb_instrs); if (bb->cur_pc == NULL) { /* invalid instr or vmarea change: reset bb->cur_pc, will end bb * after updating stats */ bb->cur_pc = bb->instr_start; } /* We need the translation when mangling calls and jecxz/loop*. * May as well set it for all cti's since there's * really no extra overhead in doing so. Note that we go * through the above loop only once for cti's, so it's safe * to set the translation here. */ if (instr_opcode_valid(bb->instr) && (instr_is_cti(bb->instr) || bb->record_translation)) instr_set_translation(bb->instr, bb->instr_start); #ifdef HOT_PATCHING_INTERFACE /* If this lookup succeeds then the current bb needs to be patched. 
     * In hotp_inject(), address lookup will be done for each instruction
     * pc in this bb and patching will be done if an exact match is found.
     *
     * Hot patching should be done only for app interp and recreating
     * pc, not for reproducing app code.  Hence we use mangle_ilist.
     * See case 5981.
     *
     * FIXME: this lookup can further be reduced by determining whether or
     * not the current bb's module needs patching via check_new_page*
     */
        if (DYNAMO_OPTION(hot_patching) && bb->mangle_ilist && !hotp_should_inject) {
            /* case 8780: we may hold the lock; FIXME: figure out if this can
             * be avoided - messy to hold hotp_vul_table lock like this for
             * unnecessary operations. */
            bool owns_hotp_lock = self_owns_write_lock(hotp_get_lock());
            if (hotp_does_region_need_patch(non_cti_start_pc, bb->cur_pc,
                                            owns_hotp_lock)) {
                BBPRINT(bb, 2, "hotpatch match in " PFX ": " PFX "-" PFX "\n",
                        bb->start_pc, non_cti_start_pc, bb->cur_pc);
                hotp_should_inject = true;
                /* Don't elide if we are going to hot patch this bb because
                 * the patch point can be a direct cti; eliding would result
                 * in the patch not being applied.  See case 5901.
                 * FIXME: we could make this more efficient by only turning
                 * off follow_direct if the instr is direct cti.
                 */
                bb->follow_direct = false;
                DOSTATS({
                    if TEST(FRAG_HAS_DIRECT_CTI, bb->flags)
                        STATS_INC(hotp_num_frag_direct_cti);
                });
            }
        }
#endif

        if (bb->full_decode) {
            if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags) && instr_valid(bb->instr) &&
                instr_writes_memory(bb->instr)) {
                /* to allow tailing non-writes, end prior to the write beyond the max */
                total_writes++;
                if (total_writes > DYNAMO_OPTION(selfmod_max_writes)) {
                    BBPRINT(bb, 3, "reached selfmod write limit %d, stopping\n",
                            DYNAMO_OPTION(selfmod_max_writes));
                    STATS_INC(num_max_selfmod_writes_enforced);
                    bb_stop_prior_to_instr(dcontext, bb,
                                           false /*not added to bb->ilist*/);
                    break;
                }
            }
        } else if (bb->instr_start != non_cti_start_pc) {
            /* instr now holds the cti, so create an instr_t for the non-cti */
            non_cti = instr_create(dcontext);
            IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(bb->instr_start - non_cti_start_pc)));
            instr_set_raw_bits(non_cti, non_cti_start_pc,
                               (uint)(bb->instr_start - non_cti_start_pc));
            if (bb->record_translation)
                instr_set_translation(non_cti, non_cti_start_pc);
            /* add non-cti instructions to instruction list */
            instrlist_append(bb->ilist, non_cti);
        }

        DOSTATS({
            /* This routine is also called for recreating state, we only want
             * to count app code when we build new bbs, which is indicated by
             * the bb->app_interp parameter
             */
            if (bb->app_interp && !regenerated) {
                /* avoid double-counting for adaptive working set */
                /* FIXME - ubr elision leads to double counting.  We also
                 * double count when we have multiple entry points into the
                 * same block of cti-free instructions.
                 */
                STATS_ADD(app_code_seen, (bb->cur_pc - non_cti_start_pc));
                LOG(THREAD, LOG_INTERP, 5, "adding %d bytes to total app code seen\n",
                    bb->cur_pc - non_cti_start_pc);
            }
        });

        if (!instr_valid(bb->instr)) {
            bb_process_invalid_instr(dcontext, bb);
            break;
        }
        if (stop_bb_on_fallthrough) {
            bb_stop_prior_to_instr(dcontext, bb, false /*not appended*/);
            break;
        }

#ifdef ANNOTATIONS
#    if !(defined(X64) && defined(WINDOWS))
        /* Quickly check whether this may be a Valgrind annotation. */
        if (is_decoded_valgrind_annotation_tail(bb->instr)) {
            /* Might be an annotation, so try the (slower) full check.
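             * (Same two-stage scheme as the quick check in the decode loop
             * above: a cheap tail-pattern test first, then the full
             * multi-instruction match only on a hit.)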
*/ if (is_encoded_valgrind_annotation(bb->instr_start, bb->start_pc, (app_pc)PAGE_START(bb->cur_pc))) { instrument_valgrind_annotation(dcontext, bb->ilist, bb->instr, bb->instr_start, bb->cur_pc, total_instrs); continue; } } else /* Top-level annotation recognition is unambiguous (xchg vs. jmp). */ # endif if (is_annotation_jump_over_dead_code(bb->instr)) { instr_t *substitution = NULL; if (instrument_annotation( dcontext, &bb->cur_pc, &substitution _IF_WINDOWS_X64(bb->cur_pc < bb->checked_end))) { instr_destroy(dcontext, bb->instr); if (substitution == NULL) continue; /* ignore annotation if no handlers are registered */ else bb->instr = substitution; } } #endif #ifdef WINDOWS if (DYNAMO_OPTION(process_SEH_push) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS)) { DEBUG_DECLARE(ssize_t dbl_count = bb->cur_pc - bb->instr_start); if (!bb_process_fs_ref(dcontext, bb)) { DOSTATS({ if (bb->app_interp) { LOG(THREAD, LOG_INTERP, 3, "stopping bb at fs-using instr @ " PFX "\n", bb->instr_start); STATS_INC(num_process_SEH_bb_early_terminate); /* don't double count the fs instruction itself * since we removed it from this bb */ if (!regenerated) STATS_ADD(app_code_seen, -dbl_count); } }); break; } } #else # ifdef X86 if (instr_get_prefix_flag(bb->instr, (SEG_TLS == SEG_GS) ? PREFIX_SEG_GS : PREFIX_SEG_FS) /* __errno_location is interpreted when global, though it's hidden in TOT */ IF_UNIX(&&!is_in_dynamo_dll(bb->instr_start)) && /* i#107 allows DR/APP using the same segment register. */ !INTERNAL_OPTION(mangle_app_seg)) { /* On linux we use a segment register and do not yet * support the application using the same register! */ CLIENT_ASSERT(false, "no support yet for application using non-NPTL segment"); ASSERT_BUG_NUM(205276, false); } # endif /* X86 */ #endif /* WINDOWS */ if (my_dcontext != NULL && my_dcontext->single_step_addr == bb->instr_start) { bb_process_single_step(dcontext, bb); /* Stops basic block right now. 
*/ break; } /* far direct is treated as indirect (i#823) */ if (instr_is_near_ubr(bb->instr)) { if (bb_process_ubr(dcontext, bb)) continue; else { if (bb->instr != NULL) /* else, bb_process_ubr() set exit_type */ bb->exit_type |= instr_branch_type(bb->instr); break; } } else instrlist_append(bb->ilist, bb->instr); #ifdef RETURN_AFTER_CALL if (bb->app_interp && dynamo_options.ret_after_call) { if (instr_is_call(bb->instr)) { /* add after call instruction to valid return targets */ add_return_target(dcontext, bb->instr_start, bb->instr); } } #endif /* RETURN_AFTER_CALL */ #ifdef X64 /* must be prior to mbr check since mbr location could be rip-rel */ if (DYNAMO_OPTION(coarse_split_riprel) && DYNAMO_OPTION(coarse_units) && TEST(FRAG_COARSE_GRAIN, bb->flags) && instr_has_rel_addr_reference(bb->instr)) { if (instrlist_first(bb->ilist) != bb->instr) { /* have ref be in its own bb */ bb_stop_prior_to_instr(dcontext, bb, true /*appended already*/); break; /* stop bb */ } else { /* single-instr fine-grained bb */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_riprel); } } #endif if (instr_is_near_call_direct(bb->instr)) { if (!bb_process_call_direct(dcontext, bb)) { if (bb->instr != NULL) bb->exit_type |= instr_branch_type(bb->instr); break; } } else if (instr_is_mbr(bb->instr) /* including indirect calls */ IF_X86( /* far direct is treated as indirect (i#823) */ || instr_get_opcode(bb->instr) == OP_jmp_far || instr_get_opcode(bb->instr) == OP_call_far) IF_ARM(/* mode-switch direct is treated as indirect */ || instr_get_opcode(bb->instr) == OP_blx)) { /* Manage the case where we don't need to perform 'normal' * indirect branch processing. */ bool normal_indirect_processing = true; bool elide_and_continue_if_converted = true; if (instr_is_return(bb->instr)) { bb->ibl_branch_type = IBL_RETURN; STATS_INC(num_returns); } else if (instr_is_call_indirect(bb->instr)) { STATS_INC(num_all_calls); STATS_INC(num_indirect_calls); if (DYNAMO_OPTION(coarse_split_calls) && DYNAMO_OPTION(coarse_units) && TEST(FRAG_COARSE_GRAIN, bb->flags)) { if (instrlist_first(bb->ilist) != bb->instr) { /* have call be in its own bb */ bb_stop_prior_to_instr(dcontext, bb, true /*appended already*/); break; /* stop bb */ } else { /* single-call fine-grained bb */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); } } /* If the indirect call can be converted into a direct one, * bypass normal indirect call processing. * First, check for a call* that we treat as a syscall. */ if (bb_process_indcall_syscall(dcontext, bb, &elide_and_continue_if_converted)) { normal_indirect_processing = false; } else if (DYNAMO_OPTION(indcall2direct) && bb_process_convertible_indcall(dcontext, bb)) { normal_indirect_processing = false; elide_and_continue_if_converted = true; } else if (DYNAMO_OPTION(IAT_convert) && bb_process_IAT_convertible_indcall( dcontext, bb, &elide_and_continue_if_converted)) { normal_indirect_processing = false; } else bb->ibl_branch_type = IBL_INDCALL; #ifdef X86 } else if (instr_get_opcode(bb->instr) == OP_jmp_far) { /* far direct is treated as indirect (i#823) */ bb->ibl_branch_type = IBL_INDJMP; } else if (instr_get_opcode(bb->instr) == OP_call_far) { /* far direct is treated as indirect (i#823) */ bb->ibl_branch_type = IBL_INDCALL; #elif defined(ARM) } else if (instr_get_opcode(bb->instr) == OP_blx) { /* mode-changing direct call is treated as indirect */ bb->ibl_branch_type = IBL_INDCALL; #endif /* X86 */ } else { /* indirect jump */ /* was prev instr a direct call? 
if so, this is a PLT-style ind call */ instr_t *prev = instr_get_prev(bb->instr); if (prev != NULL && instr_opcode_valid(prev) && instr_is_call_direct(prev)) { bb->exit_type |= INSTR_IND_JMP_PLT_EXIT; /* just because we have a CALL to JMP* makes it only a _likely_ PLT call, we still have to make sure it goes through IAT - see case 4269 */ STATS_INC(num_indirect_jumps_likely_PLT); } elide_and_continue_if_converted = true; if (DYNAMO_OPTION(IAT_convert) && bb_process_IAT_convertible_indjmp(dcontext, bb, &elide_and_continue_if_converted)) { /* Clear the IND_JMP_PLT_EXIT flag since we've converted * the PLT to a direct transition (and possibly elided). * Xref case 7867 for why leaving this flag in the eliding * case can cause later failures. */ bb->exit_type &= ~INSTR_CALL_EXIT; /* leave just JMP */ normal_indirect_processing = false; } else /* FIXME: this can always be set */ bb->ibl_branch_type = IBL_INDJMP; STATS_INC(num_indirect_jumps); } #ifdef CUSTOM_TRACES_RET_REMOVAL if (instr_is_return(bb->instr)) my_dcontext->num_rets++; else if (instr_is_call_indirect(bb->instr)) my_dcontext->num_calls++; #endif /* set exit type since this instruction will get mangled */ if (normal_indirect_processing) { bb->exit_type |= instr_branch_type(bb->instr); bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type); LOG(THREAD, LOG_INTERP, 4, "mbr exit target = " PFX "\n", bb->exit_target); break; } else { /* decide whether to stop bb here */ if (!elide_and_continue_if_converted) break; /* fall through for -max_bb_instrs check */ } } else if (instr_is_cti(bb->instr) && (!instr_is_call(bb->instr) || instr_is_cbr(bb->instr))) { total_branches++; if (total_branches >= BRANCH_LIMIT) { /* set type of 1st exit cti for cbr (bb->exit_type is for fall-through) */ instr_exit_branch_set_type(bb->instr, instr_branch_type(bb->instr)); break; } } else if (instr_is_syscall(bb->instr)) { if (!bb_process_syscall(dcontext, bb)) break; } /* end syscall */ else if (instr_get_opcode(bb->instr) == IF_X86_ELSE(OP_int, OP_svc)) { /* non-syscall int */ if (!bb_process_interrupt(dcontext, bb)) break; } #ifdef AARCH64 /* OP_isb, when mangled, has a potential side exit. */ else if (instr_get_opcode(bb->instr) == OP_isb) break; #endif #if 0 /*i#1313, i#1314*/ else if (instr_get_opcode(bb->instr) == OP_getsec) { /* XXX i#1313: if we support CPL0 in the future we'll need to * dynamically handle the leaf functions here, which can change eip * and other state. We'll need OP_getsec in decode_cti(). */ } else if (instr_get_opcode(bb->instr) == OP_xend || instr_get_opcode(bb->instr) == OP_xabort) { /* XXX i#1314: support OP_xend failing and setting eip to the * fallback pc recorded by OP_xbegin. We'll need both in decode_cti(). */ } #endif #ifdef CHECK_RETURNS_SSE2 /* There are SSE and SSE2 instrs that operate on MMX instead of XMM, but * we perform a simple coarse-grain check here. */ else if (instr_is_sse_or_sse2(bb->instr)) { FATAL_USAGE_ERROR(CHECK_RETURNS_SSE2_XMM_USED, 2, get_application_name(), get_application_pid()); } #endif #if defined(UNIX) && !defined(DGC_DIAGNOSTICS) && defined(X86) else if (instr_get_opcode(bb->instr) == OP_mov_seg) { if (!bb_process_mov_seg(dcontext, bb)) break; } #endif else if (instr_saves_float_pc(bb->instr)) { bb_process_float_pc(dcontext, bb); break; } if (bb->cur_pc == bb->stop_pc) { /* We only check stop_pc for full_decode, so not in inner loop. 
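             * (The -max_bb_instrs check just below is the other
             * instruction-count stop; it consults bb_safe_to_stop() so that,
             * on ARM, the block is never split in the middle of an IT block.)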
*/ BBPRINT(bb, 3, "reached end pc " PFX ", stopping\n", bb->stop_pc); break; } if (total_instrs > DYNAMO_OPTION(max_bb_instrs)) { /* this could be an enormous basic block, or it could * be some degenerate infinite-loop case like a call * to a function that calls exit() and then calls itself, * so just end it here, we'll pick up where we left off * if it's legit */ BBPRINT(bb, 3, "reached -max_bb_instrs(%d): %d, ", DYNAMO_OPTION(max_bb_instrs), total_instrs); if (bb_safe_to_stop(dcontext, bb->ilist, NULL)) { BBPRINT(bb, 3, "stopping\n"); STATS_INC(num_max_bb_instrs_enforced); break; } else { /* XXX i#1669: cannot stop bb now, what's the best way to handle? * We can either roll-back and find previous safe stop point, or * simply extend the bb with a few more instructions. * We can always lower the -max_bb_instrs to offset the additional * instructions. In contrast, roll-back seems complex and * potentially problematic. */ BBPRINT(bb, 3, "cannot stop, continuing\n"); } } } /* end of while (true) */ KSTOP(bb_decoding); #ifdef DEBUG_MEMORY /* make sure anyone who destroyed also set to NULL */ ASSERT(bb->instr == NULL || (bb->instr->bytes != (byte *)HEAP_UNALLOCATED_PTR_UINT && bb->instr->bytes != (byte *)HEAP_ALLOCATED_PTR_UINT && bb->instr->bytes != (byte *)HEAP_PAD_PTR_UINT)); #endif if (!check_new_page_contig(dcontext, bb, bb->cur_pc - 1)) { ASSERT(false && "Should have checked cur_pc-1 in decode loop"); } bb->end_pc = bb->cur_pc; BBPRINT(bb, 3, "end_pc = " PFX "\n\n", bb->end_pc); /* We could put this in check_new_page_jmp where it already checks * for native_exec overlap, but selfmod ubrs don't even call that routine */ if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_callcall) && !vmvector_empty(native_exec_areas) && bb->app_interp && bb->instr != NULL && (instr_is_near_ubr(bb->instr) || instr_is_near_call_direct(bb->instr)) && instrlist_first(bb->ilist) == instrlist_last(bb->ilist)) { /* Case 4564/3558: handle .NET COM method table where a call* targets * a call to a native_exec dll -- we need to put the gateway at the * call* to avoid retaddr mangling of the method table call. * As a side effect we can also handle call*, jmp. * We don't actually verify or care that it was specifically a call*, * whatever at_native_exec_gateway() requires to assure itself that we're * at a return-address-clobberable point. */ app_pc tgt = opnd_get_pc(instr_get_target(bb->instr)); if (is_native_pc(tgt) && at_native_exec_gateway(dcontext, tgt, &bb->native_call _IF_DEBUG(true /*xfer tgt*/))) { /* replace this ilist w/ a native exec one */ LOG(THREAD, LOG_INTERP, 2, "direct xfer @gateway @" PFX " to native_exec module " PFX "\n", bb->start_pc, tgt); bb->native_exec = true; /* add this ubr/call to the native_exec_list, both as an optimization * for future entrances and b/c .NET changes its method table call * from targeting a native_exec image to instead target DGC directly, * thwarting our gateway! * FIXME: if heap region de-allocated, we'll remove, but what if re-used * w/o going through syscalls? Just written over w/ something else? * We'll keep it on native_exec_list... 
         */
            ASSERT(bb->end_pc == bb->start_pc + DIRECT_XFER_LENGTH);
            vmvector_add(native_exec_areas, bb->start_pc, bb->end_pc, NULL);
            DODEBUG({ report_native_module(dcontext, tgt); });
            STATS_INC(num_native_module_entrances_callcall);
            return;
        }
    }
#ifdef UNIX
    /* XXX: i#1247: After a call to a native module through the PLT, DR
     * loses control of the app b/c of _dl_runtime_resolve
     */
    int ret_imm;
    if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_opt) && bb->app_interp &&
        bb->instr != NULL && instr_is_return(bb->instr) &&
        at_dl_runtime_resolve_ret(dcontext, bb->start_pc, &ret_imm)) {
        dr_insert_clean_call(dcontext, bb->ilist, bb->instr,
                             (void *)native_module_at_runtime_resolve_ret, false, 2,
                             opnd_create_reg(REG_XSP), OPND_CREATE_INT32(ret_imm));
    }
#endif

    STATS_TRACK_MAX(max_instrs_in_a_bb, total_instrs);

#ifdef UNIX
    if (bb->invalid_instr_hack) {
        /* turn off selfmod -- we assume bb will hit exception right away */
        if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags))
            bb->flags &= ~FRAG_SELFMOD_SANDBOXED;
        /* decode_fragment() can't handle invalid instrs, so store translations */
        bb->flags |= FRAG_HAS_TRANSLATION_INFO;
    }
#endif

    if (stop_bb_on_fallthrough && TEST(FRAG_HAS_DIRECT_CTI, bb->flags)) {
        /* If we followed a direct cti to an instruction straddling a vmarea
         * boundary, we can't actually do the elision.  See the
         * sandbox_last_byte() test case in security-common/sandbox.c.  Restart
         * bb building without follow_direct.  Alternatively, we could check the
         * vmareas of the targeted instruction before performing elision.
         */
        /* FIXME: a better assert is needed because this can trigger if
         * hot patching turns off follow_direct, the current bb was elided
         * earlier and is marked as selfmod.  hotp_num_frag_direct_cti will
         * track this for now.
         */
        ASSERT(bb->follow_direct); /* else, infinite loop possible */
        BBPRINT(bb, 2,
                "*** must rebuild bb to avoid following direct cti to "
                "incompatible vmarea\n");
        STATS_INC(num_bb_end_early);
        instrlist_clear_and_destroy(dcontext, bb->ilist);
        if (bb->vmlist != NULL) {
            vm_area_destroy_list(dcontext, bb->vmlist);
            bb->vmlist = NULL;
        }
        /* Remove FRAG_HAS_DIRECT_CTI, since we're turning off follow_direct.
         * Try to keep the known flags.  We stopped the bb before merging in any
         * incompatible flags.
         */
        bb->flags &= ~FRAG_HAS_DIRECT_CTI;
        bb->follow_direct = false;
        bb->exit_type = 0;      /* i#577 */
        bb->exit_target = NULL; /* i#928 */
        /* overlap info will be reset by check_new_page_start */
        build_bb_ilist(dcontext, bb);
        return;
    }

    if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) {
        ASSERT(bb->full_decode);
        ASSERT(!bb->follow_direct);
        ASSERT(!TEST(FRAG_HAS_DIRECT_CTI, bb->flags));
    }

#ifdef HOT_PATCHING_INTERFACE
    /* CAUTION: This can't be moved below client interface as the basic block
     * can be changed by the client.  This will mess up hot patching.
     * The same is true for mangling.
     */
    if (hotp_should_inject) {
        ASSERT(DYNAMO_OPTION(hot_patching));
        hotp_injected = hotp_inject(dcontext, bb->ilist);

        /* Fix for 5272.  Hot patch injection uses the dr clean call api which
         * accesses dcontext fields directly, so the injected bbs can't be
         * shared until that is changed or the clean call mechanism is replaced
         * with bb termination to execute hot patches.
         * Case 9995 assumes that hotp fragments are fine-grained, which we
         * achieve today by being private; if we make them shared we must
         * explicitly prevent them from being coarse-grained.
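         * The flag adjustment just below enforces that: hotp-injected bbs
         * are made private (FRAG_SHARED removed) and barred from traces
         * (FRAG_CANNOT_BE_TRACE).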
         */
        if (hotp_injected) {
            bb->flags &= ~FRAG_SHARED;
            bb->flags |= FRAG_CANNOT_BE_TRACE;
        }
    }
#endif

    /* Until we're more confident in our decoder/encoder consistency this is
     * at the default debug build -checklevel 2.
     */
    IF_ARM(DOCHECK(2, check_encode_decode_consistency(dcontext, bb->ilist);));

#ifdef DR_APP_EXPORTS
    /* changes by DR that are visible to clients */
    mangle_pre_client(dcontext, bb);
#endif /* DR_APP_EXPORTS */

#ifdef DEBUG
    /* This is a special debugging feature */
    if (bb->for_cache && INTERNAL_OPTION(go_native_at_bb_count) > 0 &&
        debug_bb_count++ >= INTERNAL_OPTION(go_native_at_bb_count)) {
        SYSLOG_INTERNAL_INFO("thread " TIDFMT " is going native @%d bbs to " PFX,
                             d_r_get_thread_id(), debug_bb_count - 1, bb->start_pc);
        /* we leverage the existing native_exec mechanism */
        dcontext->native_exec_postsyscall = bb->start_pc;
        dcontext->next_tag = BACK_TO_NATIVE_AFTER_SYSCALL;
        dynamo_thread_not_under_dynamo(dcontext);
        /* i#1582: required for now on ARM */
        IF_UNIX(os_swap_context_go_native(dcontext, DR_STATE_GO_NATIVE));
        /* i#1921: for now we do not support re-attach, so remove handlers */
        os_process_not_under_dynamorio(dcontext);
        bb_build_abort(dcontext, true /*free vmlist*/, false /*don't unlock*/);
        return;
    }
#endif

#ifdef CLIENT_INTERFACE
    if (!client_process_bb(dcontext, bb)) {
        bb_build_abort(dcontext, true /*free vmlist*/, false /*don't unlock*/);
        return;
    }
    /* i#620: provide API to set fall-through and retaddr targets at end of bb */
    if (instrlist_get_return_target(bb->ilist) != NULL ||
        instrlist_get_fall_through_target(bb->ilist) != NULL) {
        CLIENT_ASSERT(instr_is_cbr(instrlist_last(bb->ilist)) ||
                          instr_is_call(instrlist_last(bb->ilist)),
                      "instr_set_return_target/instr_set_fall_through_target"
                      " can only be used in a bb ending with call/cbr");
        /* the bb cannot be added to a trace */
        bb->flags |= FRAG_CANNOT_BE_TRACE;
    }
    if (bb->unmangled_ilist != NULL)
        *bb->unmangled_ilist = instrlist_clone(dcontext, bb->ilist);
#endif

    if (bb->instr != NULL && instr_opcode_valid(bb->instr) &&
        instr_is_far_cti(bb->instr)) {
        /* Simplify far_ibl (i#823) vs trace_cmp ibl as well as
         * cross-mode direct stubs varying in a trace by disallowing
         * far cti in middle of trace
         */
        bb->flags |= FRAG_MUST_END_TRACE;
        /* Simplify coarse by not requiring extra prefix stubs */
        bb->flags &= ~FRAG_COARSE_GRAIN;
    }

    /* create a final instruction that will jump to the exit stub
     * corresponding to the fall-through of the conditional branch or
     * the target of the final indirect branch (the indirect branch itself
     * will get mangled into a non-cti)
     */
    if (bb->exit_target == NULL) { /* not set by ind branch, etc. */
        /* fall-through pc */
#ifdef CLIENT_INTERFACE
        /* i#620: provide API to set fall-through target at end of bb */
        bb->exit_target = instrlist_get_fall_through_target(bb->ilist);
#endif /* CLIENT_INTERFACE */
        if (bb->exit_target == NULL)
            bb->exit_target = (cache_pc)bb->cur_pc;
#ifdef CLIENT_INTERFACE
        else {
            LOG(THREAD, LOG_INTERP, 3, "set fall-through target " PFX " by client\n",
                bb->exit_target);
        }
#endif /* CLIENT_INTERFACE */
        if (bb->instr != NULL && instr_opcode_valid(bb->instr) &&
            instr_is_cbr(bb->instr) &&
            (int)(bb->exit_target - bb->start_pc) <= SHRT_MAX &&
            (int)(bb->exit_target - bb->start_pc) >= SHRT_MIN &&
            /* rule out jecxz, etc.
             */
            !instr_is_cti_loop(bb->instr))
            bb->flags |= FRAG_CBR_FALLTHROUGH_SHORT;
    }
    /* we share all basic blocks except selfmod (since we want no-synch quick deletion)
     * or syscall-containing ones (to bound delay on threads exiting shared cache,
     * for cache management, both consistency and capacity)
     * bbs injected with hot patches are also not shared (see case 5272).
     */
    if (DYNAMO_OPTION(shared_bbs) && !TEST(FRAG_SELFMOD_SANDBOXED, bb->flags) &&
        !TEST(FRAG_TEMP_PRIVATE, bb->flags)
#ifdef HOT_PATCHING_INTERFACE
        && !hotp_injected
#endif
        && (my_dcontext == NULL || my_dcontext->single_step_addr != bb->instr_start)) {
        /* If the fragment doesn't have a syscall or contains a
         * non-ignorable one -- meaning that the frag will exit the cache
         * to execute the syscall -- it can be shared.
         * We don't support ignorable syscalls in shared fragments, as they
         * don't set at_syscall and so are incompatible w/ -syscalls_synch_flush.
         */
        if (!TEST(FRAG_HAS_SYSCALL, bb->flags) ||
            TESTANY(LINK_NI_SYSCALL_ALL, bb->exit_type) ||
            TEST(LINK_SPECIAL_EXIT, bb->exit_type))
            bb->flags |= FRAG_SHARED;
#ifdef WINDOWS
        /* A fragment can be shared if it contains a syscall that will be
         * executed via the version of shared syscall that can be targeted by
         * shared frags.
         */
        else if (TEST(FRAG_HAS_SYSCALL, bb->flags) &&
                 DYNAMO_OPTION(shared_fragment_shared_syscalls) &&
                 bb->exit_target == shared_syscall_routine(dcontext))
            bb->flags |= FRAG_SHARED;
        else {
            ASSERT((TEST(FRAG_HAS_SYSCALL, bb->flags) &&
                    (DYNAMO_OPTION(ignore_syscalls) ||
                     (!DYNAMO_OPTION(shared_fragment_shared_syscalls) &&
                      bb->exit_target == shared_syscall_routine(dcontext)))) &&
                   "BB not shared for unknown reason");
        }
#endif
    } else if (my_dcontext != NULL && my_dcontext->single_step_addr == bb->instr_start) {
        /* Field exit_type might have been cleared by client_process_bb.
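         * (Hence the re-OR of LINK_SPECIAL_EXIT just below for the
         * single-stepped instruction's block.)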
*/ bb->exit_type |= LINK_SPECIAL_EXIT; } if (TEST(FRAG_COARSE_GRAIN, bb->flags) && (!TEST(FRAG_SHARED, bb->flags) || /* Ignorable syscalls on linux are mangled w/ intra-fragment jmps, which * decode_fragment() cannot handle -- and on win32 this overlaps w/ * FRAG_MUST_END_TRACE and LINK_NI_SYSCALL */ TEST(FRAG_HAS_SYSCALL, bb->flags) || TEST(FRAG_MUST_END_TRACE, bb->flags) || TEST(FRAG_CANNOT_BE_TRACE, bb->flags) || TEST(FRAG_SELFMOD_SANDBOXED, bb->flags) || /* PR 214142: coarse units does not support storing translations */ TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags) || /* FRAG_HAS_DIRECT_CTI: we never elide (assert is below); * not-inlined call/jmp: we turn off FRAG_COARSE_GRAIN up above */ #ifdef WINDOWS TEST(LINK_CALLBACK_RETURN, bb->exit_type) || #endif TESTANY(LINK_NI_SYSCALL_ALL, bb->exit_type))) { /* Currently not supported in a coarse unit */ STATS_INC(num_fine_in_coarse); DOSTATS({ if (!TEST(FRAG_SHARED, bb->flags)) STATS_INC(coarse_prevent_private); else if (TEST(FRAG_HAS_SYSCALL, bb->flags)) STATS_INC(coarse_prevent_syscall); else if (TEST(FRAG_MUST_END_TRACE, bb->flags)) STATS_INC(coarse_prevent_end_trace); else if (TEST(FRAG_CANNOT_BE_TRACE, bb->flags)) STATS_INC(coarse_prevent_no_trace); else if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) STATS_INC(coarse_prevent_selfmod); else if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags)) STATS_INC(coarse_prevent_translation); else if (IF_WINDOWS_ELSE_0(TEST(LINK_CALLBACK_RETURN, bb->exit_type))) STATS_INC(coarse_prevent_cbret); else if (TESTANY(LINK_NI_SYSCALL_ALL, bb->exit_type)) STATS_INC(coarse_prevent_syscall); else ASSERT_NOT_REACHED(); }); bb->flags &= ~FRAG_COARSE_GRAIN; } ASSERT(!TEST(FRAG_COARSE_GRAIN, bb->flags) || !TEST(FRAG_HAS_DIRECT_CTI, bb->flags)); /* now that we know whether shared, ensure we have the right ibl routine */ if (!TEST(FRAG_SHARED, bb->flags) && TEST(LINK_INDIRECT, bb->exit_type)) { ASSERT(bb->exit_target == get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type)); bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), IBL_BB_PRIVATE, bb->ibl_branch_type); } if (bb->mangle_ilist && (bb->instr == NULL || !instr_opcode_valid(bb->instr) || !instr_is_near_ubr(bb->instr) || instr_is_meta(bb->instr))) { instr_t *exit_instr = XINST_CREATE_jump(dcontext, opnd_create_pc(bb->exit_target)); if (bb->record_translation) { app_pc translation = NULL; if (bb->instr == NULL || !instr_opcode_valid(bb->instr)) { /* we removed (or mangle will remove) the last instruction * for special handling (invalid/syscall/int 2b) or there were * no instructions added (i.e. 
check_stopping_point in which
                 * case instr_start == cur_pc), use last instruction's start
                 * address for the translation */
                translation = bb->instr_start;
            } else if (instr_is_cti(bb->instr)) {
                /* last instruction is a cti, consider the exit jmp part of
                 * the mangling of the cti (since we might not know the target
                 * if, for ex., it's indirect) */
                translation = instr_get_translation(bb->instr);
            } else {
                /* target is the instr after the last instr in the list */
                translation = bb->cur_pc;
                ASSERT(bb->cur_pc == bb->exit_target);
            }
            ASSERT(translation != NULL);
            instr_set_translation(exit_instr, translation);
        }
        /* PR 214962: we need this jmp to be marked as "our mangling" so that
         * we won't relocate a thread there and re-do a ret pop or call push
         */
        instr_set_our_mangling(exit_instr, true);
        /* here we need to set exit_type */
        LOG(THREAD, LOG_EMIT, 3, "exit_branch_type=0x%x bb->exit_target=" PFX "\n",
            bb->exit_type, bb->exit_target);
        instr_exit_branch_set_type(exit_instr, bb->exit_type);
        instrlist_append(bb->ilist, exit_instr);
#ifdef ARM
        if (bb->svc_pred != DR_PRED_NONE) {
            /* we have a conditional syscall, add predicate to current exit */
            instr_set_predicate(exit_instr, bb->svc_pred);
            /* add another ubr exit as the fall-through */
            exit_instr = XINST_CREATE_jump(dcontext, opnd_create_pc(bb->exit_target));
            if (bb->record_translation)
                instr_set_translation(exit_instr, bb->cur_pc);
            instr_set_our_mangling(exit_instr, true);
            instr_exit_branch_set_type(exit_instr, LINK_DIRECT | LINK_JMP);
            instrlist_append(bb->ilist, exit_instr);
            /* XXX i#1734: instr svc.cc will be deleted later in mangle_syscall,
             * so we need to reset the encode state to avoid holding a dangling
             * pointer.
             */
            encode_reset_it_block(dcontext);
        }
#endif
    }

    /* set flags */
#ifdef DGC_DIAGNOSTICS
    /* no traces in dyngen code, that would mess up our exit tracking */
    if (TEST(FRAG_DYNGEN, bb->flags))
        bb->flags |= FRAG_CANNOT_BE_TRACE;
#endif
    if (!INTERNAL_OPTION(unsafe_ignore_eflags_prefix)
            IF_X64(|| !INTERNAL_OPTION(unsafe_ignore_eflags_trace))) {
        bb->flags |= instr_eflags_to_fragment_eflags(bb->eflags);
        if (TEST(FRAG_WRITES_EFLAGS_OF, bb->flags)) {
            LOG(THREAD, LOG_INTERP, 4, "fragment writes OF prior to reading it!\n");
            STATS_INC(bbs_eflags_writes_of);
        } else if (TEST(FRAG_WRITES_EFLAGS_6, bb->flags)) {
            IF_X86(ASSERT(TEST(FRAG_WRITES_EFLAGS_OF, bb->flags)));
            LOG(THREAD, LOG_INTERP, 4,
                "fragment writes all 6 flags prior to reading any\n");
            STATS_INC(bbs_eflags_writes_6);
        } else {
            DOSTATS({
                if (bb->eflags == EFLAGS_READ_ARITH) {
                    /* Reads a flag before writing any.  Won't get here if
                     * reads one flag and later writes OF, or writes OF and
                     * later reads one flag before writing that flag.
                     */
                    STATS_INC(bbs_eflags_reads);
                } else {
                    STATS_INC(bbs_eflags_writes_none);
                    if (TEST(LINK_INDIRECT, bb->exit_type))
                        STATS_INC(bbs_eflags_writes_none_ind);
                }
            });
        }
    }

    /* can only have proactive translation info if flag was set from the beginning */
    if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags) &&
        (!bb->record_translation || !bb->full_decode))
        bb->flags &= ~FRAG_HAS_TRANSLATION_INFO;

    /* if for_cache, caller must clear once done emitting (emitting can deref
     * app memory so we wait until all done)
     */
    if (!bb_build_nested && !bb->for_cache && my_dcontext != NULL) {
        ASSERT(my_dcontext->bb_build_info == (void *)bb);
        my_dcontext->bb_build_info = NULL;
    }
    bb->instr = NULL;

    /* mangle the instruction list */
    if (!bb->mangle_ilist) {
        /* do not mangle!
* caller must use full_decode to find invalid instrs and avoid * a discrepancy w/ for_cache case that aborts b/c of selfmod sandbox * returning false (in code below) */ return; } if (!mangle_bb_ilist(dcontext, bb)) { /* have to rebuild bb w/ new bb flags set by mangle_bb_ilist */ build_bb_ilist(dcontext, bb); return; } } /* Call when about to throw exception or other drastic action in the * middle of bb building, in order to free resources */ void bb_build_abort(dcontext_t *dcontext, bool clean_vmarea, bool unlock) { ASSERT(dcontext->bb_build_info != NULL); /* caller should check */ if (dcontext->bb_build_info != NULL) { build_bb_t *bb = (build_bb_t *)dcontext->bb_build_info; /* free instr memory */ if (bb->instr != NULL && bb->ilist != NULL && instrlist_last(bb->ilist) != bb->instr) instr_destroy(dcontext, bb->instr); /* not added to bb->ilist yet */ DODEBUG({ bb->instr = NULL; }); if (bb->ilist != NULL) { instrlist_clear_and_destroy(dcontext, bb->ilist); DODEBUG({ bb->ilist = NULL; }); } if (clean_vmarea) { /* Free the vmlist and any locks held (we could have been in * the middle of check_thread_vm_area and had a decode fault * during code origins checking!) */ check_thread_vm_area_abort(dcontext, &bb->vmlist, bb->flags); } /* else we were presumably called from vmarea so caller does cleanup */ if (unlock) { /* Assumption: bb building lock is held iff bb->for_cache, * and on a nested app bb build where !bb->for_cache we do keep the * original bb info in dcontext (see build_bb_ilist()). */ if (bb->has_bb_building_lock) { ASSERT_OWN_MUTEX(USE_BB_BUILDING_LOCK(), &bb_building_lock); SHARED_BB_UNLOCK(); KSTOP_REWIND(bb_building); } else ASSERT_DO_NOT_OWN_MUTEX(USE_BB_BUILDING_LOCK(), &bb_building_lock); } dcontext->bb_build_info = NULL; } } bool expand_should_set_translation(dcontext_t *dcontext) { if (dcontext->bb_build_info != NULL) { build_bb_t *bb = (build_bb_t *)dcontext->bb_build_info; /* Expanding to a higher level should set the translation to * the raw bytes if we're building a bb where we can assume * the raw byte pointer is the app pc. */ return bb->record_translation; } return false; } /* returns false if need to rebuild bb: in that case this routine will * set the bb flags needed to ensure successful mangling 2nd time around */ static bool mangle_bb_ilist(dcontext_t *dcontext, build_bb_t *bb) { #ifdef X86 if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { byte *selfmod_start, *selfmod_end; /* sandbox requires that bb have no direct cti followings! * check_thread_vm_area should have ensured this for us */ ASSERT(!TEST(FRAG_HAS_DIRECT_CTI, bb->flags)); LOG(THREAD, LOG_INTERP, 2, "fragment overlaps selfmod area, inserting sandboxing\n"); /* only reason can't be trace is don't have mechanism set up * to store app code for each trace bb and update sandbox code * to point there */ bb->flags |= FRAG_CANNOT_BE_TRACE; if (bb->pretend_pc != NULL) { selfmod_start = bb->pretend_pc; selfmod_end = bb->pretend_pc + (bb->cur_pc - bb->start_pc); } else { selfmod_start = bb->start_pc; selfmod_end = bb->cur_pc; } if (!insert_selfmod_sandbox(dcontext, bb->ilist, bb->flags, selfmod_start, selfmod_end, bb->record_translation, bb->for_cache)) { /* have to rebuild bb using full decode -- it has invalid instrs * in middle, which we don't want to deal w/ for sandboxing! */ ASSERT(!bb->full_decode); /* else, how did we get here??? 
             */
            LOG(THREAD, LOG_INTERP, 2,
                "*** must rebuild bb to avoid invalid instr in middle ***\n");
            STATS_INC(num_bb_end_early);
            instrlist_clear_and_destroy(dcontext, bb->ilist);
            if (bb->vmlist != NULL) {
                vm_area_destroy_list(dcontext, bb->vmlist);
                bb->vmlist = NULL;
            }
            bb->flags = FRAG_SELFMOD_SANDBOXED; /* lose all other flags */
            bb->full_decode = true;             /* full decode this time! */
            bb->follow_direct = false;
            bb->exit_type = 0;       /* i#577 */
            bb->exit_target = NULL;  /* i#928 */
            /* overlap info will be reset by check_new_page_start */
            return false;
        }
        STATS_INC(num_sandboxed_fragments);
    }
#endif /* X86 */

    DOLOG(4, LOG_INTERP, {
        LOG(THREAD, LOG_INTERP, 4, "bb ilist before mangling:\n");
        instrlist_disassemble(dcontext, bb->start_pc, bb->ilist, THREAD);
    });
    d_r_mangle(dcontext, bb->ilist, &bb->flags, true, bb->record_translation);
    DOLOG(4, LOG_INTERP, {
        LOG(THREAD, LOG_INTERP, 4, "bb ilist after mangling:\n");
        instrlist_disassemble(dcontext, bb->start_pc, bb->ilist, THREAD);
    });
    return true;
}

/* Interprets the application's instructions until the end of a basic
 * block is found, following all the rules that build_bb_ilist follows
 * with regard to terminating the block.  Does no mangling or anything of
 * the app code, though -- this routine is intended only for building the
 * original code!
 * Caller is responsible for freeing the list and its instrs!
 * If outf != INVALID_FILE, does full disassembly with comments to outf.
 */
instrlist_t *
build_app_bb_ilist(dcontext_t *dcontext, byte *start_pc, file_t outf)
{
    build_bb_t bb;
    init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/,
                  false /*do not mangle*/, false /*no translation*/, outf,
                  0 /*no pre flags*/, NULL /*no overlap*/);
    build_bb_ilist(dcontext, &bb);
    return bb.ilist;
}

#ifdef CLIENT_INTERFACE
/* Client routine to decode instructions at an arbitrary app address,
 * following all the rules that DynamoRIO follows internally for
 * terminating basic blocks.  Note that DynamoRIO does not validate
 * that start_pc is actually the first instruction of a basic block.
 * \note Caller is responsible for freeing the list and its instrs!
 */
instrlist_t *
decode_as_bb(void *drcontext, byte *start_pc)
{
    build_bb_t bb;
    /* Case 10009: When we hook ntdll functions, we hide the jump to
     * the interception buffer from the client BB callback.  If the
     * client asks to decode that address here, we need to decode the
     * instructions in the interception buffer instead so that we
     * again hide our hooking.
     * We will have the jmp from the buffer back to after the hooked
     * app code visible to the client (just like it is for the
     * real bb built there, so at least we're consistent).
     */
# ifdef WINDOWS
    byte *real_pc;
    if (is_intercepted_app_pc((app_pc)start_pc, &real_pc))
        start_pc = real_pc;
# endif
    init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/,
                  false /*do not mangle*/,
                  true /* translation; xref case 10070 where this
                        * currently turns on full decode; today we
                        * provide no way to turn that off, as IR
                        * expansion routines are not exported (PR 200409). */
                  ,
                  INVALID_FILE, 0 /*no pre flags*/, NULL /*no overlap*/);
    build_bb_ilist((dcontext_t *)drcontext, &bb);
    return bb.ilist;
}

/* Client routine to decode a trace.  We return the instructions in
 * the original app code, i.e., no client modifications.
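 *
 * A minimal hypothetical usage sketch from a client (tag as passed to a
 * trace event; error handling elided):
 *
 *   instrlist_t *ilist = decode_trace(drcontext, tag);
 *   if (ilist != NULL) {
 *       for (instr_t *in = instrlist_first(ilist); in != NULL;
 *            in = instr_get_next(in)) {
 *           ... inspect the original app instrs ...
 *       }
 *       instrlist_clear_and_destroy(drcontext, ilist);
 *   }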
 */
instrlist_t *
decode_trace(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    fragment_t *frag = fragment_lookup(dcontext, tag);
    /* We don't support asking about other threads, for synch purposes
     * (see recreate_fragment_ilist() synch notes)
     */
    if (get_thread_private_dcontext() != dcontext)
        return NULL;

    if (frag != NULL && TEST(FRAG_IS_TRACE, frag->flags)) {
        instrlist_t *ilist;
        bool alloc_res;
        /* Support being called from bb/trace hook (couldbelinking) or
         * from cache clean call (nolinking).  We disallow asking about
         * another thread's private traces.
         */
        if (!is_couldbelinking(dcontext))
            d_r_mutex_lock(&thread_initexit_lock);
        ilist = recreate_fragment_ilist(dcontext, NULL, &frag, &alloc_res,
                                        false /*no mangling*/
                                        _IF_CLIENT(false /*do not re-call client*/));
        ASSERT(!alloc_res);
        if (!is_couldbelinking(dcontext))
            d_r_mutex_unlock(&thread_initexit_lock);

        return ilist;
    }

    return NULL;
}
#endif

app_pc
find_app_bb_end(dcontext_t *dcontext, byte *start_pc, uint flags)
{
    build_bb_t bb;
    init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/,
                  false /*do not mangle*/, false /*no translation*/, INVALID_FILE,
                  flags, NULL /*no overlap*/);
    build_bb_ilist(dcontext, &bb);
    instrlist_clear_and_destroy(dcontext, bb.ilist);
    return bb.end_pc;
}

bool
app_bb_overlaps(dcontext_t *dcontext, byte *start_pc, uint flags, byte *region_start,
                byte *region_end, overlap_info_t *info_res)
{
    build_bb_t bb;
    overlap_info_t info;
    info.region_start = region_start;
    info.region_end = region_end;
    init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/,
                  false /*do not mangle*/, false /*no translation*/, INVALID_FILE,
                  flags, &info);
    build_bb_ilist(dcontext, &bb);
    instrlist_clear_and_destroy(dcontext, bb.ilist);
    info.bb_end = bb.end_pc;
    if (info_res != NULL)
        *info_res = info;
    return info.overlap;
}

#ifdef DEBUG
static void
report_native_module(dcontext_t *dcontext, app_pc modpc)
{
    char name[MAX_MODNAME_INTERNAL];
    const char *modname = name;
    if (os_get_module_name_buf(modpc, name, BUFFER_SIZE_ELEMENTS(name)) == 0) {
        /* for native_exec_callcall we do end up putting DGC on native_exec_list */
        ASSERT(DYNAMO_OPTION(native_exec_callcall));
        modname = "<DGC>";
    }
    LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2,
        "module %s is on native list, executing natively\n", modname);
    STATS_INC(num_native_module_entrances);
    SYSLOG_INTERNAL_WARNING_ONCE("module %s set up for native execution", modname);
}
#endif

/* WARNING: breaks all kinds of rules, like ret addr transparency and
 * assuming app stack and not doing calls out of the cache and not having
 * control during dll loads, etc...
 */
static void
build_native_exec_bb(dcontext_t *dcontext, build_bb_t *bb)
{
    instr_t *in;
    opnd_t jmp_tgt;
#if defined(X86) && defined(X64)
    bool reachable = rel32_reachable_from_vmcode(bb->start_pc);
#endif
    DEBUG_DECLARE(bool ok;)
    /* if we ever protect from simultaneous thread attacks then this will
     * be a hole -- for now should work, all protected while native until
     * another thread goes into DR
     */
    /* Create a bb that changes the return address on the app stack such that we
     * will take control when coming back, and then goes native.
     * N.B.: we ASSUME we reached this module via a call --
     * build_basic_block_fragment needs to make sure, since we can't verify here
     * w/o trying to decode backward from retaddr, and if we're wrong we'll
     * clobber the stack and never regain control!
     * We also assume this bb is never reached later through a non-call.
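     *
     * The generated block is roughly the following sketch; the exact
     * registers, TLS slots, and clean-call targets vary by arch and options,
     * as the code below shows:
     *
     *   save SCRATCH_REG0 into the dcontext
     *   clean call -> call_to_native(xsp)  (or return_to_native())
     *   restore SCRATCH_REG0
     *   jmp bb->start_pc       (meta: the actual transfer to native code)
     *   jmp bb->start_pc       (dummy exit cti)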
*/ ASSERT(bb->initialized); ASSERT(bb->app_interp); ASSERT(!bb->record_translation); ASSERT(bb->start_pc != NULL); /* vmlist must start out empty (or N/A). For clients it may have started early. */ ASSERT(bb->vmlist == NULL || !bb->record_vmlist || bb->checked_start_vmarea); if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags)) bb->flags &= ~FRAG_HAS_TRANSLATION_INFO; bb->native_exec = true; BBPRINT(bb, IF_DGCDIAG_ELSE(1, 2), "build_native_exec_bb @" PFX "\n", bb->start_pc); DOLOG(2, LOG_INTERP, { dump_mcontext(get_mcontext(dcontext), THREAD, DUMP_NOT_XML); }); if (!bb->checked_start_vmarea) check_new_page_start(dcontext, bb); /* create instrlist after check_new_page_start to avoid memory leak * on unreadable memory * WARNING: do not add any app instructions to this ilist! * If you do you must enable selfmod below. */ bb->ilist = instrlist_create(dcontext); /* FIXME PR 303413: we won't properly translate a fault in our app * stack references here. We mark as our own mangling so we'll at * least return failure from our translate routine. */ instrlist_set_our_mangling(bb->ilist, true); /* get dcontext to xdi, for prot-dcontext, xsi holds upcontext too */ insert_shared_get_dcontext(dcontext, bb->ilist, NULL, true /*save xdi*/); instrlist_append(bb->ilist, instr_create_save_to_dc_via_reg(dcontext, REG_NULL /*default*/, SCRATCH_REG0, SCRATCH_REG0_OFFS)); /* need some cleanup prior to native: turn off asynch, clobber trace, etc. * Now that we have a stack of native retaddrs, we save the app retaddr in C * code. */ if (bb->native_call) { dr_insert_clean_call_ex(dcontext, bb->ilist, NULL, (void *)call_to_native, DR_CLEANCALL_RETURNS_TO_NATIVE, 1, opnd_create_reg(REG_XSP)); } else { if (DYNAMO_OPTION(native_exec_opt)) { insert_return_to_native(dcontext, bb->ilist, NULL, REG_NULL /* default */, SCRATCH_REG0); } else { dr_insert_clean_call_ex(dcontext, bb->ilist, NULL, (void *)return_to_native, DR_CLEANCALL_RETURNS_TO_NATIVE, 0); } } #if defined(X86) && defined(X64) if (!reachable) { /* best to store the target at the end of the bb, to keep it readonly, * but that requires a post-pass to patch its value: since native_exec * is already hacky we just go through TLS and ignore multi-thread selfmod. */ instrlist_append( bb->ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(SCRATCH_REG0), OPND_CREATE_INTPTR((ptr_int_t)bb->start_pc))); if (X64_CACHE_MODE_DC(dcontext) && !X64_MODE_DC(dcontext) && DYNAMO_OPTION(x86_to_x64_ibl_opt)) { jmp_tgt = opnd_create_reg(REG_R9); } else { jmp_tgt = opnd_create_tls_slot(os_tls_offset(MANGLE_XCX_SPILL_SLOT)); } instrlist_append( bb->ilist, INSTR_CREATE_mov_st(dcontext, jmp_tgt, opnd_create_reg(REG_XAX))); } else #endif { jmp_tgt = opnd_create_pc(bb->start_pc); } instrlist_append(bb->ilist, instr_create_restore_from_dc_via_reg(dcontext, REG_NULL /*default*/, SCRATCH_REG0, SCRATCH_REG0_OFFS)); insert_shared_restore_dcontext_reg(dcontext, bb->ilist, NULL); #ifdef AARCH64 ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */ #else /* this is the jump to native code */ instrlist_append(bb->ilist, opnd_is_pc(jmp_tgt) ? XINST_CREATE_jump(dcontext, jmp_tgt) : XINST_CREATE_jump_mem(dcontext, jmp_tgt)); #endif /* mark all as do-not-mangle, so selfmod, etc. 
will leave alone (in absence * of selfmod only really needed for the jmp to native code) */ for (in = instrlist_first(bb->ilist); in != NULL; in = instr_get_next(in)) instr_set_meta(in); /* this is a jump for a dummy exit cti */ instrlist_append(bb->ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(bb->start_pc))); if (DYNAMO_OPTION(shared_bbs) && !TEST(FRAG_TEMP_PRIVATE, bb->flags)) bb->flags |= FRAG_SHARED; /* Can't be coarse-grain since has non-exit cti */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_native_exec); /* We exclude the bb from trace to avoid going native in the process of * building a trace for simplicity. * XXX i#1239: DR needs to be able to unlink native exec gateway bbs for * proper cache consistency and signal handling, in which case we could * use FRAG_MUST_END_TRACE here instead. */ bb->flags |= FRAG_CANNOT_BE_TRACE; /* We support mangling here, though currently we don't need it as we don't * include any app code (although we mark this bb as belonging to the start * pc, so we'll get flushed if this region does), and even if target is * selfmod we're running it natively no matter how it modifies itself. We * only care that transition to target is via a call or call* so we can * clobber the retaddr and regain control, and that no retaddr mangling * happens while native before coming back out. While the former does not * depend on the target at all, unfortunately we cannot verify the latter. */ if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) bb->flags &= ~FRAG_SELFMOD_SANDBOXED; DEBUG_DECLARE(ok =) mangle_bb_ilist(dcontext, bb); ASSERT(ok); #ifdef DEBUG DOLOG(3, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 3, "native_exec_bb @" PFX "\n", bb->start_pc); instrlist_disassemble(dcontext, bb->start_pc, bb->ilist, THREAD); }); #endif } static bool at_native_exec_gateway(dcontext_t *dcontext, app_pc start, bool *is_call _IF_DEBUG(bool xfer_target)) { /* ASSUMPTION: transfer to another module will always be by indirect call * or non-inlined direct call from a fragment that will not be flushed. * For now we will only go native if last_exit was * a call, a true call*, or a PLT-style call,jmp* (and we detect the latter only * if the call is inlined, so if the jmp* table is in a DGC-marked region * or if -no_inline_calls we will miss these: FIXME). * FIXME: what if have PLT-style but no GOT indirection: call,jmp ?!? * * We try to identify funky call* constructions (like * call*,...,jmp* in case 4269) by examining TOS to see whether it's a * retaddr -- we do this if last_exit is a jmp* or is unknown (for the * target_delete ibl path). * * FIXME: we will fail to identify a delay-loaded indirect xfer! * Need to know dynamic link patchup code to look for. * * FIXME: we will fail to take over w/ non-call entrances to a dll, like * NtContinue or direct jmp from DGC. * we could try to take the top-of-stack value and see if it's a retaddr by * decoding the prev instr to see if it's a call. decode backwards may have * issues, and if really want everything will have to do this on every bb, * not just if lastexit is ind xfer. * * We count up easy-to-identify cases we've missed in the DOSTATS below. */ bool native_exec_bb = false; /* We can get here if we start interpreting native modules. */ ASSERT(start != (app_pc)back_from_native && start != (app_pc)native_module_callout && "interpreting return from native module?"); ASSERT(is_call != NULL); *is_call = false; if (DYNAMO_OPTION(native_exec) && !vmvector_empty(native_exec_areas)) { /* do we KNOW that we came from an indirect call? 
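         * (LINK_CALL in last_exit->flags covers true calls as well as
         * PLT-style call;jmp* sequences -- see the IND_JMP_PLT note in the
         * test below.)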
*/ if (TEST(LINK_CALL /*includes IND_JMP_PLT*/, dcontext->last_exit->flags) && /* only check direct calls if native_exec_dircalls is on */ (DYNAMO_OPTION(native_exec_dircalls) || LINKSTUB_INDIRECT(dcontext->last_exit->flags))) { STATS_INC(num_native_entrance_checks); /* we do the overlap check last since it's more costly */ if (is_native_pc(start)) { native_exec_bb = true; *is_call = true; DOSTATS({ if (EXIT_IS_CALL(dcontext->last_exit->flags)) { if (LINKSTUB_INDIRECT(dcontext->last_exit->flags)) STATS_INC(num_native_module_entrances_indcall); else STATS_INC(num_native_module_entrances_call); } else STATS_INC(num_native_module_entrances_plt); }); } } /* can we GUESS that we came from an indirect call? */ else if (DYNAMO_OPTION(native_exec_guess_calls) && (/* FIXME: require jmp* be in separate module? */ (LINKSTUB_INDIRECT(dcontext->last_exit->flags) && EXIT_IS_JMP(dcontext->last_exit->flags)) || LINKSTUB_FAKE(dcontext->last_exit))) { /* if unknown last exit, or last exit was jmp*, examine TOS and guess * whether it's a retaddr */ app_pc *tos = (app_pc *)get_mcontext(dcontext)->xsp; STATS_INC(num_native_entrance_TOS_checks); /* vector check cheaper than is_readable syscall, etc. so do it before them, * but after last_exit checks above since overlap is more costly */ if (is_native_pc(start) && is_readable_without_exception((app_pc)tos, sizeof(app_pc))) { enum { MAX_CALL_CONSIDER = 6 /* ignore prefixes */ }; app_pc retaddr = *tos; LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "at native_exec target: checking TOS " PFX " => " PFX " for retaddr\n", tos, retaddr); #ifdef RETURN_AFTER_CALL if (DYNAMO_OPTION(ret_after_call)) { native_exec_bb = is_observed_call_site(dcontext, retaddr); *is_call = true; LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "native_exec: *TOS is %sa call site in ret-after-call table\n", native_exec_bb ? "" : "NOT "); } else { #endif /* try to decode backward -- make sure readable for decoding */ if (is_readable_without_exception(retaddr - MAX_CALL_CONSIDER, MAX_CALL_CONSIDER + MAX_INSTR_LENGTH)) { /* ind calls have variable length and form so we decode * each byte rather than searching for ff and guessing length */ app_pc pc, next_pc; instr_t instr; instr_init(dcontext, &instr); for (pc = retaddr - MAX_CALL_CONSIDER; pc < retaddr; pc++) { LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 3, "native_exec: decoding @" PFX " looking for call\n", pc); instr_reset(dcontext, &instr); next_pc = IF_AARCH64_ELSE(decode_cti_with_ldstex, decode_cti)(dcontext, pc, &instr); STATS_INC(num_native_entrance_TOS_decodes); if (next_pc == retaddr && instr_is_call(&instr)) { native_exec_bb = true; *is_call = true; LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "native_exec: found call @ pre-*TOS " PFX "\n", pc); break; } } instr_free(dcontext, &instr); } #ifdef RETURN_AFTER_CALL } #endif DOSTATS({ if (native_exec_bb) { if (LINKSTUB_FAKE(dcontext->last_exit)) STATS_INC(num_native_module_entrances_TOS_unknown); else STATS_INC(num_native_module_entrances_TOS_jmp); } }); } } /* i#2381: Only now can we check things that might preempt the * "guess" code above. */ /* Is this a return from a non-native module into a native module? */ if (!native_exec_bb && DYNAMO_OPTION(native_exec_retakeover) && LINKSTUB_INDIRECT(dcontext->last_exit->flags) && TEST(LINK_RETURN, dcontext->last_exit->flags)) { if (is_native_pc(start)) { /* XXX: check that this is the return address of a known native * callsite where we took over on a module transition. 
             */
            STATS_INC(num_native_module_entrances_ret);
            native_exec_bb = true;
            *is_call = false;
        }
    }
#ifdef UNIX
        /* Is this the entry point of a native ELF executable?  The entry point
         * (usually _start) cannot return as there is no retaddr.
         */
        else if (!native_exec_bb && DYNAMO_OPTION(native_exec_retakeover) &&
                 LINKSTUB_INDIRECT(dcontext->last_exit->flags) &&
                 start == get_image_entry()) {
            if (is_native_pc(start)) {
                native_exec_bb = true;
                *is_call = false;
            }
        }
#endif

        DOSTATS({
            /* did we reach a native dll w/o going through an ind call caught above? */
            if (!xfer_target /* else we'll re-check at the target itself */ &&
                !native_exec_bb && is_native_pc(start)) {
                LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2,
                    "WARNING: pc " PFX " is on native list but reached bypassing "
                    "gateway!\n",
                    start);
                STATS_INC(num_native_entrance_miss);
                /* do-once since once get into dll past gateway may xfer
                 * through a bunch of lastexit-null or indjmp to same dll
                 */
                ASSERT_CURIOSITY_ONCE(false && "inside native_exec dll");
            }
        });
    }

    return native_exec_bb;
}

/* Use when calling build_bb_ilist with for_cache = true.
 * Must hold bb_building_lock.
 */
static inline void
init_interp_build_bb(dcontext_t *dcontext, build_bb_t *bb, app_pc start,
                     uint initial_flags _IF_CLIENT(bool for_trace)
                         _IF_CLIENT(instrlist_t **unmangled_ilist))
{
    ASSERT_OWN_MUTEX(USE_BB_BUILDING_LOCK() && !TEST(FRAG_TEMP_PRIVATE, initial_flags),
                     &bb_building_lock);
    /* We need to set up for abort prior to native exec and other checks
     * that can crash */
    ASSERT(dcontext->bb_build_info == NULL);
    /* This won't make us be nested b/c for bb.for_cache caller is supposed
     * to set this up */
    dcontext->bb_build_info = (void *)bb;

    init_build_bb(
        bb, start, true /*real interp*/, true /*for cache*/, true /*mangle*/,
        false /* translation: set below for clients */, INVALID_FILE,
        initial_flags |
            (INTERNAL_OPTION(store_translations) ? FRAG_HAS_TRANSLATION_INFO : 0),
        NULL /*no overlap*/);
    if (!TEST(FRAG_TEMP_PRIVATE, initial_flags))
        bb->has_bb_building_lock = true;
#ifdef CLIENT_INTERFACE
    /* We avoid races where there is no hook when we start building a
     * bb (and hence we don't record translation or do full decode) yet
     * a hook when we're ready to call one by storing whether there is a
     * hook at translation/decode decision time: now.
     */
    if (dr_bb_hook_exists()) {
        /* i#805: Don't instrument code on the null instru list.
         * Because the module load event is now on 1st exec, we need to trigger
         * it now so the client can adjust the null instru list:
         */
        check_new_page_start(dcontext, bb);
        bb->checked_start_vmarea = true;
        if (!os_module_get_flag(bb->start_pc, MODULE_NULL_INSTRUMENT))
            bb->pass_to_client = true;
    }
    /* PR 299808: even if no bb hook, for a trace hook we need to
     * record translation and do full decode.  It's racy to check
     * dr_trace_hook_exists() here so we rely on trace building having
     * set unmangled_ilist.
     */
    if (bb->pass_to_client || unmangled_ilist != NULL) {
        /* case 10009/214444: For client interface builds, store the translation
         * by default.  This ensures clients can get the correct app address
         * of any instruction.  We also rely on this for allowing the client
         * to return DR_EMIT_STORE_TRANSLATIONS and setting the
         * FRAG_HAS_TRANSLATION_INFO flag after decoding the app code.
         *
         * FIXME: xref case 10070/214505.  Currently this means that all
         * instructions are fully decoded for client interface builds.
         */
        bb->record_translation = true;
        /* PR 200409: If a bb hook exists, we always do a full decode.
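         * (A full decode here means each app instr is eagerly decoded into a
         * complete instr_t rather than left as raw bits to be expanded
         * lazily.)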
* Note that we currently do this anyway to get * translation fields, but once we fix case 10070 it * won't be that way. * We do not let the client turn this off (the runtime * option is not dynamic, and off by default anyway), as we * do not export level-handling instr_t routines like *_expand * for walking instrlists and instr_decode(). */ bb->full_decode = !INTERNAL_OPTION(fast_client_decode); /* PR 299808: we give client chance to re-add instrumentation */ bb->for_trace = for_trace; } /* we need to clone the ilist pre-mangling */ bb->unmangled_ilist = unmangled_ilist; #endif } static inline void exit_interp_build_bb(dcontext_t *dcontext, build_bb_t *bb) { ASSERT(dcontext->bb_build_info == (void *)bb); /* Caller's responsibility to clean up since bb.for_cache */ dcontext->bb_build_info = NULL; /* free the instrlist_t elements */ instrlist_clear_and_destroy(dcontext, bb->ilist); } /* Interprets the application's instructions until the end of a basic * block is found, and then creates a fragment for the basic block. * DOES NOT look in the hashtable to see if such a fragment already exists! */ fragment_t * build_basic_block_fragment(dcontext_t *dcontext, app_pc start, uint initial_flags, bool link, bool visible _IF_CLIENT(bool for_trace) _IF_CLIENT(instrlist_t **unmangled_ilist)) { fragment_t *f; build_bb_t bb; dr_where_am_i_t wherewasi = dcontext->whereami; bool image_entry; KSTART(bb_building); dcontext->whereami = DR_WHERE_INTERP; /* Neither thin_client nor hotp_only should be building any bbs. */ ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); /* ASSUMPTION: image entry is reached via indirect transfer and * so will be the start of a bb */ image_entry = check_for_image_entry(start); init_interp_build_bb(dcontext, &bb, start, initial_flags _IF_CLIENT(for_trace) _IF_CLIENT(unmangled_ilist)); if (at_native_exec_gateway(dcontext, start, &bb.native_call _IF_DEBUG(false /*not xfer tgt*/))) { DODEBUG({ report_native_module(dcontext, bb.start_pc); }); #ifdef CLIENT_INTERFACE /* PR 232617 - build_native_exec_bb doesn't support setting translation * info, but it also doesn't pass the built bb to the client (it * contains no app code) so we don't need it. */ bb.record_translation = false; #endif build_native_exec_bb(dcontext, &bb); } else { build_bb_ilist(dcontext, &bb); if (dcontext->bb_build_info == NULL) { /* going native */ f = NULL; goto build_basic_block_fragment_done; } if (bb.native_exec) { /* change bb to be a native_exec gateway */ bool is_call = bb.native_call; LOG(THREAD, LOG_INTERP, 2, "replacing built bb with native_exec bb\n"); instrlist_clear_and_destroy(dcontext, bb.ilist); vm_area_destroy_list(dcontext, bb.vmlist); dcontext->bb_build_info = NULL; init_interp_build_bb(dcontext, &bb, start, initial_flags _IF_CLIENT(for_trace) _IF_CLIENT(unmangled_ilist)); #ifdef CLIENT_INTERFACE /* PR 232617 - build_native_exec_bb doesn't support setting * translation info, but it also doesn't pass the built bb to the * client (it contains no app code) so we don't need it. 
             */
            bb.record_translation = false;
#endif
            bb.native_call = is_call;
            build_native_exec_bb(dcontext, &bb);
        }
    }
    /* case 9652: we do not want to persist the image entry point, so we keep
     * it fine-grained
     */
    if (image_entry)
        bb.flags &= ~FRAG_COARSE_GRAIN;

    if (DYNAMO_OPTION(opt_jit) && visible && is_jit_managed_area(bb.start_pc)) {
        ASSERT(bb.overlap_info == NULL || bb.overlap_info->contiguous);
        jitopt_add_dgc_bb(bb.start_pc, bb.end_pc, TEST(FRAG_IS_TRACE_HEAD, bb.flags));
    }

    /* emit fragment into fcache */
    KSTART(bb_emit);
    f = emit_fragment_ex(dcontext, start, bb.ilist, bb.flags, bb.vmlist, link, visible);
    KSTOP(bb_emit);
#ifdef CUSTOM_TRACES_RET_REMOVAL
    f->num_calls = dcontext->num_calls;
    f->num_rets = dcontext->num_rets;
#endif

#ifdef DGC_DIAGNOSTICS
    if ((f->flags & FRAG_DYNGEN)) {
        LOG(THREAD, LOG_INTERP, 1, "new bb is DGC:\n");
        DOLOG(1, LOG_INTERP, { disassemble_app_bb(dcontext, start, THREAD); });
        DOLOG(3, LOG_INTERP, { disassemble_fragment(dcontext, f, false); });
    }
#endif
    DOLOG(2, LOG_INTERP,
          { disassemble_fragment(dcontext, f, d_r_stats->loglevel <= 3); });
    DOLOG(4, LOG_INTERP, {
        if (TEST(FRAG_SELFMOD_SANDBOXED, f->flags)) {
            LOG(THREAD, LOG_INTERP, 4, "\nXXXX sandboxed fragment!  original code:\n");
            disassemble_app_bb(dcontext, f->tag, THREAD);
            LOG(THREAD, LOG_INTERP, 4, "code cache code:\n");
            disassemble_fragment(dcontext, f, false);
        }
    });
#if defined(INTERNAL) || defined(DEBUG) || defined(CLIENT_INTERFACE)
    if (INTERNAL_OPTION(bbdump_tags)) {
        disassemble_fragment_header(dcontext, f, bbdump_file);
    }
#endif

#ifdef INTERNAL
    DODEBUG({
        if (INTERNAL_OPTION(stress_recreate_pc)) {
            /* verify recreation */
            stress_test_recreate(dcontext, f, bb.ilist);
        }
    });
#endif

    exit_interp_build_bb(dcontext, &bb);
build_basic_block_fragment_done:
    dcontext->whereami = wherewasi;
    KSTOP(bb_building);
    return f;
}

/* Builds an instrlist_t as though building a bb from pretend_pc, but decodes
 * from pc.
 * Use recreate_fragment_ilist() for building an instrlist_t for a fragment.
 * If check_vm_area is false, it does NOT call check_thread_vm_area()!
 * Make sure you know it will terminate at the right spot.  It does
 * check selfmod and native_exec for elision, but otherwise will
 * follow ubrs to the limit.  Currently used for
 * record_translation_info() (case 3559).
 * If vmlist!=NULL and check_vm_area, returns the vmlist, which the
 * caller must free by calling vm_area_destroy_list.
 */
instrlist_t *
recreate_bb_ilist(dcontext_t *dcontext, byte *pc, byte *pretend_pc, app_pc stop_pc,
                  uint flags, uint *res_flags OUT, uint *res_exit_type OUT,
                  bool check_vm_area, bool mangle, void **vmlist_out OUT
                      _IF_CLIENT(bool call_client) _IF_CLIENT(bool for_trace))
{
    build_bb_t bb;

    /* don't know full range -- just do simple check now */
    if (!is_readable_without_exception(pc, 4)) {
        LOG(THREAD, LOG_INTERP, 3, "recreate_bb_ilist: cannot read memory at " PFX "\n",
            pc);
        return NULL;
    }

    LOG(THREAD, LOG_INTERP, 3, "\nbuilding bb instrlist now *********************\n");
    init_build_bb(&bb, pc, false /*not interp*/, false /*not for cache*/, mangle,
                  true /*translation*/, INVALID_FILE, flags, NULL /*no overlap*/);
    /* We support a stop pc to ensure selfmod matches how it was originally built,
     * w/o having to include the next instr which might have triggered the bb
     * termination but not been included in the bb (i#1441).
     * It only applies to full_decode.
*/ bb.stop_pc = stop_pc; bb.check_vm_area = check_vm_area; if (check_vm_area && vmlist_out != NULL) bb.record_vmlist = true; #ifdef CLIENT_INTERFACE if (check_vm_area && !bb.record_vmlist) bb.record_vmlist = true; /* for xl8 region checks */ /* PR 214962: we call bb hook again, unless the client told us * DR_EMIT_STORE_TRANSLATIONS, in which case we shouldn't come here, * except for traces (see below): */ bb.pass_to_client = (DYNAMO_OPTION(code_api) && call_client && /* i#843: This flag cannot be changed dynamically, so * its current value should match the value used at * ilist building time. Alternatively, we could store * bb->pass_to_client in the fragment. */ !os_module_get_flag(pc, MODULE_NULL_INSTRUMENT)); /* PR 299808: we call bb hook again when translating a trace that * didn't have DR_EMIT_STORE_TRANSLATIONS on itself (or on any * for_trace bb if there was no trace hook). */ bb.for_trace = for_trace; /* instrument_basic_block, called by build_bb_ilist, verifies that all * non-meta instrs have translation fields */ #endif if (pretend_pc != pc) bb.pretend_pc = pretend_pc; build_bb_ilist(dcontext, &bb); LOG(THREAD, LOG_INTERP, 3, "\ndone building bb instrlist *********************\n\n"); if (res_flags != NULL) *res_flags = bb.flags; if (res_exit_type != NULL) *res_exit_type = bb.exit_type; if (check_vm_area && vmlist_out != NULL) *vmlist_out = bb.vmlist; else if (bb.record_vmlist) vm_area_destroy_list(dcontext, bb.vmlist); return bb.ilist; } /* Re-creates an ilist of the fragment that currently contains the * passed-in code cache pc, also returns the fragment. * * Exactly one of pc and (f_res or *f_res) must be NULL: * If pc==NULL, assumes that *f_res is the fragment to use; * else, looks up the fragment, allocating it if necessary. * If f_res!=NULL, the fragment is returned and whether it was allocated * is returned in the alloc_res param. * If f_res==NULL, if the fragment was allocated it is freed here. * * NOTE : does not add prefix instructions to the created ilist, if we change * this to add them be sure to check recreate_app_* for compatibility (for ex. * adding them and setting their translation to pc would break current * implementation, also setting translation to NULL would trigger an assert) * * Returns NULL if unable to recreate the fragment ilist (fragment not found * or fragment is pending deletion and app memory might have changed). * In that case f_res is still pointed at the fragment if it was found, and * alloc is valid. 
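 *
 * A hypothetical call pattern, sketched from the contract above (see
 * decode_trace() for a real caller that looks the fragment up by tag):
 *
 *   bool alloc;
 *   fragment_t *f = NULL;
 *   instrlist_t *ilist = recreate_fragment_ilist(dcontext, pc, &f, &alloc,
 *                                                true _IF_CLIENT(true));
 *   if (ilist != NULL) {
 *       ... use ilist ...
 *       instrlist_clear_and_destroy(dcontext, ilist);
 *   }
 *   if (f != NULL && alloc)
 *       fragment_free(dcontext, f);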
* * For proper synchronization : * If caller is the dcontext's owner then needs to be couldbelinking, otherwise * the dcontext's owner should be suspended and the callers should own the * thread_initexit_lock */ instrlist_t * recreate_fragment_ilist(dcontext_t *dcontext, byte *pc, /*IN/OUT*/ fragment_t **f_res, /*OUT*/ bool *alloc_res, bool mangle _IF_CLIENT(bool call_client)) { fragment_t *f; uint flags = 0; instrlist_t *ilist; bool alloc = false, ok; monitor_data_t md = { 0, }; dr_isa_mode_t old_mode = DEFAULT_ISA_MODE; /* check synchronization, we need to make sure no one flushes the * fragment we just looked up while we are recreating it, if it's the * caller's dcontext then just need to be couldbelinking, otherwise need * the thread_initexit_lock since then we are looking up in someone else's * table (the dcontext's owning thread would also need to be suspended) */ ASSERT((dcontext != GLOBAL_DCONTEXT && d_r_get_thread_id() == dcontext->owning_thread && is_couldbelinking(dcontext)) || (ASSERT_OWN_MUTEX(true, &thread_initexit_lock), true)); STATS_INC(num_recreated_fragments); if (pc == NULL) { ASSERT(f_res != NULL && *f_res != NULL); f = *f_res; } else { /* Ensure callers don't give us both valid f and valid pc */ ASSERT(f_res == NULL || *f_res == NULL); LOG(THREAD, LOG_INTERP, 3, "recreate_fragment_ilist: looking up pc " PFX "\n", pc); f = fragment_pclookup_with_linkstubs(dcontext, pc, &alloc); LOG(THREAD, LOG_INTERP, 3, "\tfound F%d\n", f == NULL ? -1 : f->id); if (f_res != NULL) *f_res = f; /* ref case 3559, others, we won't be able to reliably recreate if * target is pending flush, original memory might no longer be there or * the memory might have changed. caller should use the stored * translation info instead. */ if (f == NULL || TEST(FRAG_WAS_DELETED, f->flags)) { ASSERT(f != NULL || !alloc); /* alloc shouldn't be set if no f */ ilist = NULL; goto recreate_fragment_done; } } /* Recreate in same mode as original fragment */ ok = dr_set_isa_mode(dcontext, FRAG_ISA_MODE(f->flags), &old_mode); ASSERT(ok); if ((f->flags & FRAG_IS_TRACE) == 0) { /* easy case: just a bb */ ilist = recreate_bb_ilist( dcontext, (byte *)f->tag, (byte *)f->tag, NULL /*default stop*/, 0 /*no pre flags*/, &flags, NULL, true /*check vm area*/, mangle, NULL _IF_CLIENT(call_client) _IF_CLIENT(false /*not for_trace*/)); ASSERT(ilist != NULL); if (ilist == NULL) /* a race */ goto recreate_fragment_done; if (PAD_FRAGMENT_JMPS(f->flags)) nop_pad_ilist(dcontext, f, ilist, false /* set translation */); goto recreate_fragment_done; } else { /* build trace up one bb at a time */ instrlist_t *bb; byte *apc; trace_only_t *t = TRACE_FIELDS(f); uint i; instr_t *last; bool mangle_at_end = mangle_trace_at_end(); if (mangle_at_end) { /* we need an md for mangle_trace */ md.trace_tag = f->tag; /* be sure we ask for translation fields */ md.trace_flags = f->flags | FRAG_HAS_TRANSLATION_INFO; md.num_blks = t->num_bbs; md.blk_info = (trace_bb_build_t *)HEAP_ARRAY_ALLOC( dcontext, trace_bb_build_t, md.num_blks, ACCT_TRACE, true); #ifdef CLIENT_INTERFACE md.pass_to_client = true; #endif } ilist = instrlist_create(dcontext); STATS_INC(num_recreated_traces); ASSERT(t->bbs != NULL); for (i = 0; i < t->num_bbs; i++) { void *vmlist = NULL; apc = (byte *)t->bbs[i].tag; bb = recreate_bb_ilist(dcontext, apc, apc, NULL /*default stop*/, 0 /*no pre flags*/, &flags, &md.final_exit_flags, true /*check vm area*/, !mangle_at_end, (mangle_at_end ? 
&vmlist : NULL)_IF_CLIENT(call_client) _IF_CLIENT(true /*for_trace*/)); ASSERT(bb != NULL); if (bb == NULL) { instrlist_clear_and_destroy(dcontext, ilist); vm_area_destroy_list(dcontext, vmlist); ilist = NULL; goto recreate_fragment_done; } if (mangle_at_end) md.blk_info[i].info = t->bbs[i]; last = instrlist_last(bb); ASSERT(last != NULL); #ifdef CLIENT_INTERFACE if (mangle_at_end) { md.blk_info[i].vmlist = vmlist; md.blk_info[i].final_cti = instr_is_cti(instrlist_last(bb)); } #endif /* PR 299808: we need to duplicate what we did when we built the trace. * While if there's no client trace hook we could mangle and fixup as we * go, for simplicity we mangle at the end either way (in either case our * code here is not exactly what we did when we made it anyway) * PR 333597: we can't use mangle_trace if we have elision on. */ if (mangle && !mangle_at_end) { /* To duplicate the trace-building logic: * - call fixup_last_cti() * - retarget the ibl routine just like extend_trace() does */ app_pc target = (last != NULL) ? opnd_get_pc(instr_get_target(last)) : NULL; /* FIXME: is it always safe */ /* convert a basic block IBL, and retarget it to IBL_TRACE* */ if (target != NULL && is_indirect_branch_lookup_routine(dcontext, target)) { target = get_alternate_ibl_routine(dcontext, target, f->flags); ASSERT(target != NULL); LOG(THREAD, LOG_MONITOR, 3, "recreate_fragment_ilist: replacing ibl_routine to target=" PFX "\n", target); instr_set_target(last, opnd_create_pc(target)); } if (DYNAMO_OPTION(pad_jmps) && !INTERNAL_OPTION(pad_jmps_shift_bb)) { /* FIXME - hack, but pad_jmps_shift_bb will be on by * default. Synchronize changes here with recreate_fragment_ilist. * This hack is protected by asserts in nop_pad_ilist() (that * we never add nops to a bb if -pad_jmps_shift_bb) and in * extend_trace_pad_bytes (that we only add bbs to traces). 
*/ /* FIXME - on linux the signal fence exit can trigger the * protective assert in nop_pad_ilist() */ remove_nops_from_ilist(dcontext, bb _IF_DEBUG(true)); } if (instrlist_last(ilist) != NULL) { fixup_last_cti(dcontext, ilist, (app_pc)apc, flags, f->flags, NULL, NULL, true /* record translation */, NULL, NULL, NULL); } } instrlist_append(ilist, instrlist_first(bb)); instrlist_init(bb); /* to clear fields to make destroy happy */ instrlist_destroy(dcontext, bb); } #ifdef CLIENT_INTERFACE /* PR 214962: re-apply client changes, this time storing translation * info for modified instrs */ if (call_client) /* else it's decode_trace() who is not really recreating */ instrument_trace(dcontext, f->tag, ilist, true); /* instrument_trace checks that all non-meta instrs have translation fields */ #endif if (mangle) { if (mangle_at_end) { if (!mangle_trace(dcontext, ilist, &md)) { instrlist_clear_and_destroy(dcontext, ilist); ilist = NULL; goto recreate_fragment_done; } } /* else we mangled one bb at a time up above */ #ifdef INTERNAL /* we only optimize traces */ if (dynamo_options.optimize) { /* re-apply all optimizations to ilist * assumption: all optimizations are deterministic and stateless, * so we can exactly replicate their results */ LOG(THREAD_GET, LOG_INTERP, 2, "\tre-applying optimizations to F%d\n", f->id); # ifdef SIDELINE if (dynamo_options.sideline) { if (!TEST(FRAG_DO_NOT_SIDELINE, f->flags)) optimize_trace(dcontext, f->tag, ilist); /* else, never optimized */ } else # endif optimize_trace(dcontext, f->tag, ilist); } #endif /* FIXME: case 4718 append_trace_speculate_last_ibl(true) * should be called as well */ if (PAD_FRAGMENT_JMPS(f->flags)) nop_pad_ilist(dcontext, f, ilist, false /* set translation */); } } recreate_fragment_done: if (md.blk_info != NULL) { uint i; for (i = 0; i < md.num_blks; i++) { vm_area_destroy_list(dcontext, md.blk_info[i].vmlist); md.blk_info[i].vmlist = NULL; } HEAP_ARRAY_FREE(dcontext, md.blk_info, trace_bb_build_t, md.num_blks, ACCT_TRACE, true); } if (alloc_res != NULL) *alloc_res = alloc; if (f_res == NULL && alloc) fragment_free(dcontext, f); ok = dr_set_isa_mode(dcontext, old_mode, NULL); ASSERT(ok); return ilist; } /*** TRACE BUILDING ROUTINES *****************************************************/ static void process_nops_for_trace(dcontext_t *dcontext, instrlist_t *ilist, uint flags _IF_DEBUG(bool recreating)) { if (PAD_FRAGMENT_JMPS(flags) && !INTERNAL_OPTION(pad_jmps_shift_bb)) { /* FIXME - hack, but pad_jmps_shift_bb will be on by * default. Synchronize changes here with recreate_fragment_ilist. * This hack is protected by asserts in nop_pad_ilist() (that * we never add nops to a bb if -pad_jmps_shift_bb) and in * extend_trace_pad_bytes (that we only add bbs to traces). */ /* FIXME - on linux the signal fence exit can trigger the * protective assert in nop_pad_ilist() */ remove_nops_from_ilist(dcontext, ilist _IF_DEBUG(recreating)); } } #ifdef CUSTOM_EXIT_STUBS /* * Builds custom exit stub instrlist for exit_cti, whose stub is l * Assumes that intra-fragment cti's in the custom stub only target other * instructions in the same stub, never in the body of the fragment or * in other stubs. FIXME: is this too restrictive? If change this, * change the comment in instr_set_exit_stub_code's declaration. 
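 * The routine below works in two passes: the first decodes the stub,
 * marking off-fragment ctis for re-encoding and cloning each intra-stub
 * cti into a side list (stashing the original in the clone's note field);
 * the second pass re-points each original cti at the instr_t it targets,
 * so the targets survive later changes to the instrlist.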
*/ static void regenerate_custom_exit_stub(dcontext_t *dcontext, instr_t *exit_cti, linkstub_t *l, fragment_t *f) { /* need to decode and restore custom stub instrlist */ byte *cspc = EXIT_STUB_PC(dcontext, f, l); byte *stop = EXIT_FIXED_STUB_PC(dcontext, f, l); instr_t *in, *cti; instrlist_t intra_ctis; instrlist_t *cil = instrlist_create(dcontext); cache_pc start_pc = FCACHE_ENTRY_PC(f); ASSERT(DYNAMO_OPTION(indirect_stubs)); if (l->fixed_stub_offset == 0) return; /* has no custom exit stub */ LOG(THREAD, LOG_INTERP, 3, "in regenerate_custom_exit_stub\n"); instrlist_init(&intra_ctis); while (cspc < stop) { in = instr_create(dcontext); cspc = decode(dcontext, cspc, in); ASSERT(cspc != NULL); /* our own code! */ if (instr_is_cti(in)) { if (!instr_is_return(in) && opnd_is_near_pc(instr_get_target(in)) && (opnd_get_pc(instr_get_target(in)) < start_pc || opnd_get_pc(instr_get_target(in)) > start_pc + f->size)) { d_r_loginst(dcontext, 3, in, "\tcti has off-fragment target"); /* indicate that relative target must be * re-encoded, and that it is not an exit cti */ instr_set_meta(in); instr_set_raw_bits_valid(in, false); } else if (opnd_is_near_pc(instr_get_target(in))) { /* intra-fragment target: we'll change its target operand * from pc to instr_t in second pass, so remember it here */ instr_t *clone = instr_clone(dcontext, in); /* HACK: use note field! */ instr_set_note(clone, (void *)in); instrlist_append(&intra_ctis, clone); } } instrlist_append(cil, in); } /* must fix up intra-ilist cti's to have instr_t targets * assumption: they only target other instrs in custom stub * FIXME: allow targeting other instrs? */ for (in = instrlist_first(cil); in != NULL; in = instr_get_next(in)) { for (cti = instrlist_first(&intra_ctis); cti != NULL; cti = instr_get_next(cti)) { if (opnd_get_pc(instr_get_target(cti)) == instr_get_raw_bits(in)) { /* cti targets this instr */ instr_t *real_cti = (instr_t *)instr_get_note(cti); /* Do not preserve raw bits just in case instrlist changes * and the instr target moves (xref PR 333691) */ instr_set_target(real_cti, opnd_create_instr(in)); d_r_loginst(dcontext, 3, real_cti, "\tthis cti: "); d_r_loginst(dcontext, 3, in, "\t targets intra-stub instr"); break; } } } instrlist_clear(dcontext, &intra_ctis); instr_set_exit_stub_code(exit_cti, cil); } #endif /* Combines instrlist_preinsert to ilist and the size calculation of the addition */ static inline int tracelist_add(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where, instr_t *inst) { /* when we emit the trace we're going to call instr_length() on all instrs * anyway, and we'll re-use any memory allocated here for an encoding */ int size; #if defined(X86) && defined(X64) if (!X64_CACHE_MODE_DC(dcontext)) { instr_set_x86_mode(inst, true /*x86*/); instr_shrink_to_32_bits(inst); } #endif size = instr_length(dcontext, inst); instrlist_preinsert(ilist, where, inst); return size; } /* FIXME i#1668, i#2974: NYI on ARM/AArch64 */ #ifdef X86 /* Combines instrlist_postinsert to ilist and the size calculation of the addition */ static inline int tracelist_add_after(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where, instr_t *inst) { /* when we emit the trace we're going to call instr_length() on all instrs * anyway, and we'll re-use any memory allocated here for an encoding */ int size; # ifdef X64 if (!X64_CACHE_MODE_DC(dcontext)) { instr_set_x86_mode(inst, true /*x86*/); instr_shrink_to_32_bits(inst); } # endif size = instr_length(dcontext, inst); instrlist_postinsert(ilist, where, inst); return size; } #endif /* 
X86 */ #ifdef HASHTABLE_STATISTICS /* increments a given counter - assuming XCX/R2 is dead */ int insert_increment_stat_counter(dcontext_t *dcontext, instrlist_t *trace, instr_t *next, uint *counter_address) { int added_size = 0; /* incrementing a branch-type specific thread private counter */ opnd_t private_branchtype_counter = OPND_CREATE_ABSMEM(counter_address, OPSZ_4); /* using LEA to avoid clobbering eflags in a simple load-increment-store */ /*>>> movl counter, %ecx */ /*>>> lea 1(%ecx), %ecx */ /*>>> movl %ecx, counter */ /* x64: the counter is still 32 bits */ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG2), private_branchtype_counter)); added_size += tracelist_add( dcontext, trace, next, XINST_CREATE_add(dcontext, opnd_create_reg(SCRATCH_REG2), OPND_CREATE_INT8(1))); added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_store(dcontext, private_branchtype_counter, opnd_create_reg(SCRATCH_REG2))); return added_size; } #endif /* HASHTABLE_STATISTICS */ /* inserts proper instruction(s) to restore XCX spilled on indirect branch mangling * assumes target instrlist is a trace! * returns size to be added to trace */ static inline int insert_restore_spilled_xcx(dcontext_t *dcontext, instrlist_t *trace, instr_t *next) { int added_size = 0; if (DYNAMO_OPTION(private_ib_in_tls)) { #ifdef X86 if (X64_CACHE_MODE_DC(dcontext) && !X64_MODE_DC(dcontext) && IF_X64_ELSE(DYNAMO_OPTION(x86_to_x64_ibl_opt), false)) { added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XCX), opnd_create_reg(REG_R9))); } else #endif { added_size += tracelist_add( dcontext, trace, next, XINST_CREATE_load( dcontext, opnd_create_reg(SCRATCH_REG2), opnd_create_tls_slot(os_tls_offset(MANGLE_XCX_SPILL_SLOT)))); } } else { /* We need to restore XCX from TLS for shared fragments, but from * mcontext for private fragments, and all traces are private */ added_size += tracelist_add(dcontext, trace, next, instr_create_restore_from_dcontext( dcontext, SCRATCH_REG2, SCRATCH_REG2_OFFS)); } return added_size; } bool instr_is_trace_cmp(dcontext_t *dcontext, instr_t *inst) { if (!instr_is_our_mangling(inst)) return false; #ifdef X86 return # ifdef X64 instr_get_opcode(inst) == OP_mov_imm || /* mov %rax -> xbx-tls-spill-slot */ instr_get_opcode(inst) == OP_mov_st || instr_get_opcode(inst) == OP_lahf || instr_get_opcode(inst) == OP_seto || instr_get_opcode(inst) == OP_cmp || instr_get_opcode(inst) == OP_jnz || instr_get_opcode(inst) == OP_add || instr_get_opcode(inst) == OP_sahf # else instr_get_opcode(inst) == OP_lea || instr_get_opcode(inst) == OP_jecxz || instr_get_opcode(inst) == OP_jmp # endif ; #elif defined(AARCHXX) /* FIXME i#1668, i#2974: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(DYNAMO_OPTION(disable_traces)); return false; #endif } /* 32-bit only: inserts a comparison to speculative_tag with no side effect and * if value is matched continue target is assumed to be immediately * after targeter (which must be < 127 bytes away). 
* returns size to be added to trace */ static int insert_transparent_comparison(dcontext_t *dcontext, instrlist_t *trace, instr_t *targeter, /* exit CTI */ app_pc speculative_tag) { int added_size = 0; #ifdef X86 instr_t *jecxz; instr_t *continue_label = INSTR_CREATE_label(dcontext); /* instead of: * cmp ecx,const * we use: * lea -const(ecx) -> ecx * jecxz continue * lea const(ecx) -> ecx * jmp exit # usual targeter for stay on trace comparison * continue: # if match, we target post-targeter * * we have to use the landing pad b/c we don't know whether the * stub will be <128 away */ /* lea requires OPSZ_lea operand */ added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ECX), opnd_create_base_disp(REG_ECX, REG_NULL, 0, -((int)(ptr_int_t)speculative_tag), OPSZ_lea))); jecxz = INSTR_CREATE_jecxz(dcontext, opnd_create_instr(continue_label)); /* do not treat jecxz as exit cti! */ instr_set_meta(jecxz); added_size += tracelist_add(dcontext, trace, targeter, jecxz); /* need to recover address in ecx */ IF_X64(ASSERT_NOT_IMPLEMENTED(!X64_MODE_DC(dcontext))); added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ECX), opnd_create_base_disp( REG_ECX, REG_NULL, 0, ((int)(ptr_int_t)speculative_tag), OPSZ_lea))); added_size += tracelist_add_after(dcontext, trace, targeter, continue_label); #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif return added_size; } #if defined(X86) && defined(X64) static int mangle_x64_ib_in_trace(dcontext_t *dcontext, instrlist_t *trace, instr_t *targeter, app_pc next_tag) { int added_size = 0; if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_mov_st( dcontext, opnd_create_tls_slot(os_tls_offset(PREFIX_XAX_SPILL_SLOT)), opnd_create_reg(REG_XAX))); added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_INTPTR((ptr_int_t)next_tag))); } else { ASSERT(X64_CACHE_MODE_DC(dcontext)); added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_R8), opnd_create_reg(REG_XAX))); added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_R10), OPND_CREATE_INTPTR((ptr_int_t)next_tag))); } /* saving in the trace and restoring in ibl means that * -unsafe_ignore_eflags_{trace,ibl} must be equivalent */ if (!INTERNAL_OPTION(unsafe_ignore_eflags_trace)) { if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_mov_st( dcontext, opnd_create_tls_slot(os_tls_offset(INDIRECT_STUB_SPILL_SLOT)), opnd_create_reg(REG_XAX))); } /* FIXME: share w/ insert_save_eflags() */ added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_lahf(dcontext)); if (!INTERNAL_OPTION(unsafe_ignore_overflow)) { /* OF needs saving */ /* Move OF flags into the OF flag spill slot. 
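             * (seto leaves OF in %al: 1 if set, 0 if clear.  The restore path
             * in mangle_indirect_branch_in_trace() below runs add $0x7f,%al,
             * which overflows back into OF exactly when %al was 1, just
             * before sahf.)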
*/ added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_setcc(dcontext, OP_seto, opnd_create_reg(REG_AL))); } if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_cmp( dcontext, opnd_create_reg(REG_XCX), opnd_create_tls_slot(os_tls_offset(INDIRECT_STUB_SPILL_SLOT)))); } else { added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XCX), opnd_create_reg(REG_R10))); } } else { added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XCX), (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) ? opnd_create_reg(REG_XAX) : opnd_create_reg(REG_R10))); } /* change jmp into jne to trace cmp entry of ibl routine (special entry * that is after the eflags save) */ instr_set_opcode(targeter, OP_jnz); added_size++; /* jcc is 6 bytes, jmp is 5 bytes */ ASSERT(opnd_is_pc(instr_get_target(targeter))); instr_set_target(targeter, opnd_create_pc(get_trace_cmp_entry( dcontext, opnd_get_pc(instr_get_target(targeter))))); /* since the target gets lost we need to OR in this flag */ instr_exit_branch_set_type(targeter, instr_exit_branch_type(targeter) | INSTR_TRACE_CMP_EXIT); return added_size; } #endif /* Mangles an indirect branch in a trace where a basic block with tag "tag" * is being added as the next block beyond the indirect branch. * Returns the size of instructions added to trace. */ static int mangle_indirect_branch_in_trace(dcontext_t *dcontext, instrlist_t *trace, instr_t *targeter, app_pc next_tag, uint next_flags, instr_t **delete_after /*OUT*/, instr_t *end_instr) { int added_size = 0; #ifdef X86 instr_t *next = instr_get_next(targeter); /* all indirect branches should be ubrs */ ASSERT(instr_is_ubr(targeter)); /* expecting basic blocks only */ ASSERT((end_instr != NULL && targeter == end_instr) || targeter == instrlist_last(trace)); ASSERT(delete_after != NULL); *delete_after = (next == NULL || (end_instr != NULL && targeter == end_instr)) ? 
NULL : instr_get_prev(next); STATS_INC(trace_ib_cmp); /* Change jump to indirect_branch_lookup to a conditional jump * based on indirect target not equaling next block in trace * * the bb has already done: * spill xcx to xcx-tls-spill-slot * mov curtarget, xcx * <any other side effects of ind branch, like ret xsp adjust> * * and we now want to accomplish: * cmp ecx,const * * on 32-bit we use: * lea -const(ecx) -> ecx * jecxz continue * lea const(ecx) -> ecx * jmp exit # usual targeter for stay on trace comparison * continue: # if match, we target post-targeter * restore ecx * we have to use the landing pad b/c we don't know whether the * stub will be <128 away * * on 64-bit we use (PR 245832): * mov xax, xax-tls-spill-slot * mov $staytarget, xax * if !INTERNAL_OPTION(unsafe_ignore_eflags_{trace,ibl}) * mov xax, xbx-tls-spill-slot * lahf * seto al * cmp xcx, xbx-tls-spill-slot * else * cmp xcx, xax * jne exit * if xcx live: * mov xcx-tls-spill-slot, xcx * if flags live && unsafe options not on: * add 7f, al * sahf * if xax live: * mov xax-tls-spill-slot, xax */ # ifdef CUSTOM_TRACES_RET_REMOVAL IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* try to remove ret * FIXME: also handle ret imm => prev instr is add */ inst = instr_get_prev(targeter); if (dcontext->call_depth >= 0 && instr_raw_bits_valid(inst)) { byte *b = inst->bytes + inst->length - 1; /* 0x40538115 89 0d ec 68 06 40 mov %ecx -> 0x400668ec 0x4053811b 59 pop %esp (%esp) -> %ecx %esp 0x4053811c 83 c4 04 add $0x04 %esp -> %esp */ LOG(THREAD, LOG_MONITOR, 4, "ret removal: *b=0x%x, prev=" PFX ", dcontext=" PFX ", 0x%x\n", *b, *((int *)(b - 4)), dcontext, XCX_OFFSET); if ((*b == 0x59 && *((int *)(b - 4)) == ((uint)dcontext) + XCX_OFFSET) || (*(b - 3) == 0x59 && *((int *)(b - 7)) == ((uint)dcontext) + XCX_OFFSET && *(b - 2) == 0x83 && *(b - 1) == 0xc4)) { uint esp_add; /* already added calls & rets to call depth * if not negative, the call for this ret is earlier in this trace! 
*/ LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: removing ret!\n"); /* delete save ecx and pop */ if (*b == 0x59) { instr_set_raw_bits(inst, inst->bytes, inst->length - 7); esp_add = 4; } else { /* delete add too */ instr_set_raw_bits(inst, inst->bytes, inst->length - 10); esp_add = 4 + (uint)(*b); LOG(THREAD, LOG_MONITOR, 4, "*b=0x%x, esp_add=%d\n", *b, esp_add); } # ifdef DEBUG num_rets_removed++; # endif removed_ret = true; added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ESP), opnd_create_base_disp(REG_ESP, REG_NULL, 0, esp_add, OPSZ_lea))); } } if (removed_ret) { *delete_after = instr_get_prev(targeter); return added_size; } # endif /* CUSTOM_TRACES_RET_REMOVAL */ # ifdef X64 if (X64_CACHE_MODE_DC(dcontext)) { added_size += mangle_x64_ib_in_trace(dcontext, trace, targeter, next_tag); } else { # endif if (!INTERNAL_OPTION(unsafe_ignore_eflags_trace)) { /* if equal follow to the next instruction after the exit CTI */ added_size += insert_transparent_comparison(dcontext, trace, targeter, next_tag); /* leave jmp as it is, a jmp to exit stub (thence to ind br * lookup) */ } else { /* assume eflags don't need to be saved across ind branches, * so go ahead and use cmp, jne */ /* FIXME: no way to cmp w/ 64-bit immed */ IF_X64(ASSERT_NOT_IMPLEMENTED(!X64_MODE_DC(dcontext))); added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_ECX), OPND_CREATE_INT32((int)(ptr_int_t)next_tag))); /* Change jmp into jne indirect_branch_lookup */ /* CHECK: is that also going to exit stub */ instr_set_opcode(targeter, OP_jnz); added_size++; /* jcc is 6 bytes, jmp is 5 bytes */ } # ifdef X64 } # endif /* X64 */ /* PR 214962: our spill restoration needs this whole sequence marked mangle */ instr_set_our_mangling(targeter, true); LOG(THREAD, LOG_MONITOR, 3, "fixup_last_cti: added cmp vs. 
" PFX " for ind br\n", next_tag); # ifdef HASHTABLE_STATISTICS /* If we do stay on the trace, increment a counter using dead XCX */ if (INTERNAL_OPTION(stay_on_trace_stats)) { ibl_type_t ibl_type; /* FIXME: see if can test the instr flags instead */ DEBUG_DECLARE(bool ok =) get_ibl_routine_type(dcontext, opnd_get_pc(instr_get_target(targeter)), &ibl_type); ASSERT(ok); added_size += insert_increment_stat_counter( dcontext, trace, next, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_stay_on_trace_stat); } # endif /* HASHTABLE_STATISTICS */ /* If we do stay on the trace, must restore xcx * TODO optimization: check if xcx is live or not in next bb */ added_size += insert_restore_spilled_xcx(dcontext, trace, next); # ifdef X64 if (X64_CACHE_MODE_DC(dcontext)) { LOG(THREAD, LOG_INTERP, 4, "next_flags for post-ibl-cmp: 0x%x\n", next_flags); if (!TEST(FRAG_WRITES_EFLAGS_6, next_flags) && !INTERNAL_OPTION(unsafe_ignore_eflags_trace)) { if (!TEST(FRAG_WRITES_EFLAGS_OF, next_flags) && /* OF was saved */ !INTERNAL_OPTION(unsafe_ignore_overflow)) { /* restore OF using add that overflows if OF was on when we did seto */ added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_add(dcontext, opnd_create_reg(REG_AL), OPND_CREATE_INT8(0x7f))); } added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_sahf(dcontext)); } else STATS_INC(trace_ib_no_flag_restore); /* TODO optimization: check if xax is live or not in next bb */ if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add( dcontext, trace, next, INSTR_CREATE_mov_ld( dcontext, opnd_create_reg(REG_XAX), opnd_create_tls_slot(os_tls_offset(PREFIX_XAX_SPILL_SLOT)))); } else { added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_R8))); } } # endif #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif /* X86/ARM */ return added_size; } /* This routine handles the mangling of the cti at the end of the * previous block when adding a new block (f) to the trace fragment. * If prev_l is not NULL, matches the ordinal of prev_l to the nth * exit cti in the trace instrlist_t. * * If prev_l is NULL: WARNING: this routine assumes that the previous * block can only have a single indirect branch -- otherwise there is * no way to determine which indirect exit targeted the new block! No * assumptions are made about direct exits -- we can walk through them * all to find the one that targeted the new block. * * Returns an upper bound on the size added to the trace with inserted * instructions. * If we change this to add a substantial # of instrs, should update * TRACE_CTI_MANGLE_SIZE_UPPER_BOUND (assert at bottom should notify us) * * If you want to re-add the ability to add the front end of a trace, * revive the now-removed CUSTOM_TRACES_ADD_TRACE define from the attic. */ static int fixup_last_cti(dcontext_t *dcontext, instrlist_t *trace, app_pc next_tag, uint next_flags, uint trace_flags, fragment_t *prev_f, linkstub_t *prev_l, bool record_translation, uint *num_exits_deleted /*OUT*/, /* If non-NULL, only looks inside trace between these two */ instr_t *start_instr, instr_t *end_instr) { app_pc target_tag; instr_t *inst, *targeter = NULL; /* at end of routine we will delete all instrs after this one: */ instr_t *delete_after = NULL; bool is_indirect = false; /* Added size for transformations done here. * Use tracelist_add to automate adding inserted instr sizes. 
*/ int added_size = 0; uint exits_deleted = 0; /* count exit stubs to get the ordinal of the exit that targeted us * start at prev_l, and count up extraneous exits and blks until end */ uint nth_exit = 0, cur_exit; #ifdef CUSTOM_TRACES_RET_REMOVAL bool removed_ret = false; #endif bool have_ordinal = false; if (prev_l != NULL && prev_l == get_deleted_linkstub(dcontext)) { int last_ordinal = get_last_linkstub_ordinal(dcontext); if (last_ordinal != -1) { nth_exit = last_ordinal; have_ordinal = true; } } if (!have_ordinal && prev_l != NULL && !LINKSTUB_FAKE(prev_l)) { linkstub_t *stub = FRAGMENT_EXIT_STUBS(prev_f); while (stub != prev_l) stub = LINKSTUB_NEXT_EXIT(stub); /* if prev_l is cbr followed by ubr, we'll get 1 for ubr, * but we want 0, so we count prev_l itself, then decrement */ stub = LINKSTUB_NEXT_EXIT(stub); while (stub != NULL) { nth_exit++; stub = LINKSTUB_NEXT_EXIT(stub); } } /* else, we assume it's the final exit */ LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: looking for %d-th exit cti from bottom\n", nth_exit); if (start_instr != NULL) { ASSERT(end_instr != NULL); } else { start_instr = instrlist_first(trace); end_instr = instrlist_last(trace); } start_instr = instr_get_prev(start_instr); /* get open-ended bound */ cur_exit = nth_exit; /* now match the ordinal to the instrs. * we don't have any way to find boundary with previous-previous block * to make sure we didn't go backwards too far -- does it matter? */ for (inst = end_instr; inst != NULL && inst != start_instr; inst = instr_get_prev(inst)) { if (instr_is_exit_cti(inst)) { if (cur_exit == 0) { ibl_type_t ibl_type; /* exit cti is guaranteed to have pc target */ target_tag = opnd_get_pc(instr_get_target(inst)); is_indirect = get_ibl_routine_type(dcontext, target_tag, &ibl_type); if (is_indirect) { /* this should be a trace exit stub therefore it cannot be IBL_BB* */ ASSERT(IS_IBL_TRACE(ibl_type.source_fragment_type)); targeter = inst; break; } else { if (prev_l != NULL) { /* direct jmp, better point to us */ ASSERT(target_tag == next_tag); targeter = inst; break; } else { /* need to search for targeting jmp */ DOLOG(4, LOG_MONITOR, { d_r_loginst(dcontext, 4, inst, "exit==targeter?"); }); LOG(THREAD, LOG_MONITOR, 4, "target_tag = " PFX ", next_tag = " PFX "\n", target_tag, next_tag); if (target_tag == next_tag) { targeter = inst; break; } } } } else if (prev_l != NULL) { LOG(THREAD, LOG_MONITOR, 4, "counting backwards: %d == target_tag = " PFX "\n", cur_exit, opnd_get_pc(instr_get_target(inst))); cur_exit--; } } /* is exit cti */ } ASSERT(targeter != NULL); if (record_translation) instrlist_set_translation_target(trace, instr_get_translation(targeter)); instrlist_set_our_mangling(trace, true); /* PR 267260 */ DOLOG(4, LOG_MONITOR, { d_r_loginst(dcontext, 4, targeter, "\ttargeter"); }); if (is_indirect) { added_size += mangle_indirect_branch_in_trace( dcontext, trace, targeter, next_tag, next_flags, &delete_after, end_instr); } else { /* direct jump or conditional branch */ instr_t *next = targeter->next; if (instr_is_cbr(targeter)) { LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: inverted logic of cbr\n"); if (next != NULL && instr_is_ubr(next)) { /* cbr followed by ubr: if cbr got us here, reverse cbr and * remove ubr */ instr_invert_cbr(targeter); instr_set_target(targeter, instr_get_target(next)); ASSERT(next == end_instr); delete_after = targeter; LOG(THREAD, LOG_MONITOR, 4, "\tremoved ubr following cbr\n"); } else { ASSERT_NOT_REACHED(); } } else if (instr_is_ubr(targeter)) { #ifndef CUSTOM_TRACES ASSERT(targeter == 
end_instr); #endif /* remove unnecessary ubr at end of block */ delete_after = instr_get_prev(targeter); if (delete_after != NULL) { LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: removed ubr\n"); } } else ASSERT_NOT_REACHED(); } /* remove all instrs after this cti -- but what if internal * control flow jumps ahead and then comes back? * too expensive to check for such all the time. * FIXME: what to do? * * ifdef CUSTOM_TRACES: * FIXME: rather than adding entire trace on and then chopping off where * we exited, why not add after we know where to stop? */ if (delete_after != NULL) { ASSERT(delete_after != end_instr); delete_after = instr_get_next(delete_after); while (delete_after != NULL) { inst = delete_after; if (delete_after == end_instr) delete_after = NULL; else delete_after = instr_get_next(delete_after); if (instr_is_exit_cti(inst)) { /* assumption: passing in cache target to exit_stub_size works * just as well as linkstub_t target, since only cares whether * targeting ibl */ app_pc target = opnd_get_pc(instr_get_target(inst)); /* we already added all the stub size differences to the trace, * so we subtract the trace size of the stub here */ added_size -= local_exit_stub_size(dcontext, target, trace_flags); exits_deleted++; } else if (instr_opcode_valid(inst) && instr_is_cti(inst)) { LOG(THREAD, LOG_MONITOR, 3, "WARNING: deleting non-exit cti in unused tail of frag added to " "trace\n"); } d_r_loginst(dcontext, 4, inst, "\tdeleting"); instrlist_remove(trace, inst); added_size -= instr_length(dcontext, inst); instr_destroy(dcontext, inst); } } if (num_exits_deleted != NULL) *num_exits_deleted = exits_deleted; if (record_translation) instrlist_set_translation_target(trace, NULL); instrlist_set_our_mangling(trace, false); /* PR 267260 */ #if defined(X86) && defined(X64) DOCHECK(1, { if (FRAG_IS_32(trace_flags)) { instr_t *in; /* in case we missed any in tracelist_add() */ for (in = instrlist_first(trace); in != NULL; in = instr_get_next(in)) { if (instr_is_our_mangling(in)) ASSERT(instr_get_x86_mode(in)); } } }); #endif ASSERT(added_size < TRACE_CTI_MANGLE_SIZE_UPPER_BOUND); return added_size; } /* Add a speculative counter on last IBL exit * Returns additional size to add to trace estimate. 
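 * Roughly: a transparent comparison of the app target (held in XCX)
 * against speculate_next_tag is inserted just before the last IBL exit;
 * on a match we restore XCX and take a new direct exit straight to the
 * speculated tag, otherwise we fall through to the original IBL exit.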
*/ int append_trace_speculate_last_ibl(dcontext_t *dcontext, instrlist_t *trace, app_pc speculate_next_tag, bool record_translation) { /* unlike fixup_last_cti() here we are about to go directly to the IBL routine */ /* spill XCX in a scratch slot - note always using TLS */ int added_size = 0; ibl_type_t ibl_type; instr_t *inst = instrlist_last(trace); /* currently only relevant to last CTI */ instr_t *where = inst; /* preinsert before last CTI */ instr_t *next = instr_get_next(inst); DEBUG_DECLARE(bool ok;) ASSERT(speculate_next_tag != NULL); ASSERT(inst != NULL); ASSERT(instr_is_exit_cti(inst)); /* FIXME: see if can test the instr flags instead */ DEBUG_DECLARE(ok =) get_ibl_routine_type(dcontext, opnd_get_pc(instr_get_target(inst)), &ibl_type); ASSERT(ok); if (record_translation) instrlist_set_translation_target(trace, instr_get_translation(inst)); instrlist_set_our_mangling(trace, true); /* PR 267260 */ STATS_INC(num_traces_end_at_ibl_speculative_link); #ifdef HASHTABLE_STATISTICS DOSTATS({ if (INTERNAL_OPTION(speculate_last_exit_stats)) { int tls_stat_scratch_slot = os_tls_offset(HTABLE_STATS_SPILL_SLOT); added_size += tracelist_add( dcontext, trace, where, XINST_CREATE_store(dcontext, opnd_create_tls_slot(tls_stat_scratch_slot), opnd_create_reg(SCRATCH_REG2))); added_size += insert_increment_stat_counter( dcontext, trace, where, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_exit); added_size += tracelist_add( dcontext, trace, where, XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG2), opnd_create_tls_slot(tls_stat_scratch_slot))); } }); #endif /* preinsert comparison before exit CTI, but increment of success * statistics after it */ /* we need to compare to speculate_next_tag now */ /* XCX holds value to match */ /* should use similar eflags-clobbering scheme to inline cmp */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* * 8d 89 76 9b bf ff lea -tag(%ecx) -> %ecx * e3 0b jecxz continue * 8d 89 8a 64 40 00 lea tag(%ecx) -> %ecx * e9 17 00 00 00 jmp <exit stub 1: IBL> * * continue: * <increment stats> * # see FIXME whether to go to prefix or do here * <restore app ecx> * e9 cc aa dd 00 jmp speculate_next_tag * */ /* leave jmp as it is, a jmp to exit stub (thence to ind br lookup) */ added_size += insert_transparent_comparison(dcontext, trace, where, speculate_next_tag); #ifdef HASHTABLE_STATISTICS DOSTATS({ reg_id_t reg = IF_X86_ELSE(REG_XCX, DR_REG_R2); if (INTERNAL_OPTION(speculate_last_exit_stats)) { int tls_stat_scratch_slot = os_tls_offset(HTABLE_STATS_SPILL_SLOT); /* XCX already saved */ added_size += insert_increment_stat_counter( dcontext, trace, next, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_speculate_success); /* restore XCX to app IB target*/ added_size += tracelist_add( dcontext, trace, next, XINST_CREATE_load(dcontext, opnd_create_reg(reg), opnd_create_tls_slot(tls_stat_scratch_slot))); } }); #endif /* adding a new CTI for speculative target that is a pseudo * direct exit. Although we could have used the indirect stub * to be the unlinked path, with a new CTI way we can unlink a * speculated fragment without affecting any other targets * reached by the IBL. Also in general we could decide to add * multiple speculative comparisons and to chain them we'd * need new CTIs for them. */ /* Ensure all register state is properly preserved on both linked * and unlinked paths - currently only XCX is in use. 
* * * Preferably we should be targeting prefix of target to * save some space for recovering XCX from hot path. We'd * restore XCX in the exit stub when unlinked. * So it would act like a direct CTI when linked and like indirect * when unlinked. It could just be an unlinked indirect stub, if * we haven't modified any other registers or flags. * * For simplicity, we currently restore XCX here and use a plain * direct exit stub that goes to target start_pc instead of * prefixes. * * FIXME: (case 5085) the problem with the current scheme is that * when we exit unlinked the source will be marked as a DIRECT * exit - therefore no security policies will be enforced. * * FIXME: (case 4718) should add speculated target to current list * in case of RCT policy that needs to be invalidated if target is * flushed */ /* must restore xcx to app value, FIXME: see above for doing this in prefix+stub */ added_size += insert_restore_spilled_xcx(dcontext, trace, next); /* add a new direct exit stub */ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_jump(dcontext, opnd_create_pc(speculate_next_tag))); LOG(THREAD, LOG_INTERP, 3, "append_trace_speculate_last_ibl: added cmp vs. " PFX " for ind br\n", speculate_next_tag); if (record_translation) instrlist_set_translation_target(trace, NULL); instrlist_set_our_mangling(trace, false); /* PR 267260 */ return added_size; } #ifdef HASHTABLE_STATISTICS /* Add a counter on last IBL exit * if speculate_next_tag is not NULL then check case 4817's possible success */ /* FIXME: remove this routine once append_trace_speculate_last_ibl() * currently useful only to see statistics without side effects of * adding exit stub */ int append_ib_trace_last_ibl_exit_stat(dcontext_t *dcontext, instrlist_t *trace, app_pc speculate_next_tag) { /* unlike fixup_last_cti() here we are about to go directly to the IBL routine */ /* spill XCX in a scratch slot - note always using TLS */ int tls_stat_scratch_slot = os_tls_offset(HTABLE_STATS_SPILL_SLOT); int added_size = 0; ibl_type_t ibl_type; instr_t *inst = instrlist_last(trace); /* currently only relevant to last CTI */ instr_t *where = inst; /* preinsert before exit CTI */ reg_id_t reg = IF_X86_ELSE(REG_XCX, DR_REG_R2); DEBUG_DECLARE(bool ok;) /* should use similar eflags-clobbering scheme to inline cmp */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); ASSERT(inst != NULL); ASSERT(instr_is_exit_cti(inst)); /* FIXME: see if can test the instr flags instead */ ok = get_ibl_routine_type(dcontext, opnd_get_pc(instr_get_target(inst)), &ibl_type); ASSERT(ok); added_size += tracelist_add( dcontext, trace, where, XINST_CREATE_store(dcontext, opnd_create_tls_slot(tls_stat_scratch_slot), opnd_create_reg(reg))); added_size += insert_increment_stat_counter( dcontext, trace, where, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_exit); added_size += tracelist_add(dcontext, trace, where, XINST_CREATE_load(dcontext, opnd_create_reg(reg), opnd_create_tls_slot(tls_stat_scratch_slot))); if (speculate_next_tag != NULL) { instr_t *next = instr_get_next(inst); reg_id_t reg = IF_X86_ELSE(REG_ECX, DR_REG_R2); /* preinsert comparison before exit CTI, but increment goes after it */ /* we need to compare to speculate_next_tag now - just like * fixup_last_cti() would do later. 
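         * Note: unlike fixup_last_cti(), nothing is re-chained here: we
         * only count speculation hits, and the exit cti is still taken.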
*/ /* ECX holds value to match here */ /* leave jmp as it is, a jmp to exit stub (thence to ind br lookup) */ /* continue: * increment success counter * jmp targeter * * FIXME: now the last instruction is no longer the exit_cti - see if that * breaks any assumptions, using a short jump to see if anyone erroneously * uses this */ added_size += insert_transparent_comparison(dcontext, trace, where, speculate_next_tag); /* we'll kill again although ECX restored unnecessarily by comparison routine */ added_size += insert_increment_stat_counter( dcontext, trace, next, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_speculate_success); /* restore ECX */ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_load(dcontext, opnd_create_reg(reg), opnd_create_tls_slot(tls_stat_scratch_slot))); /* jmp where */ added_size += tracelist_add(dcontext, trace, next, IF_X86_ELSE(INSTR_CREATE_jmp_short, XINST_CREATE_jump)( dcontext, opnd_create_instr(where))); } return added_size; } #endif /* HASHTABLE_STATISTICS */ /* Add the fragment f to the end of the trace instrlist_t kept in dcontext * * Note that recreate_fragment_ilist() is making assumptions about its operation * synchronize changes * * Returns the size change in the trace from mangling the previous block * (assumes the caller has already calculated the size from adding the new block) */ uint extend_trace(dcontext_t *dcontext, fragment_t *f, linkstub_t *prev_l) { monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field; fragment_t *prev_f = NULL; instrlist_t *trace = &(md->trace); instrlist_t *ilist; uint size; uint prev_mangle_size = 0; uint num_exits_deleted = 0; uint new_exits_dir = 0, new_exits_indir = 0; #ifdef X64 ASSERT((!!FRAG_IS_32(md->trace_flags) == !X64_MODE_DC(dcontext)) || (!FRAG_IS_32(md->trace_flags) && !X64_MODE_DC(dcontext) && DYNAMO_OPTION(x86_to_x64))); #endif STATS_INC(num_traces_extended); /* if you want to re-add the ability to add traces, revive * CUSTOM_TRACES_ADD_TRACE from the attic */ ASSERT(!TEST(FRAG_IS_TRACE, f->flags)); /* expecting block fragments */ if (prev_l != NULL) { ASSERT(!LINKSTUB_FAKE(prev_l) || /* we track the ordinal of the del linkstub so it's ok */ prev_l == get_deleted_linkstub(dcontext)); prev_f = linkstub_fragment(dcontext, prev_l); LOG(THREAD, LOG_MONITOR, 4, "prev_l = owned by F%d, branch pc " PFX "\n", prev_f->id, EXIT_CTI_PC(prev_f, prev_l)); } else { LOG(THREAD, LOG_MONITOR, 4, "prev_l is NULL\n"); } /* insert code to optimize last branch based on new fragment */ if (instrlist_last(trace) != NULL) { prev_mangle_size = fixup_last_cti(dcontext, trace, f->tag, f->flags, md->trace_flags, prev_f, prev_l, false, &num_exits_deleted, NULL, NULL); } #ifdef CUSTOM_TRACES_RET_REMOVAL /* add now, want fixup to operate on depth before adding new blk */ dcontext->call_depth += f->num_calls; dcontext->call_depth -= f->num_rets; #endif LOG(THREAD, LOG_MONITOR, 4, "\tadding block %d == " PFX "\n", md->num_blks, f->tag); size = md->trace_buf_size - md->trace_buf_top; LOG(THREAD, LOG_MONITOR, 4, "decoding F%d into trace buf @" PFX " + 0x%x = " PFX "\n", f->id, md->trace_buf, md->trace_buf_top, md->trace_buf + md->trace_buf_top); /* FIXME: PR 307388: if md->pass_to_client, much of this is a waste of time as * we're going to re-mangle and re-fixup after passing our unmangled list to the * client. We do want to keep the size estimate, which requires having the last * cti at least, so for now we keep all the work. 
Of course the size estimate is * less valuable when the client may add a ton of instrumentation. */ /* decode_fragment will convert f's ibl routines into those appropriate for * our trace, whether f and the trace are shared or private */ ilist = decode_fragment(dcontext, f, md->trace_buf + md->trace_buf_top, &size, md->trace_flags, &new_exits_dir, &new_exits_indir); md->blk_info[md->num_blks].info.tag = f->tag; #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) if (md->num_blks > 0) md->blk_info[md->num_blks - 1].info.num_exits -= num_exits_deleted; md->blk_info[md->num_blks].info.num_exits = new_exits_dir + new_exits_indir; #endif md->num_blks++; /* We need to remove any nops we added for -pad_jmps (we don't expect there * to be any in a bb if -pad_jmps_shift_bb) to avoid screwing up * fixup_last_cti etc. */ process_nops_for_trace(dcontext, ilist, f->flags _IF_DEBUG(false /*!recreating*/)); DOLOG(5, LOG_MONITOR, { LOG(THREAD, LOG_MONITOR, 5, "post-trace-ibl-fixup, ilist is:\n"); instrlist_disassemble(dcontext, f->tag, ilist, THREAD); }); ASSERT(!instrlist_get_our_mangling(ilist)); instrlist_append(trace, instrlist_first(ilist)); instrlist_init(ilist); /* clear fields so destroy won't kill instrs on trace list */ instrlist_destroy(dcontext, ilist); md->trace_buf_top += size; ASSERT(md->trace_buf_top < md->trace_buf_size); LOG(THREAD, LOG_MONITOR, 4, "post-extend_trace, trace buf + 0x%x => " PFX "\n", md->trace_buf_top, md->trace_buf); DOLOG(4, LOG_MONITOR, { LOG(THREAD, LOG_MONITOR, 4, "\nafter extending trace:\n"); instrlist_disassemble(dcontext, md->trace_tag, trace, THREAD); }); return prev_mangle_size; } /* If branch_type is 0, sets it to the type of a ubr */ static instr_t * create_exit_jmp(dcontext_t *dcontext, app_pc target, app_pc translation, uint branch_type) { instr_t *jmp = XINST_CREATE_jump(dcontext, opnd_create_pc(target)); instr_set_translation(jmp, translation); if (branch_type == 0) instr_exit_branch_set_type(jmp, instr_branch_type(jmp)); else instr_exit_branch_set_type(jmp, branch_type); instr_set_our_mangling(jmp, true); return jmp; } /* Given an ilist with no mangling or stitching together, this routine does those * things. This is used both for CLIENT_INTERFACE and for recreating traces * for state translation. * It assumes the ilist abides by client rules: single-mbr bbs, no * changes in source app code. Else, it returns false. * Elision is supported. * * Our docs disallow removal of an entire block, changing inter-block ctis, and * changing the ordering of the blocks, which is what allows us to correctly * mangle the inter-block ctis here. * * Reads the following fields from md: * - trace_tag * - trace_flags * - num_blks * - blk_info * - final_exit_flags */ bool mangle_trace(dcontext_t *dcontext, instrlist_t *ilist, monitor_data_t *md) { instr_t *inst, *next_inst, *start_instr, *jmp; uint blk, num_exits_deleted; app_pc fallthrough = NULL; bool found_syscall = false, found_int = false; #ifdef CLIENT_INTERFACE /* We don't assert that mangle_trace_at_end() is true b/c the client * can unregister its bb and trace hooks if it really wants to, * though we discourage it. */ ASSERT(md->pass_to_client); #endif LOG(THREAD, LOG_MONITOR, 2, "mangle_trace " PFX "\n", md->trace_tag); DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "ilist passed to mangle_trace:\n"); instrlist_disassemble(dcontext, md->trace_tag, ilist, THREAD); }); /* We make 3 passes. 
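     * (the 2nd walk mangles the whole ilist and the 3rd stitches the
     * delineated bbs together; see the comments at each walk below)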
* 1st walk: find bb boundaries */ blk = 0; for (inst = instrlist_first(ilist); inst != NULL; inst = next_inst) { app_pc xl8 = instr_get_translation(inst); next_inst = instr_get_next(inst); if (instr_is_meta(inst)) continue; DOLOG(5, LOG_INTERP, { LOG(THREAD, LOG_MONITOR, 4, "transl " PFX " ", xl8); d_r_loginst(dcontext, 4, inst, "considering non-meta"); }); /* Skip blocks that don't end in ctis (except final) */ while (blk < md->num_blks - 1 && !md->blk_info[blk].final_cti) { LOG(THREAD, LOG_MONITOR, 4, "skipping fall-through bb #%d\n", blk); md->blk_info[blk].end_instr = NULL; blk++; } #ifdef CLIENT_INTERFACE /* Ensure non-ignorable syscall/int2b terminates trace */ if (md->pass_to_client && !client_check_syscall(ilist, inst, &found_syscall, &found_int)) return false; /* Clients should not add new source code regions, which would mess us up * here, as well as mess up our cache consistency (both page prot and * selfmod). */ if (md->pass_to_client && (!vm_list_overlaps(dcontext, md->blk_info[blk].vmlist, xl8, xl8 + 1) && !(instr_is_ubr(inst) && opnd_is_pc(instr_get_target(inst)) && xl8 == opnd_get_pc(instr_get_target(inst)))) IF_WINDOWS(&&!vmvector_overlap(landing_pad_areas, md->blk_info[blk].info.tag, md->blk_info[blk].info.tag + 1))) { LOG(THREAD, LOG_MONITOR, 2, "trace error: out-of-bounds transl " PFX " vs block w/ start " PFX "\n", xl8, md->blk_info[blk].info.tag); CLIENT_ASSERT(false, "trace's app sources (instr_set_translation() targets) " "must remain within original bounds"); return false; } #endif /* in case no exit ctis in last block, find last non-meta fall-through */ if (blk == md->num_blks - 1) { /* Do not call instr_length() on this inst: use length * of translation! (i#509) */ fallthrough = decode_next_pc(dcontext, xl8); } /* PR 299808: identify bb boundaries. We can't go by translations alone, as * ubrs can point at their targets and theoretically the entire trace could * be ubrs: so we have to go by exits, and limit what the client can do. We * can assume that each bb should not violate the bb callback rules (PR * 215217): if has cbr or mbr, that must end bb. If it has a call, that * could be elided; if not, its target should match the start of the next * block. We also want to * impose the can't-be-trace rules (PR 215219), which are not documented for * bbs: if more than one exit cti or if code beyond last exit cti then can't * be in a trace. We can soften a little and allow extra ubrs if they do not * target the subsequent block. FIXME: we could have stricter translation * reqts for ubrs: make them point at corresponding app ubr (but what if * really correspond to app cbr?): then can handle code past exit ubr. */ if (instr_will_be_exit_cti(inst) && ((!instr_is_ubr(inst) && !instr_is_near_call_direct(inst)) || (inst == instrlist_last(ilist) || (blk + 1 < md->num_blks && /* client is disallowed from changing bb exits and sequencing in trace * hook; if they change in bb for_trace, will be reflected here. 
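                  * Note: a direct cti whose target matches the next
                  * block's tag is thus treated as this block's final exit.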
*/ opnd_get_pc(instr_get_target(inst)) == md->blk_info[blk + 1].info.tag)))) { DOLOG(4, LOG_INTERP, { d_r_loginst(dcontext, 4, inst, "end of bb"); }); /* Add jump that fixup_last_cti expects */ if (!instr_is_ubr(inst) IF_X86(|| instr_get_opcode(inst) == OP_jmp_far)) { app_pc target; if (instr_is_mbr(inst) IF_X86(|| instr_get_opcode(inst) == OP_jmp_far)) { target = get_ibl_routine( dcontext, get_ibl_entry_type(instr_branch_type(inst)), DEFAULT_IBL_TRACE(), get_ibl_branch_type(inst)); } else if (instr_is_cbr(inst)) { /* Do not call instr_length() on this inst: use length * of translation! (i#509) */ target = decode_next_pc(dcontext, xl8); } else { target = opnd_get_pc(instr_get_target(inst)); } ASSERT(target != NULL); jmp = create_exit_jmp(dcontext, target, xl8, instr_branch_type(inst)); instrlist_postinsert(ilist, inst, jmp); /* we're now done w/ vmlist: switch to end instr. * d_r_mangle() shouldn't remove the exit cti. */ vm_area_destroy_list(dcontext, md->blk_info[blk].vmlist); md->blk_info[blk].vmlist = NULL; md->blk_info[blk].end_instr = jmp; } else md->blk_info[blk].end_instr = inst; blk++; DOLOG(4, LOG_INTERP, { if (blk < md->num_blks) { LOG(THREAD, LOG_MONITOR, 4, "starting next bb " PFX "\n", md->blk_info[blk].info.tag); } }); if (blk >= md->num_blks && next_inst != NULL) { CLIENT_ASSERT(false, "unsupported trace modification: too many exits"); return false; } } #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) /* PR 306761: we need to re-calculate md->blk_info[blk].info.num_exits, * and then adjust after fixup_last_cti. */ if (instr_will_be_exit_cti(inst)) md->blk_info[blk].info.num_exits++; #endif } if (blk < md->num_blks) { ASSERT(!instr_is_ubr(instrlist_last(ilist))); if (blk + 1 < md->num_blks) { CLIENT_ASSERT(false, "unsupported trace modification: too few exits"); return false; } /* must have been no final exit cti: add final fall-through jmp */ jmp = create_exit_jmp(dcontext, fallthrough, fallthrough, 0); /* FIXME PR 307284: support client modifying, replacing, or adding * syscalls and ints: need to re-analyze. Then we wouldn't * need the md->final_exit_flags field anymore. * For now we disallow. */ if (found_syscall || found_int) { instr_exit_branch_set_type(jmp, md->final_exit_flags); #ifdef WINDOWS /* For INSTR_SHARED_SYSCALL, we set it pre-mangling, and it * survives to here if the instr is not clobbered, * and does not come from md->final_exit_flags */ if (TEST(INSTR_SHARED_SYSCALL, instrlist_last(ilist)->flags)) { instr_set_target(jmp, opnd_create_pc(shared_syscall_routine(dcontext))); instr_set_our_mangling(jmp, true); /* undone by target set */ } /* FIXME: test for linux too, but allowing ignorable syscalls */ if (!TESTANY(LINK_NI_SYSCALL_ALL IF_WINDOWS(| LINK_CALLBACK_RETURN), md->final_exit_flags) && !TEST(INSTR_SHARED_SYSCALL, instrlist_last(ilist)->flags)) { CLIENT_ASSERT(false, "client modified or added a syscall or int: unsupported"); return false; } #endif } instrlist_append(ilist, jmp); md->blk_info[blk].end_instr = jmp; } else { CLIENT_ASSERT((!found_syscall && !found_int) /* On linux we allow ignorable syscalls in middle. * FIXME PR 307284: see notes above. 
*/ IF_UNIX(|| !TEST(LINK_NI_SYSCALL, md->final_exit_flags)), "client changed exit target where unsupported\n" "check if trace ends in a syscall or int"); } ASSERT(instr_is_ubr(instrlist_last(ilist))); if (found_syscall) md->trace_flags |= FRAG_HAS_SYSCALL; else md->trace_flags &= ~FRAG_HAS_SYSCALL; /* 2nd walk: mangle */ DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "trace ilist before mangling:\n"); instrlist_disassemble(dcontext, md->trace_tag, ilist, THREAD); }); /* We do not need to remove nops since we never emitted */ d_r_mangle(dcontext, ilist, &md->trace_flags, true /*mangle calls*/, /* we're post-client so we don't need translations unless storing */ TEST(FRAG_HAS_TRANSLATION_INFO, md->trace_flags)); DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "trace ilist after mangling:\n"); instrlist_disassemble(dcontext, md->trace_tag, ilist, THREAD); }); /* 3rd walk: stitch together delineated bbs */ for (blk = 0; blk < md->num_blks && md->blk_info[blk].end_instr == NULL; blk++) ; /* nothing */ start_instr = instrlist_first(ilist); for (inst = instrlist_first(ilist); inst != NULL; inst = next_inst) { next_inst = instr_get_next(inst); if (inst == md->blk_info[blk].end_instr) { /* Chain exit to point to next bb */ if (blk + 1 < md->num_blks) { /* We must do proper analysis so that state translation matches * created traces in whether eflags are restored post-cmp */ uint next_flags = forward_eflags_analysis(dcontext, ilist, instr_get_next(inst)); next_flags = instr_eflags_to_fragment_eflags(next_flags); LOG(THREAD, LOG_INTERP, 4, "next_flags for fixup_last_cti: 0x%x\n", next_flags); fixup_last_cti(dcontext, ilist, md->blk_info[blk + 1].info.tag, next_flags, md->trace_flags, NULL, NULL, TEST(FRAG_HAS_TRANSLATION_INFO, md->trace_flags), &num_exits_deleted, /* Only walk ilist between these instrs */ start_instr, inst); #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) md->blk_info[blk].info.num_exits -= num_exits_deleted; #endif } blk++; /* skip fall-throughs */ while (blk < md->num_blks && md->blk_info[blk].end_instr == NULL) blk++; if (blk >= md->num_blks && next_inst != NULL) { CLIENT_ASSERT(false, "unsupported trace modification: exits modified"); return false; } start_instr = next_inst; } } if (blk < md->num_blks) { CLIENT_ASSERT(false, "unsupported trace modification: cannot find all exits"); return false; } return true; } /**************************************************************************** * UTILITIES */ /* Converts instr_t EFLAGS_ flags to corresponding fragment_t FRAG_ flags, * assuming that the instr_t flags correspond to the start of the fragment_t. * Assumes instr_eflags has already accounted for predication. */ uint instr_eflags_to_fragment_eflags(uint instr_eflags) { uint frag_eflags = 0; #ifdef X86 if (instr_eflags == EFLAGS_WRITE_OF) { /* this fragment writes OF before reading it * May still read other flags before writing them. 
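         * Note: only FRAG_WRITES_EFLAGS_OF is set here, so the trace-cmp
         * code must still preserve the other five arith flags (lahf/sahf).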
*/ frag_eflags |= FRAG_WRITES_EFLAGS_OF; return frag_eflags; } #endif if (instr_eflags == EFLAGS_WRITE_ARITH) { /* fragment writes all 6 prior to reading */ frag_eflags |= FRAG_WRITES_EFLAGS_ARITH; #ifdef X86 frag_eflags |= FRAG_WRITES_EFLAGS_OF; #endif } return frag_eflags; } /* Returns one of these flags, defined in instr.h: * EFLAGS_WRITE_ARITH = writes all arith flags before reading any * EFLAGS_WRITE_OF = writes OF before reading it (x86-only) * EFLAGS_READ_ARITH = reads some of arith flags before writing * EFLAGS_READ_OF = reads OF before writing OF (x86-only) * 0 = no information before 1st cti */ uint forward_eflags_analysis(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr) { instr_t *in; uint eflags_6 = 0; /* holds flags written so far (in read slots) */ int eflags_result = 0; for (in = instr; in != NULL; in = instr_get_next_expanded(dcontext, ilist, in)) { if (!instr_valid(in) || instr_is_cti(in)) { /* give up */ break; } if (eflags_result != EFLAGS_WRITE_ARITH IF_X86(&&eflags_result != EFLAGS_READ_OF)) eflags_result = eflags_analysis(in, eflags_result, &eflags_6); DOLOG(4, LOG_INTERP, { d_r_loginst(dcontext, 4, in, "forward_eflags_analysis"); LOG(THREAD, LOG_INTERP, 4, "\tinstr %x => %x\n", instr_get_eflags(in, DR_QUERY_DEFAULT), eflags_result); }); } return eflags_result; } /* This translates f's code into an instrlist_t and returns it. * If buf is NULL: * The Instrs returned point into f's raw bits, so encode them * before you delete f! * Else, f's raw bits are copied into buf, and *bufsz is modified to * contain the total bytes copied * FIXME: should have release build checks and not just asserts where * we rely on caller to have big-enough buffer? * If target_flags differ from f->flags in sharing and/or in trace-ness, * converts ibl and tls usage in f to match the desired target_flags. * FIXME: converting from private to shared tls is not yet * implemented: we rely on -private_ib_in_tls for adding normal * private bbs to shared traces, and disallow any extensive mangling * (native_exec, selfmod) from becoming shared traces. * The caller is responsible for destroying the instrlist and its instrs. * If the fragment ends in an elided jmp, a new jmp instr is created, though * its bits field is NULL, allowing the caller to set it to do-not-emit if * trying to exactly duplicate or calculate the size, though most callers * will want to emit that jmp. See decode_fragment_exact(). */ static void instr_set_raw_bits_trace_buf(instr_t *instr, byte *buf_writable_addr, uint length) { /* The trace buffer is a writable address, so we need to translate to an * executable address for pointing at bits. */ instr_set_raw_bits(instr, vmcode_get_executable_addr(buf_writable_addr), length); } /* We want to avoid low-loglevel disassembly when we're in the middle of disassembly */ #define DF_LOGLEVEL(dc) (((dc) != GLOBAL_DCONTEXT && (dc)->in_opnd_disassemble) ? 
6U : 4U) instrlist_t * decode_fragment(dcontext_t *dcontext, fragment_t *f, byte *buf, /*IN/OUT*/ uint *bufsz, uint target_flags, /*OUT*/ uint *dir_exits, /*OUT*/ uint *indir_exits) { linkstub_t *l; cache_pc start_pc, stop_pc, pc, prev_pc = NULL, raw_start_pc; instr_t *instr, *cti = NULL, *raw_instr; instrlist_t *ilist = instrlist_create(dcontext); byte *top_buf = NULL, *cur_buf = NULL; app_pc target_tag; uint num_bytes, offset; uint num_dir = 0, num_indir = 0; bool tls_to_dc; bool shared_to_private = TEST(FRAG_SHARED, f->flags) && !TEST(FRAG_SHARED, target_flags); #ifdef WINDOWS /* The fragment could contain an ignorable sysenter instruction if * the following conditions are satisfied. */ bool possible_ignorable_sysenter = DYNAMO_OPTION(ignore_syscalls) && (get_syscall_method() == SYSCALL_METHOD_SYSENTER) && TEST(FRAG_HAS_SYSCALL, f->flags); #endif instrlist_t intra_ctis; coarse_info_t *info = NULL; bool coarse_elided_ubrs = false; dr_isa_mode_t old_mode; /* for decoding and get_ibl routines we need the dcontext mode set */ bool ok = dr_set_isa_mode(dcontext, FRAG_ISA_MODE(f->flags), &old_mode); /* i#1494: Decoding a code fragment from code cache, decode_fragment * may mess up the 32-bit/64-bit mode in -x86_to_x64 because 32-bit * application code is encoded as 64-bit code fragments into the code cache. * Thus we currently do not support using decode_fragment with -x86_to_x64, * including trace and coarse_units (coarse-grain code cache management) */ IF_X86_64(ASSERT(!DYNAMO_OPTION(x86_to_x64))); instrlist_init(&intra_ctis); /* Now we need to go through f and make cti's for each of its exit cti's and * non-exit cti's with off-fragment targets that need to be re-pc-relativized. * The rest of the instructions can be lumped into raw instructions. */ start_pc = FCACHE_ENTRY_PC(f); pc = start_pc; raw_start_pc = start_pc; if (buf != NULL) { cur_buf = buf; top_buf = cur_buf; ASSERT(bufsz != NULL); } /* Handle code after last exit but before stubs by allowing l to be NULL. * Handle coarse-grain fake fragment_t by discovering exits as we go, with * l being NULL the whole time. */ if (TEST(FRAG_FAKE, f->flags)) { ASSERT(TEST(FRAG_COARSE_GRAIN, f->flags)); info = get_fragment_coarse_info(f); ASSERT(info != NULL); coarse_elided_ubrs = (info->persisted && TEST(PERSCACHE_ELIDED_UBR, info->flags)) || (!info->persisted && DYNAMO_OPTION(coarse_freeze_elide_ubr)); /* Assumption: coarse-grain fragments have no ctis w/ off-fragment targets * that are not exit ctis */ l = NULL; } else l = FRAGMENT_EXIT_STUBS(f); while (true) { uint l_flags; cti = NULL; if (l != NULL) { stop_pc = EXIT_CTI_PC(f, l); } else if (TEST(FRAG_FAKE, f->flags)) { /* we don't know the size of f */ stop_pc = (cache_pc)UNIVERSAL_REGION_END; } else { /* fake fragment_t, or code between last exit but before stubs or padding */ stop_pc = fragment_body_end_pc(dcontext, f); if (PAD_FRAGMENT_JMPS(f->flags) && stop_pc != raw_start_pc) { /* We need to adjust stop_pc to account for any padding, only * way any code could get here is via client interface, * and there really is no nice way to distinguish it * from any padding we added. * PR 213005: we do not support decode_fragment() for bbs * that have code added beyond the last exit cti (we turn * off FRAG_COARSE_GRAIN and set FRAG_CANNOT_BE_TRACE). 
* Sanity check, make sure it at least looks like there is no * code here */ ASSERT(IS_SET_TO_DEBUG(raw_start_pc, stop_pc - raw_start_pc)); stop_pc = raw_start_pc; } } IF_X64(ASSERT(TEST(FRAG_FAKE, f->flags) /* no copy made */ || CHECK_TRUNCATE_TYPE_uint((stop_pc - raw_start_pc)))); num_bytes = (uint)(stop_pc - raw_start_pc); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decoding fragment from " PFX " to " PFX "\n", raw_start_pc, stop_pc); if (num_bytes > 0) { if (buf != NULL) { if (TEST(FRAG_FAKE, f->flags)) { /* we don't know the size of f, so we copy later, though * we do point instrs into buf before we copy! */ } else { /* first copy entire sequence up to exit cti into buf * so we don't have to copy it in pieces if we find cti's, if we don't * find any we want one giant piece anyway */ ASSERT(cur_buf + num_bytes < buf + *bufsz); memcpy(cur_buf, raw_start_pc, num_bytes); top_buf = cur_buf + num_bytes; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decode_fragment: copied " PFX "-" PFX " to " PFX "-" PFX "\n", raw_start_pc, raw_start_pc + num_bytes, cur_buf, cur_buf + num_bytes); /* cur_buf is incremented later -- it always points to start * of raw bytes for next-to-add-to-ilist instr, while * top_buf points to top of copied-to-buf data */ } } else { /* point at bits in code cache */ cur_buf = raw_start_pc; } /* now, we can't make a single raw instr for all that, there may * be calls with off-fragment targets in there that need to be * re-pc-relativized (instrumentation, etc. insert calls), or * we may not even know where the exit ctis are (coarse-grain fragments), * so walk through (original bytes!) and decode, looking for cti's */ instr = instr_create(dcontext); pc = raw_start_pc; /* do we have to translate the store of xcx from tls to dcontext? * be careful -- there can be private bbs w/ indirect branches, so * must see if this is a shared fragment we're adding */ tls_to_dc = (shared_to_private && !DYNAMO_OPTION(private_ib_in_tls) && /* if l==NULL (coarse src) we'll check for xcx every time */ (l == NULL || LINKSTUB_INDIRECT(l->flags))); do { #ifdef WINDOWS cache_pc prev_decode_pc = prev_pc; /* store the address of the * previous decode, the instr * before the one 'pc' * currently points to *before* * the call to decode() just * below */ #endif /* For frozen coarse fragments, ubr eliding forces us to check * every instr for a potential next fragment start. This is * expensive so users are advised to decode from app code if * possible (case 9325 -- need exact re-mangle + re-instrument), * though -coarse_pclookup_table helps. */ if (info != NULL && info->frozen && coarse_elided_ubrs && pc != start_pc) { /* case 6532: check for ib stubs as we elide the jmp there too */ bool stop = false; if (coarse_is_indirect_stub(pc)) { stop = true; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\thit ib stub @" PFX "\n", pc); } else { app_pc tag = fragment_coarse_entry_pclookup(dcontext, info, pc); if (tag != NULL) { stop = true; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\thit frozen tgt: " PFX "." PFX "\n", tag, pc); } } if (stop) { /* Add the ubr ourselves */ ASSERT(cti == NULL); cti = XINST_CREATE_jump(dcontext, opnd_create_pc(pc)); /* It's up to the caller to decide whether to mark this * as do-not-emit or not */ /* Process as an exit cti */ stop_pc = pc; pc = stop_pc; break; } } instr_reset(dcontext, instr); prev_pc = pc; pc = IF_AARCH64_ELSE(decode_cti_with_ldstex, decode_cti)(dcontext, pc, instr); #ifdef WINDOWS /* Perform fixups for ignorable syscalls on XP & 2003. 
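             * Note: the mov-imm-to-tos that mangling placed just before
             * the sysenter must be re-pointed at the post-sysenter instr
             * in this ilist rather than at the one in the original bb; see
             * the sysenter_prev/sysenter_post handling below.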
*/ if (possible_ignorable_sysenter && instr_opcode_valid(instr) && instr_is_syscall(instr)) { /* We want to find the instr preceding the sysenter and have * it point to the post-sysenter instr in the trace, rather than * remain pointing to the post-sysenter instr in the BB. */ instr_t *sysenter_prev; instr_t *sysenter_post; ASSERT(prev_decode_pc != NULL); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decode_fragment: sysenter found @" PFX "\n", instr_get_raw_bits(instr)); /* create single raw instr for instructions up to the * sysenter EXCEPT for the immediately preceding instruction */ offset = (int)(prev_decode_pc - raw_start_pc); ASSERT(offset > 0); raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits_trace_buf(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; /* Get the "mov" instr just before the sysenter. We know that * it's there because mangle put it there, so we can safely * decode at prev_decode_pc. */ sysenter_prev = instr_create(dcontext); decode(dcontext, prev_decode_pc, sysenter_prev); ASSERT(instr_valid(instr) && instr_is_mov_imm_to_tos(sysenter_prev)); instrlist_append(ilist, sysenter_prev); cur_buf += instr_length(dcontext, sysenter_prev); /* Append the sysenter. */ instr_set_raw_bits_trace_buf(instr, cur_buf, (int)(pc - prev_pc)); instrlist_append(ilist, instr); instr_set_meta(instr); /* skip current instr -- the sysenter */ cur_buf += (int)(pc - prev_pc); /* Decode the next instr -- the one after the sysenter. */ sysenter_post = instr_create(dcontext); prev_decode_pc = pc; prev_pc = pc; pc = decode(dcontext, pc, sysenter_post); if (DYNAMO_OPTION(ignore_syscalls_follow_sysenter)) ASSERT(!instr_is_cti(sysenter_post)); raw_start_pc = pc; /* skip the post-sysenter instr */ cur_buf += (int)(pc - prev_pc); instrlist_append(ilist, sysenter_post); /* Point the pre-sysenter mov to the post-sysenter instr. */ instr_set_src(sysenter_prev, 0, opnd_create_instr(sysenter_post)); instr_set_meta(sysenter_prev); instr_set_meta(sysenter_post); DOLOG(DF_LOGLEVEL(dcontext), LOG_INTERP, { LOG(THREAD, LOG_INTERP, DF_LOGLEVEL(dcontext), "Post-sysenter -- F%d (" PFX ") into:\n", f->id, f->tag); instrlist_disassemble(dcontext, f->tag, ilist, THREAD); }); /* Set all local state so that we can fall-thru and correctly * process the post-sysenter instruction. Point instr to the * already decoded instruction, sysenter_post. At this point, * pc and raw_start_pc point to just after sysenter_post, * prev_pc points to sysenter_post, prev_decode_pc points to * the sysenter itself, and cur_buf points to post_sysenter. */ instr = sysenter_post; } #endif /* look for a cti with an off-fragment target */ if (instr_opcode_valid(instr) && instr_is_cti(instr)) { bool separate_cti = false; bool re_relativize = false; bool intra_target = true; DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, 4, instr, "decode_fragment: found non-exit cti"); }); if (TEST(FRAG_FAKE, f->flags)) { /* Case 8711: we don't know the size so we can't even * distinguish off-fragment from intra-fragment targets. * Thus we have to assume that any cti is an exit cti, and * make all fragments for which that is not true into * fine-grained. * Except that we want to support intra-fragment ctis for * clients (i#665), so we use some heuristics. */ if (instr_is_cti_short_rewrite(instr, prev_pc)) { /* Pull in the two short jmps for a "short-rewrite" instr. 
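                         * (a short cti such as jecxz that mangling rewrote
                         * into a short-jmp/long-jmp pair;
                         * remangle_short_rewrite() re-assembles the
                         * sequence as a single unit)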
* We must do this before asking whether it's an * intra-fragment so we don't just look at the * first part of the sequence. */ pc = remangle_short_rewrite(dcontext, instr, prev_pc, 0 /*same target*/); } if (!coarse_cti_is_intra_fragment(dcontext, info, instr, start_pc)) { /* Process this cti as an exit cti. FIXME: we will then * re-copy the raw bytes from this cti to the end of the * fragment at the top of the next loop iter, but for * coarse-grain bbs that should be just one instr for cbr bbs * or none for others, so not worth doing anything about. */ DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, DF_LOGLEVEL(dcontext), instr, "\tcoarse exit cti"); }); intra_target = false; stop_pc = prev_pc; pc = stop_pc; break; } else { /* we'll make it to intra_target if() below */ DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, DF_LOGLEVEL(dcontext), instr, "\tcoarse intra-fragment cti"); }); } } else if (instr_is_return(instr) || !opnd_is_near_pc(instr_get_target(instr))) { /* just leave it undecoded */ intra_target = false; } else if (instr_is_cti_short_rewrite(instr, prev_pc)) { /* Cti-short should only occur as exit ctis, which are * separated out unless we're decoding a fake fragment. We * include this case for future use, as otherwise we'll * decode just the short cti and think it is an * intra-fragment cti. */ ASSERT_NOT_REACHED(); separate_cti = true; re_relativize = true; intra_target = false; } else if (opnd_get_pc(instr_get_target(instr)) < start_pc || opnd_get_pc(instr_get_target(instr)) > start_pc + f->size) { separate_cti = true; re_relativize = true; intra_target = false; DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, 4, instr, "\tcti has off-fragment target"); }); } if (intra_target) { /* intra-fragment target: we'll change its target operand * from pc to instr_t in second pass, so remember it here */ instr_t *clone = instr_clone(dcontext, instr); /* HACK: use note field! */ instr_set_note(clone, (void *)instr); /* we leave the clone pointing at valid original raw bits */ instrlist_append(&intra_ctis, clone); /* intra-fragment target */ DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, 4, instr, "\tcti has intra-fragment target"); }); /* since the resulting instrlist could be manipulated, * we need to change the target operand from pc to instr_t. * that requires having this instr separated out now so * our clone-in-note-field hack above works. 
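                     * Note: the actual retargeting happens at the bottom
                     * of decode_fragment(), where we walk intra_ctis and
                     * point each real_cti at its matching instr.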
*/ separate_cti = true; re_relativize = false; } if (separate_cti) { /* create single raw instr for instructions up to the cti */ offset = (int)(prev_pc - raw_start_pc); if (offset > 0) { raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits_trace_buf(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; raw_start_pc = prev_pc; } /* now append cti, indicating that relative target must be * re-encoded, and that it is not an exit cti */ instr_set_meta(instr); if (re_relativize) instr_set_raw_bits_valid(instr, false); else if (!instr_is_cti_short_rewrite(instr, NULL)) { instr_set_raw_bits_trace_buf(instr, cur_buf, (int)(pc - prev_pc)); } instrlist_append(ilist, instr); /* include buf for off-fragment cti, to simplify assert below */ cur_buf += (int)(pc - prev_pc); raw_start_pc = pc; /* create new instr for future fast decodes */ instr = instr_create(dcontext); } } /* is cti */ /* instr_is_tls_xcx_spill won't upgrade from level 1 */ else if (tls_to_dc && instr_is_tls_xcx_spill(instr)) { /* shouldn't get here for x64, where everything uses tls */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "mangling xcx save from tls to dcontext\n"); /* create single raw instr for instructions up to the xcx save */ offset = (int)(prev_pc - raw_start_pc); if (offset > 0) { raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits_trace_buf(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; raw_start_pc = prev_pc; } /* now append our new xcx save */ instrlist_append(ilist, instr_create_save_to_dcontext( dcontext, IF_X86_ELSE(REG_XCX, DR_REG_R2), IF_X86_ELSE(XCX_OFFSET, R2_OFFSET))); /* make sure skip current instr */ cur_buf += (int)(pc - prev_pc); raw_start_pc = pc; } #if defined(X86) && defined(X64) else if (instr_has_rel_addr_reference(instr)) { /* We need to re-relativize, which is done automatically only for * level 1 instrs (PR 251479), and only when raw bits point to * their original location. We assume that all the if statements * above end up creating a high-level instr, so a cti w/ a * rip-rel operand is already covered. 
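             * Note: so this path should only see non-cti instrs whose
             * rip-rel operand must be re-encoded relative to the new buffer.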
*/ /* create single raw instr for instructions up to this one */ offset = (int)(prev_pc - raw_start_pc); if (offset > 0) { raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits_trace_buf(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; raw_start_pc = prev_pc; } /* should be valid right now since pointing at original bits */ ASSERT(instr_rip_rel_valid(instr)); if (buf != NULL) { /* re-relativize into the new buffer */ DEBUG_DECLARE(byte *nxt =) instr_encode_to_copy(dcontext, instr, cur_buf, vmcode_get_executable_addr(cur_buf)); instr_set_raw_bits_trace_buf(instr, vmcode_get_executable_addr(cur_buf), (int)(pc - prev_pc)); instr_set_rip_rel_valid(instr, true); ASSERT(nxt != NULL); } instrlist_append(ilist, instr); cur_buf += (int)(pc - prev_pc); raw_start_pc = pc; /* create new instr for future fast decodes */ instr = instr_create(dcontext); } #endif } while (pc < stop_pc); DODEBUG({ if (pc != stop_pc) { LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "PC " PFX ", stop_pc " PFX "\n", pc, stop_pc); } }); ASSERT(pc == stop_pc); /* create single raw instr for rest of instructions up to exit cti */ if (pc > raw_start_pc) { instr_reset(dcontext, instr); /* point to buffer bits */ offset = (int)(pc - raw_start_pc); if (offset > 0) { instr_set_raw_bits_trace_buf(instr, cur_buf, offset); instrlist_append(ilist, instr); cur_buf += offset; } if (buf != NULL && TEST(FRAG_FAKE, f->flags)) { /* Now that we know the size we can copy into buf. * We have been incrementing cur_buf all along, though * we didn't have contents there. */ ASSERT(top_buf < cur_buf); IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((cur_buf - top_buf)))); num_bytes = (uint)(cur_buf - top_buf); ASSERT(cur_buf + num_bytes < buf + *bufsz); memcpy(cur_buf, raw_start_pc, num_bytes); top_buf = cur_buf + num_bytes; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decode_fragment: copied " PFX "-" PFX " to " PFX "-" PFX "\n", raw_start_pc, raw_start_pc + num_bytes, cur_buf, cur_buf + num_bytes); } ASSERT(buf == NULL || cur_buf == top_buf); } else { /* will reach here if had a processed instr (off-fragment target, etc.) * immediately prior to exit cti, so now don't need instr -- an * example (in absence of clients) is trampoline to interception code */ instr_destroy(dcontext, instr); } } if (l == NULL && !TEST(FRAG_FAKE, f->flags)) break; /* decode the exit branch */ if (cti != NULL) { /* already created */ instr = cti; ASSERT(info != NULL && info->frozen && instr_is_ubr(instr)); raw_start_pc = pc; } else { instr = instr_create(dcontext); raw_start_pc = decode(dcontext, stop_pc, instr); ASSERT(raw_start_pc != NULL); /* our own code! */ /* pc now points into fragment! */ } ASSERT(instr_is_ubr(instr) || instr_is_cbr(instr)); /* replace fcache target with target_tag and add to fragment */ if (l == NULL) { app_pc instr_tgt; /* Ensure we get proper target for short cti sequence */ if (instr_is_cti_short_rewrite(instr, stop_pc)) remangle_short_rewrite(dcontext, instr, stop_pc, 0 /*same target*/); instr_tgt = opnd_get_pc(instr_get_target(instr)); ASSERT(TEST(FRAG_COARSE_GRAIN, f->flags)); if (cti == NULL && coarse_is_entrance_stub(instr_tgt)) { target_tag = entrance_stub_target_tag(instr_tgt, info); l_flags = LINK_DIRECT; /* FIXME; try to get LINK_JMP vs LINK_CALL vs fall-through? 
*/ LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\tstub tgt: " PFX " => " PFX "\n", instr_tgt, target_tag); } else if (instr_tgt == raw_start_pc /*target next instr*/ /* could optimize by not checking for stub if * coarse_elided_ubrs but we need to know whether ALL * ubrs were elided, which we don't know as normally * entire-bb-ubrs are not elided (case 9677). * plus now that we elide jmp-to-ib-stub we must check. */ && coarse_is_indirect_stub(instr_tgt)) { ibl_type_t ibl_type; DEBUG_DECLARE(bool is_ibl;) target_tag = coarse_indirect_stub_jmp_target(instr_tgt); l_flags = LINK_INDIRECT; DEBUG_DECLARE(is_ibl =) get_ibl_routine_type_ex(dcontext, target_tag, &ibl_type _IF_X86_64(NULL)); ASSERT(is_ibl); l_flags |= ibltype_to_linktype(ibl_type.branch_type); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\tind stub tgt: " PFX " => " PFX "\n", instr_tgt, target_tag); } else { target_tag = fragment_coarse_entry_pclookup(dcontext, info, instr_tgt); /* Only frozen units don't jump through stubs */ ASSERT(info != NULL && info->frozen); ASSERT(target_tag != NULL); l_flags = LINK_DIRECT; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\tfrozen tgt: " PFX "." PFX "\n", target_tag, instr_tgt); } } else { target_tag = EXIT_TARGET_TAG(dcontext, f, l); l_flags = l->flags; } if (LINKSTUB_DIRECT(l_flags)) num_dir++; else num_indir++; ASSERT(target_tag != NULL); if (instr_is_cti_short_rewrite(instr, stop_pc)) { raw_start_pc = remangle_short_rewrite(dcontext, instr, stop_pc, target_tag); } else { app_pc new_target = target_tag; /* don't point to fcache bits */ instr_set_raw_bits_valid(instr, false); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "decode_fragment exit_cti: pc=" PFX " l->target_tag=" PFX " l->flags=0x%x\n", stop_pc, target_tag, l_flags); /* need to propagate exit branch type flags, * instr_t flag copied from old fragment linkstub * TODO: when ibl targets are different this won't be necessary */ instr_exit_branch_set_type(instr, linkstub_propagatable_flags(l_flags)); /* convert to proper ibl */ if (is_indirect_branch_lookup_routine(dcontext, target_tag)) { DEBUG_DECLARE(app_pc old_target = new_target;) new_target = get_alternate_ibl_routine(dcontext, target_tag, target_flags); ASSERT(new_target != NULL); /* for stats on traces, we assume if target_flags contains * FRAG_IS_TRACE then we are extending a trace */ DODEBUG({ LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "%s: %s ibl_routine " PFX " with %s_target=" PFX "\n", TEST(FRAG_IS_TRACE, target_flags) ? "extend_trace" : "decode_fragment", new_target == old_target ? "maintaining" : "replacing", old_target, new_target == old_target ? "old" : "new", new_target); STATS_INC(num_traces_ibl_extended); }); #ifdef WINDOWS DOSTATS({ if (TEST(FRAG_IS_TRACE, target_flags) && old_target == shared_syscall_routine(dcontext)) STATS_INC(num_traces_shared_syscall_extended); }); #endif } instr_set_target(instr, opnd_create_pc(new_target)); if (instr_is_cti_short(instr)) { /* make sure non-mangled short ctis, which are generated by * us and never left there from apps, are not marked as exit ctis */ instr_set_meta(instr); } } instrlist_append(ilist, instr); #ifdef CUSTOM_EXIT_STUBS if (l != NULL && l->fixed_stub_offset > 0) regenerate_custom_exit_stub(dcontext, instr, l, f); #endif if (TEST(FRAG_FAKE, f->flags)) { /* Assumption: coarse-grain bbs have 1 ind exit or 2 direct, * and no code beyond the last exit! Of course frozen bbs * can have their final jmp elided, which we handle above. 
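             * Note: hence once we have processed a ubr exit for a fake
             * fragment there can be nothing further to decode, so we stop.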
*/ if (instr_is_ubr(instr)) { break; } } if (l != NULL) /* if NULL keep going: discovering exits as we go */ l = LINKSTUB_NEXT_EXIT(l); } /* end while(true) loop through exit stubs */ /* now fix up intra-trace cti targets */ if (instrlist_first(&intra_ctis) != NULL) { /* We have to undo all of our level 0 blocks by expanding. * Any instrs that need re-relativization should already be * separate, so this should not affect rip-rel instrs. */ int offs = 0; for (instr = instrlist_first_expanded(dcontext, ilist); instr != NULL; instr = instr_get_next_expanded(dcontext, ilist, instr)) { for (cti = instrlist_first(&intra_ctis); cti != NULL; cti = instr_get_next(cti)) { /* The clone we put in intra_ctis has raw bits equal to the * original bits, so its target will be in original fragment body. * We can't rely on the raw bits of the new instrs (since the * non-level-0 ones may have allocated raw bits) so we * calculate a running offset as we go. */ if (opnd_get_pc(instr_get_target(cti)) - start_pc == offs) { /* cti targets this instr */ instr_t *real_cti = (instr_t *)instr_get_note(cti); /* PR 333691: do not preserve raw bits of real_cti, since * instrlist may change (e.g., inserted nops). Must re-encode * once instrlist is finalized. */ instr_set_target(real_cti, opnd_create_instr(instr)); DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, 4, real_cti, "\tre-set intra-fragment target"); }); break; } } offs += instr_length(dcontext, instr); } } instrlist_clear(dcontext, &intra_ctis); DOLOG(DF_LOGLEVEL(dcontext), LOG_INTERP, { LOG(THREAD, LOG_INTERP, DF_LOGLEVEL(dcontext), "Decoded F%d (" PFX "." PFX ") into:\n", f->id, f->tag, FCACHE_ENTRY_PC(f)); instrlist_disassemble(dcontext, f->tag, ilist, THREAD); }); ok = dr_set_isa_mode(dcontext, old_mode, NULL); ASSERT(ok); if (dir_exits != NULL) *dir_exits = num_dir; if (indir_exits != NULL) *indir_exits = num_indir; if (buf != NULL) { IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((top_buf - buf)))); *bufsz = (uint)(top_buf - buf); } return ilist; } #undef DF_LOGLEVEL /* Just like decode_fragment() but marks any instrs missing in the cache * as do-not-emit */ instrlist_t * decode_fragment_exact(dcontext_t *dcontext, fragment_t *f, byte *buf, /*IN/OUT*/ uint *bufsz, uint target_flags, /*OUT*/ uint *dir_exits, /*OUT*/ uint *indir_exits) { instrlist_t *ilist = decode_fragment(dcontext, f, buf, bufsz, target_flags, dir_exits, indir_exits); /* If the final jmp was elided we do NOT want to count it in the size! */ if (instr_get_raw_bits(instrlist_last(ilist)) == NULL) { instr_set_ok_to_emit(instrlist_last(ilist), false); } return ilist; } /* Makes a new copy of fragment f * If replace is true, * removes f from the fcache and adds the new copy in its place * Else * creates f as an invisible fragment (caller is responsible for linking * the new fragment!) */ fragment_t * copy_fragment(dcontext_t *dcontext, fragment_t *f, bool replace) { instrlist_t *trace = instrlist_create(dcontext); instr_t *instr; uint *trace_buf; int trace_buf_top; /* index of next free location in trace_buf */ linkstub_t *l; byte *p; cache_pc start_pc; int num_bytes; fragment_t *new_f; void *vmlist = NULL; app_pc target_tag; DEBUG_DECLARE(bool ok;) trace_buf = heap_alloc(dcontext, f->size * 2 HEAPACCT(ACCT_FRAGMENT)); start_pc = FCACHE_ENTRY_PC(f); trace_buf_top = 0; p = ((byte *)trace_buf) + trace_buf_top; IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* must re-relativize when copying! 
*/ for (l = FRAGMENT_EXIT_STUBS(f); l; l = LINKSTUB_NEXT_EXIT(l)) { /* Copy the instruction bytes up to (but not including) the first * control-transfer instruction. ***WARNING*** This code assumes * that the first link stub corresponds to the first exit branch * in the body. */ IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((EXIT_CTI_PC(f, l) - start_pc)))); num_bytes = (uint)(EXIT_CTI_PC(f, l) - start_pc); if (num_bytes > 0) { memcpy(p, (byte *)start_pc, num_bytes); trace_buf_top += num_bytes; start_pc += num_bytes; /* build a mongo instruction corresponding to the copied instructions */ instr = instr_create(dcontext); instr_set_raw_bits(instr, p, num_bytes); instrlist_append(trace, instr); } /* decode the exit branch */ instr = instr_create(dcontext); p = decode(dcontext, (byte *)EXIT_CTI_PC(f, l), instr); ASSERT(p != NULL); /* our own code! */ /* p now points into fragment! */ ASSERT(instr_is_ubr(instr) || instr_is_cbr(instr)); /* Replace cache_pc target with target_tag and add to trace. For * an indirect branch, the target_tag is zero. */ target_tag = EXIT_TARGET_TAG(dcontext, f, l); ASSERT(target_tag); if (instr_is_cti_short_rewrite(instr, EXIT_CTI_PC(f, l))) { p = remangle_short_rewrite(dcontext, instr, EXIT_CTI_PC(f, l), target_tag); } else { /* no short ctis that aren't mangled should be exit ctis */ ASSERT(!instr_is_cti_short(instr)); instr_set_target(instr, opnd_create_pc(target_tag)); } instrlist_append(trace, instr); start_pc += (p - (byte *)EXIT_CTI_PC(f, l)); } /* emit as invisible fragment */ /* We don't support shared fragments, where vm_area_add_to_list can fail */ ASSERT_NOT_IMPLEMENTED(!TEST(FRAG_SHARED, f->flags)); DEBUG_DECLARE(ok =) vm_area_add_to_list(dcontext, f->tag, &vmlist, f->flags, f, false /*no locks*/); ASSERT(ok); /* should never fail for private fragments */ new_f = emit_invisible_fragment(dcontext, f->tag, trace, f->flags, vmlist); if (replace) { /* link and replace old fragment */ shift_links_to_new_fragment(dcontext, f, new_f); fragment_replace(dcontext, f, new_f); } else { /* caller is responsible for linking new fragment */ } ASSERT(new_f->flags == f->flags); fragment_copy_data_fields(dcontext, f, new_f); #ifdef DEBUG if (d_r_stats->loglevel > 1) { LOG(THREAD, LOG_ALL, 2, "Copying F%d to F%d\n", f->id, new_f->id); disassemble_fragment(dcontext, f, d_r_stats->loglevel < 3); disassemble_fragment(dcontext, new_f, d_r_stats->loglevel < 3); } #endif /* DEBUG */ heap_free(dcontext, trace_buf, f->size * 2 HEAPACCT(ACCT_FRAGMENT)); /* free the instrlist_t elements */ instrlist_clear_and_destroy(dcontext, trace); if (replace) { fragment_delete(dcontext, f, FRAGDEL_NO_OUTPUT | FRAGDEL_NO_UNLINK | FRAGDEL_NO_HTABLE); STATS_INC(num_fragments_deleted_copy_and_replace); } return new_f; } /* Used when the code cache is enlarged by copying to a larger space, * and all of the relative ctis that target outside the cache need * to be shifted. Additionally, sysenter-related patching for ignore-syscalls * on XP/2003 is performed here, as the absolute code cache address pushed * onto the stack must be updated. * Assumption: old code cache has been copied to TOP of new cache, so to * detect for ctis targeting outside of old cache can look at new cache * start plus old cache size. 
*/ void shift_ctis_in_fragment(dcontext_t *dcontext, fragment_t *f, ssize_t shift, cache_pc fcache_start, cache_pc fcache_end, size_t old_size) { cache_pc pc, prev_pc = NULL; cache_pc start_pc = FCACHE_ENTRY_PC(f); cache_pc stop_pc = fragment_stubs_end_pc(f); /* get what would have been end of cache if just shifted not resized */ cache_pc fcache_old_end = fcache_start + old_size; #ifdef WINDOWS /* The fragment could contain an ignorable sysenter instruction if * the following conditions are satisfied. */ bool possible_ignorable_sysenter = DYNAMO_OPTION(ignore_syscalls) && (get_syscall_method() == SYSCALL_METHOD_SYSENTER) && /* FIXME Traces don't have FRAG_HAS_SYSCALL set so we can't filter on * that flag for all fragments. */ (TEST(FRAG_HAS_SYSCALL, f->flags) || TEST(FRAG_IS_TRACE, f->flags)); #endif instr_t instr; instr_init(dcontext, &instr); pc = start_pc; while (pc < stop_pc) { #ifdef WINDOWS cache_pc prev_decode_pc = prev_pc; /* store the address of the * previous decode, the instr * before the one 'pc' * currently points to *before* * the call to decode_cti() just * below */ #endif prev_pc = pc; instr_reset(dcontext, &instr); pc = (cache_pc)decode_cti(dcontext, (byte *)pc, &instr); #ifdef WINDOWS /* Perform fixups for sysenter instrs when ignorable syscalls is used on * XP & 2003. These are not cache-external fixups, but it's convenient & * efficient to perform them here since decode_cti() is called on every * instruction, allowing identification of sysenters without additional * decoding. */ if (possible_ignorable_sysenter && instr_opcode_valid(&instr) && instr_is_syscall(&instr)) { cache_pc next_pc; app_pc target; DEBUG_DECLARE(app_pc old_target;) DEBUG_DECLARE(cache_pc encode_nxt;) /* Peek up to find the "mov $post-sysenter -> (%xsp)" */ instr_reset(dcontext, &instr); next_pc = decode(dcontext, prev_decode_pc, &instr); ASSERT(next_pc == prev_pc); LOG(THREAD, LOG_MONITOR, 4, "shift_ctis_in_fragment: pre-sysenter mov found @" PFX "\n", instr_get_raw_bits(&instr)); ASSERT(instr_is_mov_imm_to_tos(&instr)); target = instr_get_raw_bits(&instr) + instr_length(dcontext, &instr) + (pc - prev_pc); DODEBUG(old_target = (app_pc)opnd_get_immed_int(instr_get_src(&instr, 0));); /* PR 253943: we don't support sysenter in x64 */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* can't have 8-byte imm-to-mem */ instr_set_src(&instr, 0, opnd_create_immed_int((ptr_int_t)target, OPSZ_4)); ASSERT(old_target + shift == target); LOG(THREAD, LOG_MONITOR, 4, "shift_ctis_in_fragment: pre-sysenter mov now pts to @" PFX "\n", target); DEBUG_DECLARE(encode_nxt =) instr_encode_to_copy(dcontext, &instr, vmcode_get_writable_addr(prev_decode_pc), prev_decode_pc); /* must not change size! */ ASSERT(encode_nxt != NULL && vmcode_get_executable_addr(encode_nxt) == next_pc); } /* The following 'if' won't get executed since a sysenter isn't * a CTI instr, so we don't need an else. We do need to take care * that any 'else' clauses are added after the 'if' won't trigger * on a sysenter either. 
*/ #endif /* look for a pc-relative cti (including exit ctis) w/ out-of-cache * target (anything in-cache is fine, the whole cache was moved) */ if (instr_is_cti(&instr) && /* only ret, ret_far, and iret don't have targets, and * we really shouldn't see them, except possibly if they * are inserted through instrumentation, so go ahead and * check num srcs */ instr_num_srcs(&instr) > 0 && opnd_is_near_pc(instr_get_target(&instr))) { app_pc target = opnd_get_pc(instr_get_target(&instr)); if (target < fcache_start || target > fcache_old_end) { DEBUG_DECLARE(byte * nxt_pc;) /* re-encode instr w/ new pc-relative target */ instr_set_raw_bits_valid(&instr, false); instr_set_target(&instr, opnd_create_pc(target - shift)); DEBUG_DECLARE(nxt_pc =) instr_encode_to_copy(dcontext, &instr, vmcode_get_writable_addr(prev_pc), prev_pc); /* must not change size! */ ASSERT(nxt_pc != NULL && vmcode_get_executable_addr(nxt_pc) == pc); #ifdef DEBUG if ((d_r_stats->logmask & LOG_CACHE) != 0) { d_r_loginst( dcontext, 5, &instr, "shift_ctis_in_fragment: found cti w/ out-of-cache target"); } #endif } } } instr_free(dcontext, &instr); } #ifdef PROFILE_RDTSC /* Add profile call to front of the trace in dc * Must call finalize_profile_call and pass it the fragment_t* * once the trace is turned into a fragment to fix up a few profile * call instructions. */ void add_profile_call(dcontext_t *dcontext) { monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field; instrlist_t *trace = &(md->trace); byte *p = ((byte *)md->trace_buf) + md->trace_buf_top; instr_t *instr; uint num_bytes = profile_call_size(); ASSERT(num_bytes + md->trace_buf_top < md->trace_buf_size); insert_profile_call((cache_pc)p); /* use one giant BINARY instruction to hold everything, * to keep dynamo from interpreting the cti instructions as real ones */ instr = instr_create(dcontext); instr_set_raw_bits(instr, p, num_bytes); instrlist_prepend(trace, instr); md->trace_buf_top += num_bytes; } #endif /* emulates the effects of the instruction at pc with the state in mcontext * limited right now to only mov instructions * returns NULL if failed or not yet implemented, else returns the pc of the next instr. 
*/ app_pc d_r_emulate(dcontext_t *dcontext, app_pc pc, priv_mcontext_t *mc) { instr_t instr; app_pc next_pc = NULL; uint opc; instr_init(dcontext, &instr); next_pc = decode(dcontext, pc, &instr); if (!instr_valid(&instr)) { next_pc = NULL; goto emulate_failure; } DOLOG(2, LOG_INTERP, { d_r_loginst(dcontext, 2, &instr, "emulating"); }); opc = instr_get_opcode(&instr); if (opc == OP_store) { opnd_t src = instr_get_src(&instr, 0); opnd_t dst = instr_get_dst(&instr, 0); reg_t *target; reg_t val; uint sz = opnd_size_in_bytes(opnd_get_size(dst)); ASSERT(opnd_is_memory_reference(dst)); if (sz != 4 IF_X64(&&sz != 8)) { next_pc = NULL; goto emulate_failure; } target = (reg_t *)opnd_compute_address_priv(dst, mc); if (opnd_is_reg(src)) { val = reg_get_value_priv(opnd_get_reg(src), mc); } else if (opnd_is_immed_int(src)) { val = (reg_t)opnd_get_immed_int(src); } else { next_pc = NULL; goto emulate_failure; } DOCHECK(1, { uint prot = 0; ASSERT(get_memory_info((app_pc)target, NULL, NULL, &prot)); ASSERT(TEST(MEMPROT_WRITE, prot)); }); LOG(THREAD, LOG_INTERP, 2, "\temulating store by writing " PFX " to " PFX "\n", val, target); if (sz == 4) *((int *)target) = (int)val; #ifdef X64 else if (sz == 8) *target = val; #endif } else if (opc == IF_X86_ELSE(OP_inc, OP_add) || opc == IF_X86_ELSE(OP_dec, OP_sub)) { opnd_t src = instr_get_src(&instr, 0); reg_t *target; uint sz = opnd_size_in_bytes(opnd_get_size(src)); if (sz != 4 IF_X64(&&sz != 8)) { next_pc = NULL; goto emulate_failure; } /* FIXME: handle changing register value */ ASSERT(opnd_is_memory_reference(src)); /* FIXME: change these to take in priv_mcontext_t* ? */ target = (reg_t *)opnd_compute_address_priv(src, mc); DOCHECK(1, { uint prot = 0; ASSERT(get_memory_info((app_pc)target, NULL, NULL, &prot)); ASSERT(TEST(MEMPROT_WRITE, prot)); }); LOG(THREAD, LOG_INTERP, 2, "\temulating %s to " PFX "\n", opc == IF_X86_ELSE(OP_inc, OP_add) ? "inc" : "dec", target); if (sz == 4) { if (opc == IF_X86_ELSE(OP_inc, OP_add)) (*((int *)target))++; else (*((int *)target))--; } #ifdef X64 else if (sz == 8) { if (opc == IF_X86_ELSE(OP_inc, OP_add)) (*target)++; else (*target)--; } #endif } emulate_failure: instr_free(dcontext, &instr); return next_pc; }
1
17,749
I would make this level 1 or 2.
DynamoRIO-dynamorio
c
@@ -0,0 +1,17 @@ +require 'beaker/hypervisor/vagrant' + +class Beaker::VagrantFusion < Beaker::Vagrant + def provision(provider = 'vmware_fusion') + # By default vmware_fusion creates a .vagrant directory relative to the + # Vagrantfile path. That means beaker tries to scp the VM to itself unless + # we move the VM files elsewhere. + ENV['VAGRANT_VMWARE_CLONE_DIRECTORY'] = '~/.vagrant/vmware_fusion' + super + end + + def self.provider_vfile_section(host, options) + " v.vm.provider :vmware_fusion do |v|\n" + + " v.vmx['memsize'] = '#{options['vagrant_memsize'] ||= '1024'}'\n" + + " end\n" + end +end
1
1
5,862
Is that path in a variable anywhere? Not a big fan of hard-coding it here.
voxpupuli-beaker
rb
@@ -16,14 +16,18 @@ limitations under the License. package controllers import ( + "bytes" "context" + "errors" + "strings" + "text/template" "github.com/sirupsen/logrus" federation "github.com/spiffe/spire/support/k8s/k8s-workload-registrar/federation" spiffeidv1beta1 "github.com/spiffe/spire/support/k8s/k8s-workload-registrar/mode-crd/api/spiffeid/v1beta1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types"
1
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "context" "github.com/sirupsen/logrus" federation "github.com/spiffe/spire/support/k8s/k8s-workload-registrar/federation" spiffeidv1beta1 "github.com/spiffe/spire/support/k8s/k8s-workload-registrar/mode-crd/api/spiffeid/v1beta1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) // PodReconcilerConfig holds the config passed in when creating the reconciler type PodReconcilerConfig struct { Client client.Client Cluster string Ctx context.Context DisabledNamespaces []string Log logrus.FieldLogger PodLabel string PodAnnotation string Scheme *runtime.Scheme TrustDomain string } // PodReconciler holds the runtime configuration and state of this controller type PodReconciler struct { client.Client c PodReconcilerConfig } // NewPodReconciler creates a new PodReconciler object func NewPodReconciler(config PodReconcilerConfig) *PodReconciler { return &PodReconciler{ Client: config.Client, c: config, } } // SetupWithManager adds a controller manager to manage this reconciler func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&corev1.Pod{}). Complete(r) } // Reconcile creates a new SPIFFE ID when pods are created func (r *PodReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { if containsString(r.c.DisabledNamespaces, req.NamespacedName.Namespace) { return ctrl.Result{}, nil } pod := corev1.Pod{} ctx := r.c.Ctx if err := r.Get(ctx, req.NamespacedName, &pod); err != nil { if !errors.IsNotFound(err) { r.c.Log.WithError(err).Error("Unable to get Pod") return ctrl.Result{}, err } return ctrl.Result{}, client.IgnoreNotFound(err) } // Pod needs to be assigned a node before it can get a SPIFFE ID if pod.Spec.NodeName == "" { return ctrl.Result{}, nil } return r.updateorCreatePodEntry(ctx, &pod) } // updateorCreatePodEntry attempts to create a new SpiffeID resource. 
func (r *PodReconciler) updateorCreatePodEntry(ctx context.Context, pod *corev1.Pod) (ctrl.Result, error) { spiffeIDURI := r.podSpiffeID(pod) // If we have no spiffe ID for the pod, do nothing if spiffeIDURI == "" { return ctrl.Result{}, nil } federationDomains := federation.GetFederationDomains(pod) // Set up new SPIFFE ID spiffeID := &spiffeidv1beta1.SpiffeID{ ObjectMeta: metav1.ObjectMeta{ Name: pod.Name, Namespace: pod.Namespace, Labels: map[string]string{ "podUid": string(pod.ObjectMeta.UID), }, }, Spec: spiffeidv1beta1.SpiffeIDSpec{ SpiffeId: spiffeIDURI, ParentId: r.podParentID(pod.Spec.NodeName), DnsNames: []string{pod.Name}, // Set pod name as first DNS name FederatesWith: federationDomains, Selector: spiffeidv1beta1.Selector{ PodUid: pod.GetUID(), Namespace: pod.Namespace, NodeName: pod.Spec.NodeName, }, }, } err := setOwnerRef(pod, spiffeID, r.c.Scheme) if err != nil { return ctrl.Result{}, err } // Check for existing entry existing := spiffeidv1beta1.SpiffeID{} err = r.Get(ctx, types.NamespacedName{ Name: spiffeID.ObjectMeta.Name, Namespace: spiffeID.ObjectMeta.Namespace, }, &existing) if err != nil { if errors.IsNotFound(err) { // Create new entry return ctrl.Result{}, r.Create(ctx, spiffeID) } return ctrl.Result{}, err } if spiffeID.Spec.Selector.PodUid != existing.Spec.Selector.PodUid { // Already deleted pod is taking up the name, retry after it has deleted return ctrl.Result{Requeue: true}, nil } // Check if label or annotation has changed if spiffeID.Spec.SpiffeId != existing.Spec.SpiffeId { existing.Spec.SpiffeId = spiffeID.Spec.SpiffeId err := r.Update(r.c.Ctx, &existing) if err != nil { return ctrl.Result{}, err } } // Nothing to do return ctrl.Result{}, nil } // podSpiffeID returns the desired spiffe ID for the pod, or nil if it should be ignored func (r *PodReconciler) podSpiffeID(pod *corev1.Pod) string { if r.c.PodLabel != "" { // the controller has been configured with a pod label. if the pod // has that label, use the value to construct the pod entry. otherwise // ignore the pod altogether. if labelValue, ok := pod.Labels[r.c.PodLabel]; ok { return makeID(r.c.TrustDomain, "%s", labelValue) } return "" } if r.c.PodAnnotation != "" { // the controller has been configured with a pod annotation. if the pod // has that annotation, use the value to construct the pod entry. otherwise // ignore the pod altogether. if annotationValue, ok := pod.Annotations[r.c.PodAnnotation]; ok { return makeID(r.c.TrustDomain, "%s", annotationValue) } return "" } // the controller has not been configured with a pod label or a pod annotation. // create an entry based on the service account. return makeID(r.c.TrustDomain, "ns/%s/sa/%s", pod.Namespace, pod.Spec.ServiceAccountName) } func (r *PodReconciler) podParentID(nodeName string) string { return makeID(r.c.TrustDomain, "k8s-workload-registrar/%s/node/%s", r.c.Cluster, nodeName) }
1
17,377
nit: Move `IdentityTemplateLabel` to be under `IdentityTemplate` to match the struct in config_crd.go
spiffe-spire
go
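The diff above pulls in bytes, strings, and text/template, which suggests the identity template the review mentions is rendered with Go's text/template package. A minimal sketch under that assumption; templateContext, renderSpiffeID, and the template string are hypothetical and not the registrar's actual API.

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// templateContext is an assumed shape for the data an identity template sees.
type templateContext struct {
	TrustDomain    string
	Namespace      string
	ServiceAccount string
}

// renderSpiffeID parses and executes an identity template against the context.
func renderSpiffeID(tmpl string, ctx templateContext) (string, error) {
	t, err := template.New("spiffeid").Parse(tmpl)
	if err != nil {
		return "", fmt.Errorf("invalid identity template: %w", err)
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, ctx); err != nil {
		return "", fmt.Errorf("failed to render identity template: %w", err)
	}
	return buf.String(), nil
}

func main() {
	id, err := renderSpiffeID(
		"spiffe://{{.TrustDomain}}/ns/{{.Namespace}}/sa/{{.ServiceAccount}}",
		templateContext{"example.org", "default", "web"},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // spiffe://example.org/ns/default/sa/web
}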
@@ -1,8 +1,13 @@ class Analytics include AnalyticsHelper - SAMPLER = "sampler" - SUBSCRIBER = "subscriber" + SAMPLER = "sampler".freeze + SUBSCRIBER = "subscriber".freeze + TRACKERS = { + "Video" => VideoTracker, + "Exercise" => ExerciseTracker, + "Trail" => TrailTracker, + }.freeze class_attribute :backend self.backend = AnalyticsRuby
1
class Analytics include AnalyticsHelper SAMPLER = "sampler" SUBSCRIBER = "subscriber" class_attribute :backend self.backend = AnalyticsRuby def initialize(user) @user = user end def track_updated backend.identify(user_id: user.id, traits: identify_hash(user)) end def track_video_finished(name:, watchable_name:) track("Finished video", name: name, watchable_name: watchable_name) end def track_video_started(name:, watchable_name:) track("Started video", name: name, watchable_name: watchable_name) track_touched_video(name: name, watchable_name: watchable_name) end def track_searched(query:, results_count:) track("Searched", query: query, results_count: results_count) end def track_collaborated(repository_name:) track("Created Collaboration", repository_name: repository_name) end def track_accessed_forum track("Logged into Forum") end def track_cancelled(reason:) track("Cancelled", reason: reason) end def track_flashcard_attempted(deck:, title:) track("Flashcard Attempted", deck: deck, title: title) end def track_downloaded(name:, watchable_name:, download_type:) track( "Downloaded Video", name: name, watchable_name: watchable_name, download_type: download_type, ) track_touched_video(name: name, watchable_name: watchable_name) end def track_replied_to_beta_offer(name:, accepted:) track( "Replied to beta offer", name: name, accepted: accepted, ) end def track_authed_to_access(video_name:, watchable_name:) track( "Authed to Access", video_name: video_name, watchable_name: watchable_name, ) end private attr_reader :user def user_type(user) if user.subscriber? SUBSCRIBER else SAMPLER end end def track_touched_video(name:, watchable_name:) track("Touched Video", name: name, watchable_name: watchable_name) end def track(event, properties = {}) backend.track( event: event, user_id: user.id, properties: properties.merge( email: user.email, user_type: user_type(user), ), ) end end
1
16,756
Not necessarily related to this PR, but don't you think having a centralized place for all interactions with analytics might make this into a very big class? Is that something that you don't worry about until it happens?
thoughtbot-upcase
rb
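The TRACKERS hash in the diff above already hints at one answer to the reviewer's concern: dispatching to small per-domain tracker objects keeps the central class from growing unbounded. A sketch of that registry-dispatch shape, in Go for consistency with the other examples here; Tracker, videoTracker, and trackers are hypothetical names, not the project's Ruby API.

package main

import "fmt"

// Tracker is one per-domain analytics tracker, keeping each type small.
type Tracker interface {
	Track(event string, props map[string]string)
}

type videoTracker struct{}

func (videoTracker) Track(event string, props map[string]string) {
	fmt.Println("video:", event, props)
}

// trackers maps a domain name to its tracker, mirroring the TRACKERS hash.
var trackers = map[string]Tracker{
	"Video": videoTracker{},
}

func main() {
	if t, ok := trackers["Video"]; ok {
		t.Track("Started video", map[string]string{"name": "intro"})
	}
}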
@@ -670,6 +670,13 @@ func (w *Workflow) runStep(ctx context.Context, s *Step) DError { select { case err := <-e: return err + case <-ctx.Done(): + if err := ctx.Err(); err == context.DeadlineExceeded { + return s.getTimeoutError() + } else if err != nil { + return Errf("step %q error: %s", s.name, err) + } + return nil case <-timeout: return s.getTimeoutError() }
1
// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package daisy describes a daisy workflow. package daisy import ( "bytes" "context" "encoding/json" "fmt" "io/ioutil" "os" "path" "path/filepath" "reflect" "strconv" "strings" "sync" "time" "cloud.google.com/go/logging" "cloud.google.com/go/storage" "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute" "google.golang.org/api/iterator" "google.golang.org/api/option" ) const defaultTimeout = "10m" func daisyBkt(ctx context.Context, client *storage.Client, project string) (string, DError) { dBkt := strings.Replace(project, ":", "-", -1) + "-daisy-bkt" it := client.Buckets(ctx, project) for bucketAttrs, err := it.Next(); err != iterator.Done; bucketAttrs, err = it.Next() { if err != nil { return "", typedErr(apiError, "failed to iterate buckets", err) } if bucketAttrs.Name == dBkt { return dBkt, nil } } if err := client.Bucket(dBkt).Create(ctx, project, nil); err != nil { return "", typedErr(apiError, "failed to create bucket", err) } return dBkt, nil } // TimeRecord is a type with info of a step execution time type TimeRecord struct { Name string StartTime time.Time EndTime time.Time } // Var is a type with a flexible JSON representation. A Var can be represented // by either a string, or by this struct definition. A Var that is represented // by a string will unmarshal into the struct: {Value: <string>, Required: false, Description: ""}. type Var struct { Value string Required bool `json:",omitempty"` Description string `json:",omitempty"` } // UnmarshalJSON unmarshals a Var. func (v *Var) UnmarshalJSON(b []byte) error { var s string if err := json.Unmarshal(b, &s); err == nil { v.Value = s return nil } // We can't unmarshal into Var directly as it would create an infinite loop. type aVar Var return json.Unmarshal(b, &struct{ *aVar }{aVar: (*aVar)(v)}) } // Workflow is a single Daisy workflow. type Workflow struct { // Populated on New() construction. Cancel chan struct{} `json:"-"` // Workflow template fields. // Workflow name. Name string `json:",omitempty"` // Project to run in. Project string `json:",omitempty"` // Zone to run in. Zone string `json:",omitempty"` // GCS Path to use for scratch data and write logs/results to. GCSPath string `json:",omitempty"` // Path to OAuth credentials file. OAuthPath string `json:",omitempty"` // Sources used by this workflow, map of destination to source. Sources map[string]string `json:",omitempty"` // Vars defines workflow variables, substitution is done at Workflow run time. Vars map[string]Var `json:",omitempty"` Steps map[string]*Step `json:",omitempty"` // Map of steps to their dependencies. Dependencies map[string][]string `json:",omitempty"` // Default timeout for each step, defaults to 10m. // Must be parsable by https://golang.org/pkg/time/#ParseDuration. DefaultTimeout string `json:",omitempty"` defaultTimeout time.Duration // Working fields. 
autovars map[string]string workflowDir string parent *Workflow bucket string scratchPath string sourcesPath string logsPath string outsPath string username string externalLogging bool gcsLoggingDisabled bool cloudLoggingDisabled bool stdoutLoggingDisabled bool id string Logger Logger `json:"-"` cleanupHooks []func() DError cleanupHooksMx sync.Mutex recordTimeMx sync.Mutex stepWait sync.WaitGroup logProcessHook func(string) string // Optional compute endpoint override. ComputeEndpoint string `json:",omitempty"` ComputeClient compute.Client `json:"-"` StorageClient *storage.Client `json:"-"` cloudLoggingClient *logging.Client // Resource registries. disks *diskRegistry forwardingRules *forwardingRuleRegistry firewallRules *firewallRuleRegistry images *imageRegistry machineImages *machineImageRegistry instances *instanceRegistry networks *networkRegistry subnetworks *subnetworkRegistry targetInstances *targetInstanceRegistry objects *objectRegistry snapshots *snapshotRegistry // Cache of resources machineTypeCache twoDResourceCache instanceCache twoDResourceCache diskCache twoDResourceCache subnetworkCache twoDResourceCache targetInstanceCache twoDResourceCache forwardingRuleCache twoDResourceCache imageCache oneDResourceCache imageFamilyCache oneDResourceCache machineImageCache oneDResourceCache networkCache oneDResourceCache firewallRuleCache oneDResourceCache zonesCache oneDResourceCache regionsCache oneDResourceCache licenseCache oneDResourceCache snapshotCache oneDResourceCache stepTimeRecords []TimeRecord serialControlOutputValues map[string]string serialControlOutputValuesMx sync.Mutex //Forces cleanup on error of all resources, including those marked with NoCleanup ForceCleanupOnError bool // forceCleanup is set to true when resources should be forced clean, even when NoCleanup is set to true forceCleanup bool // cancelReason provides custom reason when workflow is canceled. cancelReason string } //DisableCloudLogging disables logging to Cloud Logging for this workflow. func (w *Workflow) DisableCloudLogging() { w.cloudLoggingDisabled = true } //DisableGCSLogging disables logging to GCS for this workflow. func (w *Workflow) DisableGCSLogging() { w.gcsLoggingDisabled = true } //DisableStdoutLogging disables logging to stdout for this workflow. func (w *Workflow) DisableStdoutLogging() { w.stdoutLoggingDisabled = true } // AddVar adds a variable set to the Workflow. func (w *Workflow) AddVar(k, v string) { if w.Vars == nil { w.Vars = map[string]Var{} } w.Vars[k] = Var{Value: v} } // AddSerialConsoleOutputValue adds a serial-output key-value pair to the Workflow. func (w *Workflow) AddSerialConsoleOutputValue(k, v string) { w.serialControlOutputValuesMx.Lock() if w.serialControlOutputValues == nil { w.serialControlOutputValues = map[string]string{} } w.serialControlOutputValues[k] = v w.serialControlOutputValuesMx.Unlock() } // GetSerialConsoleOutputValue gets a serial-output value by key. func (w *Workflow) GetSerialConsoleOutputValue(k string) string { return w.serialControlOutputValues[k] } func (w *Workflow) addCleanupHook(hook func() DError) { w.cleanupHooksMx.Lock() w.cleanupHooks = append(w.cleanupHooks, hook) w.cleanupHooksMx.Unlock() } // SetLogProcessHook sets a hook function to process log strings func (w *Workflow) SetLogProcessHook(hook func(string) string) { w.logProcessHook = hook } // Validate runs validation on the workflow. 
func (w *Workflow) Validate(ctx context.Context) DError { if err := w.PopulateClients(ctx); err != nil { close(w.Cancel) return Errf("error populating workflow: %v", err) } if err := w.validateRequiredFields(); err != nil { close(w.Cancel) return Errf("error validating workflow: %v", err) } if err := w.populate(ctx); err != nil { close(w.Cancel) return Errf("error populating workflow: %v", err) } w.LogWorkflowInfo("Validating workflow") if err := w.validate(ctx); err != nil { w.LogWorkflowInfo("Error validating workflow: %v", err) close(w.Cancel) return err } w.LogWorkflowInfo("Validation Complete") return nil } // WorkflowModifier is a function type for functions that can modify a Workflow object. type WorkflowModifier func(*Workflow) // Run runs a workflow. func (w *Workflow) Run(ctx context.Context) error { return w.RunWithModifiers(ctx, nil, nil) } // RunWithModifiers runs a workflow with the ability to modify it before and/or after validation. func (w *Workflow) RunWithModifiers( ctx context.Context, preValidateWorkflowModifier WorkflowModifier, postValidateWorkflowModifier WorkflowModifier) (err DError) { w.externalLogging = true if preValidateWorkflowModifier != nil { preValidateWorkflowModifier(w) } if err = w.Validate(ctx); err != nil { return err } if postValidateWorkflowModifier != nil { postValidateWorkflowModifier(w) } defer w.cleanup() defer func() { if err != nil { w.forceCleanup = w.ForceCleanupOnError } }() w.LogWorkflowInfo("Workflow Project: %s", w.Project) w.LogWorkflowInfo("Workflow Zone: %s", w.Zone) w.LogWorkflowInfo("Workflow GCSPath: %s", w.GCSPath) w.LogWorkflowInfo("Daisy scratch path: https://console.cloud.google.com/storage/browser/%s", path.Join(w.bucket, w.scratchPath)) w.LogWorkflowInfo("Uploading sources") if err = w.uploadSources(ctx); err != nil { w.LogWorkflowInfo("Error uploading sources: %v", err) close(w.Cancel) return err } w.LogWorkflowInfo("Running workflow") defer func() { for k, v := range w.serialControlOutputValues { w.LogWorkflowInfo("Serial-output value -> %v:%v", k, v) } }() if err = w.run(ctx); err != nil { w.LogWorkflowInfo("Error running workflow: %v", err) return err } return nil } func (w *Workflow) recordStepTime(stepName string, startTime time.Time, endTime time.Time) { if w.parent == nil { w.recordTimeMx.Lock() w.stepTimeRecords = append(w.stepTimeRecords, TimeRecord{stepName, startTime, endTime}) w.recordTimeMx.Unlock() } else { w.parent.recordStepTime(fmt.Sprintf("%s.%s", w.Name, stepName), startTime, endTime) } } // GetStepTimeRecords returns time records of each steps func (w *Workflow) GetStepTimeRecords() []TimeRecord { return w.stepTimeRecords } func (w *Workflow) cleanup() { startTime := time.Now() w.LogWorkflowInfo("Workflow %q cleaning up (this may take up to 2 minutes).", w.Name) select { case <-w.Cancel: default: close(w.Cancel) } // Allow goroutines that are watching w.Cancel an opportunity // to detect that the workflow was cancelled and to cleanup. 
c := make(chan struct{}) go func() { w.stepWait.Wait() close(c) }() select { case <-c: case <-time.After(4 * time.Second): } for _, hook := range w.cleanupHooks { if err := hook(); err != nil { w.LogWorkflowInfo("Error returned from cleanup hook: %s", err) } } w.LogWorkflowInfo("Workflow %q finished cleanup.", w.Name) w.recordStepTime("workflow cleanup", startTime, time.Now()) } func (w *Workflow) genName(n string) string { name := w.Name for parent := w.parent; parent != nil; parent = parent.parent { name = parent.Name + "-" + name } prefix := name if n != "" { prefix = fmt.Sprintf("%s-%s", n, name) } if len(prefix) > 57 { prefix = prefix[0:56] } result := fmt.Sprintf("%s-%s", prefix, w.id) if len(result) > 64 { result = result[0:63] } return strings.ToLower(result) } func (w *Workflow) getSourceGCSAPIPath(s string) string { return fmt.Sprintf("%s/%s", gcsAPIBase, path.Join(w.bucket, w.sourcesPath, s)) } // PopulateClients populates the compute and storage clients for the workflow. func (w *Workflow) PopulateClients(ctx context.Context) error { // API clients instantiation. var err error computeOptions := []option.ClientOption{option.WithCredentialsFile(w.OAuthPath)} if w.ComputeEndpoint != "" { computeOptions = append(computeOptions, option.WithEndpoint(w.ComputeEndpoint)) } if w.ComputeClient == nil { w.ComputeClient, err = compute.NewClient(ctx, computeOptions...) if err != nil { return typedErr(apiError, "failed to create compute client", err) } } storageOptions := []option.ClientOption{option.WithCredentialsFile(w.OAuthPath)} if w.StorageClient == nil { w.StorageClient, err = storage.NewClient(ctx, storageOptions...) if err != nil { return err } } loggingOptions := []option.ClientOption{option.WithCredentialsFile(w.OAuthPath)} if w.externalLogging && w.cloudLoggingClient == nil { w.cloudLoggingClient, err = logging.NewClient(ctx, w.Project, loggingOptions...) if err != nil { return err } } return nil } func (w *Workflow) populateStep(ctx context.Context, s *Step) DError { if s.Timeout == "" { s.Timeout = w.DefaultTimeout } timeout, err := time.ParseDuration(s.Timeout) if err != nil { return newErr(fmt.Sprintf("failed to parse duration for workflow %v, step %v", w.Name, s.name), err) } s.timeout = timeout var derr DError var step stepImpl if step, derr = s.stepImpl(); derr != nil { return derr } return step.populate(ctx, s) } // populate does the following: // - checks that all required Vars are set. // - instantiates API clients, if needed. // - sets generic autovars and do first round of var substitution. // - sets GCS path information. // - generates autovars from workflow fields (Name, Zone, etc) and run second round of var substitution. // - sets up logger. // - runs populate on each step. func (w *Workflow) populate(ctx context.Context) DError { for k, v := range w.Vars { if v.Required && v.Value == "" { return Errf("cannot populate workflow, required var %q is unset", k) } } // Set some generic autovars and run first round of var substitution. 
cwd, _ := os.Getwd() now := time.Now().UTC() w.username = getUser() w.autovars = map[string]string{ "ID": w.id, "DATE": now.Format("20060102"), "DATETIME": now.Format("20060102150405"), "TIMESTAMP": strconv.FormatInt(now.Unix(), 10), "USERNAME": w.username, "WFDIR": w.workflowDir, "CWD": cwd, } var replacements []string for k, v := range w.autovars { replacements = append(replacements, fmt.Sprintf("${%s}", k), v) } for k, v := range w.Vars { replacements = append(replacements, fmt.Sprintf("${%s}", k), v.Value) } substitute(reflect.ValueOf(w).Elem(), strings.NewReplacer(replacements...)) // Parse timeout. timeout, err := time.ParseDuration(w.DefaultTimeout) if err != nil { return Errf("failed to parse timeout for workflow: %v", err) } w.defaultTimeout = timeout // Set up GCS paths. if w.GCSPath == "" { dBkt, err := daisyBkt(ctx, w.StorageClient, w.Project) if err != nil { return err } w.GCSPath = "gs://" + dBkt } bkt, p, derr := splitGCSPath(w.GCSPath) if derr != nil { return derr } w.bucket = bkt w.scratchPath = path.Join(p, fmt.Sprintf("daisy-%s-%s-%s", w.Name, now.Format("20060102-15:04:05"), w.id)) w.sourcesPath = path.Join(w.scratchPath, "sources") w.logsPath = path.Join(w.scratchPath, "logs") w.outsPath = path.Join(w.scratchPath, "outs") // Generate more autovars from workflow fields. Run second round of var substitution. w.autovars["NAME"] = w.Name w.autovars["FULLNAME"] = w.genName("") w.autovars["ZONE"] = w.Zone w.autovars["PROJECT"] = w.Project w.autovars["GCSPATH"] = w.GCSPath w.autovars["SCRATCHPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.scratchPath) w.autovars["SOURCESPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.sourcesPath) w.autovars["LOGSPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.logsPath) w.autovars["OUTSPATH"] = fmt.Sprintf("gs://%s/%s", w.bucket, w.outsPath) replacements = []string{} for k, v := range w.autovars { replacements = append(replacements, fmt.Sprintf("${%s}", k), v) } substitute(reflect.ValueOf(w).Elem(), strings.NewReplacer(replacements...)) // We do this here, and not in validate, as embedded startup scripts could // have what we think are daisy variables. if err := w.validateVarsSubbed(); err != nil { return err } if err := w.substituteSourceVars(ctx, reflect.ValueOf(w).Elem()); err != nil { return err } if w.Logger == nil { w.createLogger(ctx) } // Run populate on each step. for name, s := range w.Steps { s.name = name s.w = w if err := w.populateStep(ctx, s); err != nil { return Errf("error populating step %q: %v", name, err) } } return nil } // AddDependency creates a dependency of dependent on each dependency. Returns an // error if dependent or dependency are not steps in this workflow. func (w *Workflow) AddDependency(dependent *Step, dependencies ...*Step) error { if _, ok := w.Steps[dependent.name]; !ok { return fmt.Errorf("can't create dependency: step %q does not exist", dependent.name) } if w.Dependencies == nil { w.Dependencies = map[string][]string{} } for _, dependency := range dependencies { if _, ok := w.Steps[dependency.name]; !ok { return fmt.Errorf("can't create dependency: step %q does not exist", dependency.name) } if !strIn(dependency.name, w.Dependencies[dependent.name]) { // Don't add if dependency already exists. 
w.Dependencies[dependent.name] = append(w.Dependencies[dependent.name], dependency.name) } } return nil } func (w *Workflow) includeWorkflow(iw *Workflow) { iw.Cancel = w.Cancel iw.parent = w iw.disks = w.disks iw.forwardingRules = w.forwardingRules iw.firewallRules = w.firewallRules iw.images = w.images iw.machineImages = w.machineImages iw.instances = w.instances iw.networks = w.networks iw.subnetworks = w.subnetworks iw.targetInstances = w.targetInstances iw.snapshots = w.snapshots iw.objects = w.objects } // ID is the unique identifier for this Workflow. func (w *Workflow) ID() string { return w.id } // NewIncludedWorkflowFromFile reads and unmarshals a workflow with the same resources as the parent. func (w *Workflow) NewIncludedWorkflowFromFile(file string) (*Workflow, error) { iw := New() w.includeWorkflow(iw) if !filepath.IsAbs(file) { file = filepath.Join(w.workflowDir, file) } if err := readWorkflow(file, iw); err != nil { return nil, err } return iw, nil } // NewStep instantiates a new, typeless step for this workflow. // The step type must be specified before running this workflow. func (w *Workflow) NewStep(name string) (*Step, error) { if _, ok := w.Steps[name]; ok { return nil, fmt.Errorf("can't create step %q: a step already exists with that name", name) } s := &Step{name: name, w: w} if w.Steps == nil { w.Steps = map[string]*Step{} } w.Steps[name] = s return s, nil } // NewSubWorkflow instantiates a new workflow as a child to this workflow. func (w *Workflow) NewSubWorkflow() *Workflow { sw := New() sw.Cancel = w.Cancel sw.parent = w return sw } // NewSubWorkflowFromFile reads and unmarshals a workflow as a child to this workflow. func (w *Workflow) NewSubWorkflowFromFile(file string) (*Workflow, error) { sw := w.NewSubWorkflow() if !filepath.IsAbs(file) { file = filepath.Join(w.workflowDir, file) } if err := readWorkflow(file, sw); err != nil { return nil, err } return sw, nil } // Print populates then pretty prints the workflow. func (w *Workflow) Print(ctx context.Context) { w.externalLogging = false if err := w.PopulateClients(ctx); err != nil { fmt.Println("Error running PopulateClients:", err) } if err := w.populate(ctx); err != nil { fmt.Println("Error running populate:", err) } b, err := json.MarshalIndent(w, "", " ") if err != nil { fmt.Println("Error marshalling workflow for printing:", err) } fmt.Println(string(b)) } func (w *Workflow) run(ctx context.Context) DError { return w.traverseDAG(func(s *Step) DError { return w.runStep(ctx, s) }) } func (w *Workflow) runStep(ctx context.Context, s *Step) DError { timeout := make(chan struct{}) go func() { time.Sleep(s.timeout) close(timeout) }() e := make(chan DError) go func() { e <- s.run(ctx) }() select { case err := <-e: return err case <-timeout: return s.getTimeoutError() } } // Concurrently traverse the DAG, running func f on each step. // Return an error if f returns an error on any step. func (w *Workflow) traverseDAG(f func(*Step) DError) DError { // waiting = steps and the dependencies they are waiting for. // running = the currently running steps. // start = map of steps' start channels/semaphores. // done = map of steps' done channels for signaling step completion. waiting := map[string][]string{} var running []string start := map[string]chan DError{} done := map[string]chan DError{} // Setup: channels, copy dependencies. for name := range w.Steps { waiting[name] = w.Dependencies[name] start[name] = make(chan DError) done[name] = make(chan DError) } // Setup: goroutine for each step. 
Each waits to be notified to start. for name, s := range w.Steps { go func(name string, s *Step) { // Wait for signal, then run the function. Return any errs. if err := <-start[name]; err != nil { done[name] <- err } else if err := f(s); err != nil { done[name] <- err } close(done[name]) }(name, s) } // Main signaling logic. for len(waiting) != 0 || len(running) != 0 { // If we got a Cancel signal, kill all waiting steps. // Let running steps finish. select { case <-w.Cancel: waiting = map[string][]string{} default: } // Kick off all steps that aren't waiting for anything. for name, deps := range waiting { if len(deps) == 0 { delete(waiting, name) running = append(running, name) close(start[name]) } } // Sanity check. There should be at least one running step, // but loop back through if there isn't. if len(running) == 0 { continue } // Get next finished step. Return the step error if it erred. finished, err := stepsListen(running, done) if err != nil { return err } // Remove finished step from other steps' waiting lists. for name, deps := range waiting { waiting[name] = filter(deps, finished) } // Remove finished from currently running list. running = filter(running, finished) } return nil } func (w *Workflow) isCanceled() bool { select { case <-w.Cancel: return true default: return false } } // New instantiates a new workflow. func New() *Workflow { // We can't use context.WithCancel as we use the context even after cancel for cleanup. w := &Workflow{Cancel: make(chan struct{})} // Init nil'ed fields w.Sources = map[string]string{} w.Vars = map[string]Var{} w.Steps = map[string]*Step{} w.Dependencies = map[string][]string{} w.DefaultTimeout = defaultTimeout w.autovars = map[string]string{} // Resource registries and cleanup. w.disks = newDiskRegistry(w) w.forwardingRules = newForwardingRuleRegistry(w) w.firewallRules = newFirewallRuleRegistry(w) w.images = newImageRegistry(w) w.machineImages = newMachineImageRegistry(w) w.instances = newInstanceRegistry(w) w.networks = newNetworkRegistry(w) w.subnetworks = newSubnetworkRegistry(w) w.objects = newObjectRegistry(w) w.targetInstances = newTargetInstanceRegistry(w) w.snapshots = newSnapshotRegistry(w) w.addCleanupHook(func() DError { w.instances.cleanup() // instances need to be done before disks/networks w.images.cleanup() w.machineImages.cleanup() w.disks.cleanup() w.forwardingRules.cleanup() w.targetInstances.cleanup() w.firewallRules.cleanup() w.subnetworks.cleanup() w.networks.cleanup() w.snapshots.cleanup() return nil }) w.id = randString(5) return w } // NewFromFile reads and unmarshals a workflow file. // Recursively reads subworkflow steps as well. func NewFromFile(file string) (*Workflow, error) { w := New() if err := readWorkflow(file, w); err != nil { return nil, err } return w, nil } // JSONError turns an error from json.Unmarshal and returns a more user // friendly error. func JSONError(file string, data []byte, err error) error { // If this is a syntax error return a useful error. sErr, ok := err.(*json.SyntaxError) if !ok { return err } // Byte number where the error line starts. start := bytes.LastIndex(data[:sErr.Offset], []byte("\n")) + 1 // Assume end byte of error line is EOF unless this isn't the last line. end := len(data) if i := bytes.Index(data[start:], []byte("\n")); i >= 0 { end = start + i } // Line number of error. line := bytes.Count(data[:start], []byte("\n")) + 1 // Position of error in line (where to place the '^'). 
pos := int(sErr.Offset) - start if pos != 0 { pos = pos - 1 } return fmt.Errorf("%s: JSON syntax error in line %d: %s \n%s\n%s^", file, line, err, data[start:end], strings.Repeat(" ", pos)) } func readWorkflow(file string, w *Workflow) DError { data, err := ioutil.ReadFile(file) if err != nil { return newErr("failed to read workflow file", err) } w.workflowDir, err = filepath.Abs(filepath.Dir(file)) if err != nil { return newErr("failed to get absolute path of workflow file", err) } if err := json.Unmarshal(data, &w); err != nil { return newErr("failed to unmarshal workflow file", JSONError(file, data, err)) } if w.OAuthPath != "" && !filepath.IsAbs(w.OAuthPath) { w.OAuthPath = filepath.Join(w.workflowDir, w.OAuthPath) } for name, s := range w.Steps { s.name = name s.w = w } return nil } // stepsListen returns the first step that finishes/errs. func stepsListen(names []string, chans map[string]chan DError) (string, DError) { cases := make([]reflect.SelectCase, len(names)) for i, name := range names { cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(chans[name])} } caseIndex, value, recvOk := reflect.Select(cases) name := names[caseIndex] if recvOk { // recvOk -> a step failed, return the error. return name, value.Interface().(DError) } return name, nil } // IterateWorkflowSteps iterates over all workflow steps, including included // workflow steps, and calls cb callback function func (w *Workflow) IterateWorkflowSteps(cb func(step *Step)) { for _, step := range w.Steps { if step.IncludeWorkflow != nil { //recurse into included workflow step.IncludeWorkflow.Workflow.IterateWorkflowSteps(cb) } cb(step) } } // CancelWithReason cancels workflow with a specific reason. The specific reason replaces "is canceled" in the default error message. func (w *Workflow) CancelWithReason(reason string) { w.cancelReason = reason close(w.Cancel) } func (w *Workflow) getCancelReason() string { cancelReason := w.cancelReason for wi := w; cancelReason == "" && wi != nil; wi = wi.parent { cancelReason = wi.cancelReason } return cancelReason } func (w *Workflow) onStepCancel(s *Step, stepClass string) DError { if s == nil { return nil } cancelReason := w.getCancelReason() if cancelReason == "" { cancelReason = "is canceled" } errorMessageFormat := "Step %q (%s) " + cancelReason + "." s.w.LogWorkflowInfo(errorMessageFormat, s.name, stepClass) return Errf(errorMessageFormat, s.name, stepClass) }
1
11,514
It seems that my terminal becomes unresponsive when I press Ctrl+C while running the daisy CLI today; is this PR fixing that?
GoogleCloudPlatform-compute-image-tools
go
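The patch above adds a <-ctx.Done() case to runStep's select, which is exactly what lets an externally cancelled context (for example, one cancelled by Ctrl+C) interrupt a step that is still running, the behavior the review comment asks about. A minimal sketch of that mechanism, not daisy's actual wiring; runStep here is a simplified stand-in.

package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"
)

// runStep mirrors the shape of the patched select: step result,
// context cancellation/deadline, and a step-local timeout.
func runStep(ctx context.Context, work func() error, timeout time.Duration) error {
	e := make(chan error, 1) // buffered so the worker never leaks on timeout
	go func() { e <- work() }()
	select {
	case err := <-e:
		return err
	case <-ctx.Done():
		if ctx.Err() == context.DeadlineExceeded {
			return fmt.Errorf("step timed out")
		}
		return fmt.Errorf("step canceled: %v", ctx.Err())
	case <-time.After(timeout):
		return fmt.Errorf("step timed out")
	}
}

func main() {
	// signal.NotifyContext cancels ctx when Ctrl+C (SIGINT) arrives,
	// so the ctx.Done() case above fires promptly instead of hanging.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	err := runStep(ctx, func() error {
		time.Sleep(10 * time.Second) // stand-in for a long-running step
		return nil
	}, 5*time.Second)
	fmt.Println(err)
}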
@@ -403,7 +403,7 @@ func (nc *Config) Build(ctx context.Context) (*Node, error) { // only the syncer gets the storage which is online connected chainSyncer := chain.NewDefaultSyncer(&cstOffline, nodeConsensus, chainStore, fetcher) - msgPool := core.NewMessagePool(chainStore, consensus.NewIngestionValidator(chainStore)) + msgPool := core.NewMessagePool(chainStore, nc.Repo.Config().Mpool, consensus.NewIngestionValidator(chainStore)) outbox := core.NewMessageQueue() // Set up libp2p pubsub
1
package node import ( "context" "encoding/json" "fmt" "os" "sync" "time" "github.com/ipfs/go-bitswap" bsnet "github.com/ipfs/go-bitswap/network" bserv "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-hamt-ipld" bstore "github.com/ipfs/go-ipfs-blockstore" "github.com/ipfs/go-ipfs-exchange-interface" "github.com/ipfs/go-ipfs-exchange-offline" offroute "github.com/ipfs/go-ipfs-routing/offline" logging "github.com/ipfs/go-log" "github.com/ipfs/go-merkledag" "github.com/libp2p/go-libp2p" autonatsvc "github.com/libp2p/go-libp2p-autonat-svc" circuit "github.com/libp2p/go-libp2p-circuit" "github.com/libp2p/go-libp2p-host" "github.com/libp2p/go-libp2p-kad-dht" "github.com/libp2p/go-libp2p-kad-dht/opts" p2pmetrics "github.com/libp2p/go-libp2p-metrics" libp2ppeer "github.com/libp2p/go-libp2p-peer" dhtprotocol "github.com/libp2p/go-libp2p-protocol" libp2pps "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p-routing" rhost "github.com/libp2p/go-libp2p/p2p/host/routed" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" "github.com/filecoin-project/go-filecoin/abi" "github.com/filecoin-project/go-filecoin/actor/builtin" "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/chain" "github.com/filecoin-project/go-filecoin/config" "github.com/filecoin-project/go-filecoin/consensus" "github.com/filecoin-project/go-filecoin/core" "github.com/filecoin-project/go-filecoin/flags" "github.com/filecoin-project/go-filecoin/metrics" "github.com/filecoin-project/go-filecoin/mining" "github.com/filecoin-project/go-filecoin/net" "github.com/filecoin-project/go-filecoin/net/pubsub" "github.com/filecoin-project/go-filecoin/plumbing" "github.com/filecoin-project/go-filecoin/plumbing/cfg" "github.com/filecoin-project/go-filecoin/plumbing/dag" "github.com/filecoin-project/go-filecoin/plumbing/msg" "github.com/filecoin-project/go-filecoin/plumbing/mthdsig" "github.com/filecoin-project/go-filecoin/plumbing/strgdls" "github.com/filecoin-project/go-filecoin/porcelain" "github.com/filecoin-project/go-filecoin/proofs" "github.com/filecoin-project/go-filecoin/proofs/sectorbuilder" "github.com/filecoin-project/go-filecoin/protocol/block" "github.com/filecoin-project/go-filecoin/protocol/hello" "github.com/filecoin-project/go-filecoin/protocol/retrieval" "github.com/filecoin-project/go-filecoin/protocol/storage" "github.com/filecoin-project/go-filecoin/repo" "github.com/filecoin-project/go-filecoin/sampling" "github.com/filecoin-project/go-filecoin/state" "github.com/filecoin-project/go-filecoin/types" "github.com/filecoin-project/go-filecoin/wallet" ) const ( filecoinDHTProtocol dhtprotocol.ID = "/fil/kad/1.0.0" ) var log = logging.Logger("node") // nolint: deadcode var ( // ErrNoMinerAddress is returned when the node is not configured to have any miner addresses. ErrNoMinerAddress = errors.New("no miner addresses configured") ) type pubSubProcessorFunc func(ctx context.Context, msg pubsub.Message) error // Node represents a full Filecoin node. type Node struct { host host.Host PeerHost host.Host Consensus consensus.Protocol ChainReader chain.ReadStore Syncer chain.Syncer PowerTable consensus.PowerTableView BlockMiningAPI *block.MiningAPI PorcelainAPI *porcelain.API RetrievalAPI *retrieval.API StorageAPI *storage.API // HeavyTipSetCh is a subscription to the heaviest tipset topic on the chain. 
HeaviestTipSetCh chan interface{} // HeavyTipSetHandled is a hook for tests because pubsub notifications // arrive async. It's called after handling a new heaviest tipset. // Remove this after replacing the tipset "pubsub" with a synchronous event bus: // https://github.com/filecoin-project/go-filecoin/issues/2309 HeaviestTipSetHandled func() // Incoming messages for block mining. MsgPool *core.MessagePool // Messages sent and not yet mined. Outbox *core.MessageQueue Wallet *wallet.Wallet // Mining stuff. AddNewlyMinedBlock newBlockFunc blockTime time.Duration cancelMining context.CancelFunc GetAncestorsFunc mining.GetAncestors GetStateTreeFunc mining.GetStateTree GetWeightFunc mining.GetWeight MiningWorker mining.Worker MiningScheduler mining.Scheduler mining struct { sync.Mutex isMining bool } miningCtx context.Context miningDoneWg *sync.WaitGroup // Storage Market Interfaces StorageMiner *storage.Miner // Retrieval Interfaces RetrievalMiner *retrieval.Miner // Network Fields BlockSub pubsub.Subscription MessageSub pubsub.Subscription HelloSvc *hello.Handler Bootstrapper *net.Bootstrapper // Data Storage Fields // Repo is the repo this node was created with // it contains all persistent artifacts of the filecoin node Repo repo.Repo // SectorBuilder is used by the miner to fill and seal sectors. sectorBuilder sectorbuilder.SectorBuilder // Fetcher is the interface for fetching data from nodes. Fetcher *net.Fetcher // Exchange is the interface for fetching data from other nodes. Exchange exchange.Interface // Blockstore is the un-networked blocks interface Blockstore bstore.Blockstore // Blockservice is a higher level interface for fetching data blockservice bserv.BlockService // CborStore is a temporary interface for interacting with IPLD objects. cborStore *hamt.CborIpldStore // cancelSubscriptionsCtx is a handle to cancel the block and message subscriptions. cancelSubscriptionsCtx context.CancelFunc // OfflineMode, when true, disables libp2p OfflineMode bool // Router is a router from IPFS Router routing.IpfsRouting } // Config is a helper to aid in the construction of a filecoin node. type Config struct { BlockTime time.Duration Libp2pOpts []libp2p.Option OfflineMode bool Verifier proofs.Verifier Rewarder consensus.BlockRewarder Repo repo.Repo IsRelay bool } // ConfigOpt is a configuration option for a filecoin node. type ConfigOpt func(*Config) error // OfflineMode enables or disables offline mode. func OfflineMode(offlineMode bool) ConfigOpt { return func(c *Config) error { c.OfflineMode = offlineMode return nil } } // IsRelay configures node to act as a libp2p relay. func IsRelay() ConfigOpt { return func(c *Config) error { c.IsRelay = true return nil } } // BlockTime sets the blockTime. 
func BlockTime(blockTime time.Duration) ConfigOpt { return func(c *Config) error { c.BlockTime = blockTime return nil } } // Libp2pOptions returns a node config option that sets up the libp2p node func Libp2pOptions(opts ...libp2p.Option) ConfigOpt { return func(nc *Config) error { // Quietly having your options overridden leads to hair loss if len(nc.Libp2pOpts) > 0 { panic("Libp2pOptions can only be called once") } nc.Libp2pOpts = opts return nil } } // VerifierConfigOption returns a function that sets the verifier to use in the node consensus func VerifierConfigOption(verifier proofs.Verifier) ConfigOpt { return func(c *Config) error { c.Verifier = verifier return nil } } // RewarderConfigOption returns a function that sets the rewarder to use in the node consensus func RewarderConfigOption(rewarder consensus.BlockRewarder) ConfigOpt { return func(c *Config) error { c.Rewarder = rewarder return nil } } // New creates a new node. func New(ctx context.Context, opts ...ConfigOpt) (*Node, error) { n := &Config{} for _, o := range opts { if err := o(n); err != nil { return nil, err } } return n.Build(ctx) } type blankValidator struct{} func (blankValidator) Validate(_ string, _ []byte) error { return nil } func (blankValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil } // readGenesisCid is a helper function that queries the provided datastore for // an entry with the genesisKey cid, returning if found. func readGenesisCid(ds datastore.Datastore) (cid.Cid, error) { bb, err := ds.Get(chain.GenesisKey) if err != nil { return cid.Undef, errors.Wrap(err, "failed to read genesisKey") } var c cid.Cid err = json.Unmarshal(bb, &c) if err != nil { return cid.Undef, errors.Wrap(err, "failed to cast genesisCid") } return c, nil } // buildHost determines if we are publically dialable. If so use public // Address, if not configure node to announce relay address. func (nc *Config) buildHost(ctx context.Context, makeDHT func(host host.Host) (routing.IpfsRouting, error)) (host.Host, error) { // Node must build a host acting as a libp2p relay. Additionally it // runs the autoNAT service which allows other nodes to check for their // own dialability by having this node attempt to dial them. makeDHTRightType := func(h host.Host) (routing.PeerRouting, error) { return makeDHT(h) } if nc.IsRelay { cfg := nc.Repo.Config() publicAddr, err := ma.NewMultiaddr(cfg.Swarm.PublicRelayAddress) if err != nil { return nil, err } publicAddrFactory := func(lc *libp2p.Config) error { lc.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr { if cfg.Swarm.PublicRelayAddress == "" { return addrs } return append(addrs, publicAddr) } return nil } relayHost, err := libp2p.New( ctx, libp2p.EnableRelay(circuit.OptHop), libp2p.EnableAutoRelay(), libp2p.Routing(makeDHTRightType), publicAddrFactory, libp2p.ChainOptions(nc.Libp2pOpts...), ) if err != nil { return nil, err } // Set up autoNATService as a streamhandler on the host. _, err = autonatsvc.NewAutoNATService(ctx, relayHost) if err != nil { return nil, err } return relayHost, nil } return libp2p.New( ctx, libp2p.EnableAutoRelay(), libp2p.Routing(makeDHTRightType), libp2p.ChainOptions(nc.Libp2pOpts...), ) } // Build instantiates a filecoin Node from the settings specified in the config. 
func (nc *Config) Build(ctx context.Context) (*Node, error) { if nc.Repo == nil { nc.Repo = repo.NewInMemoryRepo() } bs := bstore.NewBlockstore(nc.Repo.Datastore()) validator := blankValidator{} var peerHost host.Host var router routing.IpfsRouting bandwidthTracker := p2pmetrics.NewBandwidthCounter() nc.Libp2pOpts = append(nc.Libp2pOpts, libp2p.BandwidthReporter(bandwidthTracker)) if !nc.OfflineMode { makeDHT := func(h host.Host) (routing.IpfsRouting, error) { r, err := dht.New( ctx, h, dhtopts.Datastore(nc.Repo.Datastore()), dhtopts.NamespacedValidator("v", validator), dhtopts.Protocols(filecoinDHTProtocol), ) if err != nil { return nil, errors.Wrap(err, "failed to setup routing") } router = r return r, err } var err error peerHost, err = nc.buildHost(ctx, makeDHT) if err != nil { return nil, err } } else { router = offroute.NewOfflineRouter(nc.Repo.Datastore(), validator) peerHost = rhost.Wrap(noopLibP2PHost{}, router) } // set up pinger pinger := ping.NewPingService(peerHost) // set up bitswap nwork := bsnet.NewFromIpfsHost(peerHost, router) //nwork := bsnet.NewFromIpfsHost(innerHost, router) bswap := bitswap.New(ctx, nwork, bs) bservice := bserv.New(bs, bswap) fetcher := net.NewFetcher(ctx, bservice) cstOffline := hamt.CborIpldStore{Blocks: bserv.New(bs, offline.Exchange(bs))} genCid, err := readGenesisCid(nc.Repo.Datastore()) if err != nil { return nil, err } // set up chainstore chainStore := chain.NewDefaultStore(nc.Repo.ChainDatastore(), &cstOffline, genCid) powerTable := &consensus.MarketView{} // set up processor var processor consensus.Processor if nc.Rewarder == nil { processor = consensus.NewDefaultProcessor() } else { processor = consensus.NewConfiguredProcessor(consensus.NewDefaultMessageValidator(), nc.Rewarder) } // set up consensus var nodeConsensus consensus.Protocol if nc.Verifier == nil { nodeConsensus = consensus.NewExpected(&cstOffline, bs, processor, powerTable, genCid, &proofs.RustVerifier{}) } else { nodeConsensus = consensus.NewExpected(&cstOffline, bs, processor, powerTable, genCid, nc.Verifier) } // only the syncer gets the storage which is online connected chainSyncer := chain.NewDefaultSyncer(&cstOffline, nodeConsensus, chainStore, fetcher) msgPool := core.NewMessagePool(chainStore, consensus.NewIngestionValidator(chainStore)) outbox := core.NewMessageQueue() // Set up libp2p pubsub fsub, err := libp2pps.NewFloodSub(ctx, peerHost) if err != nil { return nil, errors.Wrap(err, "failed to set up pubsub") } backend, err := wallet.NewDSBackend(nc.Repo.WalletDatastore()) if err != nil { return nil, errors.Wrap(err, "failed to set up wallet backend") } fcWallet := wallet.New(backend) PorcelainAPI := porcelain.New(plumbing.New(&plumbing.APIDeps{ Bitswap: bswap, Chain: chainStore, Config: cfg.NewConfig(nc.Repo), DAG: dag.NewDAG(merkledag.NewDAGService(bservice)), Deals: strgdls.New(nc.Repo.DealsDatastore()), MsgPool: msgPool, MsgPreviewer: msg.NewPreviewer(fcWallet, chainStore, &cstOffline, bs), MsgQueryer: msg.NewQueryer(nc.Repo, fcWallet, chainStore, &cstOffline, bs), MsgSender: msg.NewSender(fcWallet, chainStore, chainStore, outbox, msgPool, consensus.NewOutboundMessageValidator(), fsub.Publish), MsgWaiter: msg.NewWaiter(chainStore, bs, &cstOffline), Network: net.New(peerHost, pubsub.NewPublisher(fsub), pubsub.NewSubscriber(fsub), net.NewRouter(router), bandwidthTracker, pinger), Outbox: outbox, SigGetter: mthdsig.NewGetter(chainStore), Wallet: fcWallet, })) nd := &Node{ blockservice: bservice, Blockstore: bs, cborStore: &cstOffline, Consensus: nodeConsensus, 
ChainReader: chainStore, Syncer: chainSyncer, PowerTable: powerTable, PorcelainAPI: PorcelainAPI, Fetcher: fetcher, Exchange: bswap, host: peerHost, MsgPool: msgPool, Outbox: outbox, OfflineMode: nc.OfflineMode, PeerHost: peerHost, Repo: nc.Repo, Wallet: fcWallet, blockTime: nc.BlockTime, Router: router, } // set up mining worker funcs nd.GetAncestorsFunc = nd.getAncestors nd.GetStateTreeFunc = nd.getStateTree nd.GetWeightFunc = nd.getWeight // Bootstrapping network peers. periodStr := nd.Repo.Config().Bootstrap.Period period, err := time.ParseDuration(periodStr) if err != nil { return nil, errors.Wrapf(err, "couldn't parse bootstrap period %s", periodStr) } // Bootstrapper maintains connections to some subset of addresses ba := nd.Repo.Config().Bootstrap.Addresses bpi, err := net.PeerAddrsToPeerInfos(ba) if err != nil { return nil, errors.Wrapf(err, "couldn't parse bootstrap addresses [%s]", ba) } minPeerThreshold := nd.Repo.Config().Bootstrap.MinPeerThreshold nd.Bootstrapper = net.NewBootstrapper(bpi, nd.Host(), nd.Host().Network(), nd.Router, minPeerThreshold, period) return nd, nil } // Start boots up the node. func (node *Node) Start(ctx context.Context) error { if err := metrics.RegisterPrometheusEndpoint(node.Repo.Config().Metrics); err != nil { return errors.Wrap(err, "failed to setup metrics") } var err error if err = node.ChainReader.Load(ctx); err != nil { return err } // Only set these up if there is a miner configured. if _, err := node.miningAddress(); err == nil { if err := node.setupMining(ctx); err != nil { log.Errorf("setup mining failed: %v", err) return err } } // Start up 'hello' handshake service syncCallBack := func(pid libp2ppeer.ID, cids []cid.Cid, height uint64) { cidSet := types.NewSortedCidSet(cids...) err := node.Syncer.HandleNewTipset(context.Background(), cidSet) if err != nil { log.Infof("error handling blocks: %s", cidSet.String()) } } node.HelloSvc = hello.New(node.Host(), node.ChainReader.GenesisCid(), syncCallBack, node.ChainReader.Head, node.Repo.Config().Net, flags.Commit) err = node.setupProtocols() if err != nil { return errors.Wrap(err, "failed to set up protocols:") } node.RetrievalMiner = retrieval.NewMiner(node) // subscribe to block notifications blkSub, err := node.PorcelainAPI.PubSubSubscribe(BlockTopic) if err != nil { return errors.Wrap(err, "failed to subscribe to blocks topic") } node.BlockSub = blkSub // subscribe to message notifications msgSub, err := node.PorcelainAPI.PubSubSubscribe(msg.Topic) if err != nil { return errors.Wrap(err, "failed to subscribe to message topic") } node.MessageSub = msgSub cctx, cancel := context.WithCancel(context.Background()) node.cancelSubscriptionsCtx = cancel go node.handleSubscription(cctx, node.processBlock, "processBlock", node.BlockSub, "BlockSub") go node.handleSubscription(cctx, node.processMessage, "processMessage", node.MessageSub, "MessageSub") outboxPolicy := core.NewMessageQueuePolicy(node.Outbox, node.ChainReadStore(), core.OutboxMaxAgeRounds) node.HeaviestTipSetHandled = func() {} node.HeaviestTipSetCh = node.ChainReader.HeadEvents().Sub(chain.NewHeadTopic) go node.handleNewHeaviestTipSet(cctx, node.ChainReader.Head(), outboxPolicy) if !node.OfflineMode { node.Bootstrapper.Start(context.Background()) } if err := node.setupHeartbeatServices(ctx); err != nil { return errors.Wrap(err, "failed to start heartbeat services") } return nil } func (node *Node) setupHeartbeatServices(ctx context.Context) error { mag := func() address.Address { addr, err := node.miningAddress() // the only error 
miningAddress() returns is ErrNoMinerAddress. // if there is no configured miner address, simply send a zero // address across the wire. if err != nil { return address.Undef } return addr } // start the primary heartbeat service if len(node.Repo.Config().Heartbeat.BeatTarget) > 0 { hbs := metrics.NewHeartbeatService(node.Host(), node.Repo.Config().Heartbeat, node.ChainReader.Head, metrics.WithMinerAddressGetter(mag)) go hbs.Start(ctx) } // check if we want to connect to an alert service. An alerting service is a heartbeat // service that can trigger alerts based on the contents of heartbeats. if alertTarget := os.Getenv("FIL_HEARTBEAT_ALERTS"); len(alertTarget) > 0 { ahbs := metrics.NewHeartbeatService(node.Host(), &config.HeartbeatConfig{ BeatTarget: alertTarget, BeatPeriod: "10s", ReconnectPeriod: "10s", Nickname: node.Repo.Config().Heartbeat.Nickname, }, node.ChainReader.Head, metrics.WithMinerAddressGetter(mag)) go ahbs.Start(ctx) } return nil } func (node *Node) setupMining(ctx context.Context) error { // configure the underlying sector store, defaulting to the non-test version proofsMode := proofs.LiveMode if os.Getenv("FIL_USE_SMALL_SECTORS") == "true" { proofsMode = proofs.TestMode } // initialize a sector builder sectorBuilder, err := initSectorBuilderForNode(ctx, node, proofsMode) if err != nil { return errors.Wrap(err, "failed to initialize sector builder") } node.sectorBuilder = sectorBuilder return nil } func (node *Node) setIsMining(isMining bool) { node.mining.Lock() defer node.mining.Unlock() node.mining.isMining = isMining } func (node *Node) handleNewMiningOutput(miningOutCh <-chan mining.Output) { defer func() { node.miningDoneWg.Done() }() for { select { case <-node.miningCtx.Done(): return case output, ok := <-miningOutCh: if !ok { return } if output.Err != nil { log.Errorf("stopping mining. error: %s", output.Err.Error()) node.StopMining(context.Background()) } else { node.miningDoneWg.Add(1) go func() { if node.IsMining() { node.AddNewlyMinedBlock(node.miningCtx, output.NewBlock) } node.miningDoneWg.Done() }() } } } } func (node *Node) handleNewHeaviestTipSet(ctx context.Context, head types.TipSet, outboxPolicy *core.MessageQueuePolicy) { for { select { case ts, ok := <-node.HeaviestTipSetCh: if !ok { return } newHead, ok := ts.(types.TipSet) if !ok { log.Error("non-tipset published on heaviest tipset channel") continue } if len(newHead) == 0 { log.Error("tipset of size 0 published on heaviest tipset channel. ignoring and waiting for a new heaviest tipset.") continue } if err := outboxPolicy.OnNewHeadTipset(ctx, head, newHead); err != nil { log.Error("updating outbound message queue for new tipset", err) } if err := node.MsgPool.UpdateMessagePool(ctx, node.ChainReadStore(), head, newHead); err != nil { log.Error("updating message pool for new tipset", err) } head = newHead if node.StorageMiner != nil { node.StorageMiner.OnNewHeaviestTipSet(newHead) } node.HeaviestTipSetHandled() case <-ctx.Done(): return } } } func (node *Node) cancelSubscriptions() { if node.BlockSub != nil || node.MessageSub != nil { node.cancelSubscriptionsCtx() } if node.BlockSub != nil { node.BlockSub.Cancel() node.BlockSub = nil } if node.MessageSub != nil { node.MessageSub.Cancel() node.MessageSub = nil } } // Stop initiates the shutdown of the node.
func (node *Node) Stop(ctx context.Context) { node.ChainReader.HeadEvents().Unsub(node.HeaviestTipSetCh) node.StopMining(ctx) node.cancelSubscriptions() node.ChainReader.Stop() if node.SectorBuilder() != nil { if err := node.SectorBuilder().Close(); err != nil { fmt.Printf("error closing sector builder: %s\n", err) } node.sectorBuilder = nil } if err := node.Host().Close(); err != nil { fmt.Printf("error closing host: %s\n", err) } if err := node.Repo.Close(); err != nil { fmt.Printf("error closing repo: %s\n", err) } node.Bootstrapper.Stop() fmt.Println("stopping filecoin :(") } type newBlockFunc func(context.Context, *types.Block) func (node *Node) addNewlyMinedBlock(ctx context.Context, b *types.Block) { log.Debugf("Got a newly mined block from the mining worker: %s", b) if err := node.AddNewBlock(ctx, b); err != nil { log.Warningf("error adding new mined block: %s. err: %s", b.Cid().String(), err.Error()) } } // miningAddress returns the address of the mining actor mining on behalf of // the node. func (node *Node) miningAddress() (address.Address, error) { addr := node.Repo.Config().Mining.MinerAddress if addr.Empty() { return address.Undef, ErrNoMinerAddress } return addr, nil } // MiningTimes returns the configured time it takes to mine a block, and also // the mining delay duration, which is currently a fixed fraction of block time. // Note this is mocked behavior, in production this time is determined by how // long it takes to generate PoSTs. func (node *Node) MiningTimes() (time.Duration, time.Duration) { mineDelay := node.GetBlockTime() / mining.MineDelayConversionFactor return node.GetBlockTime(), mineDelay } // GetBlockTime returns the current block time. // TODO this should be surfaced somewhere in the plumbing API. func (node *Node) GetBlockTime() time.Duration { return node.blockTime } // SetBlockTime sets the block time. func (node *Node) SetBlockTime(blockTime time.Duration) { node.blockTime = blockTime } // StartMining causes the node to start feeding blocks to the mining worker and initializes // the SectorBuilder for the mining address. 
func (node *Node) StartMining(ctx context.Context) error { if node.IsMining() { return errors.New("Node is already mining") } minerAddr, err := node.miningAddress() if err != nil { return errors.Wrap(err, "failed to get mining address") } // ensure we have a sector builder if node.SectorBuilder() == nil { if err := node.setupMining(ctx); err != nil { return err } } minerOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr) if err != nil { return errors.Wrapf(err, "failed to get mining owner address for miner %s", minerAddr) } _, mineDelay := node.MiningTimes() if node.MiningWorker == nil { if node.MiningWorker, err = node.CreateMiningWorker(ctx); err != nil { return err } } if node.MiningScheduler == nil { node.MiningScheduler = mining.NewScheduler(node.MiningWorker, mineDelay, node.ChainReader.Head) } // paranoid check if !node.MiningScheduler.IsStarted() { node.miningCtx, node.cancelMining = context.WithCancel(context.Background()) outCh, doneWg := node.MiningScheduler.Start(node.miningCtx) node.miningDoneWg = doneWg node.AddNewlyMinedBlock = node.addNewlyMinedBlock node.miningDoneWg.Add(1) go node.handleNewMiningOutput(outCh) } // initialize a storage miner storageMiner, err := initStorageMinerForNode(ctx, node) if err != nil { return errors.Wrap(err, "failed to initialize storage miner") } node.StorageMiner = storageMiner // loop, turning sealing-results into commitSector messages to be included // in the chain go func() { for { select { case result := <-node.SectorBuilder().SectorSealResults(): if result.SealingErr != nil { log.Errorf("failed to seal sector with id %d: %s", result.SectorID, result.SealingErr.Error()) } else if result.SealingResult != nil { // TODO: determine these algorithmically by simulating call and querying historical prices gasPrice := types.NewGasPrice(0) gasUnits := types.NewGasUnits(300) val := result.SealingResult // This call can fail due to, e.g., nonce collisions. Our miner's existence depends on this. // We should deal with this, but MessageSendWithRetry is problematic.
_, err := node.PorcelainAPI.MessageSend( node.miningCtx, minerOwnerAddr, minerAddr, nil, gasPrice, gasUnits, "commitSector", val.SectorID, val.CommD[:], val.CommR[:], val.CommRStar[:], val.Proof[:], ) if err != nil { log.Errorf("failed to send commitSector message from %s to %s for sector with id %d: %s", minerOwnerAddr, minerAddr, val.SectorID, err) continue } node.StorageMiner.OnCommitmentAddedToChain(val, nil) } case <-node.miningCtx.Done(): return } } }() // schedules sealing of staged piece-data if node.Repo.Config().Mining.AutoSealIntervalSeconds > 0 { go func() { for { select { case <-node.miningCtx.Done(): return case <-time.After(time.Duration(node.Repo.Config().Mining.AutoSealIntervalSeconds) * time.Second): log.Info("auto-seal has been triggered") if err := node.SectorBuilder().SealAllStagedSectors(node.miningCtx); err != nil { log.Errorf("scheduler received error from node.SectorBuilder.SealAllStagedSectors (%s) - exiting", err.Error()) return } } } }() } else { log.Debug("auto-seal is disabled") } node.setIsMining(true) return nil } func (node *Node) getLastUsedSectorID(ctx context.Context, minerAddr address.Address) (uint64, error) { rets, methodSignature, err := node.PorcelainAPI.MessageQuery( ctx, address.Address{}, minerAddr, "getLastUsedSectorID", ) if err != nil { return 0, errors.Wrap(err, "failed to call query method getLastUsedSectorID") } lastUsedSectorIDVal, err := abi.Deserialize(rets[0], methodSignature.Return[0]) if err != nil { return 0, errors.Wrap(err, "failed to convert returned ABI value") } lastUsedSectorID, ok := lastUsedSectorIDVal.Val.(uint64) if !ok { return 0, errors.New("failed to convert returned ABI value to uint64") } return lastUsedSectorID, nil } func initSectorBuilderForNode(ctx context.Context, node *Node, proofsMode proofs.Mode) (sectorbuilder.SectorBuilder, error) { minerAddr, err := node.miningAddress() if err != nil { return nil, errors.Wrap(err, "failed to get node's mining address") } lastUsedSectorID, err := node.getLastUsedSectorID(ctx, minerAddr) if err != nil { return nil, errors.Wrapf(err, "failed to get last used sector id for miner w/address %s", minerAddr.String()) } // TODO: Where should we store the RustSectorBuilder metadata? Currently, we // configure the RustSectorBuilder to store its metadata in the staging // directory. cfg := sectorbuilder.RustSectorBuilderConfig{ BlockService: node.blockservice, LastUsedSectorID: lastUsedSectorID, MetadataDir: node.Repo.StagingDir(), MinerAddr: minerAddr, SealedSectorDir: node.Repo.SealedDir(), ProofsMode: proofsMode, StagedSectorDir: node.Repo.StagingDir(), } sb, err := sectorbuilder.NewRustSectorBuilder(cfg) if err != nil { return nil, errors.Wrap(err, fmt.Sprintf("failed to initialize sector builder for miner %s", minerAddr.String())) } return sb, nil } func initStorageMinerForNode(ctx context.Context, node *Node) (*storage.Miner, error) { minerAddr, err := node.miningAddress() if err != nil { return nil, errors.Wrap(err, "failed to get node's mining address") } miningOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr) if err != nil { return nil, errors.Wrap(err, "no mining owner available, skipping storage miner setup") } miner, err := storage.NewMiner(minerAddr, miningOwnerAddr, node, node.Repo.DealsDatastore(), node.PorcelainAPI) if err != nil { return nil, errors.Wrap(err, "failed to instantiate storage miner") } return miner, nil } // StopMining stops mining on new blocks. 
func (node *Node) StopMining(ctx context.Context) { node.setIsMining(false) if node.cancelMining != nil { node.cancelMining() } if node.miningDoneWg != nil { node.miningDoneWg.Wait() } // TODO: stop node.StorageMiner } // NewAddress creates a new account address on the default wallet backend. func (node *Node) NewAddress() (address.Address, error) { return wallet.NewAddress(node.Wallet) } // miningOwnerAddress returns the owner of miningAddr. // TODO: find a better home for this method func (node *Node) miningOwnerAddress(ctx context.Context, miningAddr address.Address) (address.Address, error) { ownerAddr, err := node.PorcelainAPI.MinerGetOwnerAddress(ctx, miningAddr) if err != nil { return address.Undef, errors.Wrap(err, "failed to get miner owner address") } return ownerAddr, nil } // BlockHeight returns the current block height of the chain. func (node *Node) BlockHeight() (*types.BlockHeight, error) { head := node.ChainReader.Head() if head == nil { return nil, errors.New("invalid nil head") } height, err := head.Height() if err != nil { return nil, err } return types.NewBlockHeight(height), nil } func (node *Node) handleSubscription(ctx context.Context, f pubSubProcessorFunc, fname string, s pubsub.Subscription, sname string) { for { pubSubMsg, err := s.Next(ctx) if err != nil { log.Errorf("%s.Next(): %s", sname, err) return } if err := f(ctx, pubSubMsg); err != nil { if err != context.Canceled { log.Errorf("%s(): %s", fname, err) } } } } // setupProtocols creates protocol clients and miners, then sets the node's APIs // for each func (node *Node) setupProtocols() error { _, mineDelay := node.MiningTimes() blockMiningAPI := block.New( node.AddNewBlock, node.ChainReader, mineDelay, node.StartMining, node.StopMining, node.CreateMiningWorker) node.BlockMiningAPI = &blockMiningAPI // set up retrieval client and api retapi := retrieval.NewAPI(retrieval.NewClient(node.host, node.blockTime)) node.RetrievalAPI = &retapi // set up storage client and api smc := storage.NewClient(node.blockTime, node.host, node.PorcelainAPI) smcAPI := storage.NewAPI(smc) node.StorageAPI = &smcAPI return nil } // CreateMiningWorker creates a mining.Worker for the node using the configured // getStateTree, getWeight, and getAncestors functions for the node func (node *Node) CreateMiningWorker(ctx context.Context) (mining.Worker, error) { processor := consensus.NewDefaultProcessor() minerAddr, err := node.miningAddress() if err != nil { return nil, errors.Wrap(err, "failed to get mining address") } minerPubKey, err := node.PorcelainAPI.MinerGetKey(ctx, minerAddr) if err != nil { return nil, errors.Wrap(err, "could not get key from miner actor") } minerOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr) if err != nil { log.Errorf("could not get owner address of miner actor") return nil, err } return mining.NewDefaultWorker( node.MsgPool, node.getStateTree, node.getWeight, node.getAncestors, processor, node.PowerTable, node.Blockstore, node.CborStore(), minerAddr, minerOwnerAddr, minerPubKey, node.Wallet, node.blockTime), nil } // getStateFromKey returns the state tree based on tipset fetched with provided key tsKey func (node *Node) getStateFromKey(ctx context.Context, tsKey string) (state.Tree, error) { tsas, err := node.ChainReader.GetTipSetAndState(ctx, tsKey) if err != nil { return nil, err } return state.LoadStateTree(ctx, node.CborStore(), tsas.TipSetStateRoot, builtin.Actors) } // getStateTree is the default GetStateTree function for the mining worker. 
func (node *Node) getStateTree(ctx context.Context, ts types.TipSet) (state.Tree, error) { return node.getStateFromKey(ctx, ts.String()) } // getWeight is the default GetWeight function for the mining worker. func (node *Node) getWeight(ctx context.Context, ts types.TipSet) (uint64, error) { parent, err := ts.Parents() if err != nil { return uint64(0), err } // TODO handle genesis cid more gracefully if parent.Len() == 0 { return node.Consensus.Weight(ctx, ts, nil) } pSt, err := node.getStateFromKey(ctx, parent.String()) if err != nil { return uint64(0), err } return node.Consensus.Weight(ctx, ts, pSt) } // getAncestors is the default GetAncestors function for the mining worker. func (node *Node) getAncestors(ctx context.Context, ts types.TipSet, newBlockHeight *types.BlockHeight) ([]types.TipSet, error) { return chain.GetRecentAncestors(ctx, ts, node.ChainReader, newBlockHeight, consensus.AncestorRoundsNeeded, sampling.LookbackParameter) } // -- Accessors // Host returns the node's host. func (node *Node) Host() host.Host { return node.host } // SectorBuilder returns the node's sectorBuilder. func (node *Node) SectorBuilder() sectorbuilder.SectorBuilder { return node.sectorBuilder } // BlockService returns the node's blockservice. func (node *Node) BlockService() bserv.BlockService { return node.blockservice } // CborStore returns the node's cborStore. func (node *Node) CborStore() *hamt.CborIpldStore { return node.cborStore } // ChainReadStore returns the node's chain store. func (node *Node) ChainReadStore() chain.ReadStore { return node.ChainReader } // IsMining returns a boolean indicating whether the node is mining blocks. func (node *Node) IsMining() bool { node.mining.Lock() defer node.mining.Unlock() return node.mining.isMining }
1
18,520
If we are going to add config to specify the maximum message pool size, we should probably also add a parameter for the maximum nonce gap and pass it into the `IngestionValidator`. This could be done in this PR or added as an issue.
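A minimal, self-contained Go sketch of this suggestion. The names MessagePoolConfig, MaxPoolSize, MaxNonceGap, and validateNonce are hypothetical, not go-filecoin's actual API; the snippet only illustrates how a configurable maximum nonce gap could sit alongside a pool-size limit and be enforced at ingestion time.

package main

import "fmt"

// MessagePoolConfig groups both proposed limits. Field names are illustrative.
type MessagePoolConfig struct {
	MaxPoolSize uint64 // maximum number of messages held in the pool
	MaxNonceGap uint64 // largest allowed gap between an actor's nonce and an incoming message's nonce
}

// ingestionValidator sketches how a configurable nonce-gap limit could be
// checked when a message is ingested into the pool.
type ingestionValidator struct {
	cfg MessagePoolConfig
}

// validateNonce rejects messages whose nonce is stale or too far ahead of the
// actor's current nonce.
func (v *ingestionValidator) validateNonce(actorNonce, msgNonce uint64) error {
	if msgNonce < actorNonce {
		return fmt.Errorf("nonce %d already used (actor nonce is %d)", msgNonce, actorNonce)
	}
	if msgNonce-actorNonce > v.cfg.MaxNonceGap {
		return fmt.Errorf("nonce gap %d exceeds maximum %d", msgNonce-actorNonce, v.cfg.MaxNonceGap)
	}
	return nil
}

func main() {
	v := &ingestionValidator{cfg: MessagePoolConfig{MaxPoolSize: 10000, MaxNonceGap: 100}}
	fmt.Println(v.validateNonce(5, 200)) // rejected: gap of 195 exceeds the configured 100
}

With both limits in one struct, the node wiring that currently calls consensus.NewIngestionValidator(chainStore) would only need a single extra configuration argument.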
filecoin-project-venus
go
@@ -12,10 +12,8 @@ */ package org.camunda.bpm.container.impl.spi; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; + import org.camunda.bpm.container.impl.ContainerIntegrationLogger; import org.camunda.bpm.engine.impl.ProcessEngineLogger;
1
/* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.container.impl.spi; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.camunda.bpm.container.impl.ContainerIntegrationLogger; import org.camunda.bpm.engine.impl.ProcessEngineLogger; /** * <p>A DeploymentOperation allows bundling multiple deployment steps into a * composite operation that succeeds or fails atomically.</p> * * <p>The DeploymentOperation is composed of a list of individual steps ( * {@link DeploymentOperationStep}). Each step may or may not install new * services into the container. If one of the steps fails, the operation makes * sure that * <ul> * <li>all successfully completed steps are notified by calling their * {@link DeploymentOperationStep#cancelOperationStep(DeploymentOperation)} * method.</li> * <li>all services installed in the context of the operation are removed from the container.</li> * </ul> * * @author Daniel Meyer * */ public class DeploymentOperation { private final static ContainerIntegrationLogger LOG = ProcessEngineLogger.CONTAINER_INTEGRATION_LOGGER; /** the name of this composite operation */ protected final String name; /** the service container */ protected final PlatformServiceContainer serviceContainer; /** the list of steps that make up this composite operation */ protected final List<DeploymentOperationStep> steps; /** a list of steps that completed successfully */ protected final List<DeploymentOperationStep> successfulSteps = new ArrayList<DeploymentOperationStep>(); /** the list of services installed by this operation. The {@link #rollbackOperation()} must make sure * all these services are removed if the operation fails. */ protected List<String> installedServices = new ArrayList<String>(); /** a list of attachments allows to pass state from one operation to another */ protected Map<String, Object> attachments = new HashMap<String, Object>(); protected boolean isRollbackOnFailure = true; protected DeploymentOperationStep currentStep; public DeploymentOperation(String name, PlatformServiceContainer container, List<DeploymentOperationStep> steps) { this.name = name; this.serviceContainer = container; this.steps = steps; } // getter / setters ///////////////////////////////// @SuppressWarnings("unchecked") public <S> S getAttachment(String name) { return (S) attachments.get(name); } public void addAttachment(String name, Object value) { attachments.put(name, value); } /** * Add a new atomic step to the composite operation. * If the operation is currently executing a step, the step is added after the current step. 
*/ public void addStep(DeploymentOperationStep step) { if(currentStep != null) { steps.add(steps.indexOf(currentStep)+1, step); } else { steps.add(step); } } public void serviceAdded(String serviceName) { installedServices.add(serviceName); } public PlatformServiceContainer getServiceContainer() { return serviceContainer; } // runtime aspect /////////////////////////////////// public void execute() { while (!steps.isEmpty()) { currentStep = steps.remove(0); try { LOG.debugPerformOperationStep(currentStep.getName()); currentStep.performOperationStep(this); successfulSteps.add(currentStep); LOG.debugSuccessfullyPerformedOperationStep(currentStep.getName()); } catch (Exception e) { if(isRollbackOnFailure) { try { rollbackOperation(); } catch(Exception e2) { LOG.exceptionWhileRollingBackOperation(e2); } // re-throw the original exception throw LOG.exceptionWhilePerformingOperationStep(name, currentStep.getName(), e); } else { LOG.exceptionWhilePerformingOperationStep(currentStep.getName(), e); } } } } protected void rollbackOperation() { // first, rollback all successful steps for (DeploymentOperationStep step : successfulSteps) { try { step.cancelOperationStep(this); } catch(Exception e) { LOG.exceptionWhileRollingBackOperation(e); } } // second, remove services for (String serviceName : installedServices) { try { serviceContainer.stopService(serviceName); } catch(Exception e) { LOG.exceptionWhileStopping("service", serviceName, e); } } } public List<String> getInstalledServices() { return installedServices; } // builder ///////////////////////////// public static class DeploymentOperationBuilder { protected PlatformServiceContainer container; protected String name; protected boolean isUndeploymentOperation = false; protected List<DeploymentOperationStep> steps = new ArrayList<DeploymentOperationStep>(); protected Map<String, Object> initialAttachments = new HashMap<String, Object>(); public DeploymentOperationBuilder(PlatformServiceContainer container, String name) { this.container = container; this.name = name; } public DeploymentOperationBuilder addStep(DeploymentOperationStep step) { steps.add(step); return this; } public DeploymentOperationBuilder addAttachment(String name, Object value) { initialAttachments.put(name, value); return this; } public DeploymentOperationBuilder setUndeploymentOperation() { isUndeploymentOperation = true; return this; } public void execute() { DeploymentOperation operation = new DeploymentOperation(name, container, steps); operation.isRollbackOnFailure = !isUndeploymentOperation; operation.attachments.putAll(initialAttachments); container.executeDeploymentOperation(operation); } } }
1
8,993
Please inline the imports: list each class explicitly instead of using the `java.util.*` wildcard.
camunda-camunda-bpm-platform
java
@@ -113,7 +113,7 @@ type Config struct { IptablesRefreshInterval time.Duration `config:"seconds;90"` IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;30"` IptablesLockFilePath string `config:"file;/run/xtables.lock"` - IptablesLockTimeoutSecs time.Duration `config:"seconds;30"` + IptablesLockTimeoutSecs time.Duration `config:"seconds;0"` IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"` IpsetsRefreshInterval time.Duration `config:"seconds;10"` MaxIpsetSize int `config:"int;1048576;non-zero"`
1
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package config import ( "errors" "fmt" "net" "os" "reflect" "regexp" "strconv" "strings" "time" log "github.com/Sirupsen/logrus" "github.com/projectcalico/libcalico-go/lib/api" "github.com/projectcalico/libcalico-go/lib/client" ) var ( IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`) AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`) HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`) StringRegexp = regexp.MustCompile(`^.*$`) ) const ( maxUint = ^uint(0) maxInt = int(maxUint >> 1) minInt = -maxInt - 1 ) // Source of a config value. Values from higher-numbered sources override // those from lower-numbered sources. Note: some parameters (such as those // needed to connect to the datastore) can only be set from a local source. type Source uint8 const ( Default = iota DatastoreGlobal DatastorePerHost ConfigFile EnvironmentVariable ) var SourcesInDescendingOrder = []Source{EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal} func (source Source) String() string { switch source { case Default: return "<default>" case DatastoreGlobal: return "datastore (global)" case DatastorePerHost: return "datastore (per-host)" case ConfigFile: return "config file" case EnvironmentVariable: return "environment variable" } return fmt.Sprintf("<unknown(%v)>", uint8(source)) } func (source Source) Local() bool { switch source { case Default, ConfigFile, EnvironmentVariable: return true default: return false } } // Config contains the best, parsed config values loaded from the various sources. // We use tags to control the parsing and validation. type Config struct { // Configuration parameters. 
UseInternalDataplaneDriver bool `config:"bool;true"` DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"` DatastoreType string `config:"oneof(kubernetes,etcdv2);etcdv2;non-zero,die-on-fail"` FelixHostname string `config:"hostname;;local,non-zero"` EtcdAddr string `config:"authority;127.0.0.1:2379;local"` EtcdScheme string `config:"oneof(http,https);http;local"` EtcdKeyFile string `config:"file(must-exist);;local"` EtcdCertFile string `config:"file(must-exist);;local"` EtcdCaFile string `config:"file(must-exist);;local"` EtcdEndpoints []string `config:"endpoint-list;;local"` TyphaAddr string `config:"authority;;"` TyphaK8sServiceName string `config:"string;"` TyphaK8sNamespace string `config:"string;kube-system;non-zero"` Ipv6Support bool `config:"bool;true"` IgnoreLooseRPF bool `config:"bool;false"` RouteRefreshInterval time.Duration `config:"seconds;90"` IptablesRefreshInterval time.Duration `config:"seconds;90"` IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;30"` IptablesLockFilePath string `config:"file;/run/xtables.lock"` IptablesLockTimeoutSecs time.Duration `config:"seconds;30"` IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"` IpsetsRefreshInterval time.Duration `config:"seconds;10"` MaxIpsetSize int `config:"int;1048576;non-zero"` MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"` MetadataPort int `config:"int(0,65535);8775;die-on-fail"` InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"` ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"` DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"` IptablesAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"` LogPrefix string `config:"string;calico-packet"` LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"` LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,CRITICAL);INFO"` LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,CRITICAL);INFO"` LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,CRITICAL);INFO"` IpInIpEnabled bool `config:"bool;false"` IpInIpMtu int `config:"int;1440;non-zero"` IpInIpTunnelAddr net.IP `config:"ipv4;"` ReportingIntervalSecs time.Duration `config:"seconds;30"` ReportingTTLSecs time.Duration `config:"seconds;90"` EndpointReportingEnabled bool `config:"bool;false"` EndpointReportingDelaySecs time.Duration `config:"seconds;1"` IptablesMarkMask uint32 `config:"mark-bitmask;0xff000000;non-zero,die-on-fail"` DisableConntrackInvalidCheck bool `config:"bool;false"` HealthEnabled bool `config:"bool;false"` HealthPort int `config:"int(0,65535);9099"` PrometheusMetricsEnabled bool `config:"bool;false"` PrometheusMetricsPort int `config:"int(0,65535);9091"` PrometheusGoMetricsEnabled bool `config:"bool;true"` PrometheusProcessMetricsEnabled bool `config:"bool;true"` FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68;die-on-fail"` FailsafeOutboundHostPorts []ProtoPort `config:"port-list;tcp:2379,tcp:2380,tcp:4001,tcp:7001,udp:53,udp:67;die-on-fail"` UsageReportingEnabled bool `config:"bool;true"` ClusterGUID string `config:"string;baddecaf"` ClusterType string `config:"string;"` CalicoVersion string `config:"string;"` DebugMemoryProfilePath string `config:"file;;"` DebugDisableLogDropping bool `config:"bool;false"` // State tracking. // nameToSource tracks where we loaded each config param from. 
sourceToRawConfig map[Source]map[string]string rawValues map[string]string Err error numIptablesBitsAllocated int } type ProtoPort struct { Protocol string Port uint16 } // Load parses and merges the rawData from one particular source into this config object. // If there is a config value already loaded from a higher-priority source, then // the new value will be ignored (after validation). func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) { log.Infof("Merging in config from %v: %v", source, rawData) // Defensively take a copy of the raw data, in case we've been handed // a mutable map by mistake. rawDataCopy := make(map[string]string) for k, v := range rawData { if v == "" { log.WithFields(log.Fields{ "name": k, "source": source, }).Info("Ignoring empty configuration parameter. Use value 'none' if " + "your intention is to explicitly disable the default value.") continue } rawDataCopy[k] = v } config.sourceToRawConfig[source] = rawDataCopy changed, err = config.resolve() return } func (c *Config) InterfacePrefixes() []string { return strings.Split(c.InterfacePrefix, ",") } func (config *Config) OpenstackActive() bool { if strings.Contains(strings.ToLower(config.ClusterType), "openstack") { log.Debug("Cluster type contains OpenStack") return true } if config.MetadataAddr != "127.0.0.1" { log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active") return true } if config.MetadataPort != 8775 { log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active") return true } for _, prefix := range config.InterfacePrefixes() { if prefix == "tap" { log.Debug("Interface prefix list contains 'tap', assuming OpenStack") return true } } log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases") return false } func (config *Config) NextIptablesMark() uint32 { mark := config.NthIPTablesMark(config.numIptablesBitsAllocated) config.numIptablesBitsAllocated++ return mark } func (config *Config) NthIPTablesMark(n int) uint32 { numBitsFound := 0 for shift := uint(0); shift < 32; shift++ { candidate := uint32(1) << shift if config.IptablesMarkMask&candidate > 0 { if numBitsFound == n { return candidate } numBitsFound += 1 } } log.WithFields(log.Fields{ "IptablesMarkMask": config.IptablesMarkMask, "requestedMark": n, }).Panic("Not enough iptables mark bits available.") return 0 } func (config *Config) resolve() (changed bool, err error) { newRawValues := make(map[string]string) nameToSource := make(map[string]Source) for _, source := range SourcesInDescendingOrder { valueLoop: for rawName, rawValue := range config.sourceToRawConfig[source] { currentSource := nameToSource[rawName] param, ok := knownParams[strings.ToLower(rawName)] if !ok { if source >= currentSource { // Stash the raw value in case it's useful for // a plugin. Since we don't know the canonical // name, use the raw name. newRawValues[rawName] = rawValue nameToSource[rawName] = source } log.WithField("raw name", rawName).Info( "Ignoring unknown config param.") continue valueLoop } metadata := param.GetMetadata() name := metadata.Name if metadata.Local && !source.Local() { log.Warningf("Ignoring local-only configuration for %v from %v", name, source) continue valueLoop } log.Infof("Parsing value for %v: %v (from %v)", name, rawValue, source) var value interface{} if strings.ToLower(rawValue) == "none" { // Special case: we allow a value of "none" to force the value to // the zero value for a field. 
The zero value often differs from // the default value. Typically, the zero value means "turn off // the feature". if metadata.NonZero { err = errors.New("Non-zero field cannot be set to none") log.Errorf( "Failed to parse value for %v: %v from source %v. %v", name, rawValue, source, err) config.Err = err return } value = metadata.ZeroValue log.Infof("Value set to 'none', replacing with zero-value: %#v.", value) } else { value, err = param.Parse(rawValue) if err != nil { logCxt := log.WithError(err).WithField("source", source) if metadata.DieOnParseFailure { logCxt.Error("Invalid (required) config value.") config.Err = err return } else { logCxt.WithField("default", metadata.Default).Warn( "Replacing invalid value with default") value = metadata.Default err = nil } } } log.Infof("Parsed value for %v: %v (from %v)", name, value, source) if source < currentSource { log.Infof("Skipping config value for %v from %v; "+ "already have a value from %v", name, source, currentSource) continue } field := reflect.ValueOf(config).Elem().FieldByName(name) field.Set(reflect.ValueOf(value)) newRawValues[name] = rawValue nameToSource[name] = source } } changed = !reflect.DeepEqual(newRawValues, config.rawValues) config.rawValues = newRawValues return } func (config *Config) DatastoreConfig() api.CalicoAPIConfig { // Special case for etcdv2 datastore, where we want to honour established Felix-specific // config mechanisms. if config.DatastoreType == "etcdv2" { // Build a CalicoAPIConfig with the etcd fields filled in from Felix-specific // config. var etcdEndpoints string if len(config.EtcdEndpoints) == 0 { etcdEndpoints = config.EtcdScheme + "://" + config.EtcdAddr } else { etcdEndpoints = strings.Join(config.EtcdEndpoints, ",") } etcdCfg := api.EtcdConfig{ EtcdEndpoints: etcdEndpoints, EtcdKeyFile: config.EtcdKeyFile, EtcdCertFile: config.EtcdCertFile, EtcdCACertFile: config.EtcdCaFile, } return api.CalicoAPIConfig{ Spec: api.CalicoAPIConfigSpec{ DatastoreType: api.EtcdV2, EtcdConfig: etcdCfg, }, } } // Build CalicoAPIConfig from the environment. This means that any XxxYyy field in // CalicoAPIConfigSpec can be set by a corresponding XXX_YYY or CALICO_XXX_YYY environment // variable, and that the datastore type can be set by a DATASTORE_TYPE or // CALICO_DATASTORE_TYPE variable. (Except in the etcdv2 case which is handled specially // above.) cfg, err := client.LoadClientConfigFromEnvironment() if err != nil { log.WithError(err).Panic("Failed to create datastore config") } // If that didn't set the datastore type (in which case the field will have been set to its // default 'etcdv2' value), copy it from the Felix config. if cfg.Spec.DatastoreType == "etcdv2" { cfg.Spec.DatastoreType = api.DatastoreType(config.DatastoreType) } if !config.IpInIpEnabled { // Polling k8s for node updates is expensive (because we get many superfluous // updates) so disable if we don't need it. log.Info("IPIP disabled, disabling node poll (if KDD is in use).") cfg.Spec.K8sDisableNodePoll = true } return *cfg } // Validate() performs cross-field validation. 
func (config *Config) Validate() (err error) { if config.FelixHostname == "" { err = errors.New("Failed to determine hostname") } if config.DatastoreType == "etcdv2" && len(config.EtcdEndpoints) == 0 { if config.EtcdScheme == "" { err = errors.New("EtcdEndpoints and EtcdScheme both missing") } if config.EtcdAddr == "" { err = errors.New("EtcdEndpoints and EtcdAddr both missing") } } if err != nil { config.Err = err } return } var knownParams map[string]param func loadParams() { knownParams = make(map[string]param) config := Config{} kind := reflect.TypeOf(config) metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` + `([^;]*)(?:;` + `([^;]*))?$`) for ii := 0; ii < kind.NumField(); ii++ { field := kind.Field(ii) tag := field.Tag.Get("config") if tag == "" { continue } captures := metaRegexp.FindStringSubmatch(tag) if len(captures) == 0 { log.Panicf("Failed to parse metadata for config param %v", field.Name) } log.Debugf("%v: metadata captures: %#v", field.Name, captures) kind := captures[1] // Type: "int|oneof|bool|port-list|..." kindParams := captures[2] // Parameters for the type: e.g. for oneof "http,https" defaultStr := captures[3] // Default value e.g "1.0" flags := captures[4] var param param var err error switch kind { case "bool": param = &BoolParam{} case "int": min := minInt max := maxInt if kindParams != "" { minAndMax := strings.Split(kindParams, ",") min, err = strconv.Atoi(minAndMax[0]) if err != nil { log.Panicf("Failed to parse min value for %v", field.Name) } max, err = strconv.Atoi(minAndMax[1]) if err != nil { log.Panicf("Failed to parse max value for %v", field.Name) } } param = &IntParam{Min: min, Max: max} case "int32": param = &Int32Param{} case "mark-bitmask": param = &MarkBitmaskParam{} case "float": param = &FloatParam{} case "seconds": param = &SecondsParam{} case "millis": param = &MillisParam{} case "iface-list": param = &RegexpParam{Regexp: IfaceListRegexp, Msg: "invalid Linux interface name"} case "file": param = &FileParam{ MustExist: strings.Contains(kindParams, "must-exist"), Executable: strings.Contains(kindParams, "executable"), } case "authority": param = &RegexpParam{Regexp: AuthorityRegexp, Msg: "invalid URL authority"} case "ipv4": param = &Ipv4Param{} case "endpoint-list": param = &EndpointListParam{} case "port-list": param = &PortListParam{} case "hostname": param = &RegexpParam{Regexp: HostnameRegexp, Msg: "invalid hostname"} case "oneof": options := strings.Split(kindParams, ",") lowerCaseToCanon := make(map[string]string) for _, option := range options { lowerCaseToCanon[strings.ToLower(option)] = option } param = &OneofListParam{ lowerCaseOptionsToCanonical: lowerCaseToCanon} case "string": param = &RegexpParam{Regexp: StringRegexp, Msg: "invalid string"} default: log.Panicf("Unknown type of parameter: %v", kind) } metadata := param.GetMetadata() metadata.Name = field.Name metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface() if strings.Index(flags, "non-zero") > -1 { metadata.NonZero = true } if strings.Index(flags, "die-on-fail") > -1 { metadata.DieOnParseFailure = true } if strings.Index(flags, "local") > -1 { metadata.Local = true } if defaultStr != "" { if strings.Index(flags, "skip-default-validation") > -1 { metadata.Default = defaultStr } else { // Parse the default value and save it in the metadata. Doing // that here ensures that we syntax-check the defaults now. 
defaultVal, err := param.Parse(defaultStr) if err != nil { log.Panicf("Invalid default value: %v", err) } metadata.Default = defaultVal } } else { metadata.Default = metadata.ZeroValue } knownParams[strings.ToLower(field.Name)] = param } } func (config *Config) RawValues() map[string]string { return config.rawValues } func New() *Config { if knownParams == nil { loadParams() } p := &Config{ rawValues: make(map[string]string), sourceToRawConfig: make(map[Source]map[string]string), } for _, param := range knownParams { param.setDefault(p) } hostname, err := os.Hostname() if err != nil { log.Warningf("Failed to get hostname from kernel, "+ "trying HOSTNAME variable: %v", err) hostname = os.Getenv("HOSTNAME") } p.FelixHostname = hostname return p } type param interface { GetMetadata() *Metadata Parse(raw string) (result interface{}, err error) setDefault(*Config) }
1
15,520
Should IptablesPostWriteCheckIntervalSecs be set back to its previous smaller value, if use of the iptables lock is disabled?
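A minimal, self-contained Go sketch of the behaviour this question asks about. resolvePostWriteCheckInterval and the 1-second fallback are assumptions for illustration only, not Felix's actual configuration logic; the idea is simply that a lock timeout of 0 (lock disabled) could select the shorter post-write check interval.

package main

import (
	"fmt"
	"time"
)

// resolvePostWriteCheckInterval is a hypothetical helper: when the iptables
// lock is disabled (timeout of 0), fall back to a shorter post-write check
// interval instead of the longer one chosen for use with the lock.
func resolvePostWriteCheckInterval(lockTimeout, configured time.Duration) time.Duration {
	const previousSmallerDefault = 1 * time.Second // assumed pre-change value, illustrative
	if lockTimeout == 0 {
		return previousSmallerDefault
	}
	return configured
}

func main() {
	fmt.Println(resolvePostWriteCheckInterval(0, 30*time.Second))              // 1s: lock disabled
	fmt.Println(resolvePostWriteCheckInterval(10*time.Second, 30*time.Second)) // 30s: lock in use
}

The design choice sketched here is to treat IptablesLockTimeoutSecs == 0 as "lock disabled" and let that single condition, rather than a second flag, pick the interval.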
projectcalico-felix
go
@@ -313,7 +313,11 @@ class JoplinDatabase extends Database { // currentVersionIndex < 0 if for the case where an old version of Joplin used with a newer // version of the database, so that migration is not run in this case. - if (currentVersionIndex < 0) throw new Error('Unknown profile version. Most likely this is an old version of Joplin, while the profile was created by a newer version. Please upgrade Joplin at https://joplinapp.org and try again.'); + if (currentVersionIndex < 0) { + this.logger().error(`Cannot convert database from version ${fromVersion} to ${existingDatabaseVersions[existingDatabaseVersions.length-1]}`); + const p = require('../package.json'); + throw new Error(`Unknown profile version. Most likely this is an old version of Joplin, while the profile was created by a newer version. Please upgrade Joplin at https://joplinapp.org and try again.\n${p.name} version: ${p.version}\nProfile version: ${fromVersion}\nExpected version: ${existingDatabaseVersions[existingDatabaseVersions.length-1]}`); + } if (currentVersionIndex == existingDatabaseVersions.length - 1) return fromVersion;
1
const { promiseChain } = require('lib/promise-utils.js'); const { Database } = require('lib/database.js'); const { sprintf } = require('sprintf-js'); const Resource = require('lib/models/Resource'); const structureSql = ` CREATE TABLE folders ( id TEXT PRIMARY KEY, title TEXT NOT NULL DEFAULT "", created_time INT NOT NULL, updated_time INT NOT NULL ); CREATE INDEX folders_title ON folders (title); CREATE INDEX folders_updated_time ON folders (updated_time); CREATE TABLE notes ( id TEXT PRIMARY KEY, parent_id TEXT NOT NULL DEFAULT "", title TEXT NOT NULL DEFAULT "", body TEXT NOT NULL DEFAULT "", created_time INT NOT NULL, updated_time INT NOT NULL, is_conflict INT NOT NULL DEFAULT 0, latitude NUMERIC NOT NULL DEFAULT 0, longitude NUMERIC NOT NULL DEFAULT 0, altitude NUMERIC NOT NULL DEFAULT 0, author TEXT NOT NULL DEFAULT "", source_url TEXT NOT NULL DEFAULT "", is_todo INT NOT NULL DEFAULT 0, todo_due INT NOT NULL DEFAULT 0, todo_completed INT NOT NULL DEFAULT 0, source TEXT NOT NULL DEFAULT "", source_application TEXT NOT NULL DEFAULT "", application_data TEXT NOT NULL DEFAULT "", \`order\` INT NOT NULL DEFAULT 0 ); CREATE INDEX notes_title ON notes (title); CREATE INDEX notes_updated_time ON notes (updated_time); CREATE INDEX notes_is_conflict ON notes (is_conflict); CREATE INDEX notes_is_todo ON notes (is_todo); CREATE INDEX notes_order ON notes (\`order\`); CREATE TABLE tags ( id TEXT PRIMARY KEY, title TEXT NOT NULL DEFAULT "", created_time INT NOT NULL, updated_time INT NOT NULL ); CREATE INDEX tags_title ON tags (title); CREATE INDEX tags_updated_time ON tags (updated_time); CREATE TABLE note_tags ( id TEXT PRIMARY KEY, note_id TEXT NOT NULL, tag_id TEXT NOT NULL, created_time INT NOT NULL, updated_time INT NOT NULL ); CREATE INDEX note_tags_note_id ON note_tags (note_id); CREATE INDEX note_tags_tag_id ON note_tags (tag_id); CREATE INDEX note_tags_updated_time ON note_tags (updated_time); CREATE TABLE resources ( id TEXT PRIMARY KEY, title TEXT NOT NULL DEFAULT "", mime TEXT NOT NULL, filename TEXT NOT NULL DEFAULT "", created_time INT NOT NULL, updated_time INT NOT NULL ); CREATE INDEX resources_title ON resources (title); CREATE INDEX resources_updated_time ON resources (updated_time); CREATE TABLE settings ( \`key\` TEXT PRIMARY KEY, \`value\` TEXT, \`type\` INT NOT NULL ); CREATE TABLE table_fields ( id INTEGER PRIMARY KEY, table_name TEXT NOT NULL, field_name TEXT NOT NULL, field_type INT NOT NULL, field_default TEXT ); CREATE TABLE sync_items ( id INTEGER PRIMARY KEY, sync_target INT NOT NULL, sync_time INT NOT NULL DEFAULT 0, item_type INT NOT NULL, item_id TEXT NOT NULL ); CREATE INDEX sync_items_sync_time ON sync_items (sync_time); CREATE INDEX sync_items_sync_target ON sync_items (sync_target); CREATE INDEX sync_items_item_type ON sync_items (item_type); CREATE INDEX sync_items_item_id ON sync_items (item_id); CREATE TABLE deleted_items ( id INTEGER PRIMARY KEY, item_type INT NOT NULL, item_id TEXT NOT NULL, deleted_time INT NOT NULL ); CREATE TABLE version ( version INT NOT NULL ); INSERT INTO version (version) VALUES (1); `; class JoplinDatabase extends Database { constructor(driver) { super(driver); this.initialized_ = false; this.tableFields_ = null; this.version_ = null; } initialized() { return this.initialized_; } async open(options) { await super.open(options); return this.initialize(); } tableFieldNames(tableName) { let tf = this.tableFields(tableName); let output = []; for (let i = 0; i < tf.length; i++) { output.push(tf[i].name); } return output; } 
tableFields(tableName, options = null) { if (options === null) options = {}; if (!this.tableFields_) throw new Error('Fields have not been loaded yet'); if (!this.tableFields_[tableName]) throw new Error(`Unknown table: ${tableName}`); const output = this.tableFields_[tableName].slice(); if (options.includeDescription) { for (let i = 0; i < output.length; i++) { output[i].description = this.fieldDescription(tableName, output[i].name); } } return output; } async clearForTesting() { const tableNames = [ 'notes', 'folders', 'resources', 'tags', 'note_tags', // 'master_keys', 'item_changes', 'note_resources', // 'settings', 'deleted_items', 'sync_items', 'notes_normalized', 'revisions', 'resources_to_download', 'key_values', ]; const queries = []; for (const n of tableNames) { queries.push(`DELETE FROM ${n}`); queries.push(`DELETE FROM sqlite_sequence WHERE name="${n}"`); // Reset autoincremented IDs } queries.push('DELETE FROM settings WHERE key="sync.1.context"'); queries.push('DELETE FROM settings WHERE key="sync.2.context"'); queries.push('DELETE FROM settings WHERE key="sync.3.context"'); queries.push('DELETE FROM settings WHERE key="sync.4.context"'); queries.push('DELETE FROM settings WHERE key="sync.5.context"'); queries.push('DELETE FROM settings WHERE key="sync.6.context"'); queries.push('DELETE FROM settings WHERE key="sync.7.context"'); queries.push('DELETE FROM settings WHERE key="revisionService.lastProcessedChangeId"'); queries.push('DELETE FROM settings WHERE key="resourceService.lastProcessedChangeId"'); queries.push('DELETE FROM settings WHERE key="searchEngine.lastProcessedChangeId"'); await this.transactionExecBatch(queries); } createDefaultRow() { const row = {}; const fields = this.tableFields('resource_local_states'); for (let i = 0; i < fields.length; i++) { const f = fields[i]; row[f.name] = Database.formatValue(f.type, f.default); } return row; } fieldDescription(tableName, fieldName) { const sp = sprintf; if (!this.tableDescriptions_) { this.tableDescriptions_ = { notes: { parent_id: sp('ID of the notebook that contains this note. Change this ID to move the note to a different notebook.'), body: sp('The note body, in Markdown. May also contain HTML.'), is_conflict: sp('Tells whether the note is a conflict or not.'), is_todo: sp('Tells whether this note is a todo or not.'), todo_due: sp('When the todo is due. An alarm will be triggered on that date.'), todo_completed: sp('Tells whether todo is completed or not. This is a timestamp in milliseconds.'), source_url: sp('The full URL where the note comes from.'), }, folders: {}, resources: {}, tags: {}, }; const baseItems = ['notes', 'folders', 'tags', 'resources']; for (let i = 0; i < baseItems.length; i++) { const n = baseItems[i]; const singular = n.substr(0, n.length - 1); this.tableDescriptions_[n].title = sp('The %s title.', singular); this.tableDescriptions_[n].created_time = sp('When the %s was created.', singular); this.tableDescriptions_[n].updated_time = sp('When the %s was last updated.', singular); this.tableDescriptions_[n].user_created_time = sp('When the %s was created. It may differ from created_time as it can be manually set by the user.', singular); this.tableDescriptions_[n].user_updated_time = sp('When the %s was last updated. It may differ from updated_time as it can be manually set by the user.', singular); } } const d = this.tableDescriptions_[tableName]; return d && d[fieldName] ? 
d[fieldName] : ''; } refreshTableFields() { this.logger().info('Initializing tables...'); let queries = []; queries.push(this.wrapQuery('DELETE FROM table_fields')); return this.selectAll('SELECT name FROM sqlite_master WHERE type="table"') .then(tableRows => { let chain = []; for (let i = 0; i < tableRows.length; i++) { let tableName = tableRows[i].name; if (tableName == 'android_metadata') continue; if (tableName == 'table_fields') continue; if (tableName == 'sqlite_sequence') continue; if (tableName.indexOf('notes_fts') === 0) continue; chain.push(() => { return this.selectAll(`PRAGMA table_info("${tableName}")`).then(pragmas => { for (let i = 0; i < pragmas.length; i++) { let item = pragmas[i]; // In SQLite, if the default value is a string it has double quotes around it, so remove them here let defaultValue = item.dflt_value; if (typeof defaultValue == 'string' && defaultValue.length >= 2 && defaultValue[0] == '"' && defaultValue[defaultValue.length - 1] == '"') { defaultValue = defaultValue.substr(1, defaultValue.length - 2); } let q = Database.insertQuery('table_fields', { table_name: tableName, field_name: item.name, field_type: Database.enumId('fieldType', item.type), field_default: defaultValue, }); queries.push(q); } }); }); } return promiseChain(chain); }) .then(() => { return this.transactionExecBatch(queries); }); } addMigrationFile(num) { const timestamp = Date.now(); return { sql: 'INSERT INTO migrations (number, created_time, updated_time) VALUES (?, ?, ?)', params: [num, timestamp, timestamp] }; } async upgradeDatabase(fromVersion) { // INSTRUCTIONS TO UPGRADE THE DATABASE: // // 1. Add the new version number to the existingDatabaseVersions array // 2. Add the upgrade logic to the "switch (targetVersion)" statement below // IMPORTANT: // // Whenever adding a new database property, some additional logic might be needed // in the synchronizer to handle this property. For example, when adding a property // that should have a default value, existing remote items will not have this // default value and thus might cause problems. In that case, the default value // must be set in the synchronizer too. // Note: v16 and v17 don't do anything. They were used to debug an issue. const existingDatabaseVersions = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]; let currentVersionIndex = existingDatabaseVersions.indexOf(fromVersion); // currentVersionIndex < 0 if for the case where an old version of Joplin used with a newer // version of the database, so that migration is not run in this case. if (currentVersionIndex < 0) throw new Error('Unknown profile version. Most likely this is an old version of Joplin, while the profile was created by a newer version. 
Please upgrade Joplin at https://joplinapp.org and try again.'); if (currentVersionIndex == existingDatabaseVersions.length - 1) return fromVersion; let latestVersion = fromVersion; while (currentVersionIndex < existingDatabaseVersions.length - 1) { const targetVersion = existingDatabaseVersions[currentVersionIndex + 1]; this.logger().info(`Converting database to version ${targetVersion}`); let queries = []; if (targetVersion == 1) { queries = this.wrapQueries(this.sqlStringToLines(structureSql)); } if (targetVersion == 2) { const newTableSql = ` CREATE TABLE deleted_items ( id INTEGER PRIMARY KEY, item_type INT NOT NULL, item_id TEXT NOT NULL, deleted_time INT NOT NULL, sync_target INT NOT NULL ); `; queries.push({ sql: 'DROP TABLE deleted_items' }); queries.push({ sql: this.sqlStringToLines(newTableSql)[0] }); queries.push({ sql: 'CREATE INDEX deleted_items_sync_target ON deleted_items (sync_target)' }); } if (targetVersion == 3) { queries = this.alterColumnQueries('settings', { key: 'TEXT PRIMARY KEY', value: 'TEXT' }); } if (targetVersion == 4) { queries.push('INSERT INTO settings (`key`, `value`) VALUES (\'sync.3.context\', (SELECT `value` FROM settings WHERE `key` = \'sync.context\'))'); queries.push('DELETE FROM settings WHERE `key` = "sync.context"'); } if (targetVersion == 5) { const tableNames = ['notes', 'folders', 'tags', 'note_tags', 'resources']; for (let i = 0; i < tableNames.length; i++) { const n = tableNames[i]; queries.push(`ALTER TABLE ${n} ADD COLUMN user_created_time INT NOT NULL DEFAULT 0`); queries.push(`ALTER TABLE ${n} ADD COLUMN user_updated_time INT NOT NULL DEFAULT 0`); queries.push(`UPDATE ${n} SET user_created_time = created_time`); queries.push(`UPDATE ${n} SET user_updated_time = updated_time`); queries.push(`CREATE INDEX ${n}_user_updated_time ON ${n} (user_updated_time)`); } } if (targetVersion == 6) { queries.push('CREATE TABLE alarms (id INTEGER PRIMARY KEY AUTOINCREMENT, note_id TEXT NOT NULL, trigger_time INT NOT NULL)'); queries.push('CREATE INDEX alarm_note_id ON alarms (note_id)'); } if (targetVersion == 7) { queries.push('ALTER TABLE resources ADD COLUMN file_extension TEXT NOT NULL DEFAULT ""'); } if (targetVersion == 8) { queries.push('ALTER TABLE sync_items ADD COLUMN sync_disabled INT NOT NULL DEFAULT "0"'); queries.push('ALTER TABLE sync_items ADD COLUMN sync_disabled_reason TEXT NOT NULL DEFAULT ""'); } if (targetVersion == 9) { const newTableSql = ` CREATE TABLE master_keys ( id TEXT PRIMARY KEY, created_time INT NOT NULL, updated_time INT NOT NULL, source_application TEXT NOT NULL, encryption_method INT NOT NULL, checksum TEXT NOT NULL, content TEXT NOT NULL ); `; queries.push(this.sqlStringToLines(newTableSql)[0]); const tableNames = ['notes', 'folders', 'tags', 'note_tags', 'resources']; for (let i = 0; i < tableNames.length; i++) { const n = tableNames[i]; queries.push(`ALTER TABLE ${n} ADD COLUMN encryption_cipher_text TEXT NOT NULL DEFAULT ""`); queries.push(`ALTER TABLE ${n} ADD COLUMN encryption_applied INT NOT NULL DEFAULT 0`); queries.push(`CREATE INDEX ${n}_encryption_applied ON ${n} (encryption_applied)`); } queries.push('ALTER TABLE sync_items ADD COLUMN force_sync INT NOT NULL DEFAULT 0'); queries.push('ALTER TABLE resources ADD COLUMN encryption_blob_encrypted INT NOT NULL DEFAULT 0'); } const upgradeVersion10 = () => { const itemChangesTable = ` CREATE TABLE item_changes ( id INTEGER PRIMARY KEY AUTOINCREMENT, item_type INT NOT NULL, item_id TEXT NOT NULL, type INT NOT NULL, created_time INT NOT NULL ); `; const 
noteResourcesTable = ` CREATE TABLE note_resources ( id INTEGER PRIMARY KEY, note_id TEXT NOT NULL, resource_id TEXT NOT NULL, is_associated INT NOT NULL, last_seen_time INT NOT NULL ); `; queries.push(this.sqlStringToLines(itemChangesTable)[0]); queries.push('CREATE INDEX item_changes_item_id ON item_changes (item_id)'); queries.push('CREATE INDEX item_changes_created_time ON item_changes (created_time)'); queries.push('CREATE INDEX item_changes_item_type ON item_changes (item_type)'); queries.push(this.sqlStringToLines(noteResourcesTable)[0]); queries.push('CREATE INDEX note_resources_note_id ON note_resources (note_id)'); queries.push('CREATE INDEX note_resources_resource_id ON note_resources (resource_id)'); queries.push({ sql: 'INSERT INTO item_changes (item_type, item_id, type, created_time) SELECT 1, id, 1, ? FROM notes', params: [Date.now()] }); }; if (targetVersion == 10) { upgradeVersion10(); } if (targetVersion == 11) { // This trick was needed because Electron Builder incorrectly released a dev branch containing v10 as it was // still being developed, and the db schema was not final at that time. So this v11 was created to // make sure any invalid db schema that was accidentally created was deleted and recreated. queries.push('DROP TABLE item_changes'); queries.push('DROP TABLE note_resources'); upgradeVersion10(); } if (targetVersion == 12) { queries.push('ALTER TABLE folders ADD COLUMN parent_id TEXT NOT NULL DEFAULT ""'); } if (targetVersion == 13) { queries.push('ALTER TABLE resources ADD COLUMN fetch_status INT NOT NULL DEFAULT "2"'); queries.push('ALTER TABLE resources ADD COLUMN fetch_error TEXT NOT NULL DEFAULT ""'); queries.push({ sql: 'UPDATE resources SET fetch_status = ?', params: [Resource.FETCH_STATUS_DONE] }); } if (targetVersion == 14) { const resourceLocalStates = ` CREATE TABLE resource_local_states ( id INTEGER PRIMARY KEY, resource_id TEXT NOT NULL, fetch_status INT NOT NULL DEFAULT "2", fetch_error TEXT NOT NULL DEFAULT "" ); `; queries.push(this.sqlStringToLines(resourceLocalStates)[0]); queries.push('INSERT INTO resource_local_states SELECT null, id, fetch_status, fetch_error FROM resources'); queries.push('CREATE INDEX resource_local_states_resource_id ON resource_local_states (resource_id)'); queries.push('CREATE INDEX resource_local_states_resource_fetch_status ON resource_local_states (fetch_status)'); queries = queries.concat( this.alterColumnQueries('resources', { id: 'TEXT PRIMARY KEY', title: 'TEXT NOT NULL DEFAULT ""', mime: 'TEXT NOT NULL', filename: 'TEXT NOT NULL DEFAULT ""', created_time: 'INT NOT NULL', updated_time: 'INT NOT NULL', user_created_time: 'INT NOT NULL DEFAULT 0', user_updated_time: 'INT NOT NULL DEFAULT 0', file_extension: 'TEXT NOT NULL DEFAULT ""', encryption_cipher_text: 'TEXT NOT NULL DEFAULT ""', encryption_applied: 'INT NOT NULL DEFAULT 0', encryption_blob_encrypted: 'INT NOT NULL DEFAULT 0', }) ); } if (targetVersion == 15) { queries.push('CREATE VIRTUAL TABLE notes_fts USING fts4(content="notes", notindexed="id", id, title, body)'); queries.push('INSERT INTO notes_fts(docid, id, title, body) SELECT rowid, id, title, body FROM notes WHERE is_conflict = 0 AND encryption_applied = 0'); // Keep the content tables (notes) and the FTS table (notes_fts) in sync. 
// More info at https://www.sqlite.org/fts3.html#_external_content_fts4_tables_ queries.push(` CREATE TRIGGER notes_fts_before_update BEFORE UPDATE ON notes BEGIN DELETE FROM notes_fts WHERE docid=old.rowid; END;`); queries.push(` CREATE TRIGGER notes_fts_before_delete BEFORE DELETE ON notes BEGIN DELETE FROM notes_fts WHERE docid=old.rowid; END;`); queries.push(` CREATE TRIGGER notes_after_update AFTER UPDATE ON notes BEGIN INSERT INTO notes_fts(docid, id, title, body) SELECT rowid, id, title, body FROM notes WHERE is_conflict = 0 AND encryption_applied = 0 AND new.rowid = notes.rowid; END;`); queries.push(` CREATE TRIGGER notes_after_insert AFTER INSERT ON notes BEGIN INSERT INTO notes_fts(docid, id, title, body) SELECT rowid, id, title, body FROM notes WHERE is_conflict = 0 AND encryption_applied = 0 AND new.rowid = notes.rowid; END;`); } if (targetVersion == 18) { const notesNormalized = ` CREATE TABLE notes_normalized ( id TEXT NOT NULL, title TEXT NOT NULL DEFAULT "", body TEXT NOT NULL DEFAULT "" ); `; queries.push(this.sqlStringToLines(notesNormalized)[0]); queries.push('CREATE INDEX notes_normalized_id ON notes_normalized (id)'); queries.push('DROP TRIGGER IF EXISTS notes_fts_before_update'); queries.push('DROP TRIGGER IF EXISTS notes_fts_before_delete'); queries.push('DROP TRIGGER IF EXISTS notes_after_update'); queries.push('DROP TRIGGER IF EXISTS notes_after_insert'); queries.push('DROP TABLE IF EXISTS notes_fts'); queries.push('CREATE VIRTUAL TABLE notes_fts USING fts4(content="notes_normalized", notindexed="id", id, title, body)'); // Keep the content tables (notes) and the FTS table (notes_fts) in sync. // More info at https://www.sqlite.org/fts3.html#_external_content_fts4_tables_ queries.push(` CREATE TRIGGER notes_fts_before_update BEFORE UPDATE ON notes_normalized BEGIN DELETE FROM notes_fts WHERE docid=old.rowid; END;`); queries.push(` CREATE TRIGGER notes_fts_before_delete BEFORE DELETE ON notes_normalized BEGIN DELETE FROM notes_fts WHERE docid=old.rowid; END;`); queries.push(` CREATE TRIGGER notes_after_update AFTER UPDATE ON notes_normalized BEGIN INSERT INTO notes_fts(docid, id, title, body) SELECT rowid, id, title, body FROM notes_normalized WHERE new.rowid = notes_normalized.rowid; END;`); queries.push(` CREATE TRIGGER notes_after_insert AFTER INSERT ON notes_normalized BEGIN INSERT INTO notes_fts(docid, id, title, body) SELECT rowid, id, title, body FROM notes_normalized WHERE new.rowid = notes_normalized.rowid; END;`); } if (targetVersion == 19) { const newTableSql = ` CREATE TABLE revisions ( id TEXT PRIMARY KEY, parent_id TEXT NOT NULL DEFAULT "", item_type INT NOT NULL, item_id TEXT NOT NULL, item_updated_time INT NOT NULL, title_diff TEXT NOT NULL DEFAULT "", body_diff TEXT NOT NULL DEFAULT "", metadata_diff TEXT NOT NULL DEFAULT "", encryption_cipher_text TEXT NOT NULL DEFAULT "", encryption_applied INT NOT NULL DEFAULT 0, updated_time INT NOT NULL, created_time INT NOT NULL ); `; queries.push(this.sqlStringToLines(newTableSql)[0]); queries.push('CREATE INDEX revisions_parent_id ON revisions (parent_id)'); queries.push('CREATE INDEX revisions_item_type ON revisions (item_type)'); queries.push('CREATE INDEX revisions_item_id ON revisions (item_id)'); queries.push('CREATE INDEX revisions_item_updated_time ON revisions (item_updated_time)'); queries.push('CREATE INDEX revisions_updated_time ON revisions (updated_time)'); queries.push('ALTER TABLE item_changes ADD COLUMN source INT NOT NULL DEFAULT 1'); queries.push('ALTER TABLE item_changes ADD COLUMN 
before_change_item TEXT NOT NULL DEFAULT ""'); } if (targetVersion == 20) { const newTableSql = ` CREATE TABLE migrations ( id INTEGER PRIMARY KEY, number INTEGER NOT NULL, updated_time INT NOT NULL, created_time INT NOT NULL ); `; queries.push(this.sqlStringToLines(newTableSql)[0]); queries.push('ALTER TABLE resources ADD COLUMN `size` INT NOT NULL DEFAULT -1'); queries.push(this.addMigrationFile(20)); } if (targetVersion == 21) { queries.push('ALTER TABLE sync_items ADD COLUMN item_location INT NOT NULL DEFAULT 1'); } if (targetVersion == 22) { const newTableSql = ` CREATE TABLE resources_to_download ( id INTEGER PRIMARY KEY, resource_id TEXT NOT NULL, updated_time INT NOT NULL, created_time INT NOT NULL ); `; queries.push(this.sqlStringToLines(newTableSql)[0]); queries.push('CREATE INDEX resources_to_download_resource_id ON resources_to_download (resource_id)'); queries.push('CREATE INDEX resources_to_download_updated_time ON resources_to_download (updated_time)'); } if (targetVersion == 23) { const newTableSql = ` CREATE TABLE key_values ( id INTEGER PRIMARY KEY, \`key\` TEXT NOT NULL, \`value\` TEXT NOT NULL, \`type\` INT NOT NULL, updated_time INT NOT NULL ); `; queries.push(this.sqlStringToLines(newTableSql)[0]); queries.push('CREATE UNIQUE INDEX key_values_key ON key_values (key)'); } if (targetVersion == 24) { queries.push('ALTER TABLE notes ADD COLUMN `markup_language` INT NOT NULL DEFAULT 1'); // 1: Markdown, 2: HTML } if (targetVersion == 25) { queries.push(`CREATE VIEW tags_with_note_count AS SELECT tags.id as id, tags.title as title, tags.created_time as created_time, tags.updated_time as updated_time, COUNT(notes.id) as note_count FROM tags LEFT JOIN note_tags nt on nt.tag_id = tags.id LEFT JOIN notes on notes.id = nt.note_id WHERE notes.id IS NOT NULL GROUP BY tags.id`); } if (targetVersion == 26) { const tableNames = ['notes', 'folders', 'tags', 'note_tags', 'resources']; for (let i = 0; i < tableNames.length; i++) { const n = tableNames[i]; queries.push(`ALTER TABLE ${n} ADD COLUMN is_shared INT NOT NULL DEFAULT 0`); } } if (targetVersion == 27) { queries.push(this.addMigrationFile(27)); } queries.push({ sql: 'UPDATE version SET version = ?', params: [targetVersion] }); try { await this.transactionExecBatch(queries); } catch (error) { if (targetVersion === 15 || targetVersion === 18) { this.logger().warn('Could not upgrade to database v15 or v18 - FTS feature will not be used', error); } else { throw error; } } latestVersion = targetVersion; currentVersionIndex++; } return latestVersion; } async ftsEnabled() { try { await this.selectOne('SELECT count(*) FROM notes_fts'); } catch (error) { this.logger().warn('FTS check failed', error); return false; } this.logger().info('FTS check succeeded'); return true; } version() { return this.version_; } async initialize() { this.logger().info('Checking for database schema update...'); let versionRow = null; try { // Will throw if the database has not been created yet, but this is handled below versionRow = await this.selectOne('SELECT * FROM version LIMIT 1'); } catch (error) { if (error.message && error.message.indexOf('no such table: version') >= 0) { // Ignore } else { console.info(error); } } const version = !versionRow ? 
0 : versionRow.version; this.version_ = version; this.logger().info('Current database version', version); const newVersion = await this.upgradeDatabase(version); this.version_ = newVersion; if (newVersion !== version) await this.refreshTableFields(); this.tableFields_ = {}; let rows = await this.selectAll('SELECT * FROM table_fields'); for (let i = 0; i < rows.length; i++) { let row = rows[i]; if (!this.tableFields_[row.table_name]) this.tableFields_[row.table_name] = []; this.tableFields_[row.table_name].push({ name: row.field_name, type: row.field_type, default: Database.formatValue(row.field_type, row.field_default), }); } } } Database.TYPE_INT = 1; Database.TYPE_TEXT = 2; Database.TYPE_NUMERIC = 3; module.exports = { JoplinDatabase };
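The v15 and v18 migrations in the file above use SQLite external-content FTS4 tables, which do not maintain themselves: the four triggers are what keep the index in step with the content table. Below is a minimal self-contained sketch of that trigger pattern, written in Java over JDBC. The table names (docs, docs_fts) and class name are illustrative, and it assumes a SQLite JDBC driver (e.g. xerial's sqlite-jdbc) with FTS4 support is on the classpath.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Sketch of an external-content FTS4 table kept in sync by triggers, mirroring
// the v18 migration above. "docs"/"docs_fts" are illustrative names.
public class FtsSyncSketch {
    public static void main(String[] args) throws Exception {
        try (Connection db = DriverManager.getConnection("jdbc:sqlite::memory:");
             Statement st = db.createStatement()) {
            st.execute("CREATE TABLE docs (id TEXT NOT NULL, title TEXT NOT NULL DEFAULT \"\", body TEXT NOT NULL DEFAULT \"\")");
            // content= means the FTS table stores only the index; the text lives in docs
            st.execute("CREATE VIRTUAL TABLE docs_fts USING fts4(content=\"docs\", notindexed=\"id\", id, title, body)");
            // Deletions must hit the index while the old row is still addressable (BEFORE);
            // insertions happen once the new row exists (AFTER), as in the migration's triggers.
            st.execute("CREATE TRIGGER docs_fts_before_update BEFORE UPDATE ON docs BEGIN DELETE FROM docs_fts WHERE docid = old.rowid; END");
            st.execute("CREATE TRIGGER docs_fts_before_delete BEFORE DELETE ON docs BEGIN DELETE FROM docs_fts WHERE docid = old.rowid; END");
            st.execute("CREATE TRIGGER docs_after_update AFTER UPDATE ON docs BEGIN INSERT INTO docs_fts(docid, id, title, body) SELECT rowid, id, title, body FROM docs WHERE new.rowid = docs.rowid; END");
            st.execute("CREATE TRIGGER docs_after_insert AFTER INSERT ON docs BEGIN INSERT INTO docs_fts(docid, id, title, body) SELECT rowid, id, title, body FROM docs WHERE new.rowid = docs.rowid; END");
            st.execute("INSERT INTO docs VALUES ('a1', 'hello', 'full text search')");
            try (ResultSet rs = st.executeQuery("SELECT id FROM docs_fts WHERE docs_fts MATCH 'search'")) {
                while (rs.next()) System.out.println(rs.getString("id")); // prints a1
            }
        }
    }
}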
1
11,293
Please use packageInfo here instead, as is done in `app.js`.
laurent22-joplin
js
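More generally, the upgradeDatabase() method in the record above is an instance of a simple, robust migration scheme: an ordered list of schema versions, a persisted current version, and one batch of statements per step, with the version bump committed together with that step's DDL. A condensed sketch of the idea follows; the statements and class name are hypothetical, and it assumes a SQLite JDBC driver on the classpath.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Map;
import java.util.TreeMap;

// Version-walk migration runner in the style of upgradeDatabase(): each step
// moves the schema one version forward, and the stored version number is
// updated in the same transaction, so a crash never half-applies a step.
public class MigrationRunnerSketch {
    private static final TreeMap<Integer, String[]> STEPS = new TreeMap<>();
    static {
        STEPS.put(1, new String[] { "CREATE TABLE version (version INT)", "INSERT INTO version VALUES (0)" });
        STEPS.put(2, new String[] { "CREATE TABLE notes (id TEXT PRIMARY KEY, title TEXT NOT NULL DEFAULT \"\")" });
        STEPS.put(3, new String[] { "ALTER TABLE notes ADD COLUMN is_todo INT NOT NULL DEFAULT 0" });
    }

    public static void upgrade(Connection db) throws SQLException {
        db.setAutoCommit(false);
        int current = currentVersion(db);
        for (Map.Entry<Integer, String[]> step : STEPS.entrySet()) {
            if (step.getKey() <= current) continue; // already applied
            try (Statement st = db.createStatement()) {
                for (String sql : step.getValue()) st.execute(sql);
                st.execute("UPDATE version SET version = " + step.getKey());
            }
            db.commit(); // one commit per version, like transactionExecBatch above
        }
    }

    private static int currentVersion(Connection db) {
        try (Statement st = db.createStatement();
             ResultSet rs = st.executeQuery("SELECT version FROM version LIMIT 1")) {
            return rs.next() ? rs.getInt(1) : 0;
        } catch (SQLException noTableYet) {
            return 0; // fresh database: step 1 creates the version table
        }
    }
}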
@@ -49,7 +49,10 @@ public class AnalysisResult {
             // the analysis will fail and report the error on its own since the checksum won't match
         }
 
-        return 0;
+        // we couldn't read the file, maybe the file doesn't exist
+        // in any case, we can't use the cache. Returning the current timestamp here
+        // ensures the file is seen as changed every time we analyze it.
+        return System.currentTimeMillis();
     }
 
     public long getFileChecksum() {
1
/**
 * BSD-style license; for more info see http://pmd.sourceforge.net/license.html
 */

package net.sourceforge.pmd.cache;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
import java.util.zip.Adler32;
import java.util.zip.CheckedInputStream;

import org.apache.commons.io.IOUtils;

import net.sourceforge.pmd.RuleViolation;

/**
 * The result of a single file analysis.
 * Includes a checksum of the file and the complete list of violations detected.
 */
public class AnalysisResult {

    private final long fileChecksum;
    private final List<RuleViolation> violations;

    public AnalysisResult(final long fileChecksum, final List<RuleViolation> violations) {
        this.fileChecksum = fileChecksum;
        this.violations = violations;
    }

    public AnalysisResult(final File sourceFile) {
        this(computeFileChecksum(sourceFile), new ArrayList<RuleViolation>());
    }

    private static long computeFileChecksum(final File sourceFile) {
        try (
            CheckedInputStream stream = new CheckedInputStream(
                    new BufferedInputStream(Files.newInputStream(sourceFile.toPath())), new Adler32())
        ) {
            // Just read it, the CheckedInputStream will update the checksum on its own
            IOUtils.skipFully(stream, sourceFile.length());
            return stream.getChecksum().getValue();
        } catch (final IOException ignored) {
            // We don't really care; if it's unreadable
            // the analysis will fail and report the error on its own since the checksum won't match
        }

        return 0;
    }

    public long getFileChecksum() {
        return fileChecksum;
    }

    public List<RuleViolation> getViolations() {
        return violations;
    }

    public void addViolations(final List<RuleViolation> violations) {
        this.violations.addAll(violations);
    }

    public void addViolation(final RuleViolation ruleViolation) {
        this.violations.add(ruleViolation);
    }
}
1
16,422
This seems unrelated to the rest of the PR, although it is related to the original report.
pmd-pmd
java
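The patch in this record replaces the unreadable-file fallback of 0 with the current time, so a file whose checksum cannot be computed never matches a cached result and is always re-analyzed. The mechanism itself, a CheckedInputStream that feeds an Adler32 as the file is read, is easy to exercise standalone. The sketch below uses only the JDK (no commons-io) and drains the stream with a plain read loop instead of IOUtils.skipFully; the class name is illustrative.

import java.io.BufferedInputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.zip.Adler32;
import java.util.zip.CheckedInputStream;

// Standalone version of computeFileChecksum(): reading through the wrapper
// updates the Adler32 as a side effect, so after draining the stream the
// checksum covers the whole file.
public final class ChecksumSketch {
    static long fileChecksum(Path file) {
        try (CheckedInputStream in = new CheckedInputStream(
                new BufferedInputStream(Files.newInputStream(file)), new Adler32())) {
            byte[] buf = new byte[8192];
            while (in.read(buf) != -1) {
                // discard the bytes; we only want the checksum side effect
            }
            return in.getChecksum().getValue();
        } catch (IOException unreadable) {
            // Same idea as the patch: hand back a value that will never match
            // the cache, forcing the file to be re-analyzed.
            return System.currentTimeMillis();
        }
    }

    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("pmd-cache-demo", ".txt");
        Files.write(tmp, "hello".getBytes());
        System.out.println(Long.toHexString(fileChecksum(tmp)));
    }
}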
@@ -11386,7 +11386,7 @@ NABoolean HbaseAccess::isHbaseFilterPredV2(Generator * generator, ItemExpr * ie,
   }
   //check if not an added column with default non null
   if ((foundBinary || foundUnary)&& (NOT hbaseLookupPred)){
-    if (colVID.isAddedColumnWithNonNullDefault()){
+    if (colVID.isColumnWithNonNullDefault()){
       foundBinary=FALSE;
       foundUnary=FALSE;
     }
1
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- ***************************************************************************** * * File: GenPreCode.C * Description: Fixes up the query tree before code generation. * This is the post-opt and pre-gen stage. * Created: 4/15/95 * Language: C++ * * ***************************************************************************** */ #define SQLPARSERGLOBALS_FLAGS // must precede all #include's #define SQLPARSERGLOBALS_NADEFAULTS #include "Platform.h" #include <math.h> #include "OperTypeEnum.h" #include "Sqlcomp.h" #include "GroupAttr.h" #include "AllRelExpr.h" #include "RelPackedRows.h" #include "ReadTableDef.h" #include "Generator.h" #include "GenExpGenerator.h" #include "dfs2rec.h" #include "vegrewritepairs.h" #include "exp_clause_derived.h" #include "keycolumns.h" #include "ValueDesc.h" #include "BindWA.h" #include "TriggerDB.h" #include "Cost.h" #include "CostMethod.h" #include "ItmFlowControlFunction.h" #include "UdfDllInteraction.h" #include "NATable.h" #include "NumericType.h" #include "CmpStatement.h" #include "OptimizerSimulator.h" #include "ItemFunc.h" #include "ControlDB.h" #include "CmpSeabaseDDL.h" #include "NAExecTrans.h" #include "exp_function.h" #include "SqlParserGlobals.h" // must be last #include extern ItemExpr * buildComparisonPred ( ItemExpr *, ItemExpr *, ItemExpr *, OperatorTypeEnum, NABoolean specialNulls=FALSE //++MV - Irena ); // ----------------------------------------------------------------------- // generateKeyExpr() // // This method is used by the code generator for building expressions // that are of the form <key column> = <value> for each key column. // // Parameters: // // const ValueIdSet & externalInputs // IN : The set of values that are available here and can be // used for replacing any wildcards that appear in the // listOfKeyValues. // // const ValueIdList & listOfKeyColumns // IN : A read-only reference to the list of key columns // corresponding to which certain key values have // been chosen. // // const ValueIdList & listOfKeyValues // IN : A read-only reference to a list of key values that // are chosen for the corresponding listOfKeyColumns. // Values for missing key columns have already been // computed and supplied in this list. // // ValueIdList & listOfKeyExpr // OUT: An assignment expression of the form <key column> = <value> // for each key column. 
// // ----------------------------------------------------------------------- static void generateKeyExpr(const ValueIdSet & externalInputs, const ValueIdList & listOfKeyColumns, const ValueIdList & listOfKeyValues, ValueIdList & listOfKeyExpr, Generator* generator, NABoolean replicatePredicates = FALSE) { ItemExpr * keyExpr; CollIndex keyCount = listOfKeyColumns.entries(); for (CollIndex keyNum = 0; keyNum < keyCount; keyNum++) { // Build the assignment expression. ItemExpr *ieKeyVal = listOfKeyValues[keyNum].getItemExpr() ->replaceVEGExpressions(externalInputs, externalInputs, FALSE, NULL, replicatePredicates); ItemExpr *ieKeyCol = listOfKeyColumns[keyNum].getItemExpr(); ValueId KeyColId = ieKeyCol->getValueId(); keyExpr = new(generator->wHeap()) BiRelat(ITM_EQUAL, ieKeyCol, ieKeyVal); // Synthesize its type for and assign a ValueId to it. keyExpr->synthTypeAndValueId(); // INsert it in the list of key expressions listOfKeyExpr.insertAt(keyNum, keyExpr->getValueId()); } // end For Loop } // static generateKeyExpr() static NABoolean processConstHBaseKeys(Generator * generator, RelExpr *relExpr, const SearchKey *skey, const IndexDesc *idesc, const ValueIdSet &executorPreds, NAList<HbaseSearchKey*> &mySearchKeys, ListOfUniqueRows &listOfUpdUniqueRows, ListOfRangeRows &listOfUpdSubsetRows) { if (! skey) return TRUE; // convert built-in search key to entries with constants, if possible if (skey->areAllKeysConstants(TRUE)) { ValueIdSet nonKeyColumnSet; idesc->getNonKeyColumnSet(nonKeyColumnSet); // seed keyPreds with only the full key predicate from skey ValueIdSet keyPreds = skey->getFullKeyPredicates(); // include executorPreds and selection predicates // but exclude the full key predicates. ValueIdSet exePreds; exePreds += executorPreds; exePreds += relExpr->getSelectionPred(); exePreds.subtractSet(keyPreds); HbaseSearchKey::makeHBaseSearchKeys( skey, skey->getIndexDesc()->getIndexKey(), skey->getIndexDesc()->getOrderOfKeyValues(), relExpr->getGroupAttr()->getCharacteristicInputs(), TRUE, /* forward scan */ keyPreds, nonKeyColumnSet, idesc, relExpr->getGroupAttr()->getCharacteristicOutputs(), mySearchKeys); // Include any remaining key predicates that have not been // picked up (to be used as the HBase search keys). exePreds += keyPreds; TableDesc *tdesc = NULL; if (mySearchKeys.entries()>0) { switch (relExpr->getOperatorType()) { case REL_HBASE_ACCESS: { HbaseAccess *hba = static_cast<HbaseAccess *>(relExpr); hba->setSearchKey(NULL); hba->executorPred() = exePreds; tdesc = hba->getTableDesc(); } break; case REL_HBASE_DELETE: { HbaseDelete *hbd = static_cast<HbaseDelete *>(relExpr); hbd->setSearchKey(NULL); hbd->beginKeyPred().clear(); hbd->endKeyPred().clear(); hbd->executorPred() = exePreds; tdesc = hbd->getTableDesc(); } break; case REL_HBASE_UPDATE: { HbaseUpdate *hbu = static_cast<HbaseUpdate *>(relExpr); hbu->setSearchKey(NULL); hbu->beginKeyPred().clear(); hbu->endKeyPred().clear(); hbu->executorPred() = exePreds; tdesc = hbu->getTableDesc(); } break; default: CMPASSERT(tdesc); // unsupported operator type break; } // switch relExpr->selectionPred().clear(); } if (HbaseAccess::processSQHbaseKeyPreds(generator, mySearchKeys, listOfUpdUniqueRows, listOfUpdSubsetRows)) return FALSE; } // key uses all constants return TRUE; } // // replaceVEGExpressions1() - a helper routine for ItemExpr::replaceVEGExpressions() // // NOTE: The code in this routine came from the previous version of // ItemExpr::replaceVEGExpressions(). 
It has been pulled out // into a separate routine so that the C++ compiler will produce // code that needs signficantly less stack space for the // recursive ItemExpr::replaceVEGExpressions() routine. // ItemExpr * ItemExpr::replaceVEGExpressions1( VEGRewritePairs* lookup ) { // see if this expression is already in there ValueId rewritten; if (lookup->getRewritten(rewritten /* out */, getValueId())) { if (rewritten == NULL_VALUE_ID) return NULL; else return rewritten.getItemExpr(); } return (ItemExpr *)( (char *)(NULL) -1 ) ; } // // replaceVEGExpressions2() - a helper routine for ItemExpr::replaceVEGExpressions() // // NOTE: The code in this routine came from the previous version of // ItemExpr::replaceVEGExpressions(). It has been pulled out // into a separate routine so that the C++ compiler will produce // code that needs signficantly less stack space for the // recursive ItemExpr::replaceVEGExpressions() routine. // void ItemExpr::replaceVEGExpressions2( Int32 index , const ValueIdSet& availableValues , const ValueIdSet& inputValues , ValueIdSet& currAvailableValues , const GroupAttributes * left_ga , const GroupAttributes * right_ga ) { // If we have asked that the EquiPredicate resolve // each child of the equipred by available values from the // respectively input GAs, make sure we pick the right one. // First we find out what GA covers the current EquiPred child // we are processing (0 or 1), and pick the one that covers, unless // both GAs do. If both GAs cover, the just make sure we pick a // different one for each child. The hash join will later fix up // the predicate expression to match its children. // If none of the GAs covers, we have a problem... // This fix was put in to solve solution: 10-100722-1962 ValueIdSet dummy; NABoolean leftGaCovers = left_ga->covers(child(index)->getValueId(), inputValues, dummy); NABoolean rightGaCovers = right_ga->covers(child(index)->getValueId(), inputValues, dummy); if (leftGaCovers == FALSE && rightGaCovers == FALSE) { // for the moment it is assumed that this code is only // executed for hash and merge joins, and in general each // side of the expression should be coverd by a child. // So if we have neither, we have a problem .. cout << "Unable to pick GA to use: " << getArity() << endl; CMPASSERT(FALSE); } else { const GroupAttributes *coveringGa = NULL; currAvailableValues.clear(); currAvailableValues += inputValues; if (leftGaCovers && rightGaCovers) coveringGa = (index == 0 ? left_ga : right_ga); else coveringGa = (leftGaCovers ? left_ga : right_ga); currAvailableValues += coveringGa->getCharacteristicOutputs(); } } // ----------------------------------------------------------------------- // ItemExpr::replaceVEGExpressions() // It performs a top-down, left-to-right tree walk in the ItemExpr tree // and expands any wildcards (VEGReference or VEGPredicate expressions) // by replacing them with an expression that belongs to the // availableValues. // IF isKeyPredicate is TRUE then the ItemExpr is a KeyPredicate: // A KeyPredicate is of a restricted form. If we are here it is // because the predicate is a KeyPredicate. Then, it must satisfy // very specific characteristics (see Key::isAKeyPredicate(...)) // for instance, one of its sides must be a key column // This method *guarantees* that a key predicate will be // generated from the rewritten predicate (i.e. we avoid // cases like VegRef{T1.A, 2} > 7 being generated like // 2 > 7 when T1.A is a key column. 
// ----------------------------------------------------------------------- ItemExpr * ItemExpr::replaceVEGExpressions (const ValueIdSet& availableValues, const ValueIdSet& inputValues, NABoolean thisIsAnMdamKeyPredicate, VEGRewritePairs* lookup, NABoolean replicateExpression, const ValueIdSet * joinInputAndPotentialOutput, const IndexDesc * iDesc, const GroupAttributes * left_ga, const GroupAttributes * right_ga) { // --------------------------------------------------------------------- // If this expression has already been resolved because it exists in // availableValues, the replacement of VEGReferences is not required. // --------------------------------------------------------------------- if (availableValues.contains(getValueId())) return this; // terminate processing ItemExpr* iePtr = this; if (lookup && replicateExpression) // if lookup table is present { ItemExpr* tmpIePtr = ItemExpr::replaceVEGExpressions1( lookup ) ; if ( tmpIePtr != (ItemExpr *)( (char *)(NULL) -1 ) ) return tmpIePtr ; }; if (replicateExpression) iePtr = copyTopNode(0, CmpCommon::statementHeap()); // virtual copy constructor // ----------------------------------------------------------------------- // In the case of mdam key predicates we need to be careful with // binary operators whose child is a VegRef that contains both a // key column and a constant because the rewrite logic for VEGRef // favors the generation of constants over other ItemExprs. In // MDAM we *need* to generate the key column and not the constant. // With the gated logic below we ensure this. // ----------------------------------------------------------------------- if (thisIsAnMdamKeyPredicate) { #if DEBUG // at the moment it is assumed the left and right ga's are only // used for hash/merge joins equijoin predicates and with the // mdamKeyPredicate flag turned off. If this assumption is no longer // true we need to add some additional code in this "if" clause. GENASSERT(left_ga == NULL && right_ga == NULL); #endif switch (getArity()) { case 0: // const, VEGRef, and VEGPred have arity 0 break; // If it reached here it means that // the ItemExpr does not need to do any special // processing for this operator (i.e. a constant) // VEG predicates should never reach here case 1: // Example: T1.A IS NULL { ItemExpr *newChild; // the child must be a key column: newChild = child(0)->replaceVEGExpressions(availableValues ,inputValues ,TRUE // no constants! 
,lookup ,replicateExpression ,joinInputAndPotentialOutput ,iDesc ); if (newChild != iePtr->child(0)) { if (replicateExpression) iePtr = iePtr->copyTopNode(NULL, CmpCommon::statementHeap()); iePtr->child(0) = newChild; } } break; case 2: case 3: { // Rewrite children (one of them MUST be a key column, the // other MUST be a constant or a host var) ItemExpr *leftChild = NULL, *rightChild = NULL, *thirdChild = NULL; OperatorTypeEnum newOperType = getOperatorType(); if ((child(0)->getOperatorType() == ITM_VEG_REFERENCE) OR (child(1)->getOperatorType() == ITM_VEG_REFERENCE)) { //--------------------------------------------------------- // Assume we have an expression of // the form VegRef{T1.A, 2} > 7 //------------------------------------------------------ // Force the generation of a key column by // telling replacevegexprs not to generate them: leftChild = child(0)->replaceVEGExpressions(availableValues ,inputValues ,TRUE // want key col ,lookup ,replicateExpression ,joinInputAndPotentialOutput ,iDesc ); // generate a constant in this branch rightChild = child(1)->replaceVEGExpressions(availableValues ,inputValues ,FALSE // want constant ,lookup ,replicateExpression ,joinInputAndPotentialOutput ,iDesc ); // However, the above will fail if the predicate is // of the form // 7 < VegRef{T1.A,2}, thus, if it failed, redrive with // the roles reversed: if (leftChild == NULL OR rightChild == NULL) { leftChild = child(1)->replaceVEGExpressions(availableValues ,inputValues ,TRUE // want constant ,lookup ,replicateExpression ,joinInputAndPotentialOutput ,iDesc ); rightChild = child(0)->replaceVEGExpressions(availableValues ,inputValues ,FALSE // want key col ,lookup ,replicateExpression ,joinInputAndPotentialOutput ,iDesc ); // We have reversed the operands, reverse // the operator if it is a greater/eq BiRelat operator: switch(getOperatorType()) { case ITM_LESS: case ITM_LESS_EQ: case ITM_GREATER: case ITM_GREATER_EQ: // need to reverse! newOperType = ((BiRelat*)iePtr)->getReverseOperatorType(); break; } } // if need to reverse operands // now we must have succeeded! CMPASSERT(leftChild != NULL && rightChild != NULL); } // if one of the children of the operator is a reference else { // No children are references, normal rewrite: leftChild = child(0)->replaceVEGExpressions(availableValues, inputValues, FALSE, // constants OK lookup, replicateExpression, joinInputAndPotentialOutput, iDesc); rightChild = child(1)->replaceVEGExpressions(availableValues, inputValues, FALSE, // constants OK lookup, replicateExpression, joinInputAndPotentialOutput, iDesc); CMPASSERT(leftChild != NULL && rightChild != NULL); } if (getArity() == 3) { // rewrite the exclusion part of the PA key predicate: thirdChild = child(2)->replaceVEGExpressions(availableValues, inputValues, thisIsAnMdamKeyPredicate, lookup, replicateExpression, joinInputAndPotentialOutput, iDesc); } if (iePtr->child(0) != (void *)leftChild OR iePtr->child(1) != (void *)rightChild OR (thirdChild AND iePtr->child(2) != (void *)thirdChild) OR iePtr->getOperatorType() != newOperType) { // we have to change data members, make a copy of the // node if other users may share this node if (replicateExpression) iePtr = iePtr->copyTopNode(NULL, CmpCommon::statementHeap()); // Set the left and right children of the iePtr // to their rewritten nodes: // $$ What happens to all those nodes that were // $$ replicated and the rewrite failed? 
iePtr->child(0) = leftChild; iePtr->child(1) = rightChild; if (thirdChild) iePtr->child(2) = thirdChild; iePtr->setOperatorType(newOperType); } break; } // case 2, case 3 default: // $$ modify this when predicates of arity > 3 come into // $$ existance cout << "Invalid arity: " << getArity() << endl; CMPASSERT(FALSE); // No predicates of arity > 3 (so far) } } else // ItemExpr is not an mdam key predicate, go ahead with the rewrite: for (Lng32 index = 0; index < getArity(); index++) { ValueIdSet currAvailableValues(availableValues); if (left_ga != NULL && right_ga != NULL && getArity() == 2 ) { ItemExpr::replaceVEGExpressions2( index , availableValues , inputValues , currAvailableValues , left_ga , right_ga ) ; } ItemExpr *newChild = child(index)->replaceVEGExpressions( currAvailableValues, inputValues, FALSE, // this is not a key predicate lookup, replicateExpression, joinInputAndPotentialOutput, iDesc); if ( newChild->isPreCodeGenNATypeChanged()) iePtr->setpreCodeGenNATypeChangeStatus(); // is the result a different ItemExpr or does iePtr not point to // the (possibly unchanged) result yet? if (iePtr->child(index) != (void *)newChild) { if (iePtr == this AND replicateExpression) { // don't change "this" if it may be shared, make a // copy instead and also copy the unchanged children // so far iePtr = iePtr->copyTopNode(NULL, CmpCommon::statementHeap()); for (Int32 j = 0; j < index; j++) iePtr->child(j) = this->child(j); } iePtr->child(index) = newChild; } } if(lookup && replicateExpression && iePtr != this) { iePtr->synthTypeAndValueId(FALSE); lookup->insert(getValueId(), iePtr->getValueId()); } return iePtr; } // ItemExpr::replaceVEGExpressions() // ----------------------------------------------------------------------- // ValueIdUnion::replaceVEGExpressions() // The parameter replicateExpression is ignored because the // ValueIdUnion implements a special policy for rewriting // an ItemExpr, in that it manages three sets of values. // ----------------------------------------------------------------------- ItemExpr * ValueIdUnion::replaceVEGExpressions (const ValueIdSet& availableValues, const ValueIdSet& inputValues, NABoolean thisIsAnMdamKeyPredicate, VEGRewritePairs* lookup, NABoolean replicateExpression, const ValueIdSet * joinInputAndPotentialOutput, const IndexDesc * iDesc, const GroupAttributes * left_ga, const GroupAttributes * right_ga) { CMPASSERT(NOT thisIsAnMdamKeyPredicate); // sanity check // we are ignoring the replicateExpression and // joinInputAndPotentialOutput flags .. ValueIdUnion * viduPtr = (ValueIdUnion *)this; // --------------------------------------------------------------------- // If this expression has already been resolved because it exists in // availableValues, the replacement of VEGExpressions is not required. 
// --------------------------------------------------------------------- if (availableValues.contains(getValueId()) ) return this; for(CollIndex i = 0; i < entries(); i++) { viduPtr-> #pragma nowarn(1506) // warning elimination setSource(i, #pragma warn(1506) // warning elimination #pragma nowarn(1506) // warning elimination (viduPtr->getSource(i).getItemExpr() #pragma warn(1506) // warning elimination ->replaceVEGExpressions(availableValues,inputValues, thisIsAnMdamKeyPredicate,lookup, FALSE, /* replicateExpression default */ NULL,/*joinInputAndPotentialOutput default*/ iDesc, left_ga, right_ga)) ->getValueId()); } // If the result is not this ValueIdUnion if (viduPtr->getResult() != viduPtr->getValueId()) viduPtr->setResult((viduPtr->getResult().getItemExpr() ->replaceVEGExpressions(availableValues, inputValues, thisIsAnMdamKeyPredicate, lookup, FALSE,/*replicateExpression*/ NULL, /*joinInputAndPotentialOutput*/ iDesc, left_ga, right_ga)) ->getValueId()); return this; } // ValueIdUnion::replaceVEGExpressions() // ----------------------------------------------------------------------- // VEGPredicate::replaceVEGExpressions() // The parameter replicateExpression is ignored because the // VEGPredicate implements a special policy for rewriting // an ItemExpr. The policies are implemented by replaceVEGPredicate(). // ----------------------------------------------------------------------- ItemExpr * VEGPredicate::replaceVEGExpressions (const ValueIdSet& availableValues, const ValueIdSet& inputValues, NABoolean /* thisIsAnMdamKeyPredicate*/, VEGRewritePairs* lookup, NABoolean /*replicateExpression*/, const ValueIdSet * joinInputAndPotentialOutput, const IndexDesc * iDesc, const GroupAttributes * /* left_ga */, const GroupAttributes * /* right_ga */) { // we ignore the thisIsAnMdamKeyPredicate flag, and so we also ignore the // iDesc for VEGPredicates. No need to guarantee a keyColumn. return replaceVEGPredicate(availableValues,inputValues,lookup,joinInputAndPotentialOutput); } // VEGPredicate::replaceVEGExpressions() // ----------------------------------------------------------------------- // VEGReference::replaceVEGExpressions() // The parameter replicateExpression is ignored because the // VEGReference implements a special policy for rewriting // an ItemExpr. The policies are implemented by replaceVEGReference(). // ----------------------------------------------------------------------- ItemExpr * VEGReference::replaceVEGExpressions (const ValueIdSet& availableValues, const ValueIdSet& inputValues, NABoolean thisIsAnMdamKeyPredicate, VEGRewritePairs* /*lookup*/, NABoolean /*replicateExpression*/, const ValueIdSet * joinInputAndPotentialOutput, const IndexDesc * iDesc, const GroupAttributes * /* left_ga */ , const GroupAttributes * /* right_ga */ ) { // we ignore the replicateExpression, lookup and // joinInputAndPotentialOutput parameters. return replaceVEGReference(availableValues,inputValues, thisIsAnMdamKeyPredicate, iDesc); } // VEGReference::replaceVEGExpressions() // ----------------------------------------------------------------------- // ItemExpr::replaceOperandsOfInstantiateNull() // This method is used by the code generator for replacing the // operands of an ITM_INSTANTIATE_NULL with a value that belongs // to availableValues. 
// ----------------------------------------------------------------------- void ItemExpr::replaceOperandsOfInstantiateNull( const ValueIdSet & availableValues, const ValueIdSet & inputValues) { switch (getOperatorType()) { case ITM_INSTANTIATE_NULL: { child(0) = child(0)->replaceVEGExpressions(availableValues,inputValues); break; } default: { for (Lng32 i = 0; i < getArity(); i++) { child(i) = child(i)->replaceVEGExpressions(availableValues, inputValues); } break; } } } // ItemExpr::replaceOperandsOfInstantiateNull() // ----------------------------------------------------------------------- // VEG::setBridgeValue() // ----------------------------------------------------------------------- void VEG::setBridgeValue(const ValueId & bridgeValueId) { bridgeValues_ += bridgeValueId; } // VEG::setBridgeValue() // ----------------------------------------------------------------------- // VEG::markAsReferenced() // Add a member of the set to the referenced values set to indicate // that it has been used (at least once) in a "=" predicate that // was generated by the code generator. // ----------------------------------------------------------------------- void VEG::markAsReferenced(const ValueId & vid) { referencedValues_ += vid; switch (vid.getItemExpr()->getOperatorType()) { case ITM_INDEXCOLUMN: // Also add the ValueId of the column from the base table, which is // used as the key column for an index. referencedValues_ += ((IndexColumn *)(vid.getItemExpr())) ->getDefinition(); break; default: break; } } // VEG::markAsReferenced() // ----------------------------------------------------------------------- // VEGPredicate::replaceVEGPredicate // // This method is used by the code generator for replacing a // reference to a VEGPredicate with an tree of equality predicates. // Each equality predicate is between two values that belong to // the VEG as well as to availableValues. // // Terminology : // *********** // VEG // A ValueId Equality Group. It is a set of values such that its members // have an equality predicate specified on them. // // availableValues // This is the set of values that are available at the relational operator // with which the VEGPredicate is associated. It is usually the set union // of the Charactersitic Inputs of the operator with the Characteristic // Outputs of each of its children. // // inputValues // This is the set of values that is being provided to this node // from above, and therefore is constant for each invocation of // the operator when executing. // This are good values to use to build key predicates. // // bridgeValues // This is a set of values for which "=" predicates MUST be generated // for correctness as well as to guarantee that transitivity is upheld. // For example, the following query: // // select ax, by, cx, dy // from (select A.x, B.y from A join B on A.x = B.y) T1(ax,by) // join (select C.x, D.y from C join D on C.x = D.y) T2(cx,dy) // on T1.ax = T2.cx // // shows two "islands" (self-contained pool of rows) defined by the // derived tables T1 and T2 respectively. It is possible to deduce // that A.x = D.y only after the predicate A.x = C.x has been applied. // The values A.x, C.x establish the transitivity between the two // islands. Such values are called inter-island links or bridge values. // // referencedValues // A subset of the members of the VEG. Each member in this set is // referenced in at least one "=" predicate that was generated by // a call to replaceVEGPredicate. 
// // unboundValues // The unbound values of a VEG are those that require an "=" // predicate to be generated between them. It is given by // bridge values union available values intersect members of the VEG. // // Note that if the outputs of the join have already been resolved then // joinInputAndPotentialOutput should really be joinInputAndOutputValues. // All potential output values are no longer available, only the resolved // values. Please see similar comment in Hashjoin::PrecodeGen. // ----------------------------------------------------------------------- ItemExpr * VEGPredicate::replaceVEGPredicate(const ValueIdSet& origAvailableValues, const ValueIdSet& origInputValues, VEGRewritePairs* lookup, const ValueIdSet * joinInputAndPotentialOutput) { // If we want processing to be idempotent, check to see if we have // already written this VEGPredicate. And if so, return the rewritten // result. if (lookup) // if lookup table is present { // see if this expression is already in there ValueId rewritten; if (lookup->getRewritten(rewritten /* out */,getValueId())) { if (rewritten == NULL_VALUE_ID) return NULL; else return rewritten.getItemExpr(); } }; // We assume that inputValues is a (perhaps improper) subset of // available values. Verify this. ValueIdSet scratchPad; scratchPad = origInputValues; scratchPad -= origAvailableValues; GenAssert(scratchPad.isEmpty(),"NOT scratchPad.isEmpty()"); // Replace VEGReferences in the members of this VEG. // Copy values in the set and expand wild cards in the copy. ValueIdSet vegMembers; vegMembers.replaceVEGExpressionsAndCopy(getVEG()->getAllValues()); // Constants are not passed as input values but they are available. // Have availableValues and availableInputs contain the VEG members // that are constant values. ValueIdSet availableValues = origAvailableValues; ValueIdSet inputValues = origInputValues; ValueIdSet vegConstants; vegMembers.getConstants(vegConstants); availableValues += vegConstants; inputValues += vegConstants; // If each member of this VEG is referenced in at least one "=" predicate // that was generated here and there is only one "unbound" value remaining, // then we are done. Terminate the generation of more "=" predicates. if ( (vegMembers == getVEG()->getReferencedValues()) AND (getVEG()->getBridgeValues().entries() < 2) ) return NULL; ItemExpr * rootPtr = NULL; // We can only bind those values that are available here. ValueIdSet valuesToBeBound = vegMembers; valuesToBeBound.intersectSet(availableValues); ValueIdSet unReferencedValues = vegMembers; unReferencedValues -= getVEG()->getReferencedValues(); // Compute the set of values that are available, but // are already referenced and are not a bridge value. 
scratchPad = valuesToBeBound; scratchPad -= unReferencedValues; scratchPad -= getVEG()->getBridgeValues(); valuesToBeBound -= scratchPad; // look for an invariant among the input values ValueIdSet vegInputs = valuesToBeBound; vegInputs.intersectSet(inputValues); // If we didn't have any input values that were a member of the // VEG then pick the invariant from the bridge Values if (vegInputs.isEmpty()) { vegInputs = valuesToBeBound; vegInputs.intersectSet(getVEG()->getBridgeValues()); } // If no input values are part of the VEG and there are // no available bridge value then just pick any of the // remaining (unreferenced) values if (vegInputs.isEmpty()) { vegInputs = valuesToBeBound; } // look for an invariant value ValueId iterExprId, invariantExprId; NABoolean invariantChosen = FALSE; if (NOT vegInputs.isEmpty()) { for (invariantExprId = vegInputs.init(); vegInputs.next(invariantExprId); vegInputs.advance(invariantExprId)) { //check if the item expr is a non-strict constant //a strict constant is somethine like cos(1) //where as cos(?p) can be considered a constant //in the non-strict definition since it remains //constant for a given execution of a query - Solution 10-020912-1647 if (invariantExprId.getItemExpr()->doesExprEvaluateToConstant(FALSE)) { invariantChosen = TRUE; break; } } // endfor // if invariantExprId does not contain the ValueId of a constant value, // then it must be initialized to contain any one value from // the input values. if (NOT invariantChosen) { if (vegInputs.entries() <= 1) vegInputs.getFirst(invariantExprId); else { // The EXISTS query reported in case 10-091027-8459, soln // 10-091028-5770 exposed a flaw in this code that used to // implicitly assume that the first element of vegInputs is // always a valid choice for an invariantExprId. When replacing // a semijoin's VEGPredicate, the invariantExprId must be a // member of that semijoin's characteristic output. Otherwise, // *Join::preCodeGen hjp.replaceVEGExpressions() will silently // delete that equijoin predicate and incorrectly generate a // cartesian product. scratchPad = vegInputs; if (joinInputAndPotentialOutput) { // for an outer join, joinInputAndPotentialOutput will have // instantiate_null wrappers. intersectSetDeep digs into // those wrappers. scratchPad.intersectSetDeep(*joinInputAndPotentialOutput); } #ifdef _DEBUG // we want to GenAssert here but regress/core/test027 raises // a false alarm. So, for now, we don't. // GenAssert(!scratchPad.isEmpty(),"vegInputs.isEmpty()"); #endif if (scratchPad.isEmpty()) vegInputs.getFirst(invariantExprId); else scratchPad.getFirst(invariantExprId); } } // remove it from further consideration valuesToBeBound -= invariantExprId; } // endif (NOT vegInputs.isEmpty()) else // have no values { // The predicate pushdown logic places predicates on those // operators where it knows that values will be available // for evaluating the predicate. // If you have reached this point because of a bug, // **************************************************************** // DO NOT EVEN CONSIDER COMMENTING OUT THE FOLLOWING ASSERT. // **************************************************************** GenAssert(NOT valuesToBeBound.isEmpty(),"valuesToBeBound.isEmpty()"); // **************************************************************** // YOU WILL BE DELIBERATELY MASKING OUT A SERIOUS BUG IF YOU // DISABLE THE ASSERT STATEMENT ABOVE. DON'T TOUCH IT! 
// **************************************************************** } if (valuesToBeBound.entries() >= 1) { // Replace this reference to the VEG with a tree of '=' predicates. for (iterExprId = valuesToBeBound.init(); valuesToBeBound.next(iterExprId); valuesToBeBound.advance(iterExprId)) { rootPtr = buildComparisonPred ( rootPtr, iterExprId.getItemExpr(), invariantExprId.getItemExpr(), ITM_EQUAL, getSpecialNulls() //++MV - Irena ); getVEG()->markAsReferenced(iterExprId); } } else { // We have only the invariant. Generate an IS NOT NULL if it // is nullable and has not been compared with someone else. // MVs: // If specialNulls option is set, nulls are values (null=null) // and ITM_IS_NOT_NULL filters out some valid rows also. // For more info on specialNulls -- see <ItemOther.h> if (NOT getVEG()->getReferencedValues().contains(invariantExprId) && invariantExprId.getType().supportsSQLnull() && NOT getVEG()->getVEGPredicate()->getSpecialNulls() // ++MV - Irena ) { rootPtr = new(CmpCommon::statementHeap()) UnLogic(ITM_IS_NOT_NULL, invariantExprId.getItemExpr()); } } // mark as referenced the invariant. Make it the Bridge value getVEG()->markAsReferenced(invariantExprId); getVEG()->removeBridgeValues(valuesToBeBound); getVEG()->setBridgeValue(invariantExprId); // Assign a ValueId to the "=" and synthesize the type for the expression. if (rootPtr != NULL) { rootPtr->synthTypeAndValueId(); // If there is a lookup table, enter the rewritten tree in the table if (lookup) { if (rootPtr) lookup->insert(getValueId(),rootPtr->getValueId()); else lookup->insert(getValueId(),NULL_VALUE_ID); } } // Return the tree of '=' predicates (or NULL) return rootPtr; } // VEGPredicate::replaceVEGPredicate() // ----------------------------------------------------------------------- // VEGReference::replaceVEGReference // This method is used by the code generator. for replacing a // VEGReference with one of its candidate values // thisIsAnMdamKeyPredicate is FALSE by default. However, when // key predicates are being rewritten, it should be set to TRUE // when we need to guarantee that a key column must be generated by // the veg reference. // In this case, // then bridge values MUST NOT be usen because we need to pick either // a constant or a key column (depending on the child we are // working on (see ItemExpr::replaceVEGExpressions(...)) // ----------------------------------------------------------------------- ItemExpr * VEGReference::replaceVEGReference(const ValueIdSet &origAvailableValues, const ValueIdSet &origInputValues, NABoolean thisIsAnMdamKeyPredicate, const IndexDesc *iDesc) { ItemExpr *result = NULL; #ifndef _DEBUG const NABoolean VEG_DEBUG = FALSE; #else NABoolean VEG_DEBUG = getenv("VEG_DEBUG") != NULL; #endif // We assume that inputValues is a (perhaps improper) subset of // available values. Verify it. ValueIdSet scratchPad; scratchPad = origInputValues; scratchPad -= origAvailableValues; GenAssert(scratchPad.isEmpty(),"NOT scratchPad.isEmpty()"); // Copy values in the set and expand wild cards in the copy. 
ValueIdSet valuesToBeBound; valuesToBeBound.replaceVEGExpressionsAndCopy(getVEG()->getAllValues()); // Constants are not passed as input values but they are available // Have availableValues and availableInputs contain the VEG members // that are constant values ValueIdSet availableValues = origAvailableValues; ValueIdSet inputValues = origInputValues; // -------------------------------------------------------------------- // Don't add constants if the caller don't want them to be generated // from this vegref (i.e. when thisIsAnMdamKeyPredicate is TRUE) // -------------------------------------------------------------------- ValueIdSet vegConstants; valuesToBeBound.getConstants(vegConstants); if (NOT thisIsAnMdamKeyPredicate) { availableValues += vegConstants; inputValues += vegConstants; } if (VEG_DEBUG) { NAString av,iv,vb; availableValues.unparse(av); inputValues.unparse(iv); valuesToBeBound.unparse(vb); cout << endl; cout << "VEGReference " << getValueId() << " :" << endl; cout << "AV: " << av << endl; cout << "IV: " << iv << endl; cout << "VB: " << vb << endl; } // ----------------------------------------------------------------------- // // The commented out code implements a different resolution strategies // for VEGReference. Inputs are no longer favored. This is in order to // handle peculiar scenario where a predicate is not pushed down to the // right hand side of a NJ even if it's covered because of the special // semantics of the NJ itself (left join). The inputs from the operators // in the right leg of the NJ shouldn't be used to resolve the output // values since the VEGPred which relates the two hasn't been evaluated. // // This support is not ready yet for FCS, and therefore the code has been // commented out. // ----------------------------------------------------------------------- #if 0 // non-input available values: ValueIdSet nonInputAvailableValues = availableValues; nonInputAvailableValues -= inputValues; #endif // We can only bind those values that are available here. valuesToBeBound.intersectSet(availableValues); #if 0 // try using nonInputAvailableValues first. ValueIdSet nonInputValuesToBeBound = valuesToBeBound; nonInputValuesToBeBound.intersectSet(nonInputAvailableValues); // try not to use input values since some predicate might not have // be evaluated yet. if ( (NOT thisIsAnMdamKeyPredicate) AND (NOT nonInputValuesToBeBound.isEmpty()) ) { // Try to pick a bridge value. ValueIdSet candidateValues = nonInputValuesToBeBound; candidateValues.intersectSet(getVEG()->getBridgeValues()); // If unsuccessful, try to pick any of the remaining unreferenced. if (candidateValues.isEmpty()) { candidateValues = nonInputValuesToBeBound; } CMPASSERT(NOT candidateValues.isEmpty()); ValueId resultVid; candidateValues.getFirst(resultVid); return resultVid.getItemExpr(); } #endif if (thisIsAnMdamKeyPredicate ) { GenAssert(iDesc != NULL,"VEGReference::replaceVEGReference: Mdam KeyPredicates flag requires an iDesc to go with"); if (iDesc != NULL) { ValueIdSet keyCols = iDesc->getIndexKey(); for (ValueId exprId = keyCols.init(); keyCols.next(exprId); keyCols.advance(exprId)) { // pick the first value - assuming it is the key column.. if (valuesToBeBound.contains(exprId)) { result = exprId.getItemExpr(); break; } } } if (result && NOT (result->getValueId().getType() == getValueId().getType()) ) result->setpreCodeGenNATypeChangeStatus(); return result; // A null is fine here. 
} // look for an invariant among the input values ValueIdSet vegInputs = valuesToBeBound; vegInputs.intersectSet(inputValues); // If we didn't have any input values that were a member of the // VEG then pick the invariant from the bridge Values // Do not use bridge values for key predicates: if ((NOT thisIsAnMdamKeyPredicate) && vegInputs.isEmpty()) { vegInputs = valuesToBeBound; vegInputs.intersectSet(getVEG()->getBridgeValues()); if (VEG_DEBUG) { NAString vb,br; valuesToBeBound.unparse(vb); // Stupid, ValueIdSet::unparse should be declared const; // for now, just cast away constness... ValueIdSet(getVEG()->getBridgeValues()).unparse(br); cout << "VB: " << vb << endl; cout << "BR: " << br << endl; } } // If no input values are part of the VEG and there are // no available bridge value then just pick any of the // remaining (unreferenced) values if (vegInputs.isEmpty()) { vegInputs = valuesToBeBound; } // look for a constant value ValueId invariantExprId; NABoolean invariantChosen = FALSE; if (NOT vegInputs.isEmpty()) { for (invariantExprId = vegInputs.init(); vegInputs.next(invariantExprId); vegInputs.advance(invariantExprId)) { //check if the item expr is a non-strict constant //a strict constant is somethine like cos(1) //where as cos(?p) can be considered a constant //in the non-strict definition since it remains //constant for a given execution of a query - Solution 10-020912-1647 if (invariantExprId.getItemExpr()->doesExprEvaluateToConstant(FALSE)) { invariantChosen = TRUE; break; } } // endfor // if invariantExprId does not contain the ValueId of a constant value, // then it must be initialized to contain any one value from // the input values. if (NOT invariantChosen) { vegInputs.getFirst(invariantExprId); } // we found the invariant assign it! result = invariantExprId.getItemExpr(); CMPASSERT(result != NULL); } // endif (NOT vegInputs.isEmpty()) else // have no values { // It is ok for an MDAM key pred to not have valuesToBeBound because // this is how ItemExpr::replaceVEGExpressions guarantees the generation of // key predicates. It expects a NULL pointer sometimes if (NOT thisIsAnMdamKeyPredicate) { // If there is a VEGReference to the value then a member of // the VEG should be available. GenAssert(NOT valuesToBeBound.isEmpty(),"valuesToBeBound.isEmpty()"); } } // result can be NULL only if thisIsAnMdamKeyPredicate is TRUE (see note above) if (NOT thisIsAnMdamKeyPredicate) { CMPASSERT(result); } if (VEG_DEBUG) { // coverity cid 10004 thinks result may be null but we know it is not. // coverity[var_deref_model] cout << "Result: " << result->getValueId() << endl; } // see if NAType has changed, if so need to rebind it if (result && NOT (result->getValueId().getType() == getValueId().getType()) ) { result->setpreCodeGenNATypeChangeStatus(); } return result; } // VEGReference::replaceVEGReference() // ----------------------------------------------------------------------- // RelExpr::getOutputValuesOfMyChilren() // Accumulates the characteristic outputs of all my children for // operators that have one or more children. Returns the // potential output values for operators that can have no children. 
// ----------------------------------------------------------------------- void RelExpr::getOutputValuesOfMyChildren(ValueIdSet & vs) const { ValueIdSet valueMask; Lng32 nc = getArity(); if (nc > 0) { for (Lng32 i = 0; i < nc; i++) { valueMask += child(i)->getGroupAttr()->getCharacteristicOutputs(); } } else // if leaf operators, use all available values { getPotentialOutputValues(valueMask); } // Copy values in the set and expand wild cards in the copy. vs.clear(); vs.replaceVEGExpressionsAndCopy(valueMask); } // RelExpr::getOutputValuesOfMyChildren() // ----------------------------------------------------------------------- // RelExpr::getInputValuesFromParentAndChildren() // Uses getOutputValuesOfMyChildren() to collect the output values // and adds the characteristic input values of this operator to them. // ----------------------------------------------------------------------- void RelExpr::getInputValuesFromParentAndChildren(ValueIdSet & vs) const { getOutputValuesOfMyChildren(vs); vs += getGroupAttr()->getCharacteristicInputs(); } // RelExpr::getInputValuesFromParentAndChildren() // ----------------------------------------------------------------------- // RelExpr::getInputAndPotentialOutputValues() // Uses getPotentialOutputs() to collect the output values // and adds the characteristic input values of this operator to them. // ----------------------------------------------------------------------- void RelExpr::getInputAndPotentialOutputValues(ValueIdSet & vs) const { ValueIdSet potentialOutputValues; getPotentialOutputValues(potentialOutputValues); potentialOutputValues += getGroupAttr()->getCharacteristicInputs(); vs.clear(); vs.replaceVEGExpressionsAndCopy(potentialOutputValues); } // RelExpr::getInputAndPotentialOutputValues() // ----------------------------------------------------------------------- // GenericUpdate::replaceVEGExpressionsAndGet... // ----------------------------------------------------------------------- void GenericUpdate::getInputValuesFromParentAndChildren(ValueIdSet & vs) const { ValueIdSet updTableCols; ValueIdSet vs2; updTableCols.insertList (getIndexDesc()->getIndexColumns()); // updTableCols.insertList(getTableDesc()->getColumnVEGList()); vs2.replaceVEGExpressionsAndCopy(updTableCols); getOutputValuesOfMyChildren(vs); vs += getGroupAttr()->getCharacteristicInputs(); vs += vs2; } // GenericUpdate::getInputValuesFromParentAndChildren() // ----------------------------------------------------------------------- // HbaseDelete::replaceVEGExpressionsAndGet... // ----------------------------------------------------------------------- void HbaseDelete::getInputValuesFromParentAndChildren(ValueIdSet & vs) const { // Do not include IndexColumn as the input values. Otherwise, we will // have duplicated predicates in Executor predicate in HbaseDelete. getOutputValuesOfMyChildren(vs); vs += getGroupAttr()->getCharacteristicInputs(); } // HbaseDelete::getInputValuesFromParentAndChildren() // ----------------------------------------------------------------------- // RelExpr::preCodeGen() // // RelExpr * result // OUT: a node that calls preCodeGen for its child should replace // that child with the result value. This allows preCodeGen // to transform the RelExpr tree. Examples for such trans- // formations are additional exchange nodes for repartitioning. 
// Generator * generator
//    INOUT: a global work area with useful helper methods
// const ValueIdSet & externalInputs
//    IN:    a value id set with values that already have been
//           replaced such that they don't contain VEGies any more.
//           Use this set to replace VEGies for expressions that depend
//           on the characteristic inputs of the node.
// ValueIdSet & pulledNewInputs
//    OUT:   a set of value ids that the node wants to add to its
//           characteristic inputs ("pull" from its parent). There are
//           several cases in which we need to add value ids to
//           characteristic inputs during preCodeGen:
//           a) partition input variables for parallel execution,
//           b) the COMMON datetime function which needs to be generated
//              by the root node,
//           c) an "open cursor timestamp" that helps a materialize node
//              to decide whether it can reuse its materialized table.
// -----------------------------------------------------------------------
RelExpr * RelExpr::preCodeGen(Generator * generator,
                              const ValueIdSet & externalInputs,
                              ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  // Check if the pivs of this operator and its child are the same.
  // If they are not, make them the same.
  replacePivs();

  // Resolve the VEGReferences and VEGPredicates, if any, that appear
  // in the Characteristic Inputs, in terms of the externalInputs.
  getGroupAttr()->resolveCharacteristicInputs(externalInputs);

  // My Characteristic Inputs become the external inputs for my children.
  Int32 nc = getArity();
  for (Int32 index = 0; index < nc; index++)
    {
      ValueIdSet childPulledInputs;

      child(index) = child(index)->preCodeGen(generator,
                                              externalInputs,
                                              childPulledInputs);
      if (! child(index).getPtr())
        return NULL;

      // process additional input value ids the child wants
      getGroupAttr()->addCharacteristicInputs(childPulledInputs);
      pulledNewInputs += childPulledInputs;
    }

  // The VEG expressions in the selection predicates and the characteristic
  // outputs can reference any expression that is either a potential output
  // or a characteristic input for this RelExpr. Supply these values for
  // rewriting the VEG expressions.
  ValueIdSet availableValues;
  getInputAndPotentialOutputValues(availableValues);

  // Rewrite the selection predicates.
  NABoolean replicatePredicates = TRUE;
  selectionPred().replaceVEGExpressions
    (availableValues,
     getGroupAttr()->getCharacteristicInputs(),
     FALSE, // no need to generate key predicates here
     0 /* no need for idempotence here */,
     replicatePredicates
    );

  getGroupAttr()->resolveCharacteristicOutputs
    (availableValues,
     getGroupAttr()->getCharacteristicInputs());

  markAsPreCodeGenned();

  return this;
} // RelExpr::preCodeGen

//
// Recursively call the method on each RelExpr node, accumulating
// the # of rows from each node.
//
void RelExpr::prepareDopReduction(Generator* generator)
{
  pcgEspFragment* currentEspFragmentPCG =
    generator->getCurrentEspFragmentPCG();

  if ( currentEspFragmentPCG )
    currentEspFragmentPCG->accumulateRows(getEstRowsUsed());

  Int32 nc = getArity();
  for (Lng32 i = 0; i < nc; i++)
    {
      child(i)->prepareDopReduction(generator);
    }
}

void Exchange::prepareDopReduction(Generator* generator)
{
  pcgEspFragment* parentEspFragPCG = generator->getCurrentEspFragmentPCG();

  //
  // Save the current pcg fragment and add myself as the child to it.
  //
  if ( parentEspFragPCG )
    {
      parentEspFragPCG->accumulateRows(getEstRowsUsed());
      parentEspFragPCG->addChild(this);
    }

  //
  // Let the global pointer point at my pcg esp fragment (for the
  // fragment rooted at me). Do this only for above-DP2 Exchanges.
// Note a PA is represented by an Exchange with "execute in Master or ESP" // as location. So a PA exchange with a SCAN as a child will have an empty // childPcgEsp array. // generator->setCurrentEspFragmentPCG ( (getPhysicalProperty()->getPlanExecutionLocation() != EXECUTE_IN_DP2) ? getEspFragPCG() : NULL ); child(0)->prepareDopReduction(generator); // // Restore the pcg esp fragment // generator->setCurrentEspFragmentPCG(parentEspFragPCG); // Try to reduce the dop and if it fails, invalidate any proposed // dop reductions for this. // if ( parentEspFragPCG && parentEspFragPCG ->tryToReduceDoP() == FALSE ) parentEspFragPCG->invalidate(); } RelExpr * RelRoot::preCodeGen(Generator * generator, const ValueIdSet & /* externalInputs */, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // For all the inputVars, if it is with UNKNOWN data type, make it a // varchar type. This is from SQL/MP extension. Example query // select ?p1 from any-table; if (isTrueRoot()) { CollIndex i; ValueId vid; ValueIdList vidList = inputVars(); for ( i=0; i < vidList.entries(); i++ ) if ((vid=vidList[i]).getType().getTypeQualifier() == NA_UNKNOWN_TYPE) { vid.coerceType(NA_CHARACTER_TYPE); } } // if root has GET_N indication set, insert a FirstN node. // Usually this transformation is done in the binder, but in // some special cases it is not. // For example, if there is an 'order by' in the query, then // the Sort node is added by the optimizer. In this case, we // want to add the FirstN node on top of the Sort node and not // below it. If we add the FirstN node in the binder, the optimizer // will add the Sort node on top of the FirstN node. Maybe we // can teach optimizer to do this. if ((getFirstNRows() != -1) || (getFirstNRowsParam())) { RelExpr * firstn = new(generator->wHeap()) FirstN(child(0), getFirstNRows(), getFirstNRowsParam()); // move my child's attributes to the firstN node. // Estimated rows will be mine. firstn->setEstRowsUsed(getEstRowsUsed()); firstn->setMaxCardEst(getMaxCardEst()); firstn->setInputCardinality(child(0)->getInputCardinality()); firstn->setPhysicalProperty(child(0)->getPhysicalProperty()); firstn->setGroupAttr(child(0)->getGroupAttr()); //10-060516-6532 -Begin //When FIRSTN node is created after optimization phase, the cost //of that node does not matter.But, display_explain and explain //show zero operator costs and rollup cost which confuses the user. //Also, the VQP crashes when cost tab for FIRSTN node is selected. //So, creating a cost object will fix this. //The operator cost is zero and rollup cost is same as it childs. Cost* firstnNodecost = new HEAP Cost(); firstn->setOperatorCost(firstnNodecost); Cost* rollupcost = (Cost *)(child(0)->getRollUpCost()); *rollupcost += *firstnNodecost; firstn->setRollUpCost(rollupcost); //10-060516-6532 -End setChild(0, firstn); // reset firstN indication in the root node. setFirstNRows(-1); setFirstNRowsParam(NULL); } if (isTrueRoot()) { // Set the internal format to use for the plan being generated ... // Checks the CQD COMPRESSED_INTERNAL_FORMAT to decide whether to use // SQLARK_EXPLODED_FORMAT or SQLMX_ALIGNED_FORMAT as the internal // data format // When the CIF CQD is set to SYSTEM we decide whether to use aligned or exploded format // as the tuple format for the whole query. In precodeGEn we visit all the copy // operators (Hash join, hash group by, exchange and sort) in a query // tree and keep a count of the nodes that are in favor of aligned format and those // that are in favor of exploded format. 
// The final decision about the tuple format for the whole query will depend
// on those two numbers: if the number of nodes in favor of aligned format is
// greater than the number in favor of exploded format, then aligned format
// is selected; otherwise exploded format is selected.
// The function that determines the format for each of the copy operators
// (plus the RelRoot) is determineInternalFormat(..); it is called in the
// preCodeGen of the copy operators.
generator->initNNodes();
isCIFOn_ = FALSE;

if ((CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_ON ) ||
    generator->isFastExtract())
  {
    isCIFOn_ = TRUE;
    generator->setCompressedInternalFormat();
  }
else if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_OFF )
  {
    generator->setExplodedInternalFormat();
  }
else
  {
    NABoolean resize = FALSE;
    NABoolean considerBufferDefrag = FALSE;
    ValueIdSet vidSet = child(0)->getGroupAttr()->getCharacteristicOutputs();
    ExpTupleDesc::TupleDataFormat tupleFormat =
      generator->determineInternalFormat(vidSet,
                                         this,
                                         resize,
                                         RelExpr::CIF_SYSTEM,
                                         FALSE,
                                         considerBufferDefrag);
    if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)
      {
        generator->incNCIFNodes();
      }
    else
      {
        generator->decNCIFNodes();
      }
  }

//generator->setInternalFormat();

// Some operators will revert the internal format back to exploded format
// when they are directly under the root node - such as the top level ESPs,
// Sort, and HJ operators.
// This is so there is no bottleneck in the master flipping the data back
// to exploded format (required for bulk move out).
child(0)->setParentIsRoot( TRUE );

// create a list of NATypes corresponding to each entry in the
// userColumnList_ in RETDesc. Used by generator to convert to
// this type during output expr code gen.
// The value ids in userColumnList_ cannot be used as the type
// corresponding to that value id may change due to VEG transformation
// in the preCodeGen phase.
if (getRETDesc()->createNATypeForUserColumnList(CmpCommon::statementHeap()))
  {
    // error case.
    GenAssert(FALSE, "error from createNATypeForUserColumnList.");
  }

if ( (child(0)->getOperatorType() == REL_EXCHANGE) &&
     (child(0)->child(0)->getOperatorType() == REL_COMPOUND_STMT) )
  {
    ((Exchange *)((RelExpr *)child(0)))->setDP2TransactionIndicator( TRUE );
  }
}

unsigned short prevNumBMOs = 0;
CostScalar prevBMOsMemoryUsage;

if (isTrueRoot())
  {
    if (oltOptInfo().oltAnyOpt())
      {
        if (treeContainsEspExchange())
          {
            // turn off oltQueryOptimization if the query plan contains an
            // esp_exchange.
            // 10-070316-3325: childOperType_ = REL_UNARY_DELETE
            // 10-080118-9942: select query contains esp_exchange that is
            // not directly under root.
            oltOptInfo().setOltOpt(FALSE);
          }
        else if (childOperType() == REL_SCAN)
          {
            // if this was a scan query to start with but is no longer
            // a scan query (which means it got transformed to join, etc.),
            // then turn off oltQueryOptimization.
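            // A minimal sketch of that check as a hypothetical standalone
            // helper (isScanShapedPlan is not part of this file; it only
            // restates the walk that the code below performs inline):
#if 0
            static NABoolean isScanShapedPlan(RelExpr *childExpr)
            {
              // Step past a FirstN node, if present.
              if (childExpr->getOperatorType() == REL_FIRST_N)
                childExpr = childExpr->child(0)->castToRelExpr();

              // The plan still counts as a "scan query" only if what
              // remains is an Exchange or a direct HBase access.
              return (childExpr->getOperatorType() == REL_EXCHANGE) ||
                     (childExpr->getOperatorType() == REL_HBASE_ACCESS);
            }
#endif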
RelExpr *childExpr = child(0)->castToRelExpr(); if (childExpr->getOperatorType() == REL_FIRST_N) childExpr = childExpr->child(0)->castToRelExpr(); if ((childExpr->getOperatorType() != REL_EXCHANGE) && (childExpr->getOperatorType() != REL_HBASE_ACCESS)) oltOptInfo().setOltCliOpt(FALSE); } } // oltAnyOpt *generator->oltOptInfo() = oltOptInfo(); if (generator->oltOptInfo()->oltAnyOpt()) { // Also, PubSub streams' STREAM_TIMEOUT not handled by opt'd root if (getGroupAttr()->isStream()) { generator->oltOptInfo()->setOltCliOpt(FALSE); } if ((CmpCommon::getDefault(EID_SPACE_USAGE_OPT) == DF_ON) && (NOT generator->downrevCompileNeeded())) { generator->setDoEidSpaceUsageOpt(TRUE); } else { generator->setDoEidSpaceUsageOpt(FALSE); } // olt opt not chosen if ALL stats are being collected. // We may support this case later. // In case of operator stats, don't disable OLT optimization // But, when the query is OLT optimized, switch it to pertable stats if ((generator->computeStats()) && ((generator->collectStatsType() == ComTdb::ALL_STATS))) generator->oltOptInfo()->setOltOpt(FALSE); if (CmpCommon::getDefault(OLT_QUERY_OPT) == DF_OFF) generator->oltOptInfo()->setOltOpt(FALSE); // In the case of an embedded insert, // do not execute the query OLT optimized. if (getGroupAttr()->isEmbeddedInsert()) generator->oltOptInfo()->setOltMsgOpt(FALSE); #ifdef _DEBUG if (getenv("NO_OLT_QUERY_OPT")) generator->oltOptInfo()->setOltOpt(FALSE); #endif if (generator->oltOptInfo()->oltEidOpt()) { generator->oltOptInfo()->setOltEidLeanOpt(FALSE); if (generator->doEidSpaceUsageOpt()) { generator->oltOptInfo()->setOltEidLeanOpt(TRUE); } } if (generator->downrevCompileNeeded()) generator->oltOptInfo()->setOltEidLeanOpt(FALSE); if (CmpCommon::getDefault(OLT_QUERY_OPT_LEAN) == DF_OFF) generator->oltOptInfo()->setOltEidLeanOpt(FALSE); } // oltAnyOpt // mark exchange operator for maxOneRow optimization. 
RelExpr *childExpr = child(0)->castToRelExpr(); NABoolean doMaxOneRowOpt = TRUE; NABoolean doMaxOneInputRowOpt = FALSE; NABoolean firstN = FALSE; RelExpr *exchExpr = NULL; if (NOT generator->doEidSpaceUsageOpt()) { doMaxOneRowOpt = FALSE; doMaxOneInputRowOpt = FALSE; } else { doMaxOneRowOpt = TRUE; doMaxOneInputRowOpt = TRUE; } if (childExpr->getOperatorType() == REL_FIRST_N) { firstN = TRUE; if (((FirstN *)childExpr)->getFirstNRows() != 1) doMaxOneRowOpt = FALSE; childExpr = childExpr->child(0)->castToRelExpr(); } if ((childExpr->getOperatorType() != REL_EXCHANGE) || (childExpr->child(0)->castToRelExpr()-> getPhysicalProperty()->getPlanExecutionLocation() != EXECUTE_IN_DP2)) { doMaxOneRowOpt = FALSE; doMaxOneInputRowOpt = FALSE; } else { exchExpr = childExpr; childExpr = childExpr->child(0)->castToRelExpr(); if (NOT childExpr->getOperator().match(REL_FORCE_ANY_SCAN)) { doMaxOneInputRowOpt = FALSE; } else if (childExpr->getOperatorType() == REL_FILE_SCAN) { FileScan * s = (FileScan *)childExpr; if (NOT firstN) doMaxOneRowOpt = FALSE; if ((s->getGroupAttr()->isStream()) || (s->accessOptions().accessType() == SKIP_CONFLICT_)) { //doMaxOneInputRowOpt = FALSE; //doMaxOneRowOpt = FALSE; } } } if (doMaxOneRowOpt) { exchExpr->oltOptInfo().setMaxOneRowReturned(TRUE); } if (doMaxOneInputRowOpt) { exchExpr->oltOptInfo().setMaxOneInputRow(TRUE); } generator->setUpdErrorInternalOnError(FALSE); if (rollbackOnError()) generator->setUpdErrorOnError(FALSE); else generator->setUpdErrorOnError(TRUE); if (CmpCommon::getDefault(UPD_ABORT_ON_ERROR) == DF_ON) generator->setUpdAbortOnError(TRUE); else generator->setUpdAbortOnError(FALSE); if (CmpCommon::getDefault(UPD_PARTIAL_ON_ERROR) == DF_ON) generator->setUpdPartialOnError(TRUE); else generator->setUpdPartialOnError(FALSE); if (CmpCommon::getDefault(UPD_SAVEPOINT_ON_ERROR) == DF_ON) generator->setUpdSavepointOnError(TRUE); else generator->setUpdSavepointOnError(FALSE); generator->setSkipUnavailablePartition(FALSE); if ((childOperType() == REL_SCAN) && (CmpCommon::getDefault(SKIP_UNAVAILABLE_PARTITION) == DF_ON)) generator->setSkipUnavailablePartition(TRUE); if (avoidHalloween_) { // At beginning of preCodeGen, assume DP2Locks will be // used. The NestedJoin::preCodeGen will change this // if its left child is a sort. generator->setHalloweenProtection(Generator::DP2LOCKS); } if (generator->getBindWA()->getUdrStoiList().entries () > 0) generator->setAqrEnabled(FALSE); // Reset the accumulated # of BMOs and memory usages in // the generator prevNumBMOs = generator->replaceNumBMOs(0); prevBMOsMemoryUsage = generator->replaceBMOsMemoryUsage(0); } // true root // propagate the need to return top sorted N rows to all sort // nodes in the query. if (needFirstSortedRows() == TRUE) { needSortedNRows(TRUE); } // Delete any VEGReference that appear in the Characteristic Inputs. // The Characteristic Inputs of the root of the execution plan MUST // only contain external dataflow inputs that are provided by the // user. The VEGReferences may have been introduced as a side-effect // of predicate pushdown. They are redundant in the Characteristic // Inputs of the root. 
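// A compact restatement of the filtering loop below as a hypothetical
// helper (dropVEGReferences is not a real function here; the actual code
// iterates inline over the characteristic inputs):
#if 0
static void dropVEGReferences(GroupAttributes *ga)
{
  ValueIdSet keep;
  for (ValueId exprId = ga->getCharacteristicInputs().init();
       ga->getCharacteristicInputs().next(exprId);
       ga->getCharacteristicInputs().advance(exprId))
    {
      // Only user-supplied external inputs survive at the root.
      if (exprId.getItemExpr()->getOperatorType() != ITM_VEG_REFERENCE)
        keep += exprId;
    }
  ga->setCharacteristicInputs(keep);
}
#endif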
ValueIdSet availableValues; for (ValueId exprId = getGroupAttr()->getCharacteristicInputs().init(); getGroupAttr()->getCharacteristicInputs().next(exprId); getGroupAttr()->getCharacteristicInputs().advance(exprId) ) { if (exprId.getItemExpr()->getOperatorType() != ITM_VEG_REFERENCE) availableValues += exprId; } getGroupAttr()->setCharacteristicInputs(availableValues); // If this is the root for a parallel extract producer query then // there should be an Exchange node immediately below and we need to // set a flag in that Exchange. if (numExtractStreams_ > 0) { if (child(0)->getOperatorType() == REL_EXCHANGE) { Exchange *e = (Exchange *) child(0)->castToRelExpr(); e->setExtractProducerFlag(); } // fix for soln 10-090506-1407: parallel extract for a union distinct // can sometimes have root->mapvalueidsl->exchange. It should be OK. else if (child(0)->getOperatorType() == REL_MAP_VALUEIDS && child(0)->child(0)->getOperatorType() == REL_EXCHANGE) { Exchange *e = (Exchange *) child(0)->child(0)->castToRelExpr(); e->setExtractProducerFlag(); } } // // If there is no hard requirement on #ESPs, reduce the dop based on // the total # of rows processed per ESP. The reduction can modify // the number of partitions attribute of the partition function stored // in the synthesized physical property of an Exchange operator. // // CQD DOP_REDUCTION_ROWCOUNT_THRESHOLD set to 0.0 will disable the // feature. float threshold; ActiveSchemaDB()-> getDefaults().getFloat(DOP_REDUCTION_ROWCOUNT_THRESHOLD, threshold); if ( threshold > 0.0 && CURRSTMT_OPTDEFAULTS->getRequiredESPs() <= 0 ) { generator->setCurrentEspFragmentPCG(NULL); // reset the 'global' // to the current esp frag. RelExpr::prepareDopReduction(generator); RelExpr::doDopReduction(); } // Now walk through the execution plan and initialize it for code generation. child(0) = child(0)->preCodeGen(generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); if (! child(0).getPtr()) return NULL; if (! RelExpr::preCodeGen( generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs)) return NULL; if ( isTrueRoot() && CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM) { if (generator->getNCIFNodes()>0) { isCIFOn_ = TRUE; generator->setCompressedInternalFormat(); } else { generator->setExplodedInternalFormat(); isCIFOn_ = FALSE; } } // If the RelRoot is marked as a parallel extract producer then the // root's child must be an Exchange and the child must also be // marked for parallel extract. Even though we checked the type of // the child a few lines above, we do it again here because the call // to RelExpr::preCodeGen can potentially eliminate Exchange nodes. NABoolean extractPlanLooksOK = TRUE; if (numExtractStreams_ > 0) { if (child(0)->getOperatorType() == REL_EXCHANGE) { Exchange *e = (Exchange *) child(0)->castToRelExpr(); if (!e->getExtractProducerFlag()) extractPlanLooksOK = FALSE; } // fix for soln 10-090506-1407: parallel extract for a union distinct // can sometimes have root->mapvalueidsl->exchange. It should be OK. 
else if (child(0)->getOperatorType() == REL_MAP_VALUEIDS && child(0)->child(0)->getOperatorType() == REL_EXCHANGE) { Exchange *e = (Exchange *) child(0)->child(0)->castToRelExpr(); if (!e->getExtractProducerFlag()) extractPlanLooksOK = FALSE; } else { extractPlanLooksOK = FALSE; } if (!extractPlanLooksOK) { *CmpCommon::diags() << DgSqlCode(-7004); GenExit(); return NULL; } } // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. getInputValuesFromParentAndChildren(availableValues); // Rebuild the computable expressions using a bridge value, if possible compExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); // Rebuild the required order reqdOrder().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); // Rebuild the pkey list pkeyList().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); // add internally generated inputs to the input vars and make sure that // the root isn't left with "pulled" input values that aren't "internal" // inputs (the assert will most likely fire for leftover partition input // variables) inputVars().insertSet(generator->getInternalInputs()); pulledNewInputs -= (ValueIdSet) inputVars(); GenAssert(pulledNewInputs.isEmpty(),"root can't produce these values"); // propagate the need to return top sorted N rows to all sort // nodes in the query. if (needFirstSortedRows() == TRUE) { needSortedNRows(TRUE); } // Do not rollback on error for INTERNAL REFRESH commands. if (isRootOfInternalRefresh()) { generator->setUpdErrorInternalOnError(TRUE); generator->setUpdAbortOnError(FALSE); generator->setUpdPartialOnError(FALSE); generator->setUpdSavepointOnError(FALSE); } // do not abort transaction for internal compiles, even if abort // is needed for this statement. // Catman depends on no abort for individual IUD stmts. // It aborts the transaction when it gets an error from cli. if ( ( CmpCommon::context()->internalCompile() == CmpContext::INTERNAL_MODULENAME ) || ( CmpCommon::statement()->isSMDRecompile() ) ) { generator->setUpdErrorInternalOnError(TRUE); generator->setUpdAbortOnError(FALSE); generator->setUpdPartialOnError(FALSE); generator->setUpdSavepointOnError(FALSE); } oltOptInfo().setOltCliOpt(generator->oltOptInfo()->oltCliOpt()); if ((isTrueRoot()) && (CmpCommon::getDefault(LAST0_MODE) == DF_ON) && (child(0))) { OperatorTypeEnum op = child(0)->getOperatorType(); if (op != REL_DESCRIBE && op != REL_EXPLAIN && op != REL_DDL && op != REL_LOCK && op != REL_UNLOCK && op != REL_SET_TIMEOUT && op != REL_STATISTICS && op != REL_TRANSACTION && op != REL_EXE_UTIL) { // do not return any rows at runtime. // Setting of -2 tells executor to simulate [last 0] // without having to put [last 0] in the query. setFirstNRows(-2); } } if (isTrueRoot()) { // if warnings 6008 or 6011 were raised, set missingStats indication. if (CmpCommon::diags()->containsWarning(SINGLE_COLUMN_STATS_NEEDED) || CmpCommon::diags()->containsWarning(SINGLE_COLUMN_STATS_NEEDED_AUTO)) { generator->compilerStatsInfo().setMissingStats(TRUE); } // change the following number(16) to whatever is considered 'large'. //#define LARGE_NUMBER_OF_JOINS 16 //if (generator->compilerStatsInfo().totalJoins() > LARGE_NUMBER_OF_JOINS) //generator->compilerStatsInfo().setLargeNumOfJoins(TRUE); // set mandatoryXP indication in generator. 
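// The BMO memory-quota arithmetic performed a few lines below can be
// summarized with a toy calculation (the numbers and the helper are made
// up for illustration; the real code works on CQD-derived values inline):
//   m  = EXE_MEMORY_LIMIT_PER_CPU, say 1200 MB scaled to bytes;
//   m1 = memory used by all nBMOs, say 500 MB;
//   m2 = m - m1 = 700 MB remains for BMOs, except that the nBMO share is
//   capped at ratio * m, and a negative m2 is clamped to 0 so each BMO
//   falls back to its lower-bound quota.
#if 0
static double bmoMemoryLimit(double m, double m1, double ratio)
{
  double m2 = m - m1;
  if (m2 < 0)
    m2 = 0;                    // lower-bound quotas kick in per BMO
  else if (m1 > m * ratio)
    m2 = m * (1 - ratio);      // cap the nBMO share at ratio * m
  return m2;
}
#endif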
if (hasMandatoryXP()) generator->compilerStatsInfo().setMandatoryCrossProduct(TRUE); // Remember # of BMOs that children's preCodeGen found for my fragment. setNumBMOs( generator->replaceNumBMOs(prevNumBMOs) ); setBMOsMemoryUsage( generator->replaceBMOsMemoryUsage(prevBMOsMemoryUsage) ); // Compute the total available memory quota for BMOs NADefaults &defs = ActiveSchemaDB()->getDefaults(); // total per CPU double m = defs.getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) * (1024*1024); // total memory usage for all nBMOs double m1 = (generator->getTotalNBMOsMemoryPerCPU()).value(); // total memory limit for all BMOs double m2 = m-m1; double ratio = defs.getAsDouble(EXE_MEMORY_LIMIT_NONBMOS_PERCENT) / 100; if ( m2 < 0 ) { // EXE_MEMORY_LIMIT_PER_CPU is set too small, set the total // memory limit for BMOs to zero. When the memory quota for // each BMO is computed (via method RelExpr::computeMemoryQuota()), // the lower-bound for each BMO will kick in and each will receive // a quota equal to the lower-bound value. m2 = 0; } else { // nBMOs use more memory than the portion, adjust m2 to // that of (1-ratio)*m if (m1 > m*ratio ) m2 = m*(1-ratio); } generator->setBMOsMemoryLimitPerCPU(m2); } if (isTrueRoot()) { if (generator->isAqrWnrInsert()) { ExeUtilWnrInsert * wi = new(generator->getBindWA()->wHeap()) ExeUtilWnrInsert(generator->utilInsertTable(), child(0)->castToRelExpr()); child(0)->markAsBound(); wi->bindNode(generator->getBindWA()); if (generator->getBindWA()->errStatus()) return NULL; // Use the same characteristic inputs and outputs as my child wi->setGroupAttr(new(generator->wHeap()) GroupAttributes(*(child(0)->getGroupAttr()))); //pass along some of the estimates wi->setEstRowsUsed(child(0)->getEstRowsUsed()); wi->setMaxCardEst(child(0)->getMaxCardEst()); wi->setInputCardinality(child(0)->getInputCardinality()); wi->setPhysicalProperty(child(0)->getPhysicalProperty()); wi->setOperatorCost(0); wi->setRollUpCost(child(0)->getRollUpCost()); if (! 
wi->preCodeGen(generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs)) return NULL; child(0) = wi; } } // if blob values are being selected out, retrieve them and return them either in file // or as a stream if (isTrueRoot()) { RETDesc * rd = getRETDesc(); const ColumnDescList * cdl = rd->getColumnList(); for (CollIndex i = 0; i < compExpr().entries(); i++) { ValueId val_id = compExpr()[i]; ItemExpr * expr = val_id.getItemExpr(); if ((val_id.getType().isLob()) && ((expr->getOperatorType() == ITM_BASECOLUMN) || (expr->getOperatorType() == ITM_INDEXCOLUMN))) { LOBconvertHandle * lc = new(generator->wHeap()) LOBconvertHandle(val_id.getItemExpr(), LOBoper::STRING_); lc->bindNode(generator->getBindWA()); lc->preCodeGen(generator); compExpr().removeAt(i); compExpr().insertAt(i, lc->getValueId()); ColumnDesc *cd = (*cdl)[i]; NAColumn * col = cd->getValueId().getNAColumn(TRUE); lc->lobNum() = col->lobNum(); lc->lobStorageType() = col->lobStorageType(); lc->lobStorageLocation() = col->lobStorageLocation(); cd->setValueId(lc->getValueId()); rd->changeNATypeForUserColumnList(i, &lc->getValueId().getType()); } } // for if (getPredExprTree()) { getPredExprTree()->preCodeGen(generator); } } // isTrueRoot setHdfsAccess(generator->hdfsAccess()); markAsPreCodeGenned(); #ifdef _DEBUG if(getenv("SHOW_PLAN")) { NAString plan; unparse(plan); printf("PLAN: %s\n",convertNAString(plan,generator->wHeap())); } #endif return this; } // RelRoot::preCodeGen RelExpr * Join::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Check if the pivs of this operator and it's child are the same. // If they are not, make them the same. replacePivs(); // In the case of an embedded insert, // and there is a selection predicate, // we need to retrieve the stored available outputs // from the GenericUpdate group attr. ValueIdSet availableGUOutputs; // clear any prefix sort key generator->clearPrefixSortKey(); if (getGroupAttr()->isEmbeddedInsert() && !selectionPred().isEmpty() && getArity() > 1) { if (child(1)->getArity() > 0) child(1)->child(0)->getInputAndPotentialOutputValues(availableGUOutputs); } NABoolean isALeftJoin = (getOperator().match(REL_ANY_LEFT_JOIN)); NABoolean isARightJoin = (getOperator().match(REL_ANY_RIGHT_JOIN)); ValueIdSet availableValues; ValueIdSet childPulledInputs; if (isALeftJoin) { ValueId instNullId, exprId, vid; // Prune the nullInstatiatedOutputs list.Retain only those values // that are either: // 1) The external dataflow inputs to the Join. // 2) The Characteristic Outputs of the Join. // 3) The Characteristic Outputs of the first child of the Join. // 4) Values required for evaluating the selection expression // on the Join. // Discard all other values. 
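// The retain/discard test used by the pruning loop below, restated as a
// hypothetical helper (keepInstantiateNull is not a real function in this
// file; it only mirrors the inline conditions):
#if 0
static NABoolean keepInstantiateNull(const ValueId &instNullId,
                                     const ValueIdSet &availableValues,
                                     const GroupAttributes *ga,
                                     const ValueIdSet &selPreds)
{
  ValueId dummy;
  // Operand of the InstantiateNull node.
  ValueId exprId = ((InstantiateNull *)(instNullId.getItemExpr()))
                       ->getExpr()->getValueId();
  // Keep the entry if the operand is available (inputs plus the left
  // child's outputs), or if the Join's outputs or selection predicates
  // still reference the InstantiateNull itself.
  return availableValues.contains(exprId) ||
         ga->getCharacteristicOutputs()
             .referencesTheGivenValue(instNullId, dummy) ||
         selPreds.referencesTheGivenValue(instNullId, dummy);
}
#endif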
availableValues = getGroupAttr()->getCharacteristicInputs(); availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs(); ValueIdSet discardSet; CollIndex ne = nullInstantiatedOutput().entries(); for (CollIndex j = 0; j < ne; j++) // NT_PORT FIX SK 07/16/96 { instNullId = nullInstantiatedOutput_[j]; GenAssert(instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL,"NOT instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL"); // Access the operand of the InstantiateNull exprId = (((InstantiateNull *)(instNullId.getItemExpr())) ->getExpr()->getValueId()); if ( (NOT availableValues.contains(exprId)) AND (NOT getGroupAttr()->getCharacteristicOutputs() .referencesTheGivenValue(instNullId, vid)) AND (NOT selectionPred().referencesTheGivenValue(instNullId, vid)) ) { discardSet += nullInstantiatedOutput_[j]; } } // Delete all those elements that do not require null instantiation. for (exprId = discardSet.init(); discardSet.next(exprId); discardSet.advance(exprId)) { nullInstantiatedOutput_.remove(exprId); } } // endif (getOperator().match(REL_ANY_LEFT_JOIN)) else // Null Instantiation will not be necessary. nullInstantiatedOutput().clear(); // clear in case a LJ was transformed to an IJ if (isARightJoin) { ValueId instNullIdForRightJoin, exprIdForRightJoin, vidForRightJoin; ValueIdSet discardSetForRightJoin; // Prune the nullInstatiatedOutputs list.Retain only those values // that are either: // 1) The external dataflow inputs to the Join. // 2) The Characteristic Outputs of the Join. // 3) The Characteristic Outputs of the second child of the Join. // 4) Values required for evaluating the selection expression // on the Join. // Discard all other values. availableValues = getGroupAttr()->getCharacteristicInputs(); availableValues += child(1)->getGroupAttr()->getCharacteristicOutputs(); CollIndex neR = nullInstantiatedForRightJoinOutput().entries(); for (CollIndex j = 0; j < neR; j++) // NT_PORT FIX SK 07/16/96 { instNullIdForRightJoin = nullInstantiatedForRightJoinOutput_[j]; GenAssert(instNullIdForRightJoin.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL,"NOT instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL"); // Access the operand of the InstantiateNull exprIdForRightJoin = (((InstantiateNull *)(instNullIdForRightJoin.getItemExpr())) ->getExpr()->getValueId()); if ( (NOT availableValues.contains(exprIdForRightJoin)) AND (NOT getGroupAttr()->getCharacteristicOutputs() .referencesTheGivenValue(instNullIdForRightJoin, vidForRightJoin)) AND (NOT selectionPred().referencesTheGivenValue(instNullIdForRightJoin, vidForRightJoin)) ) { discardSetForRightJoin += nullInstantiatedForRightJoinOutput_[j]; } } // Delete all those elements that do not require null instantiation. for (exprIdForRightJoin = discardSetForRightJoin.init(); discardSetForRightJoin.next(exprIdForRightJoin); discardSetForRightJoin.advance(exprIdForRightJoin)) { nullInstantiatedForRightJoinOutput_.remove(exprIdForRightJoin); } } // endif (getOperator().match(REL_ANY_RIGHT_JOIN)) else // Null Instantiation will not be necessary. nullInstantiatedForRightJoinOutput().clear(); // clear in case a LJ was transformed to an IJ // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. 
getGroupAttr()->resolveCharacteristicInputs(externalInputs); availableValues = getGroupAttr()->getCharacteristicInputs(); bool precodeHalloweenLHSofTSJ = false; bool savePrecodeHalloweenLHSofTSJ = false; if ((getHalloweenForceSort() != NO_SELF_REFERENCE) && (generator->getR251HalloweenPrecode())) { savePrecodeHalloweenLHSofTSJ = generator->setPrecodeHalloweenLHSofTSJ(true); precodeHalloweenLHSofTSJ = true; if (getHalloweenForceSort() == FORCED) generator->setHalloweenSortForced(); } NABoolean savedOltMsgOpt = generator->oltOptInfo()->oltMsgOpt(); // My Characteristic Inputs become the external inputs for my left child. child(0) = child(0)->preCodeGen(generator,availableValues,childPulledInputs); if (! child(0).getPtr()) return NULL; // For HashJoin Min/Max optimization if (isHashJoin()) { HashJoin *hj = (HashJoin *)this; for(CollIndex i = hj->getStartMinMaxIndex(); i < hj->getEndMinMaxIndex(); i++) { // A scan may have decided to use the min/max values that // belongs to this join, remove them from the // childPulledInputs. We do not need to pull them from the // parent as this Hash Join will generate them. if(generator->getWillUseMinMaxKeys()[i] != NULL_VALUE_ID) { childPulledInputs -= generator->getMinVals()[i]; childPulledInputs -= generator->getMaxVals()[i]; } // Clear the candidate values generated by this HashJoin, We // are done with the left child, so no one else can use // these values. generator->getMinMaxKeys()[i] = NULL_VALUE_ID; generator->getMinVals()[i] = NULL_VALUE_ID; generator->getMaxVals()[i] = NULL_VALUE_ID; } } if (precodeHalloweenLHSofTSJ) { generator->setPrecodeHalloweenLHSofTSJ(savePrecodeHalloweenLHSofTSJ); if (generator->getUnblockedHalloweenScans() == 0) { // Turn off DP2_LOCKS for codeGen, using either the FORCED_SORT // or PASSIVE values. if (getHalloweenForceSort() == FORCED) { generator->setHalloweenProtection(Generator::FORCED_SORT); } else generator->setHalloweenProtection(Generator::PASSIVE); } else if (updateSelectValueIdMap() && updateTableDesc() && (NOT updateTableDesc()->getNATable()->getClusteringIndex()->hasSyskey())) { // if the key columns of the table being inserted into are // equal to constants or inputs then no sort is required // to enforce Halloween blocking. Example statements are // update tt set a = 1 ;(a is the primary key for table tt) // insert into tt select * from tt where a = 1 ; ValueIdList reqdOrder ; updateSelectValueIdMap()->rewriteValueIdListDown( updateTableDesc()->getClusteringIndex()->getOrderOfKeyValues(), reqdOrder); reqdOrder.removeCoveredExprs( getGroupAttr()->getCharacteristicInputs()); if (reqdOrder.isEmpty()) { generator->setHalloweenProtection(Generator::PASSIVE); } } } NABoolean leftMultipleRowsReturned = generator->oltOptInfo()->multipleRowsReturned(); // if nested join and left child could return multiple rows, then // disable olt msg opt for the right child. This is done since // olt msg opt can only handle input and output of max 1 row. 
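// Referring back to the covered-key test above: for
//   update tt set a = 1;        -- a is tt's primary key
// the clustering key list [a] is rewritten through updateSelectValueIdMap()
// to the source-side value [1]; removeCoveredExprs() then strips constants
// and characteristic inputs, the list comes back empty, and no blocking
// sort is needed, so Halloween protection can be PASSIVE. A sketch of the
// same test (mirroring the calls above):
#if 0
ValueIdList reqdOrder;
updateSelectValueIdMap()->rewriteValueIdListDown(
     updateTableDesc()->getClusteringIndex()->getOrderOfKeyValues(),
     reqdOrder);
reqdOrder.removeCoveredExprs(getGroupAttr()->getCharacteristicInputs());
NABoolean sortNeeded = NOT reqdOrder.isEmpty();
#endif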
if ((getOperatorType() == REL_NESTED_JOIN) || (getOperatorType() == REL_LEFT_NESTED_JOIN) || (getOperatorType() == REL_NESTED_SEMIJOIN) || (getOperatorType() == REL_NESTED_ANTI_SEMIJOIN) || (getOperatorType() == REL_NESTED_JOIN_FLOW)) { if (generator->oltOptInfo()->multipleRowsReturned()) { generator->oltOptInfo()->setOltMsgOpt(FALSE); } } // process additional input value ids the child wants // (see RelExpr::preCodeGen()) getGroupAttr()->addCharacteristicInputs(childPulledInputs); pulledNewInputs += childPulledInputs; availableValues += childPulledInputs; childPulledInputs.clear(); // If this is a tuple substitution join that is implemented by the nested join // method, then the values produced as output by my left child can be used as // "external" inputs by my right child. NABoolean replicatePredicates = TRUE; ValueIdSet joinInputAndPotentialOutput; getInputAndPotentialOutputValues(joinInputAndPotentialOutput); if (isTSJ()) { availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs(); // For a TSJ the joinPred() is a predicate between the inputs // and the first child that could not be pushed down to the first // child because it is either a left join or an anti-semi-join // Rebuild the join predicate tree now joinPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no key predicates here 0 /* no need for idempotence here */, replicatePredicates, NULL /* not a groupByAgg */, &joinInputAndPotentialOutput ); } bool didSetRHS = false; bool saveSetRHS = false; if (generator->getPrecodeHalloweenLHSofTSJ() && isNestedJoin()) { saveSetRHS = generator->setPrecodeRHSofNJ(true); didSetRHS = true; } // Process the right child child(1) = child(1)->preCodeGen(generator,availableValues,childPulledInputs); if (! child(1).getPtr()) return NULL; if (didSetRHS) generator->setPrecodeRHSofNJ(saveSetRHS); NABoolean rightMultipleRowsReturned = generator->oltOptInfo()->multipleRowsReturned(); if (leftMultipleRowsReturned || rightMultipleRowsReturned) generator->oltOptInfo()->setMultipleRowsReturned(TRUE); // process additional input value ids the child wants // (see RelExpr::preCodeGen()) getGroupAttr()->addCharacteristicInputs(childPulledInputs); pulledNewInputs += childPulledInputs; // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. getInputValuesFromParentAndChildren(availableValues); // Rebuild the join predicate tree if (! isTSJ()) joinPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no key predicates here 0 /* no need for idempotence here */, replicatePredicates, NULL /* not a groupByAgg */, &joinInputAndPotentialOutput ); if (isALeftJoin) { // Replace the operands of the ITM_INSTANTIATE_NULL with values from // the Characteristic Outputs of the right child. // The following values are available for resolving the nullInstantiatedOuptut // 1) The external dataflow inputs to the Join. // 2) The Characteristic Outputs of the second (right) child of the Join. // 3) The Characteristic Outputs of the first(left)child of the Join. // Needed when nested_join plan is chosen. 
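// For intuition: in "t1 LEFT JOIN t2", a t2 column in the select list is
// wrapped in an InstantiateNull node so that unmatched t1 rows can produce
// a NULL for it. Resolving the operands here replaces the VEG references
// inside those wrappers with concrete right-child (and, for nested join
// plans, left-child) outputs. Conceptually (a sketch, with made-up names):
#if 0
// before: InstantiateNull( VEGRef(t1.a, t2.b) )
// after:  InstantiateNull( t2.b )  -- t2.b drawn from child(1)'s outputs
#endif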
availableValues = getGroupAttr()->getCharacteristicInputs(); availableValues += child(1)->getGroupAttr()->getCharacteristicOutputs(); availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs(); nullInstantiatedOutput_.replaceOperandsOfInstantiateNull (availableValues, getGroupAttr()->getCharacteristicInputs()); } if (isARightJoin) { // Replace the operands of the ITM_INSTANTIATE_NULL with values from // the Characteristic Outputs of the left child. // The following values are available for resolving the nullInstantiatedForRightJoinOutput // 1) The external dataflow inputs to the Join. // 2) The Characteristic Outputs of the first (left) child of the Join. availableValues = getGroupAttr()->getCharacteristicInputs(); availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs(); nullInstantiatedForRightJoinOutput_.replaceOperandsOfInstantiateNull (availableValues, getGroupAttr()->getCharacteristicInputs()); } // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. getInputAndPotentialOutputValues(availableValues); // If this is an embedded insert, with a selection predicate, // add in the characteristic outputs from the generic update RelExpr if (getGroupAttr()->isEmbeddedInsert() && !selectionPred().isEmpty()) { availableValues += availableGUOutputs; } // Rebuild the selection predicate tree. selectionPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicates here 0 /* no need for idempotence here */, replicatePredicates ); //New code was added to avoid the following situation: // // Query: select max(t1.a) from t1,t2 where t1.a = t2.b; // Plan: shortcut_groupby // | // esp_exchange // | // merge_join in parallel 4 ways on // | // | | // scan t2 scan T1 // // By the time we get to precodegen merge_join has orderby // on VEG(a,b) and characteristic output VEG(a,b) // because scan T2 get precode gen'd first it resolves its // orderby VEG(a,b) to t2.b this also changes orderby VEG // in merge_join and thereafter to T2.b. Now when merge join // resolves it characteristic output it resolves it to T1.a because // T1 is first in the from clause and T1.a has a smaller value id and // so the combined set of T1. and T2's characteristic output has T1.a // in front of T2.b. Now esp_exchange during code gen time expects // T2.b to be characteristic output of the child because it needs to // do merge of sorted streams of its orderby value which is T2.b. // this causes an assertion failure because merge_join produces T1.a. // Following code counters that by making sure that if the sort key is // part of the available values then characteristic output first gets // resolved by sortkey then by rest of the available values. // ValueIdSet sortKey = getPhysicalProperty()->getSortKey(); sortKey = sortKey.simplifyOrderExpr(); sortKey.intersectSet(availableValues); if(sortKey.entries()) { ValueIdSet reqOutput = getGroupAttr()->getCharacteristicOutputs(); ValueIdSet copyOfSet(reqOutput); ValueIdSet inputValues; ValueIdSet newExpr; ItemExpr * iePtr; // --------------------------------------------------------------------- // Iterate over the predicate factors in the given predicate tree. 
// --------------------------------------------------------------------- for (ValueId exprId = copyOfSet.init(); copyOfSet.next(exprId); copyOfSet.advance(exprId)) { // ----------------------------------------------------------------- // Walk through the item expression tree and replace any // VEGPredicates or VEGReferences that are found. // ----------------------------------------------------------------- iePtr = exprId.getItemExpr()->replaceVEGExpressions(availableValues, inputValues, FALSE, NULL, FALSE); if (iePtr) // expression was not discarded { iePtr->synthTypeAndValueId(TRUE); // redrive type synthesis if (iePtr != exprId.getItemExpr()) // a replacement was done { reqOutput.subtractElement(exprId); // remove existing ValueId reqOutput += iePtr->getValueId(); // replace with a new one } } } // loop over predTree getGroupAttr()->setCharacteristicOutputs(reqOutput); } // Rewrite the Characteristic Outputs. getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); // propagate the children olt settings in case of a pushed down to dp2 NLJ if ( !getPhysicalProperty()->executeInDP2() OR !(generator->getBindWA()->getTopRoot()->getInliningInfo()).isUsedForMvLogging() ) { generator->oltOptInfo()->setOltMsgOpt(savedOltMsgOpt); } // In the case of an embedded insert, // set the generator is embedded insert flag to TRUE. if (getGroupAttr()->isEmbeddedInsert()) generator->setEmbeddedInsert(TRUE) ; markAsPreCodeGenned(); // Done. return this; } // Join::preCodeGen() RelExpr * GenericUtilExpr::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; if (xnNeeded()) { generator->setUpdSavepointOnError(FALSE); generator->setUpdPartialOnError(FALSE); } markAsPreCodeGenned(); // Done. return this; } RelExpr * ExeUtilExpr::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! GenericUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; if (NOT aqrSupported()) generator->setAqrEnabled(FALSE); markAsPreCodeGenned(); // Done. return this; } RelExpr * DDLExpr::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! GenericUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; if (specialDDL()) { generator->setAqrEnabled(FALSE); } markAsPreCodeGenned(); // Done. return this; } RelExpr * NestedJoinFlow::preCodeGen(Generator * generator, const ValueIdSet &externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; /* child(0) = child(0)->preCodeGen( generator, externalInputs, pulledNewInputs); if (! 
child(0).getPtr()) return NULL; */ RelExpr * nj = NestedJoin::preCodeGen(generator, externalInputs, pulledNewInputs); if (nj == NULL) return NULL; return nj; } RelExpr * NestedJoin::preCodeGen(Generator * generator, const ValueIdSet &externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; NABoolean espExchangeWithMerge = FALSE; NABoolean childIsBlocking = FALSE; if ((getHalloweenForceSort() != NO_SELF_REFERENCE) && (!generator->getR251HalloweenPrecode())) { GenAssert(Generator::NOT_SELF_REF != generator->getHalloweenProtection(), "Inconsistency in Generator and NestedJoin."); // Look for either of two patterns on the left hand side: // sort or exchange+sort. if (child(0)->getOperatorType() == REL_SORT) childIsBlocking = TRUE; else if ((child(0)->getOperatorType() == REL_EXCHANGE) && (child(0)->child(0)->getOperatorType() == REL_SORT)) { childIsBlocking = TRUE; // The espExchangeWithMerge flag is used to conditionally // assert that the exchange will merge. The assertion // is deferred until after preCodeGen on the left subtree, // because the Exchange::doesMerge() method should not be // called until Exchange::preCodeGen is finished. espExchangeWithMerge = TRUE; } if (childIsBlocking) { if (getHalloweenForceSort() == FORCED) { if (espExchangeWithMerge) ((Sort *)(child(0)->child(0).getPtr()))-> markAsHalloweenProtection(); else ((Sort *)(child(0).getPtr()))->markAsHalloweenProtection(); generator->setHalloweenProtection(Generator::FORCED_SORT); } else generator->setHalloweenProtection(Generator::PASSIVE); } else if (updateSelectValueIdMap() && updateTableDesc() && (NOT updateTableDesc()->getNATable()->getClusteringIndex()->hasSyskey())) { // if the key columns of the table being inserted into are // equal to constants or inputs then no sort is required // to enforce Halloween blocking. 
Example statements are // update tt set a = 1 ;(a is the primary key for table tt) // insert into tt select * from tt where a = 1 ; ValueIdList reqdOrder ; updateSelectValueIdMap()->rewriteValueIdListDown( updateTableDesc()->getClusteringIndex()->getOrderOfKeyValues(), reqdOrder); reqdOrder.removeCoveredExprs( getGroupAttr()->getCharacteristicInputs()); if (reqdOrder.isEmpty()) { generator->setHalloweenProtection(Generator::PASSIVE); } } } // Insert a probe cache above the inner table if applicable if ( isProbeCacheApplicable( castToRelExpr()->getPhysicalProperty()->getPlanExecutionLocation() ) ) { ProbeCache *probeCache = new (generator->wHeap()) ProbeCache( child(1), getDefault(GEN_PROBE_CACHE_NUM_ENTRIES), generator->wHeap()); // look for an aggregate right child node RelExpr *rightChildExpr = child(1).getPtr(); GroupByAgg *rightChildGrby = NULL; RelExpr *rightChildExch = NULL; MapValueIds *rightChildMvi = NULL; ValueIdMap *optionalMap = NULL; NABoolean done = FALSE; while (!done) { if (rightChildExpr->getOperator().match(REL_ANY_GROUP)) { rightChildGrby = (GroupByAgg *) rightChildExpr; done = TRUE; } else if (rightChildExpr->getOperator() == REL_EXCHANGE) { if (rightChildExch == NULL) rightChildExch = rightChildExpr; else done = TRUE; // can't handle more than one exchange } else if (rightChildExpr->getOperator() == REL_MAP_VALUEIDS) { if (rightChildMvi == NULL) { rightChildMvi = (MapValueIds *) rightChildExpr; optionalMap = &rightChildMvi->getMap(); } else done = TRUE; // can't handle more than one MVI } else done = TRUE; if (!done) rightChildExpr = rightChildExpr->child(0); } // Among other things, this will give the probeCache // the characteristic inputs and outputs of the // inner table. probeCache->setGroupAttr(new(generator->wHeap()) GroupAttributes(*(child(1)->getGroupAttr()))); // Try to pull up predicates from the child, if that reduces // the char. inputs sent to the child. We only try this right // now if the child is an aggregate or groupby. if (rightChildGrby && CmpCommon::getDefault(NESTED_JOIN_CACHE_PREDS) != DF_OFF && (// if right child exchange exists, it must have same char inputs rightChildExch == NULL || rightChildExch->getGroupAttr()->getCharacteristicInputs() == rightChildGrby->getGroupAttr()->getCharacteristicInputs()) && (rightChildMvi == NULL || rightChildMvi->getGroupAttr()->getCharacteristicInputs() == rightChildGrby->getGroupAttr()->getCharacteristicInputs())) { ValueIdSet pcAvailableInputs( probeCache->getGroupAttr()->getCharacteristicInputs()); // predicates can refer to both char. inputs and outputs pcAvailableInputs += probeCache->getGroupAttr()->getCharacteristicOutputs(); // note that this will overwrite the ProbeCache's selection preds rightChildGrby->tryToPullUpPredicatesInPreCodeGen( pcAvailableInputs, probeCache->selectionPred(), optionalMap); // adjust char. inputs of intervening nodes - this is not // exactly good style, just overwriting the char. 
inputs, but
// hopefully we'll get away with it at this stage in the
// processing
if (rightChildExch)
  rightChildExch->getGroupAttr()->setCharacteristicInputs(
       rightChildGrby->getGroupAttr()->getCharacteristicInputs());
if (rightChildMvi)
  rightChildMvi->getGroupAttr()->setCharacteristicInputs(
       rightChildGrby->getGroupAttr()->getCharacteristicInputs());
}

// propagate estimates, physical properties, and costings
// from the child to the ProbeCache:
probeCache->setEstRowsUsed(child(1)->getEstRowsUsed());
probeCache->setMaxCardEst(child(1)->getMaxCardEst());
probeCache->setInputCardinality(child(1)->getInputCardinality());
probeCache->setPhysicalProperty(child(1)->getPhysicalProperty());
probeCache->setOperatorCost(0);
probeCache->setRollUpCost(child(1)->getRollUpCost());

// Glue the ProbeCache to the NestedJoin's right leg.
child(1) = probeCache;
}

if (isTSJForUndo())
  {
    Sort *sortNode = new(generator->wHeap()) Sort(child(0));
    ItemExpr *sk = new (generator->wHeap()) SystemLiteral(1);
    sk->synthTypeAndValueId(TRUE);
    ValueIdList skey;
    skey.insert(sk->getValueId());
    sortNode->getSortKey() = skey;

    // Use the same characteristic inputs and outputs as the left child
    sortNode->setGroupAttr(new(generator->wHeap())
                           GroupAttributes(*(child(0)->getGroupAttr())));

    // pass along some of the estimates
    sortNode->setEstRowsUsed(child(0)->getEstRowsUsed());
    sortNode->setMaxCardEst(child(0)->getMaxCardEst());
    sortNode->setInputCardinality(child(0)->getInputCardinality());
    sortNode->setPhysicalProperty(child(0)->getPhysicalProperty());
    sortNode->setCollectNFErrors();
    sortNode->setOperatorCost(0);
    sortNode->setRollUpCost(child(0)->getRollUpCost());
    child(0) = sortNode;
  }

if ( childIsBlocking &&
     generator->preCodeGenParallelOperator() )
  {
    if (espExchangeWithMerge == FALSE)
      {
        // A "halloween sort" needs to ensure that if it is parallel, but
        // executes in the same ESP as the generic update's TSJ flow node,
        // then the Sort will block until all scans are finished.
        ((Sort *)(child(0).getPtr()))->doCheckAccessToSelfRefTable();
      }
    else
      {
        // An ESP Exchange can be eliminated in its preCodeGen method if it
        // is redundant. If this happens, then the Sort will be executing in
        // the same ESP as the TSJ after all. So we set this flag now, so
        // that the Exchange preCodeGen will call
        // doCheckAccessToSelfRefTable() for the Sort before eliminating
        // itself. This is part of the fix for Sol 10-090310-9876.
        ((Exchange *)(child(0).getPtr()))->markHalloweenSortIsMyChild();
      }
  }

RelExpr * re = Join::preCodeGen(generator, externalInputs, pulledNewInputs);

if ( espExchangeWithMerge &&
     (child(0)->getOperatorType() == REL_EXCHANGE))
  GenAssert(((Exchange *)((RelExpr *)child(0)))->doesMerge(),
            "Exchange operator does not block for Halloween problem.");

generator->compilerStatsInfo().nj()++;

return re;
}

RelExpr * MergeJoin::preCodeGen(Generator * generator,
                                const ValueIdSet & externalInputs,
                                ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! Join::preCodeGen(generator, externalInputs, pulledNewInputs))
    return 0;

  // Accumulate the values that are provided as inputs by my parent
  // together with the values that are produced as outputs by my
  // children. Use these values for rewriting the VEG expressions.
  ValueIdSet availableValues;
  getInputValuesFromParentAndChildren(availableValues);

  // Find out whether the left child and/or the right child will have at
  // most one matching row. If so, a faster merge join implementation
  // will be used at runtime.
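  // Example: for "t1 JOIN t2 ON t1.pk = t2.x", the left side is unique per
  // probe (t1.pk is a key), so the runtime can advance past a left row as
  // soon as its matches are consumed, instead of buffering duplicates. The
  // test (performed just below) asks each child's group attributes whether
  // the equijoin columns form a unique key; sketched:
#if 0
  leftUnique()  = child(0)->getGroupAttr()->isUnique(valuesUsedForPredicates);
  rightUnique() = child(1)->getGroupAttr()->isUnique(valuesUsedForPredicates);
#endif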
ValueIdSet vidSet = getOrderedMJPreds(); ValueIdSet valuesUsedForPredicates; computeValuesReqdForPredicates(vidSet, valuesUsedForPredicates); leftUnique() = child(0)->getGroupAttr()->isUnique(valuesUsedForPredicates); rightUnique() = child(1)->getGroupAttr()->isUnique(valuesUsedForPredicates); ValueIdList mjp(getOrderedMJPreds()); NABoolean replicatePredicates = TRUE; /* For merge join the characteristic outputs have already been resolved by the time the equijoin preds are resolved below. The outputs are resolved at the very end of Join::precodegen, which was called a few lines above. Therefore when we resolve the equijoin preds we have only the actually resolved output values available. We do not have all the potential output values available. */ ValueIdSet joinInputAndOutputValues; joinInputAndOutputValues = getGroupAttr()->getCharacteristicInputs(); joinInputAndOutputValues += getGroupAttr()->getCharacteristicOutputs(); // Pass in the children GAs so that the equipreds can have one side // resolved to one child and the other side resolved to the other child. // solution 10-100722-1962 mjp.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no key predicates here 0 /* no need for idempotence here */, replicatePredicates, NULL /* not a groupByAgg */, &joinInputAndOutputValues, NULL /* no indexDesc since we have no key preds*/, child(0)->getGroupAttr(), child(1)->getGroupAttr()); // must have at least 1 merge join predicate GenAssert(!mjp.isEmpty(),"mjp.isEmpty()"); // The generator expects the merge join predicates to be in the form // leftcol = rightcol where leftcol references a column from the left // table and rightcol references a column from the right table. Switch // the expression if it is the other way around. Also handle rare cases // where a VEGPred is resolved into two equalities connected by an AND. // ValueIdSet dummy1; ValueIdList newJoinPreds; ValueIdList newLeftOrder; ValueIdList newRightOrder; CollIndex ne = (CollIndex)(mjp.entries()); NABoolean isANewJoinPred ; for (CollIndex i = 0; i < ne; i++) { // Will store all the conjuncts under the pred mjp[i] being considered. ValueIdSet conjuncts; conjuncts.clear(); conjuncts.insert(mjp[i]); ValueIdSet finerConjuncts; do { finerConjuncts.clear(); // Go through the set of conjuncts, breaking down any AND seen into // finer conjuncts. // for (ValueId vid = conjuncts.init(); conjuncts.next(vid); conjuncts.advance(vid)) { ItemExpr *pred = vid.getItemExpr(); if (pred->getOperatorType() == ITM_AND) { // Found another AND, break it down into finer conjuncts. Store // them in finerConjuncts so that we can return to them later. // finerConjuncts.insert(pred->child(0)->getValueId()); finerConjuncts.insert(pred->child(1)->getValueId()); } else { // This is the "finest" conjunct - cannot be broken down further. // Make sure it's in the form of (leftCol = rightCol). Add the // equality predicate to the final list of MJ predicates. leftOrder // and rightOrder are set up correspondingly so that they match up // with the predicates. 
// GenAssert(pred->getOperatorType() == ITM_EQUAL, "pred->getOperatorType() != ITM_EQUAL"); ItemExpr *left = pred->child(0)->castToItemExpr(); ItemExpr *right = pred->child(1)->castToItemExpr(); isANewJoinPred = TRUE; NABoolean child0Covered = child(0).getGroupAttr()->covers(left->getValueId(), getGroupAttr()->getCharacteristicInputs(), dummy1) ; NABoolean child1Covered = child(1).getGroupAttr()->covers(right->getValueId(), getGroupAttr()->getCharacteristicInputs(), dummy1) ; if (NOT (child0Covered && child1Covered)) { //++MV - Irena // save the pred's specialNulls_ flag before replacing the pred BiRelat *biRelat = new(generator->wHeap()) BiRelat(ITM_EQUAL, right, left); // restore specialNulls_ biRelat->setSpecialNulls(((BiRelat*)pred)->getSpecialNulls()); biRelat->bindNode(generator->getBindWA()); pred = biRelat; //--MV - Irena child0Covered = child(0).getGroupAttr()->covers(right->getValueId(), getGroupAttr()->getCharacteristicInputs(), dummy1) ; child1Covered = child(1).getGroupAttr()->covers(left->getValueId(), getGroupAttr()->getCharacteristicInputs(), dummy1) ; if(!(child0Covered && child1Covered)) { if (isInnerNonSemiJoin()) { selectionPred() += pred->getValueId(); } else { // for an outer or semi join, the ON clause is stored in "joinPred" // while the WHERE clause is stored in "selectionPred". joinPred() += pred->getValueId(); } isANewJoinPred = FALSE; } } if (isANewJoinPred) { // Store the finest conjuncts in the final list of MJ predicates. // Make sure the list is matched up with corresponding leftOrder // and rightOrder. // newJoinPreds.insert(pred->getValueId()); newLeftOrder.insert(getLeftSortOrder()[i]); newRightOrder.insert(getRightSortOrder()[i]); } } } // for over conjuncts. // Come back to process the new set of broken-down conjuncts if the set // is non-empty. // conjuncts = finerConjuncts; } while (NOT conjuncts.isEmpty()); } // for over mjp. if (ne > 0) GenAssert(NOT newJoinPreds.isEmpty(), "MergeJoin::PreCodeGen has no resolved join predicates"); // Count merge join as a Big Memory Operator (BMO) if use of BMO quota // is enabled for merge join. if (CmpCommon::getDefaultLong(MJ_BMO_QUOTA_PERCENT) != 0) { generator->incrNumBMOs(); } setOrderedMJPreds(newJoinPreds); setLeftSortOrder(newLeftOrder); setRightSortOrder(newRightOrder); generator->compilerStatsInfo().mj()++; markAsPreCodeGenned(); return this; } // MergeJoin::preCodeGen() RelExpr * HashJoin::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM) { NABoolean resize = FALSE; NABoolean defrag = FALSE; ValueIdSet vidSet0 = child(0)->getGroupAttr()->getCharacteristicOutputs(); ValueIdSet vidSet1 = child(1)->getGroupAttr()->getCharacteristicOutputs(); ExpTupleDesc::TupleDataFormat tupleFormat = determineInternalFormat( vidSet1, vidSet0, this, resize, generator, FALSE, defrag); cacheTupleFormatAndResizeFlag(tupleFormat, resize, defrag); if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) { generator->incNCIFNodes(); } else { generator->decNCIFNodes(); } } // Determine if we should attempt to use the HashJoin min/max optimization. NABoolean useMinMaxOpt = ((CmpCommon::getDefault(GEN_HSHJ_MIN_MAX_OPT) == DF_ON) && ! getEquiJoinPredicates().isEmpty() && ! isLeftJoin() && ! isRightJoin() && ! isAntiSemiJoin()); // These indexes define the subset of min max values which belong to this HashJoin. 
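// The idea behind the min/max optimization, in miniature: while building
// the hash table from the inner side, track the min and max of the join
// key; the probe-side scan can then skip rows outside [min, max] before
// they ever reach the join. A self-contained toy sketch (standard C++
// only, not compiler code):
#if 0
#include <algorithm>
#include <vector>
int minMaxDemo()
{
  std::vector<int> build = {42, 7, 19};          // inner-side join keys
  int lo = *std::min_element(build.begin(), build.end());   // 7
  int hi = *std::max_element(build.begin(), build.end());   // 42
  std::vector<int> probe = {1, 7, 50, 20}, survivors;
  for (int k : probe)
    if (k >= lo && k <= hi)      // pushed-down range predicate
      survivors.push_back(k);    // {7, 20} reach the join
  return (int)survivors.size();
}
#endif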
  CollIndex startMinMaxIndex = 0;
  CollIndex endMinMaxIndex = 0;

  // If min/max opt is used, these lists are used to hold local copies of the
  // generator's min and max values. These are the min and max values
  // generated by HashJoins that may be used by scans.
  ValueIdList myMinVals;
  ValueIdList myMaxVals;

  // If min/max opt is used, this list is used to hold a local copy
  // of the generator's minMaxKeys. These are the values for which min
  // and max values are available.
  ValueIdList myMinMaxKeys;

  if (useMinMaxOpt)
    {
      // This HashJoin will append to the end of the generator lists.
      //
      startMinMaxIndex = generator->getMinMaxKeys().entries();

      // Find the candidate values from the right hand side of the join.
      // For now, only consider VEGPreds.
      for (ValueId valId = getEquiJoinPredicates().init();
           getEquiJoinPredicates().next(valId);
           getEquiJoinPredicates().advance(valId))
        {
          ItemExpr * itemExpr = valId.getItemExpr();
          NAType *mmType = NULL;

          if (itemExpr->getOperatorType() == ITM_VEG_PREDICATE)
            {
              VEGPredicate *vPred = (VEGPredicate *)itemExpr;
              VEGReference *vRef = vPred->getVEG()->getVEGReference();
              mmType = vRef->getValueId().getType().newCopy(generator->wHeap());
            }

          // mmType is the type of the VEGRef relating a left and right value.
          // We will compute the Min and Max using this type.
          if (mmType)
            {
              // Min/Max are typed as nullable.
              mmType->setNullable(true);

              // Construct the host vars which will represent the min and
              // max values for this join key.
              char name[80];
              sprintf(name, "_sys_MinVal%d", generator->getMinMaxKeys().entries());
              ItemExpr *minVal = new(generator->wHeap()) HostVar(name, mmType, TRUE);
              sprintf(name, "_sys_MaxVal%d", generator->getMinMaxKeys().entries());
              ItemExpr *maxVal = new(generator->wHeap()) HostVar(name, mmType, TRUE);
              minVal->synthTypeAndValueId();
              maxVal->synthTypeAndValueId();

              // Insert the value and its min and max into the generator lists
              // to make them available to scans as key predicates.
              generator->getMinMaxKeys().insert(itemExpr->getValueId());
              generator->getMinVals().insert(minVal->getValueId());
              generator->getMaxVals().insert(maxVal->getValueId());

              // Initialize the 'will use' list to a NULL_VALUE_ID. A scan
              // that decides to use the min max values will change this
              // entry to be the same as the corresponding entry in the
              // minMaxKeys list.
              generator->getWillUseMinMaxKeys().insert(NULL_VALUE_ID);
            }
        }

      // This is the end index (exclusive) for this HashJoin.
      endMinMaxIndex = generator->getMinMaxKeys().entries();

      // Keep local copies of the generator's lists.
      myMinVals = generator->getMinVals();
      myMaxVals = generator->getMaxVals();
      myMinMaxKeys = generator->getMinMaxKeys();
    }

  // Register the start and end indexes for this Hash Join
  // (Join::preCodeGen() needs to have access to the indexes)
  setStartMinMaxIndex(startMinMaxIndex);
  setEndMinMaxIndex(endMinMaxIndex);

  if (! Join::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  // List for min and max values that will be used by a scan and which
  // will be generated by this HashJoin.
  minMaxVals_.clear();
  minMaxCols_.clear();

  {
    // For each min/max value belonging to this HashJoin, check to see
    // if any scan decided to use it. If so, add the min and max
    // values to the list. Also, clear the 'will use' entry as no
    // other HashJoin can supply this value.
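    // In other words (illustrative sketch of the handshake): this join
    // publishes candidate join-key values in getMinMaxKeys(); a FileScan
    // that can exploit entry i copies that entry into
    // getWillUseMinMaxKeys()[i]; here the join detects the claim, agrees
    // to produce the corresponding min/max host variables, and resets the
    // slot so that no other HashJoin tries to supply the same value.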
    for (CollIndex i = startMinMaxIndex; i < endMinMaxIndex; i++)
      {
        if (generator->getWillUseMinMaxKeys()[i] != NULL_VALUE_ID)
          {
            minMaxVals_.insert(myMinVals[i]);
            minMaxVals_.insert(myMaxVals[i]);
            VEGPredicate *vPred = (VEGPredicate *)myMinMaxKeys[i].getItemExpr();
            VEGReference *vRef = vPred->getVEG()->getVEGReference();
            minMaxCols_.insert(vRef->getValueId());
            generator->getWillUseMinMaxKeys()[i] = NULL_VALUE_ID;
          }
      }

    // If we have some minMaxCols, then replace any VEGReferences.
    if (minMaxCols_.entries())
      {
        ValueIdSet availForMinMax;
        availForMinMax += child(1)->getGroupAttr()->getCharacteristicOutputs();
        availForMinMax += getGroupAttr()->getCharacteristicInputs();
        minMaxCols_.replaceVEGExpressions(availForMinMax,
                                          getGroupAttr()->getCharacteristicInputs());
      }
  }

  // Accumulate the values that are provided as inputs by my parent
  // together with the values that are produced as outputs by my
  // children. Use these values for rewriting the VEG expressions.
  ValueIdSet availableValues;
  getInputValuesFromParentAndChildren(availableValues);

  ValueIdSet hjp(getEquiJoinPredicates());

  NABoolean replicatePredicates = TRUE;

  /* For hash join the characteristic outputs have already been resolved
     by the time the equijoin preds are resolved below. The outputs are
     resolved at the very end of Join::precodegen, which was called a few
     lines above. Therefore when we resolve the equijoin preds we have
     only the actually resolved output values available. We do not have
     all the potential output values available. */
  ValueIdSet joinInputAndOutputValues;
  joinInputAndOutputValues = getGroupAttr()->getCharacteristicInputs();
  joinInputAndOutputValues += getGroupAttr()->getCharacteristicOutputs();

  // Pass in the children GAs so that the equipreds can have one side
  // resolved to one child and the other side resolved to the other child.
  // solution 10-100722-1962
  hjp.replaceVEGExpressions
         (availableValues,
          getGroupAttr()->getCharacteristicInputs(),
          FALSE, // no key predicates here
          0 /* no need for idempotence here */,
          replicatePredicates,
          NULL /* not a groupByAgg */,
          &joinInputAndOutputValues,
          NULL /* no indexDesc since we have no key preds*/,
          child(0)->getGroupAttr(),
          child(1)->getGroupAttr());

  // Will store the rewritten hjp's which comply with the format
  // leftCol = rightCol.
  //
  ValueIdSet newJoinPreds;

  if (hjp.isEmpty())
    {
      if (generator->downrevCompileNeeded())
        {
          // This is a cartesian product.
          // create a join predicate " 1 = 1 " which will always be true.
          //
          ItemExpr *left = new(generator->wHeap()) ConstValue(1);
          ItemExpr *right = new(generator->wHeap()) ConstValue(1);
          BiRelat *pred = new(generator->wHeap()) BiRelat(ITM_EQUAL, left, right);
          pred->bindNode(generator->getBindWA());
          newJoinPreds.insert(pred->getValueId());
        }
    }
  else
    {
      // The generator expects the hash join predicates to be in the form
      // leftcol = rightcol where leftcol references a column from the left
      // table and rightcol references a column from the right table. Switch
      // the expression if it is the other way around. Also handle rare cases
      // where a VEGPred is resolved into two equalities connected by an AND.
      //
      ValueIdSet dummy1;
      NABoolean isANewJoinPred;

      do
        {
          ValueIdSet finerConjuncts;
          finerConjuncts.clear();

          for (ValueId vid = hjp.init();
               hjp.next(vid);
               hjp.advance(vid))
            {
              ItemExpr *pred = vid.getItemExpr();

              // Break this up into the finer conjuncts. Store them in a separate
              // set so that we can return to it later.
              if (pred->getOperatorType() == ITM_AND)
                {
                  finerConjuncts.insert(pred->child(0)->getValueId());
                  finerConjuncts.insert(pred->child(1)->getValueId());
                }
              else
                {
                  GenAssert(pred->getOperatorType() == ITM_EQUAL,
                            "pred->getOperatorType() != ITM_EQUAL");

                  ItemExpr *left = pred->child(0)->castToItemExpr();
                  ItemExpr *right = pred->child(1)->castToItemExpr();
                  isANewJoinPred = TRUE;

                  NABoolean child0Covered =
                    child(0).getGroupAttr()->covers(left->getValueId(),
                                                    getGroupAttr()->getCharacteristicInputs(),
                                                    dummy1);
                  NABoolean child1Covered =
                    child(1).getGroupAttr()->covers(right->getValueId(),
                                                    getGroupAttr()->getCharacteristicInputs(),
                                                    dummy1);

                  if (NOT (child0Covered && child1Covered))
                    {
                      //++MV - Irena
                      // save the pred's specialNulls_ flag before replacing the pred
                      BiRelat *biRelat = new(generator->wHeap()) BiRelat(ITM_EQUAL, right, left);
                      // restore specialNulls_
                      biRelat->setSpecialNulls(((BiRelat*)pred)->getSpecialNulls());
                      biRelat->bindNode(generator->getBindWA());
                      pred = biRelat;
                      //--MV - Irena

                      child0Covered =
                        child(0).getGroupAttr()->covers(right->getValueId(),
                                                        getGroupAttr()->getCharacteristicInputs(),
                                                        dummy1);
                      child1Covered =
                        child(1).getGroupAttr()->covers(left->getValueId(),
                                                        getGroupAttr()->getCharacteristicInputs(),
                                                        dummy1);

                      if (!(child0Covered && child1Covered))
                        {
                          if (isInnerNonSemiJoin())
                            {
                              selectionPred() += pred->getValueId();
                            }
                          else
                            {
                              // for an outer or semi join, the ON clause is stored in "joinPred"
                              // while the WHERE clause is stored in "selectionPred".
                              joinPred() += pred->getValueId();
                            }
                          isANewJoinPred = FALSE;
                        }
                    }

                  if (isANewJoinPred)
                    newJoinPreds.insert(pred->getValueId());
                }
            } // for over hjp.

          // Come back to process the new set of broken-down conjuncts if the set
          // is non-empty.
          //
          hjp = finerConjuncts;
        } while (NOT hjp.isEmpty());

      GenAssert(NOT newJoinPreds.isEmpty(),
                "HashJoin::PreCodeGen has no resolved join predicates");
    }

  // Value IDs given to the right/inner child
  ValueIdSet valuesGivenToRightChild =
    child(1)->getGroupAttr()->getCharacteristicInputs();

  if ( ! valuesGivenToRightChild.isEmpty() )
    {
      // Accumulate the values that are provided as inputs by my parent
      // together with the values that are produced as outputs by my
      // children. Use these values for rewriting the VEG expressions.
      ValueIdSet availableValues;
      const ValueIdSet & HJInputs = getGroupAttr()->getCharacteristicInputs();
      getInputValuesFromParentAndChildren(availableValues);

      valuesGivenToRightChild.replaceVEGExpressions(availableValues, HJInputs);
    }

  // before computing the move and check expressions, add one more
  // value to "valuesGivenToRightChild": a statement execution count
  // that will cause re-hashing each time the statement is
  // re-executed. It is not legal to keep a hash table across
  // statement executions (and possibly transactions).
  ValueId execCount = generator->getOrAddStatementExecutionCount();

  valuesGivenToRightChild += execCount;
  pulledNewInputs += execCount;
  getGroupAttr()->addCharacteristicInputs(pulledNewInputs);

  // add move and search expressions
  for (ValueId val_id = valuesGivenToRightChild.init();
       valuesGivenToRightChild.next(val_id);
       valuesGivenToRightChild.advance(val_id))
    {
      ItemExpr * item_expr = val_id.getItemExpr();

      // add this converted value to the map table.
      Convert * conv_node = new(generator->wHeap()) Convert(item_expr);

      // bind/type propagate the new node
      conv_node->bindNode(generator->getBindWA());

      moveInputValues().insert(conv_node->getValueId());

      // add the search condition
      BiRelat * bi_relat = new(generator->wHeap())
        BiRelat(ITM_EQUAL, item_expr, conv_node);
      bi_relat->allocValueId();

      checkInputValues().insert(bi_relat->getValueId());
    } // for val_id

  // Count this BMO and add its needed memory to the total needed
  generator->incrNumBMOs();

  if ((ActiveSchemaDB()->getDefaults()).getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0)
    generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(TRUE));

  // store the transformed predicates back into the hash join node
  storeEquiJoinPredicates(newJoinPreds);

  generator->compilerStatsInfo().hj()++;

  //
  // case of hash anti semi join optimization (NOT IN)
  // add/build expressions to detect inner and outer nulls:
  // checkOuterNullExpr_ and checkInnerNullExpr_
  addCheckNullExpressions(generator->wHeap());

  markAsPreCodeGenned();
  return this;
} // HashJoin::preCodeGen()

RelExpr * FileScan::preCodeGen(Generator * generator,
                               const ValueIdSet & externalInputs,
                               ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  const PartitioningFunction* myPartFunc = getPartFunc();

  NABoolean usePartKeyPreds =
    (isHbaseTable() &&
     myPartFunc &&
     myPartFunc->isPartitioned() &&
     !myPartFunc->isAReplicationPartitioningFunction());

  if (isRewrittenMV())
    generator->setNonCacheableMVQRplan(TRUE);

  if (usePartKeyPreds)
    {
      // partition key predicates will be applied to this file scan,
      // "pull" the partition input values from the parent
      pulledNewInputs += myPartFunc->getPartitionInputValues();
      getGroupAttr()->addCharacteristicInputs(myPartFunc->getPartitionInputValues());
    }

  // Resolve the VEGReferences and VEGPredicates, if any, that appear
  // in the Characteristic Inputs, in terms of the externalInputs.
  getGroupAttr()->resolveCharacteristicInputs(externalInputs);

  // The VEG expressions in the selection predicates and the characteristic
  // outputs can reference any expression that is either a potential output
  // or a characteristic input for this RelExpr. Supply these values for
  // rewriting the VEG expressions.
  ValueIdSet availableValues;
  getInputAndPotentialOutputValues(availableValues);

  sampledColumns().replaceVEGExpressions
    (availableValues, getGroupAttr()->getCharacteristicInputs());

  // Rewrite the partitioning function in terms of the available values.
  if (getIndexDesc()->isPartitioned())
    getIndexDesc()->getPartitioningFunction()->preCodeGen(availableValues);

  // VEGPredicates that are key predicates but are also replicated in
  // the executor predicates must be replaced with the same expression
  // in both the places after they are rewritten. The VEGRewritePairs
  // data structure, when passed to replaceVEGExpressions(), causes
  // replaceVEGExpressions() to be idempotent.
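  // Example (illustrative): if the key predicate and an executor
  // predicate both contain VEGPred(VEG{T.a, S.b}), the pairs cache
  // ensures both occurrences are rewritten to the same expression,
  // say T.a = S.b, rather than to two different but equivalent forms.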
VEGRewritePairs vegPairs(generator->wHeap()); ValueIdSet partKeyPredsHBase; if (usePartKeyPreds) { // add the partitioning key predicates to this scan node, // to make sure that each ESP reads only the part of the // data that it is supposed to process ValueId saltCol; if (myPartFunc->isATableHashPartitioningFunction()) { // find the _SALT_ column and make a partitioning key // predicate for it const ValueIdList &keyCols = getIndexDesc()->getIndexKey(); // the first salt column we find in the key is the one // we are looking for for (CollIndex i=0; i<keyCols.entries(); i++) if (keyCols[i].isSaltColumn()) { saltCol = keyCols[i]; break; } if (saltCol != NULL_VALUE_ID) ((TableHashPartitioningFunction *) myPartFunc)-> createPartitioningKeyPredicatesForSaltedTable(saltCol); } partKeyPredsHBase = myPartFunc->getPartitioningKeyPredicates(); } if (getMdamKeyPtr() != NULL) { NABoolean replicatePredicates = TRUE; // mdamKeyPtr()->print(); // for debugging purposes ValueIdSet executorPredicates; ValueIdSet augmentedPreds = getSelectionPredicates(); const LogPhysPartitioningFunction *logPhysPartFunc = getPartFunc()->castToLogPhysPartitioningFunction(); if (!partKeyPredsHBase.isEmpty()) { augmentedPreds += partKeyPredsHBase; mdamKeyPtr()->setNoExePred(FALSE); } augmentedPreds += getComputedPredicates(); if ( logPhysPartFunc != NULL ) { LogPhysPartitioningFunction::logPartType logPartType = logPhysPartFunc->getLogPartType(); if ( logPartType == LogPhysPartitioningFunction::LOGICAL_SUBPARTITIONING OR logPartType == LogPhysPartitioningFunction::HORIZONTAL_PARTITION_SLICING ) augmentedPreds += logPhysPartFunc->getPartitioningKeyPredicates(); } mdamKeyPtr()->preCodeGen(executorPredicates, augmentedPreds, availableValues, getGroupAttr()->getCharacteristicInputs(), &vegPairs, replicatePredicates, !partKeyPredsHBase.isEmpty()); setExecutorPredicates(executorPredicates); // mdamKeyPtr()->print(); // for debugging purposes } else if (! 
isHiveTable() && (getSearchKey() || !partKeyPredsHBase.isEmpty())) { // --------------------------------------------------- // --------------------- Rewrite preds for search key: // --------------------------------------------------- if (!partKeyPredsHBase.isEmpty()) { // These predicates can compete with other key predicates; // decide which of them to use as key preds and which as // executor preds: // - No search key: Use part key preds as search key // - Search key with non-unique preds: Replace it with // a new search key with part key preds // - Search key with unique preds (unlikely, this shouldn't // have been a parallel query): add part key preds as // executor preds ValueIdSet combinedInputs(externalInputs); combinedInputs += pulledNewInputs; ValueIdSet existingKeyPreds; if (getSearchKey()) existingKeyPreds += getSearchKey()->getKeyPredicates(); // create a new search key that has the partitioning key preds SearchKey * partKeySearchKey = myPartFunc->createSearchKey(getIndexDesc(), combinedInputs, existingKeyPreds); ValueIdSet exePreds(partKeySearchKey->getExecutorPredicates()); NABoolean replaceSearchKey = !(getSearchKey() && getSearchKey()->isUnique()); if (getSearchKey()) exePreds += getSearchKey()->getExecutorPredicates(); // pick one search key and add the remaining // predicates (if any) to exePreds if (replaceSearchKey) setSearchKey(partKeySearchKey); else exePreds += partKeySearchKey->getKeyPredicates(); searchKey()->setExecutorPredicates(exePreds); } NABoolean replicatePredicates = TRUE; setExecutorPredicates(searchKey()->getExecutorPredicates()); // Rebuild the search key expressions ValueIdSet& keyPred = searchKey()->keyPredicates(); keyPred.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation here &vegPairs, replicatePredicates); // Rebuild the executor predicate tree executorPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation here &vegPairs, replicatePredicates ); // Generate the begin and end keys. if ( getDoUseSearchKey() ) { generateKeyExpr(getGroupAttr()->getCharacteristicInputs(), getIndexDesc()->getIndexKey(), getSearchKey()->getBeginKeyValues(), beginKeyPred_, generator, replicatePredicates); generateKeyExpr(getGroupAttr()->getCharacteristicInputs(), getIndexDesc()->getIndexKey(), getSearchKey()->getEndKeyValues(), endKeyPred_, generator, replicatePredicates); } // Check to see if there are any MIN/MAX values coming from a // HashJoin which could be used as begin/end key values for the // leading key of this scan. Don't consider doing this if this // is a unique scan (can't improve on that) or if the leading // key is already unique or if both the begin and end key are // exclusive (min max are inclusive and no easy way to mix // them). if (generator->getMinMaxKeys().entries() && (getSearchKey()->getBeginKeyValues()[0] != getSearchKey()->getEndKeyValues()[0]) && (!getSearchKey()->isBeginKeyExclusive() || !getSearchKey()->isEndKeyExclusive())) { // The keys of the scan. const ValueIdList &keys = getIndexDesc()->getIndexKey(); ValueId minMaxKeyCol = keys[0]; IndexColumn *ixCol = (IndexColumn *) (minMaxKeyCol.getItemExpr()); BaseColumn *baseCol = NULL; ValueId underlyingCol; NABoolean needToComputeActualMinMax = FALSE; ItemExpr *computedColExpr = NULL; // The candidate values for min and max. 
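        // Sketch of how min/max map to begin/end keys below:
        //   ASC key, forward scan: begin key becomes max(currentBegin, joinMin),
        //                          end key becomes min(currentEnd, joinMax).
        //   A DESC key or a reverse scan flips which of min/max feeds
        //   which end of the range.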
const ValueIdList &minMaxKeys = generator->getMinMaxKeys(); CollIndex keyIdx = NULL_COLL_INDEX; // Determine how min/max is related to begin/end. depends // on ordering (ASC vs DESC) and scan direction (forward vs // reverse) NABoolean ascKey = getIndexDesc()->getNAFileSet()->getIndexKeyColumns().isAscending(0); if(getReverseScan()) ascKey = !ascKey; // If the leading key column is a divisioning column, then // look for min/max values of an underlying column GenAssert(ixCol->getOperatorType() == ITM_INDEXCOLUMN, "unexpected object type"); baseCol = (BaseColumn *) (((IndexColumn *) ixCol)->getDefinition().getItemExpr()); GenAssert(baseCol->getOperatorType() == ITM_BASECOLUMN, "unexpected object type"); if (baseCol->getNAColumn()->isDivisioningColumn()) { ValueIdSet underlyingCols; baseCol->getUnderlyingColumnsForCC(underlyingCols); if (underlyingCols.entries() == 1) { // We have a leading division column that's computed from // 1 base column, now get the underlying column and the // divisioning expression needToComputeActualMinMax = TRUE; underlyingCols.getFirst(minMaxKeyCol); computedColExpr = baseCol->getComputedColumnExpr().getItemExpr(); BaseColumn *underlyingBaseCol = (BaseColumn *) minMaxKeyCol.getItemExpr(); GenAssert(underlyingBaseCol->getOperatorType() == ITM_BASECOLUMN, "unexpected object type"); // the computed column expression has been rewritten to use // VEGRefs, so get the corresponding VEGRef for the underlying column underlyingCol = underlyingBaseCol->getTableDesc()-> getColumnVEGList()[underlyingBaseCol->getColNumber()]; } } // Check all the candidate values. If any one of them matches // the leading key of this scan, then select it for use in the // begin/end key value of the leading key. // Scalar min/max functions cause an exponential growth when // combined with each other, see ItmScalarMinMax::codeGen() Int32 limitItems = 3 ; // use at most 3 for(CollIndex i = 0; i < minMaxKeys.entries() && limitItems; i++) { ValueId mmKeyId = minMaxKeys[i]; if(mmKeyId != NULL_VALUE_ID) { ItemExpr *mmItem = mmKeyId.getItemExpr(); if (mmItem->getOperatorType() == ITM_VEG_PREDICATE) { VEGPredicate *vPred = (VEGPredicate *)mmItem; const ValueIdSet &members = vPred->getVEG()->getAllValues(); if (members.contains(minMaxKeyCol)) { // some other operator is producing min/max values // for our leading key column, now check whether we // can use them keyIdx = i; // Indicate in the 'will use' list that we will use these // min/max values. This will indicate to the HashJoin that // it should produce these values. generator->getWillUseMinMaxKeys()[keyIdx] = generator->getMinMaxKeys()[keyIdx]; addMinMaxHJColumn(baseCol->getValueId()); limitItems-- ; // one more is used // If we can use a min/max value for the begin key, do so... if(!getSearchKey()->isBeginKeyExclusive()) { ItemExpr *keyPred = getBeginKeyPred()[0].getItemExpr(); ItemExpr *currentBeg = keyPred->child(1); // Get the proper begin key (min or max) that came from // the HashJoin ValueId hashJoinBeg = (ascKey ? generator->getMinVals()[keyIdx] : generator->getMaxVals()[keyIdx]); // Construct an expression which determines at runtime // which BK to use. Either the existing one or the one // coming from HashJoin whichever is larger (smaller). // ItemExpr *newBeg = hashJoinBeg.getItemExpr(); if (needToComputeActualMinMax) { ValueIdMap divExprMap; ValueId computedBeg; // If hashJoinBeg is :sysHV1 and the computed column // expression is A/100, then the begin value for // the computed column is :sysHV1/100. 
                          // Do this rewrite by using a ValueIdMap.
                          divExprMap.addMapEntry(underlyingCol, hashJoinBeg);
                          divExprMap.rewriteValueIdDown(computedColExpr->getValueId(),
                                                        computedBeg);
                          newBeg = computedBeg.getItemExpr();
                        }

                      newBeg = new (generator->wHeap())
                        ItmScalarMinMax((ascKey ? ITM_SCALAR_MAX : ITM_SCALAR_MIN),
                                        currentBeg,
                                        newBeg);
                      newBeg->synthTypeAndValueId();

                      // Replace the RHS of the key pred.
                      keyPred->child(1) = newBeg->getValueId();

                      // The value coming from the HashJoin must be in our inputs.
                      getGroupAttr()->addCharacteristicInputs(hashJoinBeg);

                      // And we must pull those values from the HashJoin.
                      pulledNewInputs += hashJoinBeg;
                      availableValues += hashJoinBeg;
                    }

                  // If we can use a min/max value for the end key, do so...
                  if (!getSearchKey()->isEndKeyExclusive())
                    {
                      ItemExpr *keyPred = getEndKeyPred()[0].getItemExpr();
                      ItemExpr *currentEnd = keyPred->child(1);

                      // Get the proper end key (max or min) that came from
                      // the HashJoin
                      ValueId hashJoinEnd =
                        (ascKey
                         ? generator->getMaxVals()[keyIdx]
                         : generator->getMinVals()[keyIdx]);

                      // Construct an expression which determines at runtime
                      // which EK to use. Either the existing one or the one
                      // coming from HashJoin whichever is smaller (larger).
                      //
                      ItemExpr *newEnd = hashJoinEnd.getItemExpr();

                      if (needToComputeActualMinMax)
                        {
                          ValueIdMap divExprMap;
                          ValueId computedEnd;

                          divExprMap.addMapEntry(underlyingCol, hashJoinEnd);
                          divExprMap.rewriteValueIdDown(computedColExpr->getValueId(),
                                                        computedEnd);
                          newEnd = computedEnd.getItemExpr();
                        }

                      newEnd = new (generator->wHeap())
                        ItmScalarMinMax((ascKey ? ITM_SCALAR_MIN : ITM_SCALAR_MAX),
                                        currentEnd,
                                        newEnd);
                      newEnd->synthTypeAndValueId();

                      // Replace the RHS of the key pred.
                      keyPred->child(1) = newEnd->getValueId();

                      // The value coming from the HashJoin must be in our inputs.
                      getGroupAttr()->addCharacteristicInputs(hashJoinEnd);

                      // And we must pull those values from the HashJoin.
                      pulledNewInputs += hashJoinEnd;
                      availableValues += hashJoinEnd;
                    }
                }
            }
          }
        }
      }
    }
  else
    {
      // Hive table scan (HBase scan has executor preds set up already)
      if (isHiveTable())
        setExecutorPredicates(selectionPred());

      // Rebuild the executor predicate tree
      executorPred().replaceVEGExpressions
        (availableValues,
         getGroupAttr()->getCharacteristicInputs(),
         FALSE, // no need for key predicate generation here
         &vegPairs,
         TRUE);

      if (isHiveTable())
        // assign individual files and blocks to each ESP
        ((NodeMap *) getPartFunc()->getNodeMap())->assignScanInfos(hiveSearchKey_);
    }

  // Selection predicates are not needed anymore:
  selectionPred().clear();

  // Add the sampled columns to the set of available values. This is
  // basically a kluge to get the GroupAttributes right.
  availableValues += sampledColumns();

  // This call also rewrites predicates
  // $$$ Does it need vegPairs too? $$$
  getGroupAttr()->resolveCharacteristicOutputs
    (availableValues, getGroupAttr()->getCharacteristicInputs());

  generator->oltOptInfo()->mayDisableOperStats(&oltOptInfo());

  markAsPreCodeGenned();

  return this;
} // FileScan::preCodeGen()

RelExpr * GenericUpdate::preCodeGen(Generator * generator,
                                    const ValueIdSet & externalInputs,
                                    ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! RelExpr::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  // Determine whether OLT optimization must be avoided.
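  // (The checks below disable OLT fast-path optimizations and force
  // abort-on-error semantics in cases where partial or deferred error
  // handling cannot be guaranteed, e.g. streams, embedded update/delete,
  // or inlined RI/IM/MV/trigger actions.)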
if (getGroupAttr()->isEmbeddedUpdateOrDelete()) { generator->oltOptInfo()->setOltMsgOpt(FALSE); generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); generator->setUpdPartialOnError(FALSE); generator->setUpdErrorOnError(FALSE); } if ((accessOptions().accessType() == SKIP_CONFLICT_) || (getGroupAttr()->isStream()) || (newRecBeforeExprArray().entries() > 0)) // set on rollback { generator->oltOptInfo()->setOltEidOpt(FALSE); oltOptInfo().setOltEidOpt(FALSE); setExpandShortRows(FALSE); generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); generator->setUpdPartialOnError(FALSE); generator->setUpdErrorOnError(FALSE); } // If RI, IM, MV or triggers are being used, abort on error. // This is because internal data consistency // cannot be guaranteed for these cases. if ((getInliningInfo().hasInlinedActions()) || (getInliningInfo().isEffectiveGU())) { // cannot do partial updates. generator->setUpdPartialOnError(FALSE); if (CmpCommon::getDefault(COMP_BOOL_206) == DF_ON) { if (NOT ((getInliningInfo().hasTriggers()) || (getInliningInfo().hasRI()) || (getInliningInfo().hasIM()) || (getInliningInfo().isMVLoggingInlined()))) { generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); generator->setUpdErrorOnError(FALSE); } else generator->setUpdErrorOnError(FALSE); } else { // abort on error for non-IM cases(RI,MV,Trig). if ((NOT getInliningInfo().hasIM()) || (getInliningInfo().hasRI())) { generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); generator->setUpdErrorOnError(FALSE); } else generator->setUpdErrorOnError(FALSE); } } // If RI, MV or triggers are being used, turn off the lean optimization for // the complete plan; all other optimizations will still apply. if ( generator->oltOptInfo()->oltEidLeanOpt() && ( getInliningInfo().hasTriggers() || getInliningInfo().hasRI() || getInliningInfo().isMVLoggingInlined() ) ) { generator->oltOptInfo()->setOltEidLeanOpt(FALSE); oltOptInfo().setOltEidLeanOpt(FALSE); } // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); newRecExpr_.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); newRecBeforeExpr_.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); executorPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); // VEGPredicates that are key predicates but are also replicated // in the executor predicates must be replaced with the same // expression in both places after they are rewritten. // Therefore, we want replaceVEGExpressions() processing to be // idempotent. By passing the VEGRewritePairs data structure // to replaceVEGExpressions(), we get idempotence. VEGRewritePairs lookup(generator->wHeap()); // so replaceVEGExpressions will be idempotent if (getSearchKey() == NULL) { // Begin and end key preds may already be available. beginKeyPred_.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation here &lookup); endKeyPred_.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation here &lookup); // In the case of an embedded insert from VALUES, // any predicates need to have their VEGreferences resolved. 
if (getGroupAttr()->isEmbeddedInsert()) { NABoolean replicatePredicates = TRUE; // Rebuild the executor predicate tree executorPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation &lookup, replicatePredicates ); } } else { // Build begin and end key predicates from the search key structure. //## It *might* be a good idea to add here: //## CMPASSERT(beginKeyPred_.isEmpty() && endKeyPred_.isEmpty()); //## as that *seems* to be the assumption here. //## (But I haven't the time to make the change and test it.) ValueIdSet& keyPred = getSearchKey()->keyPredicates(); NABoolean replicatePredicates = TRUE; // Rebuild the search key expressions keyPred.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation &lookup, replicatePredicates); // Rebuild the executor predicate tree executorPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation &lookup, replicatePredicates ); // Generate the begin and end keys. generateKeyExpr(getGroupAttr()->getCharacteristicInputs(), getIndexDesc()->getIndexKey(), getSearchKey()->getBeginKeyValues(), beginKeyPred_, generator); generateKeyExpr(getGroupAttr()->getCharacteristicInputs(), getIndexDesc()->getIndexKey(), getSearchKey()->getEndKeyValues(), endKeyPred_, generator); } // --------------------------------------------------------------------- // Rewrite the check constraint expressions. // --------------------------------------------------------------------- checkConstraints().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); generator->setFoundAnUpdate(TRUE); generator->setPartnAccessChildIUD(); #ifdef _DEBUG // Compile in the index maintenance ... just for testing // if(getenv("IM_COMPILE")) generator->imUpdateRel() = this; #endif if (oltOptLean() && ((isinBlockStmt()) || (getTableDesc()->getNATable()->hasAddedColumn()) || (getTableDesc()->getNATable()->hasVarcharColumn()))) { oltOptInfo().setOltEidLeanOpt(FALSE); } generator->setSkipUnavailablePartition(FALSE); if (isMtsStatement()) generator->setEmbeddedIUDWithLast1(TRUE) ; if (isMerge()) { // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. 
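      // (Sketch) For MERGE INTO t USING ... WHEN MATCHED THEN UPDATE ...
      // WHEN NOT MATCHED THEN INSERT ..., mergeInsertRecExpr() holds the
      // INSERT assignments and mergeUpdatePred() the additional matching
      // predicate; both are resolved below against the same available values.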
ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); mergeInsertRecExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); mergeUpdatePred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); ValueIdList tempVIDlist; getTableDesc()->getIdentityColumn(tempVIDlist); NAColumn *identityCol = NULL; if (tempVIDlist.entries() > 0) { ValueId valId = tempVIDlist[0]; identityCol = valId.getNAColumn(); } if (((getOperatorType() == REL_HBASE_DELETE) || (getOperatorType() == REL_HBASE_UPDATE)) && (getTableDesc()->getNATable()->getClusteringIndex()->hasSyskey())) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" SYSKEY not allowed."); GenExit(); } if ((getOperatorType() != REL_HBASE_UPDATE) && (mergeInsertRecExpr().entries() > 0) && (CmpCommon::getDefault(COMP_BOOL_175) == DF_OFF)) { // MERGE with INSERT is limited to HBase updates unless // the CQD is on *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" This MERGE is not allowed with INSERT."); GenExit(); } if (oltOpt()) { // if no update expr and only insert expr is specified for // this MERGE stmt, turn off olt opt. // if (newRecExprArray().entries() == 0) oltOptInfo().setOltEidOpt(FALSE); oltOptInfo().setOltEidLeanOpt(FALSE); } generator->setUpdErrorOnError(FALSE); generator->setUpdSavepointOnError(FALSE); } // isMerge generator->oltOptInfo()->mayDisableOperStats(&oltOptInfo()); // Part of the fix for Soln 10-100425-9755. Don't AQR a // positioned update/delete because part of the recovery // for the error that triggers the AQR is rollback transaction // and this causes the referenced cursor to be closed. The other // part of the fix is in compiler cache: positioned update/deletes // will not be cached, and this should reduce the need to handle // errors with AQR, e.g., timestamp mismatch errors. if (updateCurrentOf()) generator->setAqrEnabled(FALSE); if (getTableDesc()->getNATable()->hasLobColumn()) { oltOptInfo().setOltOpt(FALSE); generator->oltOptInfo()->setOltOpt(FALSE); generator->setAqrEnabled(FALSE); generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); } if ((isNoRollback()) || (generator->getTransMode()->getRollbackMode() == TransMode::NO_ROLLBACK_)) { generator->setWithNoRollbackUsed(isNoRollback()); if (CmpCommon::getDefault(AQR_WNR) == DF_OFF) generator->setAqrEnabled(FALSE); } if (((getInliningInfo().hasInlinedActions()) || (getInliningInfo().isEffectiveGU())) && (getInliningInfo().hasRI())) { generator->setRIinliningForTrafIUD(TRUE); } if (precondition_.entries() > 0) { ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); precondition_. replaceVEGExpressions(availableValues, getGroupAttr()->getCharacteristicInputs()); } markAsPreCodeGenned(); return this; } // GenericUpdate::preCodeGen() RelExpr * Update::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! GenericUpdate::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; markAsPreCodeGenned(); return this; } RelExpr * MergeUpdate::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! 
Update::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; markAsPreCodeGenned(); return this; } RelExpr * UpdateCursor::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! Update::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // primary key columns cannot be updated, yet. After RI support // is in, they could be updated. const NAColumnArray & key_column_array = getTableDesc()->getNATable()->getClusteringIndex()->getIndexKeyColumns(); ValueIdSet& val_id_set = newRecExpr(); ValueId val_id; for (val_id = val_id_set.init(); val_id_set.next(val_id); val_id_set.advance(val_id)) { ItemExpr * item_expr = val_id.getItemExpr(); for (short i = 0; i < getTableDesc()->getNATable()->getKeyCount(); i++) { const char * key_colname = key_column_array[i]->getColName(); const char * upd_colname = ((BaseColumn *) (item_expr->child(0)->castToItemExpr()))-> getColName(); if ((strcmp(key_colname, upd_colname) == 0) && (item_expr->getOperatorType() == ITM_ASSIGN) && (((Assign*)item_expr)->isUserSpecified())) { *CmpCommon::diags() << DgSqlCode(-4033) << DgColumnName(key_colname); GenExit(); } } } generator->oltOptInfo()->mayDisableOperStats(&oltOptInfo()); markAsPreCodeGenned(); return this; } // UpdateCursor::preCodeGen() RelExpr * Delete::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! GenericUpdate::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; markAsPreCodeGenned(); return this; } RelExpr * MergeDelete::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! Delete::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; markAsPreCodeGenned(); return this; } static NABoolean hasColReference(ItemExpr * ie) { if (! ie) return FALSE; if ((ie->getOperatorType() == ITM_BASECOLUMN) || (ie->getOperatorType() == ITM_INDEXCOLUMN) || (ie->getOperatorType() == ITM_REFERENCE)) return TRUE; for (Lng32 i = 0; i < ie->getArity(); i++) { if (hasColReference(ie->child(i))) return TRUE; } return FALSE; } void HbaseAccess::addReferenceFromItemExprTree(ItemExpr * ie, NABoolean addCol, NABoolean addHBF, ValueIdSet &colRefVIDset) { if (! 
ie) return; if ((ie->getOperatorType() == ITM_BASECOLUMN) || (ie->getOperatorType() == ITM_INDEXCOLUMN) || (ie->getOperatorType() == ITM_REFERENCE)) { if (addCol) colRefVIDset.insert(ie->getValueId()); return; } if (ie->getOperatorType() == ITM_HBASE_TIMESTAMP) { if (addHBF) { colRefVIDset.insert(ie->getValueId()); } return; } if (ie->getOperatorType() == ITM_HBASE_VERSION) { if (addHBF) { colRefVIDset.insert(ie->getValueId()); } return; } for (Lng32 i = 0; i < ie->getArity(); i++) { addReferenceFromItemExprTree(ie->child(i), addCol, addHBF, colRefVIDset); } return; } void HbaseAccess::addColReferenceFromVIDlist(const ValueIdList &exprList, ValueIdSet &colRefVIDset) { for (CollIndex i = 0; i < exprList.entries(); i++) { addReferenceFromItemExprTree(exprList[i].getItemExpr(), TRUE, FALSE, colRefVIDset); } } void HbaseAccess::addReferenceFromVIDset(const ValueIdSet &exprList, NABoolean addCol, NABoolean addHBF, ValueIdSet &colRefVIDset) { for (ValueId v = exprList.init(); exprList.next(v); exprList.advance(v)) { addReferenceFromItemExprTree(v.getItemExpr(), addCol, addHBF, colRefVIDset); } } void HbaseAccess::addColReferenceFromRightChildOfVIDarray(ValueIdArray &exprList, ValueIdSet &colRefVIDset) { for (CollIndex i = 0; i < exprList.entries(); i++) { addReferenceFromItemExprTree(exprList[i].getItemExpr()->child(1), TRUE, FALSE, colRefVIDset); } } static NABoolean isEqGetExpr(ItemExpr * ie, ValueId &vid, NABoolean &isConstParam, const char * colName) { NABoolean found = FALSE; isConstParam = FALSE; if (ie && ie->getOperatorType() == ITM_EQUAL) { ItemExpr * child0 = ie->child(0)->castToItemExpr(); ItemExpr * child1 = ie->child(1)->castToItemExpr(); if ((ie->child(0)->getOperatorType() == ITM_BASECOLUMN) && (((BaseColumn*)ie->child(0)->castToItemExpr())->getNAColumn()->getColName() == colName) && (NOT hasColReference(ie->child(1)))) { if (ie->child(1)->getOperatorType() == ITM_CONSTANT) { found = TRUE; vid = ie->child(1)->getValueId(); } else if (ie->child(1)->getOperatorType() == ITM_CACHE_PARAM) { found = TRUE; isConstParam = TRUE; vid = ie->child(1)->getValueId(); } } else if ((ie->child(1)->getOperatorType() == ITM_BASECOLUMN) && (((BaseColumn*)ie->child(1)->castToItemExpr())->getNAColumn()->getColName() == colName) && (NOT hasColReference(ie->child(0)))) { if (ie->child(0)->getOperatorType() == ITM_CONSTANT) { found = TRUE; vid = ie->child(0)->getValueId(); } else if (ie->child(0)->getOperatorType() == ITM_CACHE_PARAM) { found = TRUE; isConstParam = TRUE; vid = ie->child(0)->getValueId(); } } else if ((ie->child(0)->getOperatorType() == ITM_INDEXCOLUMN) && (((IndexColumn*)ie->child(0)->castToItemExpr())->getNAColumn()->getColName() == colName) && (NOT hasColReference(ie->child(1)))) { if (ie->child(1)->getOperatorType() == ITM_CONSTANT) { found = TRUE; vid = ie->child(1)->getValueId(); } else if (ie->child(1)->getOperatorType() == ITM_CACHE_PARAM) { found = TRUE; isConstParam = TRUE; vid = ie->child(1)->getValueId(); } } else if ((ie->child(1)->getOperatorType() == ITM_INDEXCOLUMN) && (((IndexColumn*)ie->child(1)->castToItemExpr())->getNAColumn()->getColName() == colName) && (NOT hasColReference(ie->child(0)))) { if (ie->child(0)->getOperatorType() == ITM_CONSTANT) { found = TRUE; vid = ie->child(0)->getValueId(); } else if (ie->child(0)->getOperatorType() == ITM_CACHE_PARAM) { found = TRUE; isConstParam = TRUE; vid = ie->child(0)->getValueId(); } } else if ((ie->child(0)->getOperatorType() == ITM_REFERENCE) && 
(((ColReference*)ie->child(0)->castToItemExpr())->getCorrNameObj().getQualifiedNameObj().getObjectName() == colName) && (NOT hasColReference(ie->child(1)))) { if (ie->child(1)->getOperatorType() == ITM_CONSTANT) { found = TRUE; vid = ie->child(1)->getValueId(); } else if (ie->child(1)->getOperatorType() == ITM_CACHE_PARAM) { found = TRUE; isConstParam = TRUE; vid = ie->child(1)->getValueId(); } } else if ((ie->child(1)->getOperatorType() == ITM_REFERENCE) && (((ColReference*)ie->child(1)->castToItemExpr())->getCorrNameObj().getQualifiedNameObj().getObjectName() == colName) && (NOT hasColReference(ie->child(0)))) { if (ie->child(0)->getOperatorType() == ITM_CONSTANT) { found = TRUE; vid = ie->child(0)->getValueId(); } else if (ie->child(0)->getOperatorType() == ITM_CACHE_PARAM) { found = TRUE; isConstParam = TRUE; vid = ie->child(0)->getValueId(); } } } return found; } RelExpr * HbaseDelete::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // if a column list is specified, make sure all column names are of valid hbase // column name format ("ColFam:ColNam") if (csl()) { for (Lng32 i = 0; i < csl()->entries(); i++) { const NAString * nas = (*csl())[i]; std::string colFam; std::string colName; if (nas) { ExFunctionHbaseColumnLookup::extractColFamilyAndName( nas->data(), -1, FALSE, colFam, colName); } if (colFam.empty()) { *CmpCommon::diags() << DgSqlCode(-1426) << DgString0(nas->data()); GenExit(); } } // for } // if if (!processConstHBaseKeys( generator, this, getSearchKey(), getIndexDesc(), executorPred(), getHbaseSearchKeys(), listOfDelUniqueRows_, listOfDelSubsetRows_)) return NULL; if (! Delete::preCodeGen(generator, externalInputs, pulledNewInputs)) return NULL; if (((getTableDesc()->getNATable()->isHbaseRowTable()) || (getTableDesc()->getNATable()->isHbaseCellTable())) && (producesOutputs())) { *CmpCommon::diags() << DgSqlCode(-1425) << DgTableName(getTableDesc()->getNATable()->getTableName(). getQualifiedNameAsAnsiString()) << DgString0("Reason: Cannot return values from an hbase insert, update or delete."); GenExit(); } NABoolean isAlignedFormat = getTableDesc()->getNATable()->isAlignedFormat(getIndexDesc()); if (producesOutputs()) { retColRefSet_ = getIndexDesc()->getIndexColumns(); } else { ValueIdSet colRefSet; // create the list of columns that need to be retrieved from hbase . // first add all columns referenced in the executor pred. 
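      // E.g. (illustrative): for DELETE FROM t WHERE b > 10 on a
      // non-aligned table, column b is referenced only by the executor
      // predicate, yet it must still be fetched from HBase so the
      // predicate can be evaluated, even though it is never returned.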
      HbaseAccess::addReferenceFromVIDset(executorPred(), TRUE, TRUE, colRefSet);

      if ((getTableDesc()->getNATable()->getExtendedQualName().getSpecialType() ==
           ExtendedQualName::INDEX_TABLE))
        {
          for (ValueId valId = executorPred().init();
               executorPred().next(valId);
               executorPred().advance(valId))
            {
              ItemExpr * ie = valId.getItemExpr();
              if (ie->getOperatorType() == ITM_EQUAL)
                {
                  BiRelat * br = (BiRelat*)ie;
                  br->setSpecialNulls(TRUE);
                }
            }
        } // index_table

      if ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
          (getTableDesc()->getNATable()->isHbaseCellTable()) ||
          isAlignedFormat)
        {
          for (Lng32 i = 0; i < getIndexDesc()->getIndexColumns().entries(); i++)
            {
              retColRefSet_.insert(getIndexDesc()->getIndexColumns()[i]);
            }
        }

      for (ValueId valId = colRefSet.init();
           colRefSet.next(valId);
           colRefSet.advance(valId))
        {
          ValueId dummyValId;
          if (NOT getGroupAttr()->getCharacteristicInputs().referencesTheGivenValue(valId, dummyValId))
            {
              if ((valId.getItemExpr()->getOperatorType() == ITM_HBASE_TIMESTAMP) ||
                  (valId.getItemExpr()->getOperatorType() == ITM_HBASE_VERSION))
                {
                  *CmpCommon::diags() << DgSqlCode(-3242)
                                      << DgString0("Illegal use of Hbase Timestamp or Hbase Version function.");
                  GenExit();
                }

              retColRefSet_.insert(valId);
            }
        }

      if (NOT ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
               (getTableDesc()->getNATable()->isHbaseCellTable()) ||
               (isAlignedFormat)))
        {
          // add all the key columns. If values are missing in hbase, then at least the key
          // value is needed to retrieve a row.
          HbaseAccess::addColReferenceFromVIDlist(getIndexDesc()->getIndexKey(), retColRefSet_);
        }
    }

  NABoolean inlinedActions = FALSE;
  if ((getInliningInfo().hasInlinedActions()) ||
      (getInliningInfo().isEffectiveGU()))
    inlinedActions = TRUE;

  NABoolean isUnique = FALSE;
  if (listOfDelSubsetRows_.entries() == 0)
    {
      if ((getSearchKey() && getSearchKey()->isUnique()) &&
          (listOfDelUniqueRows_.entries() == 0))
        isUnique = TRUE;
      else if ((NOT (getSearchKey() && getSearchKey()->isUnique())) &&
               (listOfDelUniqueRows_.entries() == 1) &&
               (listOfDelUniqueRows_[0].rowIds_.entries() == 1))
        isUnique = TRUE;
    }

  if (getInliningInfo().isIMGU())
    {
      // There is no need to do checkAndDelete for IM
      canDoCheckAndUpdel() = FALSE;
      uniqueHbaseOper() = FALSE;
      if ((generator->oltOptInfo()->multipleRowsReturned()) &&
          (CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON) &&
          (NOT generator->isRIinliningForTrafIUD()))
        uniqueRowsetHbaseOper() = TRUE;
    }
  else if (isUnique)
    {
      // If this unique delete is not part of a rowset operation,
      // don't allow it to be cancelled.
      if (!generator->oltOptInfo()->multipleRowsReturned())
        generator->setMayNotCancel(TRUE);
      uniqueHbaseOper() = TRUE;

      canDoCheckAndUpdel() = FALSE;

      if ((NOT producesOutputs()) &&
          (NOT inlinedActions) &&
          (executorPred().isEmpty()))
        {
          if ((generator->oltOptInfo()->multipleRowsReturned()) &&
              (CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON) &&
              (NOT generator->isRIinliningForTrafIUD()))
            uniqueRowsetHbaseOper() = TRUE;
          else if ((NOT generator->oltOptInfo()->multipleRowsReturned()) &&
                   (listOfDelUniqueRows_.entries() == 0))
            {
              if ((CmpCommon::getDefault(HBASE_CHECK_AND_UPDEL_OPT) == DF_ON) &&
                  (CmpCommon::getDefault(HBASE_SQL_IUD_SEMANTICS) == DF_ON) &&
                  (NOT isAlignedFormat))
                canDoCheckAndUpdel() = TRUE;
            }
        }
    }
  else if (producesOutputs())
    {
      // Cannot do olt msg opt if:
      // -- values are to be returned and unique operation is not being used.
      // set an indication that multiple rows will be returned.
      generator->oltOptInfo()->setMultipleRowsReturned(TRUE);
      generator->oltOptInfo()->setOltCliOpt(FALSE);
    }

  generator->setUpdSavepointOnError(FALSE);
  generator->setUpdPartialOnError(FALSE);

  if (CmpCommon::getDefault(TRAF_NO_DTM_XN) == DF_ON)
    noDTMxn() = TRUE;

  // if unique oper with no index maintenance and autocommit is on, then
  // do not require a transaction. Hbase guarantees single row consistency.
  Int64 transId = -1;
  if (((uniqueHbaseOper()) &&
       (NOT cursorHbaseOper()) &&
       (NOT uniqueRowsetHbaseOper()) &&
       (NOT inlinedActions) &&
       (generator->getTransMode()->getAutoCommit() == TransMode::ON_) &&
       (! NAExecTrans(0, transId)) &&
       (NOT generator->oltOptInfo()->multipleRowsReturned())) ||
      (noDTMxn()))
    {
      // no transaction needed
    }
  else
    {
      generator->setTransactionFlag(TRUE);

      if ((NOT uniqueHbaseOper()) ||
          (cursorHbaseOper()) ||
          (uniqueRowsetHbaseOper()) ||
          (inlinedActions) ||
          (generator->oltOptInfo()->multipleRowsReturned()))
        generator->setUpdAbortOnError(TRUE);
    }

  // flag for hbase tables
  generator->setHdfsAccess(TRUE);

  markAsPreCodeGenned();
  return this;
}

RelExpr * HbaseUpdate::preCodeGen(Generator * generator,
                                  const ValueIdSet & externalInputs,
                                  ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (!processConstHBaseKeys(
           generator,
           this,
           getSearchKey(),
           getIndexDesc(),
           executorPred(),
           getHbaseSearchKeys(),
           listOfUpdUniqueRows_,
           listOfUpdSubsetRows_))
    return NULL;

  //  if (! GenericUpdate::preCodeGen(generator, externalInputs, pulledNewInputs))
  //    return NULL;

  if (! UpdateCursor::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  CollIndex totalColCount = getTableDesc()->getColumnList().entries();

  NABoolean isAlignedFormat = getTableDesc()->getNATable()->isAlignedFormat(getIndexDesc());

  if (isAlignedFormat &&
      (newRecExprArray().entries() > 0) &&
      (newRecExprArray().entries() < totalColCount))
    {
      ValueIdArray holeyArray(totalColCount);

      Lng32 i;
      for (i = 0; i < newRecExprArray().entries(); i++)
        {
          ItemExpr * assign = newRecExprArray()[i].getItemExpr();
          const NAColumn *nacol = assign->child(0).getNAColumn();
          Lng32 colPos = nacol->getPosition();
          holeyArray.insertAt(colPos, assign->getValueId());
        } // for

      for (i = 0; i < totalColCount; i++)
        {
          if (! (holeyArray.used(i)))
            {
              BaseColumn * bc = (BaseColumn*)getTableDesc()->getColumnList()[i].getItemExpr();
              CMPASSERT(bc->getOperatorType() == ITM_BASECOLUMN);

              ValueId srcId = getIndexDesc()->getIndexColumns()[i];

              ItemExpr * an = new(generator->wHeap()) Assign(bc, srcId.getItemExpr(), FALSE);
              an->bindNode(generator->getBindWA());
              holeyArray.insertAt(i, an->getValueId());
            } // if
        } // for

      newRecExprArray().clear();
      newRecExprArray() = holeyArray;
    } // if aligned

  if ((isMerge()) &&
      (mergeInsertRecExpr().entries() > 0))
    {
      if ((listOfUpdSubsetRows_.entries() > 0) ||
          (getSearchKey() && (NOT getSearchKey()->isUnique())))
        {
          *CmpCommon::diags() << DgSqlCode(-3241)
                              << DgString0(" Non-unique ON clause not allowed with INSERT.");
          GenExit();
        }
    }

  if (((getTableDesc()->getNATable()->isHbaseRowTable()) ||
       (getTableDesc()->getNATable()->isHbaseCellTable())) &&
      (producesOutputs()))
    {
      *CmpCommon::diags() << DgSqlCode(-1425)
                          << DgTableName(getTableDesc()->getNATable()->getTableName().
                                         getQualifiedNameAsAnsiString())
                          << DgString0("Reason: Cannot return values from an hbase insert, update or delete.");
      GenExit();
    }

  NABoolean canDoRowsetOper = TRUE;
  NABoolean canDoCheckAndUpdate = TRUE;
  NABoolean needToGetCols = FALSE;

  if (producesOutputs())
    {
      retColRefSet_ = getIndexDesc()->getIndexColumns();
    }
  else
    {
      ValueIdSet colRefSet;

      // create the list of columns that need to be retrieved from hbase.
      // first add all columns referenced in the executor pred.
      HbaseAccess::addReferenceFromVIDset(executorPred(), TRUE, TRUE, colRefSet);

      if ((getTableDesc()->getNATable()->getExtendedQualName().getSpecialType() ==
           ExtendedQualName::INDEX_TABLE))
        {
          for (ValueId valId = executorPred().init();
               executorPred().next(valId);
               executorPred().advance(valId))
            {
              ItemExpr * ie = valId.getItemExpr();
              if (ie->getOperatorType() == ITM_EQUAL)
                {
                  BiRelat * br = (BiRelat*)ie;
                  br->setSpecialNulls(TRUE);
                }
            }
        }

      // add all columns referenced in the right side of the update expr.
      HbaseAccess::addColReferenceFromRightChildOfVIDarray(newRecExprArray(), colRefSet);

      if (isMerge())
        HbaseAccess::addReferenceFromVIDset(mergeUpdatePred(), TRUE, FALSE, colRefSet);

      if ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
          (getTableDesc()->getNATable()->isHbaseCellTable()) ||
          (isAlignedFormat))
        {
          for (Lng32 i = 0; i < getIndexDesc()->getIndexColumns().entries(); i++)
            {
              retColRefSet_.insert(getIndexDesc()->getIndexColumns()[i]);
            }
        }
      else
        {
          for (ValueId valId = colRefSet.init();
               colRefSet.next(valId);
               colRefSet.advance(valId))
            {
              ValueId dummyValId;
              if (NOT getGroupAttr()->getCharacteristicInputs().referencesTheGivenValue(valId, dummyValId))
                {
                  if ((valId.getItemExpr()->getOperatorType() == ITM_HBASE_TIMESTAMP) ||
                      (valId.getItemExpr()->getOperatorType() == ITM_HBASE_VERSION))
                    {
                      *CmpCommon::diags() << DgSqlCode(-3242)
                                          << DgString0("Illegal use of Hbase Timestamp or Hbase Version function.");
                      GenExit();
                    }

                  retColRefSet_.insert(valId);
                }
            }
        }

      if (retColRefSet_.entries() > 0)
        {
          needToGetCols = TRUE;
          canDoRowsetOper = FALSE;
          canDoCheckAndUpdate = FALSE;
        }

      // nullable and added columns in the row may be missing. That will cause
      // a row to not be returned if those are the only columns that are being
      // retrieved.
      // To make sure that a row is always returned, add the key columns. These are
      // guaranteed to be present in an hbase row.
      HbaseAccess::addColReferenceFromVIDlist(getIndexDesc()->getIndexKey(), retColRefSet_);
    }

  NABoolean inlinedActions = FALSE;
  if ((getInliningInfo().hasInlinedActions()) ||
      (getInliningInfo().isEffectiveGU()))
    inlinedActions = TRUE;

  NABoolean isUnique = FALSE;
  if (listOfUpdSubsetRows_.entries() == 0)
    {
      if ((getSearchKey() && getSearchKey()->isUnique()) &&
          (listOfUpdUniqueRows_.entries() == 0))
        isUnique = TRUE;
      else if ((NOT (getSearchKey() && getSearchKey()->isUnique())) &&
               (listOfUpdUniqueRows_.entries() == 1) &&
               (listOfUpdUniqueRows_[0].rowIds_.entries() == 1))
        isUnique = TRUE;
    }

  if (getInliningInfo().isIMGU())
    {
      // There is no need to do checkAndPut for IM
      canDoCheckAndUpdel() = FALSE;
      uniqueHbaseOper() = FALSE;
      if ((generator->oltOptInfo()->multipleRowsReturned()) &&
          (CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON) &&
          (NOT generator->isRIinliningForTrafIUD()))
        uniqueRowsetHbaseOper() = TRUE;
    }
  else if (isUnique)
    {
      // If this unique update is not part of a rowset operation,
      // don't allow it to be cancelled.
      if (!generator->oltOptInfo()->multipleRowsReturned())
        generator->setMayNotCancel(TRUE);
      uniqueHbaseOper() = TRUE;

      canDoCheckAndUpdel() = FALSE;

      if ((NOT isMerge()) &&
          (NOT producesOutputs()) &&
          (executorPred().isEmpty()) &&
          (NOT needToGetCols) &&
          (NOT inlinedActions))
        {
          if ((generator->oltOptInfo()->multipleRowsReturned()) &&
              (CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON) &&
              (NOT generator->isRIinliningForTrafIUD()))
            uniqueRowsetHbaseOper() = TRUE;
          else if ((NOT generator->oltOptInfo()->multipleRowsReturned()) &&
                   (listOfUpdUniqueRows_.entries() == 0))
            {
              if ((CmpCommon::getDefault(HBASE_CHECK_AND_UPDEL_OPT) == DF_ON) &&
                  (NOT isAlignedFormat))
                canDoCheckAndUpdel() = TRUE;
            }
        }
    }
  else if (producesOutputs())
    {
      // Cannot do olt msg opt if:
      // -- values are to be returned and unique operation is not being used.
      // set an indication that multiple rows will be returned.
      generator->oltOptInfo()->setMultipleRowsReturned(TRUE);
      generator->oltOptInfo()->setOltCliOpt(FALSE);
    }

  generator->setUpdSavepointOnError(FALSE);
  generator->setUpdPartialOnError(FALSE);

  // if seq gen metadata is being updated through an internal query
  // and we are running under a user Xn,
  // do not mark this stmt as a transactional stmt.
  // This is done so those updates can run in their own transaction and not be
  // part of the enclosing user Xn.
  // When we have support for local transactions and repeatable read, we
  // will then run this update in local transactional mode.
  if (CmpCommon::getDefault(TRAF_NO_DTM_XN) == DF_ON)
    noDTMxn() = TRUE;

  // if unique oper with no index maintenance and autocommit is on, then
  // do not require a transaction. Hbase guarantees single row consistency.
  Int64 transId = -1;
  if (((uniqueHbaseOper()) &&
       (NOT isMerge()) &&
       (NOT cursorHbaseOper()) &&
       (NOT uniqueRowsetHbaseOper()) &&
       (NOT inlinedActions) &&
       (generator->getTransMode()->getAutoCommit() == TransMode::ON_) &&
       (! NAExecTrans(0, transId)) &&
       (NOT generator->oltOptInfo()->multipleRowsReturned())) ||
      (noDTMxn()))
    {
      // no transaction needed
    }
  else
    {
      generator->setTransactionFlag(TRUE);

      if ((NOT uniqueHbaseOper()) ||
          (isMerge()) ||
          (cursorHbaseOper()) ||
          (uniqueRowsetHbaseOper()) ||
          (inlinedActions) ||
          (generator->oltOptInfo()->multipleRowsReturned()))
        generator->setUpdAbortOnError(TRUE);
    }

  // flag for hbase tables
  generator->setHdfsAccess(TRUE);

  if (getTableDesc()->getNATable()->hasLobColumn())
    {
      for (CollIndex i = 0; i < newRecExprArray().entries(); i++)
        {
          NAColumn * col =
            newRecExprArray()[i].getItemExpr()->child(0)->castToItemExpr()->
              getValueId().getNAColumn(TRUE);
          ItemExpr * val =
            newRecExprArray()[i].getItemExpr()->child(1)->castToItemExpr();

          if ((col->getType()->isLob()) &&
              (val->getOperatorType() == ITM_LOBUPDATE))
            {
              LOBupdate * lu = (LOBupdate*)val;

              lu->updatedTableObjectUID() =
                getIndexDesc()->getPrimaryTableDesc()->
                  getNATable()->objectUid().castToInt64();

              lu->updatedTableSchemaName() = "\"";
              lu->updatedTableSchemaName() +=
                getTableDesc()->getNATable()->
                  getTableName().getCatalogName();
              lu->updatedTableSchemaName().append("\".\"");
              lu->updatedTableSchemaName().
append(getTableDesc()->getNATable()-> getTableName().getSchemaName()); lu->updatedTableSchemaName() += "\""; lu->lobNum() = col->lobNum(); lu->lobStorageType() = col->lobStorageType(); lu->lobStorageLocation() = col->lobStorageLocation(); } } // for } // if markAsPreCodeGenned(); return this; } RelExpr * HiveInsert::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; generator->setHiveAccess(TRUE); return GenericUpdate::preCodeGen(generator, externalInputs, pulledNewInputs); } RelExpr * HbaseInsert::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // char. outputs are set to empty after in RelExpr::genPreCode sometimes, // after a call to resolveCharOutputs. We need to remember if a returnRow // tdb flag should be set, even if no output columns are required if (getIsTrafLoadPrep() && !getGroupAttr()->getCharacteristicOutputs().isEmpty()) setReturnRow(TRUE); if (! GenericUpdate::preCodeGen(generator, externalInputs, pulledNewInputs)) return NULL; NABoolean inlinedActions = FALSE; if ((getInliningInfo().hasInlinedActions()) || (getInliningInfo().isEffectiveGU())) inlinedActions = TRUE; if (((getTableDesc()->getNATable()->isHbaseRowTable()) || (getTableDesc()->getNATable()->isHbaseCellTable())) && (producesOutputs())) { *CmpCommon::diags() << DgSqlCode(-1425) << DgTableName(getTableDesc()->getNATable()->getTableName(). getQualifiedNameAsAnsiString()) << DgString0("Reason: Cannot return values from an hbase insert, update or delete."); GenExit(); } if ((isUpsert()) && ((getInsertType() == Insert::VSBB_INSERT_USER) || (getInsertType() == Insert::UPSERT_LOAD))) { if ((inlinedActions || producesOutputs())&& !getIsTrafLoadPrep()) setInsertType(Insert::SIMPLE_INSERT); } // if there are blob columns, use simple inserts. if ( getTableDesc()->getNATable()->hasLobColumn()) { setInsertType(Insert::SIMPLE_INSERT); NAColumnArray colArray; NAColumn *col; for (CollIndex ii = 0; ii < newRecExprArray().entries(); ii++) { ItemExpr *assignExpr = newRecExprArray()[ii].getItemExpr(); ValueId tgtValueId = assignExpr->child(0)->castToItemExpr()->getValueId(); ValueId srcValueId = assignExpr->child(1)->castToItemExpr()->getValueId(); col = tgtValueId.getNAColumn( TRUE ); ItemExpr * child1Expr = assignExpr->child(1); if (srcValueId.getType().isLob()) { LOBinsert * li = NULL; if ((child1Expr->getOperatorType() != ITM_LOBINSERT) && (child1Expr->getOperatorType() != ITM_LOBUPDATE)) { li = new(generator->wHeap()) LOBinsert(child1Expr, NULL, LOBoper::LOB_); li->insertedTableObjectUID() = getIndexDesc()->getPrimaryTableDesc()-> getNATable()->objectUid().castToInt64(); li->insertedTableSchemaName() = "\""; li->insertedTableSchemaName() += getTableDesc()->getNATable()-> getTableName().getCatalogName(); li->insertedTableSchemaName().append("\".\""); li->insertedTableSchemaName(). 
append(getTableDesc()->getNATable()-> getTableName().getSchemaName()); li->insertedTableSchemaName() += "\""; // li->lobNum() = col->getPosition(); li->lobSize() = srcValueId.getType().getPrecision(); li->lobFsType() = tgtValueId.getType().getFSDatatype(); li->lobNum() = col->lobNum(); li->lobStorageType() = col->lobStorageType(); li->lobStorageLocation() = col->lobStorageLocation(); li->bindNode(generator->getBindWA()); child1Expr = li; assignExpr->child(1) = child1Expr; } else if (child1Expr->getOperatorType() == ITM_LOBINSERT) { li = (LOBinsert*)child1Expr; li->insertedTableObjectUID() = getIndexDesc()->getPrimaryTableDesc()-> getNATable()->objectUid().castToInt64(); li->insertedTableSchemaName() = "\""; li->insertedTableSchemaName() += getTableDesc()->getNATable()-> getTableName().getCatalogName(); li->insertedTableSchemaName().append("\".\""); li->insertedTableSchemaName(). append(getTableDesc()->getNATable()-> getTableName().getSchemaName()); li->insertedTableSchemaName() += "\""; li->lobNum() = col->lobNum(); li->lobStorageType() = col->lobStorageType(); li->lobStorageLocation() = col->lobStorageLocation(); li->lobSize() = srcValueId.getType().getPrecision(); if (li->lobFsType() != tgtValueId.getType().getFSDatatype()) { // create a new LOBinsert node since fsType has changed. ItemExpr * liChild = li->child(0); ItemExpr * liChild1 = li->child(1); li = new(generator->wHeap()) LOBinsert(liChild, liChild1, li->getObj()); li->insertedTableObjectUID() = getIndexDesc()->getPrimaryTableDesc()-> getNATable()->objectUid().castToInt64(); li->insertedTableSchemaName() = "\""; li->insertedTableSchemaName() += getTableDesc()->getNATable()-> getTableName().getCatalogName(); li->insertedTableSchemaName().append("\".\""); li->insertedTableSchemaName(). append(getTableDesc()->getNATable()-> getTableName().getSchemaName()); li->insertedTableSchemaName() += "\""; li->lobSize() = srcValueId.getType().getPrecision(); li->lobFsType() = tgtValueId.getType().getFSDatatype(); li->lobNum() = col->lobNum(); li->lobStorageType() = col->lobStorageType(); li->lobStorageLocation() = col->lobStorageLocation(); li->bindNode(generator->getBindWA()); assignExpr->child(1) = li; } } // lobinsert GenAssert(li, "must have a LobInsert node"); LOBload * ll = new(generator->wHeap()) LOBload(li->child(0), li->getObj()); ll->insertedTableObjectUID() = li->insertedTableObjectUID(); ll->insertedTableSchemaName() = li->insertedTableSchemaName(); ll->lobNum() = col->lobNum(); ll->lobStorageType() = col->lobStorageType(); ll->lobStorageLocation() = col->lobStorageLocation(); ll->bindNode(generator->getBindWA()); lobLoadExpr_.insert(ll->getValueId()); } // lob } } if ((getInsertType() == Insert::SIMPLE_INSERT) && (NOT getTableDesc()->getNATable()->hasLobColumn())) uniqueHbaseOper() = TRUE; generator->setUpdSavepointOnError(FALSE); generator->setUpdPartialOnError(FALSE); if (CmpCommon::getDefault(TRAF_NO_DTM_XN) == DF_ON) noDTMxn() = TRUE; // if unique oper with no index maintenance and autocommit is on, then // do not require a transaction. Hbase guarantees single row consistency. Int64 transId = -1; if (((uniqueHbaseOper()) && (NOT uniqueRowsetHbaseOper()) && (NOT inlinedActions) && (generator->getTransMode()->getAutoCommit() == TransMode::ON_) && (! 
NAExecTrans(0, transId)) && (NOT generator->oltOptInfo()->multipleRowsReturned())) || (isNoRollback()) || ((isUpsert()) && (insertType_ == UPSERT_LOAD)) || (noDTMxn())) { // no transaction needed } else { generator->setTransactionFlag(TRUE); if ((NOT uniqueHbaseOper()) || (uniqueRowsetHbaseOper()) || (inlinedActions) || (generator->oltOptInfo()->multipleRowsReturned())) generator->setUpdAbortOnError(TRUE); } return this; } RelExpr * ExeUtilFastDelete::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; return ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs); } RelExpr * ExeUtilLobExtract::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; ValueIdSet availableValues; for (ValueId exprId = getGroupAttr()->getCharacteristicInputs().init(); getGroupAttr()->getCharacteristicInputs().next(exprId); getGroupAttr()->getCharacteristicInputs().advance(exprId) ) { if (exprId.getItemExpr()->getOperatorType() != ITM_VEG_REFERENCE) availableValues += exprId; } getGroupAttr()->setCharacteristicInputs(availableValues); getInputValuesFromParentAndChildren(availableValues); if (handle_) handle_->replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); markAsPreCodeGenned(); // Done. return this; } RelExpr * HashGroupBy::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM) { NABoolean resize = FALSE; NABoolean defrag = FALSE; ValueIdSet vidSet = child(0)->getGroupAttr()->getCharacteristicOutputs(); ExpTupleDesc::TupleDataFormat tupleFormat = determineInternalFormat( vidSet, this, resize, generator, FALSE, defrag); cacheTupleFormatAndResizeFlag(tupleFormat, resize, defrag); if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) { generator->incNCIFNodes(); } else { generator->decNCIFNodes(); } } return GroupByAgg::preCodeGen(generator, externalInputs, pulledNewInputs); } RelExpr * GroupByAgg::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Check if the pivs of this operator and its child are the same. // If they are not, make them the same. replacePivs(); generator->clearPrefixSortKey(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. getGroupAttr()->resolveCharacteristicInputs(externalInputs); // My Characteristic Inputs become the external inputs for my child. child(0) = child(0)->preCodeGen(generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); if (! 
child(0).getPtr()) return NULL; if ((getOperatorType() == REL_SHORTCUT_GROUPBY) && (getFirstNRows() == 1)) { RelExpr * firstnNode = new(generator->wHeap()) FirstN(child(0), getFirstNRows()); firstnNode->setEstRowsUsed(getEstRowsUsed()); firstnNode->setMaxCardEst(getMaxCardEst()); firstnNode->setInputCardinality(child(0)->getInputCardinality()); firstnNode->setPhysicalProperty(child(0)->getPhysicalProperty()); firstnNode->setGroupAttr(child(0)->getGroupAttr()); //10-060516-6532 -Begin //When FIRSTN node is created after optimization phase, the cost //of that node does not matter. But, display_explain and explain //show zero operator costs and rollup cost which confuses the user. //Also, the VQP crashes when cost tab for FIRSTN node is selected. //So, creating a cost object will fix this. //The operator cost is zero and the rollup cost is the same as its child's. Cost* firstnNodecost = new HEAP Cost(); firstnNode->setOperatorCost(firstnNodecost); Cost* rollupcost = (Cost *)(child(0)->getRollUpCost()); *rollupcost += *firstnNodecost; firstnNode->setRollUpCost(rollupcost); //10-060516-6532 -End firstnNode = firstnNode->preCodeGen(generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); if (! firstnNode) return NULL; setChild(0, firstnNode); } getGroupAttr()->addCharacteristicInputs(pulledNewInputs); // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); NABoolean replicatePredicates = TRUE; // Rebuild the grouping expressions tree. Use bridge values, if possible groupExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // No key predicates need to be generated here NULL, replicatePredicates, &getGroupAttr()->getCharacteristicOutputs()); // Rebuild the aggregate expressions tree aggregateExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); if (CmpCommon::getDefault(COMP_BOOL_211) == DF_ON) { ValueIdSet constantsInGroupExpr ; groupExpr().getConstantExprs(constantsInGroupExpr,FALSE); if (constantsInGroupExpr.entries() > 0) { if (constantsInGroupExpr.entries() == groupExpr().entries()) { ValueId vid ; constantsInGroupExpr.getFirst(vid); constantsInGroupExpr.remove(vid); } groupExpr() -= constantsInGroupExpr ; } } // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. getInputAndPotentialOutputValues(availableValues); selectionPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // No key predicates need to be generated here NULL, replicatePredicates); getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); // if the grouping is executed in DP2, we don't do overflow // handling. This also means that it is a partial group by. // Do not do overflow handling for any partial groupby. 
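// Note, for illustration: a partial group by may emit partially-aggregated groups when its memory fills up rather than overflowing to disk, which is why it is excluded from the BMO accounting below.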
// NABoolean isPartialGroupBy = (isAPartialGroupByNonLeaf() || isAPartialGroupByLeaf()); // The old way, only groupbys in DP2 are considered partial // if (CmpCommon::getDefault(COMP_BOOL_152) == DF_ON) { isPartialGroupBy = executeInDP2(); } if ((getOperatorType() == REL_HASHED_GROUPBY) && !isPartialGroupBy) { // Count this BMO and add its needed memory to the total needed generator->incrNumBMOs(); if ((ActiveSchemaDB()->getDefaults()).getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0) generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(TRUE)); } markAsPreCodeGenned(); // Done. return this; } // GroupByAgg::preCodeGen() RelExpr * MergeUnion::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // A temporary union (blocked union introduced for inlining after trigger) // should not get here. Should be removed in optimization phase. GenAssert(!getIsTemporary(), "Expecting this blocked union to be removed by this phase"); // Check if the pivs of this operator and its child are the same. // If they are not, make them the same. replacePivs(); // clear any prefix sort key in generator work area generator->clearPrefixSortKey(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. getGroupAttr()->resolveCharacteristicInputs(externalInputs); // Predicate pushdown causes the Characteristic Inputs and Outputs // of the union to be set precisely to those values that are // required by one of its descendants or by one of its ancestors, // respectively. However, the colMapTable_ contains all the values // that the MergeUnion is capable of producing. The colMapTable_ // is rebuilt here to contain exactly those values that appear in // the Characteristic Outputs. // // The output of the union is defined by the ValueIdUnion // expressions that are maintained in the colMapTable_. // ValueIdSet charOutputs = getGroupAttr()->getCharacteristicOutputs(); colMapTable().clear(); for (ValueId v = charOutputs.init(); charOutputs.next(v); charOutputs.advance(v)) { if (v.getItemExpr()->getOperatorType() != ITM_VALUEIDUNION) { // "other" available values besides the value being considered. ValueIdSet availableValues = charOutputs; availableValues -= v; // ------------------------------------------------------------------- // see whether the value being considered is covered by the remaining // values. that is, whether it is an expression in terms of the // other vid union's. // ------------------------------------------------------------------- ValueIdSet outputId; outputId.insert(v); outputId.removeUnCoveredExprs(availableValues); // ------------------------------------------------------------------- // v removed from outputId. that means it's not covered by remaining // vid union's. add the vid unions that v is in terms of to the colMapTable. // the node needs to produce it. Instead of producing the expression, // change the node to produce just the vid union; the expression can // be evaluated at the parent. 
// ------------------------------------------------------------------- if (outputId.isEmpty()) { #pragma nowarn(1506) // warning elimination Int32 leftIndex = getLeftMap().getTopValues().index(v); #pragma warn(1506) // warning elimination #pragma nowarn(1506) // warning elimination Int32 rightIndex = getRightMap().getTopValues().index(v); #pragma warn(1506) // warning elimination CMPASSERT((leftIndex != NULL_COLL_INDEX) && (rightIndex != NULL_COLL_INDEX)); ItemExpr *ptr = new(CmpCommon::statementHeap()) ValueIdUnion(getLeftMap().getBottomValues()[leftIndex], getRightMap().getBottomValues()[rightIndex],v); v.replaceItemExpr(ptr); colMapTable().insert(v); } } else colMapTable().insert(v); } // My Characteristic Inputs become the external inputs for my children. Lng32 nc = (Lng32)getArity(); const ValueIdSet & inputs = getGroupAttr()->getCharacteristicInputs(); for (Lng32 index = 0; index < nc; index++) { ValueIdSet pulledInputs; child(index) = child(index)->preCodeGen(generator,inputs,pulledInputs); if (child(index).getPtr() == NULL) return NULL; pulledNewInputs += pulledInputs; } // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); // Rebuild the colMapTable colMapTable().replaceVEGExpressions(availableValues,inputs); // Rebuild the sortOrder. sortOrder_.replaceVEGExpressions(availableValues,inputs); // Rebuild the merge expression if (mergeExpr_) { mergeExpr_ = mergeExpr_->replaceVEGExpressions(availableValues,inputs); //10-061219-1283: Set the second argument to TRUE to redrive type synthesis of children. mergeExpr_->synthTypeAndValueId(TRUE,TRUE); } // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. getInputAndPotentialOutputValues(availableValues); // Rebuild the selection predicate tree. selectionPred().replaceVEGExpressions(availableValues,inputs); getGroupAttr()->resolveCharacteristicOutputs(availableValues,inputs); // Rebuild the conditional expression. condExpr().replaceVEGExpressions(availableValues, getGroupAttr()->getCharacteristicInputs()); if (!getUnionForIF() && !getInliningInfo().isIMUnion()) generator->oltOptInfo()->setMultipleRowsReturned(TRUE); markAsPreCodeGenned(); return this; } // MergeUnion::preCodeGen() RelExpr * MapValueIds::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { const ValueIdList &upperValues = map_.getTopValues(); const ValueIdList &lowerValues = map_.getBottomValues(); if (nodeIsPreCodeGenned()) return this; // Check if the pivs of this operator and its child are the same. // If they are not, make them the same. replacePivs(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. getGroupAttr()->resolveCharacteristicInputs(externalInputs); // My Characteristic Inputs become the external inputs for my children. 
child(0) = child(0)->preCodeGen( generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); if (child(0).getPtr() == NULL) return NULL; getGroupAttr()->addCharacteristicInputs(pulledNewInputs); // --------------------------------------------------------------------- // The MapValueIds node describes a mapping between expressions used // by its child tree and expressions used by its parent tree. The // generator will make sure that the output values of the child tree // and the input values from the parent get passed in the correct // buffers. // --------------------------------------------------------------------- // --------------------------------------------------------------------- // Replacing VEGReferences in those mapped expressions is not possible // in all cases; we have to restrict the kind of mappings that can // be done for expressions involving VEGs. This method assumes that // references to VEGs do not get altered during the rewrite, in other // words it assumes mappings of the kind // // a) sum(VEGRef(a,b,c)) <----> VEGRef(a,b,c) // // and it disallows mappings of the kind // // b) count(VEGRef(a,b,c)) <-----> 1 // c) VEGRef(a,b,c) <-----> VEGRef(d,e,f) // // Mappings of type b) will still work, as long as the VEGRef is contained // in some other mapped expression. A possible extension is to store // in the MapValueIds node which element(s) of which VEGRef should // be replaced in this step, but this information is hard to get // during optimization, unless we are looking at a scan node. // --------------------------------------------------------------------- // --------------------------------------------------------------------- // The map contains many mappings, not all of which will have to // be evaluated by the generator. Only those values that are either // characteristic output values or are referenced by characteristic // output values will actually be mapped at execution time. Therefore // we first determine the actually needed mappings with the coverTest // method. // --------------------------------------------------------------------- GroupAttributes emptyGA; ValueIdSet coveredExpr; ValueIdSet referencedUpperValues; ValueIdMap newMap; emptyGA.setCharacteristicInputs(getGroupAttr()->getCharacteristicInputs()); emptyGA.coverTest( getGroupAttr()->getCharacteristicOutputs(), // the expressions needed upperValues, // offer the upper values as extra inputs coveredExpr, // doesn't matter which outputs are covered referencedUpperValues); // get those upper values needed by the outputs // Compute the values that are available here. ValueIdSet lowerAvailableValues; getOutputValuesOfMyChildren(lowerAvailableValues); lowerAvailableValues += getGroupAttr()->getCharacteristicInputs(); // The VEGReferences that are resolved can appear as leaves of the // expressions contained in lowerAvailableValues. These values are // required for remapping the upperValues. ValueIdSet leafValues; ValueId x; for (x = lowerAvailableValues.init(); lowerAvailableValues.next(x); lowerAvailableValues.advance(x)) x.getItemExpr()->getLeafValueIds(leafValues); lowerAvailableValues += leafValues; // upperAvailableValues is needed for mvqr. The addition of the lower // available values is only necessary to avoid an assertion failure in // VEGReference::replaceVEGReference(). 
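// For illustration (hypothetical example): if the characteristic outputs contain only sum(VEGRef(a,b,c)), the coverTest() call above reports VEGRef(a,b,c) as a referenced upper value, so only the mapping sum(VEGRef(a,b,c)) <--> VEGRef(a,b,c) is carried into the rebuilt map in the loop below; unreferenced map entries are dropped.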
ValueIdSet upperAvailableValues(valuesForVEGRewrite()); upperAvailableValues += lowerAvailableValues; // --------------------------------------------------------------------- // now walk through each needed mapping and replace wildcards in both its // upper and lower expressions // --------------------------------------------------------------------- for (CollIndex i = 0; i < upperValues.entries(); i++) { if (referencedUpperValues.contains(upperValues[i])) { ItemExpr *newUpper; ItemExpr *newLower; // This mapping is actually required, expand wild cards for it // We used to resolve the upper values using the // upperAvailableValues. Note that these available values // might not actually be available to this node. This could // sometimes cause problems if the VEGRef was resolved to the // 'wrong' value and the value is in a VEGPred above. This // would cause the VEGPred to be resolved incorrectly and // possibly drop some join predicates. // Don't need to replace the VEGgies in the upper since they // will never be codeGen'ed. Just need to replace them with // a suitable substitute. // If it is a VEG_REF, then replace it with a surrogate // (NATypeToItem) otherwise leave it as is. (Don't use the // surrogate for all upper values because there are some // MVIds that have BaseColumns in the upper values. These // MVIds are introduced by Triggers. And these BaseColumns // are used in other operators in other parts of the tree // where they are expected to always be BaseColumns. So // mapping them here will cause problems elsewhere). In any // case, all we need to do here is to get rid of the // VEGRefs. // newLower = lowerValues[i] .getItemExpr() ->replaceVEGExpressions (lowerAvailableValues, getGroupAttr()->getCharacteristicInputs()); newUpper = upperValues[i].getItemExpr(); if (upperValues[i] != lowerValues[i]) { if (newUpper->getOperatorType() == ITM_VEG_REFERENCE) { if (usedByMvqr()) // If this node is used to map the outputs of an MV added by // MVQR, upperAvailableValues has been constructed to // contain the base column a vegref should map to, so we use // that instead of a created surrogate. newUpper = newUpper->replaceVEGExpressions (upperAvailableValues, getGroupAttr()->getCharacteristicInputs()); else { NAType *mapType = upperValues[i].getType().newCopy(generator->wHeap()); // Create replacement for VEGRef // ItemExpr *mapping = new(generator->wHeap()) NATypeToItem(mapType); ValueId id = upperValues[i]; // Replace in ValueDescArray. All instances of this ID // will now map to the surrogate. // id.replaceItemExpr(mapping); newUpper = upperValues[i].getItemExpr(); } } } else { // since they are the same, make upper equal to lower.. newUpper = newLower; } // add the mapping that may have been rewritten to the new map newMap.addMapEntry(newUpper->getValueId(),newLower->getValueId()); } } // now replace the map with the recomputed mappings map_ = newMap; // The selectionPred() on a MapValueId should have been pushed down // by the optimizer. GenAssert(selectionPred().isEmpty(),"NOT selectionPred().isEmpty()"); // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. // Be thrifty. Reuse coveredExpr for gathering the input and output values. 
getInputAndPotentialOutputValues(coveredExpr); // Add the value that is being fabricated by the MapValueIds to the values // that are produced by its child and flow through the MapValueIds. lowerAvailableValues += coveredExpr; getGroupAttr()->resolveCharacteristicOutputs (lowerAvailableValues, getGroupAttr()->getCharacteristicInputs()); markAsPreCodeGenned(); return this; } // MapValueIds::preCodeGen() RelExpr * Sort::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; //else // cerr << "Possible error..." // Check if the pivs of this operator and its child are the same. // If they are not, make them the same. replacePivs(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. getGroupAttr()->resolveCharacteristicInputs(externalInputs); // if doing Partial Sorting, store partial sort key in generator work area // if the split-top node is providing this underneath, protect the order // else clear the partial sort key ValueIdList prefixSortKey = getPrefixSortKey(); generator->clearPrefixSortKey(); if (!prefixSortKey.isEmpty()) generator->setPrefixSortKey(prefixSortKey); PhysicalProperty* unPreCodeGendPP = NULL; // Protect against scan of self-referencing table partitions // completing asynchronously, thus allowing the various instances // of SORT to start returning rows before all scans are complete. // Let the PartitionAccess::preCodeGen and Exchange::preCodeGen // work together to detect this. Part of the fix for solution // 10-071204-9253. bool doCheckUnsycHalloweenScans = false; // solution 10-100310-8659 bool fixSolution8659 = false; int numUnblockedHalloweenScansBefore = generator->getUnblockedHalloweenScans(); bool needToRestoreLSH = false; bool saveLSH = generator->getPrecodeHalloweenLHSofTSJ(); // This is the pre-R2.5.1 test that triggers the check for unblocked access. // Note that it indirectly depends on COMP_BOOL_166 OFF. if (checkAccessToSelfRefTable_) doCheckUnsycHalloweenScans = true; // This is the R2.5.1 way -- see solution 10-100310-8659. if ((generator->getPrecodeHalloweenLHSofTSJ()) && (!generator->getPrecodeRHSofNJ())) { if (generator->getHalloweenSortForced()) markAsHalloweenProtection(); if (generator->preCodeGenParallelOperator() && !generator->getHalloweenESPonLHS()) { doCheckUnsycHalloweenScans = true; fixSolution8659 = true; } else { // This serial sort is enough to block the // scan of the target table. No need for further // checking. Notice this serial vs. parallel sort test // was made in NestedJoin::preCodeGen before the fix // for 10-100310-8659. doCheckUnsycHalloweenScans = false; // More for 10-100310-8659 - don't call incUnblockedHalloweenScans // below this node. generator->setPrecodeHalloweenLHSofTSJ(false); needToRestoreLSH = true; GenAssert(generator->unsyncdSortFound() == FALSE, "Unknown operator set unsyncdSortFound."); } } if (doCheckUnsycHalloweenScans) { generator->setCheckUnsyncdSort(TRUE); // Preserve a copy of the child's physical properties // as it is before preCodeGen is called for the child. // Also, in this copy of the physical properties, use // a copy of the child's partitioning function. This // will be used in case we need to insert an ESP for // halloween protection. 
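// For illustration, consider a hypothetical statement INSERT INTO T SELECT ... FROM T: without a blocking operator between the scans of T and the insert, newly inserted rows could be scanned again (the Halloween problem). A serial SORT blocks until all scans complete; parallel SORT instances do not, which is why an ESP exchange may have to be inserted below the sort (see the insertEspExchange() call near the end of this method).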
unPreCodeGendPP = new (CmpCommon::statementHeap()) PhysicalProperty(*child(0)->getPhysicalProperty(), child(0)->getPhysicalProperty()->getSortKey(), child(0)->getPhysicalProperty()->getSortOrderType(), child(0)->getPhysicalProperty()->getDp2SortOrderPartFunc(), child(0)->getPhysicalProperty()-> getPartitioningFunction()->copy() ); } if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM) { NABoolean resize = FALSE; NABoolean defrag = FALSE; // get the char outputs and not the child's ValueIdSet vidSet = getGroupAttr()->getCharacteristicOutputs(); ExpTupleDesc::TupleDataFormat tupleFormat = determineInternalFormat( vidSet, this, resize, generator, FALSE, defrag); cacheTupleFormatAndResizeFlag(tupleFormat, resize, defrag); if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) { generator->incNCIFNodes(); } else { generator->decNCIFNodes(); } } // My Characteristic Inputs become the external inputs for my child. child(0) = child(0)->preCodeGen(generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); generator->clearPrefixSortKey(); if (! child(0).getPtr()) return NULL; if (needToRestoreLSH) generator->setPrecodeHalloweenLHSofTSJ(saveLSH); getGroupAttr()->addCharacteristicInputs(pulledNewInputs); // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); // ---------------------------------------------------------------------- // Replace VEGReferences in the order by list // Bugfix: sol# 10-020909-1555/56: the last argument, if not explicitly // stated, defaults to FALSE, and causes a shallow copy of the tree. // ---------------------------------------------------------------------- sortKey_.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // default NULL, // default TRUE); // bugfix // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. getInputAndPotentialOutputValues(availableValues); getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); //Consider Sort as part of BMO memory participant if not partial sort. if (prefixSortKey.entries() == 0 || CmpCommon::getDefault(COMP_BOOL_84) == DF_ON) { if (CmpCommon::getDefault(SORT_MEMORY_QUOTA_SYSTEM) != DF_OFF) { generator->incrNumBMOs(); if ((ActiveSchemaDB()->getDefaults()).getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0) generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(TRUE)); } } markAsPreCodeGenned(); // Part of the fix for solution 10-071204-9253. // Modified for 10-100310-8659 if (doCheckUnsycHalloweenScans && generator->unsyncdSortFound()) { RelExpr *newChild = generator->insertEspExchange(child(0), unPreCodeGendPP); ((Exchange *)newChild)->markAsHalloweenProtection(); newChild = newChild->preCodeGen(generator, externalInputs, pulledNewInputs); GenAssert(newChild->getOperatorType() == REL_EXCHANGE, "Exchange eliminated despite our best efforts."); child(0) = newChild; // Now that an ESP is inserted above the scans, this sort operator // does block the scans, so we can discount them. 
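// Illustrative sketch of the resulting plan shape: before: SORT -> (parallel scans of the self-referencing table); after: SORT -> ESP exchange -> (parallel scans). With the ESP in place, the SORT returns no rows until all scan instances have completed.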
if (fixSolution8659) { generator->setUnsyncdSortFound(FALSE); generator->setUnblockedHalloweenScans( numUnblockedHalloweenScansBefore); } } return this; } // Sort::preCodeGen() RelExpr * SortFromTop::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { return Sort::preCodeGen(generator, externalInputs, pulledNewInputs); } RelExpr *ProbeCache::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Check if the pivs of this operator and its child are the same. // If they are not, make them the same. replacePivs(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. getGroupAttr()->resolveCharacteristicInputs(externalInputs); // My Characteristic Inputs become the external inputs for my child. child(0) = child(0)->preCodeGen(generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); if (! child(0).getPtr()) return NULL; // add one more value to "valuesGivenToChild_": a statement execution // count that will invalidate cache each time the statement is // re-executed. It would be incorrect to cache across // statement executions (and possibly transactions). ValueId execCount = generator->getOrAddStatementExecutionCount(); pulledNewInputs += execCount; getGroupAttr()->addCharacteristicInputs(pulledNewInputs); // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); // Rewrite the selection predicates. NABoolean replicatePredicates = TRUE; selectionPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need to generate key predicates here 0 /* no need for idempotence here */, replicatePredicates ); getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); /* TBD - maybe ProbeCache as BMO memory participant?? if(CmpCommon::getDefault(PROBE_CACHE_MEMORY_QUOTA_SYSTEM) != DF_OFF) generator->incrNumBMOs(); */ if ((ActiveSchemaDB()->getDefaults()).getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0) generator->incrNBMOsMemoryPerCPU(getEstimatedRunTimeMemoryUsage(TRUE)); markAsPreCodeGenned(); return this; } // ProbeCache::preCodeGen() RelExpr * Exchange::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Set a flag if this is a parallel extract consumer. The flag for // extract producer queries gets set earlier in RelRoot::codeGen() if (child(0)->getOperatorType() == REL_EXTRACT_SOURCE) { isExtractConsumer_ = TRUE; GenAssert(!isExtractProducer_, "One extract query cannot be both producer and consumer"); } const PhysicalProperty* sppOfChild = child(0)->getPhysicalProperty(); NABoolean PivsReplaced = FALSE; if (sppOfChild->getPlanExecutionLocation() == EXECUTE_IN_DP2) { // If this is not an ESP exchange, then check if the pivs of this op // and its child are the same. If they are not, make them the same. // We don't do this for an ESP exchange because an ESP exchange // denotes an ESP process boundary and the child's pivs // do not have to be the same as the parent and in fact should // not be the same. 
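// (pivs = partition input values: the externally supplied values that tell an operator instance which partition(s) it should work on.)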
replacePivs(); PivsReplaced = TRUE; } RelExpr *result = this; // --------------------------------------------------------------------- // copy important info from the properties into data members // --------------------------------------------------------------------- storePhysPropertiesInNode(generator->getPrefixSortKey()); // If this is a parallel extract producer query: // - do a few checks to make sure the plan is valid // - store a copy of the root's select list if (isExtractProducer_) { RelRoot *root = generator->getBindWA()->getTopRoot(); // The plan is valid if this is an ESP exchange and the number of // bottom partitions matches the number of requested streams. ComUInt32 numRequestedStreams = root->getNumExtractStreams(); ComUInt32 numBottomEsps = (ComUInt32) getBottomPartitioningFunction()->getCountOfPartitions(); if (!isEspExchange() || (numRequestedStreams != numBottomEsps)) { *CmpCommon::diags() << DgSqlCode(-7004); GenExit(); return NULL; } // Make a copy of the root's select list extractSelectList_ = new (generator->wHeap()) ValueIdList(root->compExpr()); // Do a coverage test to find values in the select list that // this operator cannot already provide ValueIdSet valuesIDontHave(*extractSelectList_); ValueIdSet coveredExpr; ValueIdSet referencedUpperValues; getGroupAttr()->coverTest(valuesIDontHave, // expressions needed externalInputs, // extra inputs coveredExpr, // covered exprs referencedUpperValues); // new values needed // Add the needed values to characteristic inputs pulledNewInputs += referencedUpperValues; getGroupAttr()->addCharacteristicInputs(referencedUpperValues); } // --------------------------------------------------------------------- // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. // --------------------------------------------------------------------- ValueIdSet saveCharInputs = getGroupAttr()->getCharacteristicInputs(); getGroupAttr()->resolveCharacteristicInputs(externalInputs); // variables that store the result of the major decisions: // // makeThisExchangeAPapa: if this is a PAPA node, then make this // node the PAPA (and add a PA below it) // eliminateThisExchange: get rid of this node either because // it represents a sole PA or because it is // a redundant ESP exchange // topPartFunc_: the partitioning function produced by // this node after we're done with preCodeGen // bottomPartFunc_: the partitioning function produced by // the child of this node // paPartFunc: the partitioning function produced by the // PA node inserted below // lppf: LogPhysPartitioningFunction of the child, // if the child has such a part. function NABoolean makeThisExchangeAPapa = FALSE; NABoolean eliminateThisExchange = FALSE; const PartitioningFunction *paPartFunc = topPartFunc_; const LogPhysPartitioningFunction *lppf = NULL; if (isDP2Exchange() AND bottomPartFunc_->isALogPhysPartitioningFunction()) { lppf = bottomPartFunc_->castToLogPhysPartitioningFunction(); if (lppf->getUsePapa() || getGroupAttr()->isEmbeddedUpdateOrDelete()) { // Will a merge of sorted streams need to be done? if (NOT sortKeyForMyOutput_.isEmpty()) { Lng32 maxPartsPerGroup; // Since a merge of sorted streams is needed, we must // ensure that there is one PA for every partition in every // process. The optimizer should already have set this up // correctly, but sometimes, due to plan stealing, the value // can be wrong. This code is really a patch for the plan // stealing problem. 
We could try to fix the plan stealing // problem, but that would adversely affect compile time. // To set the number of clients (i.e. PAs) we must cast away // the const-ness, sorry. if (topPartFunc_->isAGroupingOf(*bottomPartFunc_, &maxPartsPerGroup)) { ((LogPhysPartitioningFunction*)lppf)->setNumOfClients( maxPartsPerGroup * topPartFunc_->getCountOfPartitions()); } else { ((LogPhysPartitioningFunction*)lppf)->setNumOfClients( bottomPartFunc_->getCountOfPartitions() * topPartFunc_->getCountOfPartitions()); } } // Keep this exchange and make it the PAPA node. The PA // nodes below the PAPA will actually produce a partitioning // scheme that is identical to that of the DP2 operator below, // since the PAPA splits its requests into smaller ones that // do not span DP2 partition boundaries. makeThisExchangeAPapa = TRUE; paPartFunc = bottomPartFunc_; } } if (!PivsReplaced && isRedundant_) replacePivs(); // flag to decide whether to use the characteristic inputs or outputs // as input to the CIF determineInternalFormat function // if the child is an insert or update then we consider the chars input // otherwise we use the chars outputs NABoolean useCharInputs = FALSE; // --------------------------------------------------------------------- // If the child of this Exchange executes in DP2, then allocate a // PartitionAccess operator. It should have the same Group Attributes // as its child. // --------------------------------------------------------------------- NABoolean savedOltMsgOpt = generator->oltOptInfo()->oltMsgOpt(); NABoolean inputOltMsgOpt = generator->oltOptInfo()->oltMsgOpt(); unsigned short prevNumBMOs = generator->replaceNumBMOs(0); CostScalar prevBMOsMemoryUsage = generator->replaceBMOsMemoryUsage(0); // These are used to fix solution 10-071204-9253 and for // solution 10-100310-8659. bool needToRestoreParallel = false; NABoolean savedParallelSetting = FALSE; bool needToRestoreCheckUnsync = false; NABoolean savedCheckUnsyncdSort = FALSE; bool needToRestoreLHS = false; bool halloweenLHSofTSJ = generator->getPrecodeHalloweenLHSofTSJ(); bool needToRestoreESP = false; bool halloweenESPonLHS = generator->getHalloweenESPonLHS(); if (isEspExchange() && getBottomPartitioningFunction()->isPartitioned()) { // Tell any child NJ that its Halloween blocking operator (SORT) // is operating in parallel. savedParallelSetting = generator->preCodeGenParallelOperator(); generator->setPreCodeGenParallelOperator(TRUE); needToRestoreParallel = true; } if (isEspExchange() && halloweenLHSofTSJ) { if ( !isRedundant_ ) { // Tell any parallel SORT below that it doesn't have to check // unsyncd access. needToRestoreESP = true; generator->setHalloweenESPonLHS(true); } savedCheckUnsyncdSort = generator->checkUnsyncdSort(); if (savedCheckUnsyncdSort == TRUE) { // savedCheckUnsyncdSort tells me there is a parallel SORT above this // exchange. This ESP guarantees that all instances of the SORT will // block until all instances of this ESP finish. So tell any child // PARTITION ACCESS that its scan of a self-referencing table is sync'd. generator->setCheckUnsyncdSort(FALSE); needToRestoreCheckUnsync = true; // More for 10-100310-8659 - don't call incUnblockedHalloweenScans // below this node. halloweenLHSofTSJ = generator->setPrecodeHalloweenLHSofTSJ(false); needToRestoreLHS = true; } } else if (isEspExchange() && // this isPartitioned() condition is probably a bug, but // to be safe I am not fixing it now. 
getBottomPartitioningFunction()->isPartitioned()) { // Tell any child PARTITION ACCESS that its scan of a self-referencing // table is synchronized by an ESP exchange. That is, any blocking // SORT operator above this exchange will not get any rows until all // scans have finished. savedCheckUnsyncdSort = generator->checkUnsyncdSort(); generator->setCheckUnsyncdSort(FALSE); needToRestoreCheckUnsync = true; } if (halloweenSortIsMyChild_ && isRedundant_) { // Before eliminating itself, and before preCodeGen'ing the child // tree, this Exchange will tell its child (a Sort) that it needs to // check for unsynchronized access to the target table of a // self-referencing update. This is part of the fix for // solution 10-090310-9876. ((Sort *)(child(0).getPtr()))->doCheckAccessToSelfRefTable(); // Note for solution 10-100310-8659 -- the halloweenSortIsMyChild_ // flag will only be set when the COMP_BOOL_166 is used to revert // to pre-bugfix behavior. With the fix for 10-100310-8659, the // Sort uses the Generator's flags (precodeHalloweenLHSofTSJ and // precodeRHSofNJ) to know if it needs check access to the target // table. In other words, unless COMP_BOOL_166 is used, this // is dead code. } if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM) { NABoolean resize = FALSE; NABoolean defrag = FALSE; ValueIdSet vidSet; if (!useCharInputs) { vidSet = child(0)->getGroupAttr()->getCharacteristicOutputs(); } else { vidSet = saveCharInputs; } ExpTupleDesc::TupleDataFormat tupleFormat = determineInternalFormat( vidSet, this, resize, generator, FALSE, defrag); cacheTupleFormatAndResizeFlag(tupleFormat, resize, defrag); if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) { generator->incNCIFNodes(); } else { generator->decNCIFNodes(); } } // For HashJoin MIN/MAX optimization. If this is an ESP Exchange, // block all candidate values for min/max optimization from going // below this Exchange. Restore them upon return from // preCodeGen'ing the child. ValueIdList minMaxKeys, minVals, maxVals, willUseMinMaxKeys; if(isEspExchange()) { // Save the current values. minMaxKeys = generator->getMinMaxKeys(); minVals = generator->getMinVals(); maxVals = generator->getMaxVals(); willUseMinMaxKeys = generator->getWillUseMinMaxKeys(); // Clear the current values. generator->getMinMaxKeys().clear(); generator->getMinVals().clear(); generator->getMaxVals().clear(); generator->getWillUseMinMaxKeys().clear(); } // --------------------------------------------------------------------- // Perform preCodeGen on the child (including PA node if we created it) // --------------------------------------------------------------------- child(0) = child(0)->preCodeGen( generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); // For HashJoin MIN/MAX optimization. if(isEspExchange()) { // Restore the saved values. generator->getMinMaxKeys() = minMaxKeys; generator->getMinVals() = minVals; generator->getMaxVals() = maxVals; generator->getWillUseMinMaxKeys() = willUseMinMaxKeys; } if (needToRestoreParallel) generator->setPreCodeGenParallelOperator(savedParallelSetting); if (needToRestoreCheckUnsync) generator->setCheckUnsyncdSort(savedCheckUnsyncdSort); if (needToRestoreLHS) generator->setPrecodeHalloweenLHSofTSJ(halloweenLHSofTSJ); if (needToRestoreESP) generator->setHalloweenESPonLHS(halloweenESPonLHS); setNumBMOs( generator->replaceNumBMOs(prevNumBMOs) ); setBMOsMemoryUsage( generator->replaceBMOsMemoryUsage(prevBMOsMemoryUsage) ); if (! 
child(0).getPtr()) return NULL; generator->oltOptInfo()->setOltMsgOpt(savedOltMsgOpt); // Decide whether this Exchange should try to eliminate itself. if (child(0)->castToRelExpr()->getOperatorType() == REL_EXE_UTIL) { // No, the REL_EXE_UTIL must execute in an ESP. } else if (skipRedundancyCheck_) { // No, the ESP was inserted just to force blocking of // data from SORT instances, to help prevent Halloween // problem -- see Soln 10-071204-9253. } else { // Yes, perform the redundancy check. eliminateThisExchange = (isRedundant_ OR (isDP2Exchange() AND NOT makeThisExchangeAPapa)); } // --------------------------------------------------------------------- // Determine which partition input values need to be supplied by our // parent and which are produced by this exchange node. PA or PAPA // exchange nodes (DP2 exchange nodes) do not produce any partition // input values themselves, just ask the parent to produce the PIVs // needed by the child. ESP exchanges produce the PIVs for their bottom // partition function, and this is also true for added repartitioning // exchanges. // --------------------------------------------------------------------- if (isEspExchange()) { pulledNewInputs -= bottomPartFunc_->getPartitionInputValues(); setBottomPartitionInputValues( bottomPartFunc_->getPartitionInputValuesLayout()); } getGroupAttr()->addCharacteristicInputs(pulledNewInputs); // --------------------------------------------------------------------- // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. // --------------------------------------------------------------------- ValueIdSet availableValues; getInputAndPotentialOutputValues(availableValues); // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. // --------------------------------------------------------------------- // Rewrite the copy of the sort key which will be used for merging // rows. The VEGRef on the column being sorted may be preceded by // an InverseOrder itemExpr (in case the shortcut_grby rule has fired) // The InverseOrder itemExpr will not perform a copy of the sortKey // before replacing VEGExpressions unless replicateExpression is set // to TRUE below. This avoids inverse(VEGRef_60(T1.a = T2.a)) being // resolved to T1.a in two different exchange nodes, even though T1.a // is not available at the second exchange node. // --------------------------------------------------------------------- NABoolean replicateExpression = TRUE; sortKeyForMyOutput_.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no key predicates here 0 /* no need for idempotence here */, replicateExpression ); // --------------------------------------------------------------------- // Rewrite the partitioning expression, if the repartitioning function // contains one. A ReplicationPartitioningFunction does not contain // a partitioning expression because it uses a broadcast for // replicating rows to its consumers. 
// --------------------------------------------------------------------- if (isEspExchange()) { PartitioningFunction * rpf; // need to cast away const-ness to create partitioning expr, sorry rpf = (PartitioningFunction *) topPartFunc_; rpf->createPartitioningExpression(); rpf->preCodeGen(availableValues); } // --------------------------------------------------------------------- // For a parallel extract producer query, rewrite our copy of the // root's select list // --------------------------------------------------------------------- if (isExtractProducer_) { extractSelectList_-> replaceVEGExpressions(availableValues, getGroupAttr()->getCharacteristicInputs()); } // --------------------------------------------------------------------- // Resolve characteristic outputs. // --------------------------------------------------------------------- getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); generator->oltOptInfo()->mayDisableOperStats(&oltOptInfo()); // --------------------------------------------------------------------- // From here on we add or remove exchange nodes, but this node is // ready and does not need to be processed again should we call // preCodeGen for it again. // --------------------------------------------------------------------- markAsPreCodeGenned(); // --------------------------------------------------------------------- // Eliminate this exchange if it simply represented the PA node or // if it is redundant. Do not eliminate the exchange if it is a // parallel extract producer or consumer. // --------------------------------------------------------------------- if (isExtractProducer_ || isExtractConsumer_) eliminateThisExchange = FALSE; if (eliminateThisExchange) { result = child(0).getPtr(); // transfer the # of BMOs and their memory usages to generator as // this exchange node is to be discarded. generator->incrBMOsMemoryPerFrag(getBMOsMemoryUsage()); generator->incrNumBMOsPerFrag(getNumBMOs()); } if ((isEspExchange()) && (NOT eliminateThisExchange)) { // generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); generator->setUpdErrorOnError(FALSE); generator->compilerStatsInfo().exchangeOps()++; generator->compilerStatsInfo().dop() = (UInt16)MAXOF(generator->compilerStatsInfo().dop(), getBottomPartitioningFunction()->getCountOfPartitions()); if ( getNumBMOs() > 0 ) generator->incTotalESPs(); // If the exchange uses SeaMonster, set a flag in the generator // to indicate that some part of the query does use SeaMonster if (thisExchangeCanUseSM(generator->getBindWA())) generator->setQueryUsesSM(TRUE); } // isEspExchange() && !eliminateThisExchange if ((ActiveSchemaDB()->getDefaults()).getAsDouble(EXE_MEMORY_LIMIT_PER_CPU) > 0) generator->incrNBMOsMemoryPerCPU(getEstimatedRunTimeMemoryUsage(TRUE)); return result; } // Exchange::preCodeGen() RelExpr * Tuple::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. 
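// A Tuple is a leaf operator with no children, so the only values available for rewriting the VEG expressions are the characteristic inputs supplied by the parent.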
ValueIdSet availableValues = getGroupAttr()->getCharacteristicInputs(); tupleExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); return this; } ItemExpr * BuiltinFunction::preCodeGen(Generator * generator) { ItemExpr * retExpr = NULL; if (nodeIsPreCodeGenned()) return getReplacementExpr(); if (CmpCommon::getDefault(OR_PRED_KEEP_CAST_VC_UCS2) == DF_ON) { // part of temporary workaround to yotta dp2 killer problem: // keep cast for upper(cast name as varchar(n) char set ucs2) switch (getOperatorType()) { case ITM_UPPER: case ITM_LOWER: case ITM_SUBSTR: case ITM_TRIM: if (child(0)->getOperatorType() == ITM_CAST) { Cast *arg = (Cast*)child(0)->castToItemExpr(); const NAType& typ = arg->getValueId().getType(); if (arg->matchChildType() && arg->child(0)->getValueId().getType() == typ && typ.getTypeQualifier() == NA_CHARACTER_TYPE && typ.isVaryingLen() && ((CharType*)(&typ))->getCharSet() == CharInfo::UCS2) { // don't skip codegen for the cast of // "upper(cast name as varchar(n) char set ucs2) IN <inlist>" arg->setMatchChildType(FALSE); } } } } if (! ItemExpr::preCodeGen(generator)) return NULL; switch (getOperatorType()) { case ITM_QUERYID_EXTRACT: { // convert arguments to ISO88591 character set if (child(0)->castToItemExpr()->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &typ0 = (const CharType &) (child(0)->castToItemExpr()->getValueId().getType()); if (typ0.getCharSet() != CharInfo::ISO88591) { // the executor method assumes an ASCII string for the query id, so // convert the value to a fixed char type in the ISO88591 char set SQLChar * newTyp0 = new(generator->wHeap()) SQLChar(typ0.getCharLimitInUCS2or4chars(), typ0.supportsSQLnullLogical(), typ0.isUpshifted(), typ0.isCaseinsensitive(), typ0.isVaryingLen(), CharInfo::ISO88591); child(0) = new (generator->wHeap()) Cast(child(0), newTyp0); child(0)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); } } if (child(1)->castToItemExpr()->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &typ1 = (const CharType &) (child(1)->castToItemExpr()->getValueId().getType()); if (typ1.getCharSet() != CharInfo::ISO88591) { // the executor method assumes an ASCII string for the query id, so // convert the value to a fixed char type in the ISO88591 char set SQLChar * newTyp1 = new(generator->wHeap()) SQLChar(typ1.getCharLimitInUCS2or4chars(), typ1.supportsSQLnullLogical(), typ1.isUpshifted(), typ1.isCaseinsensitive(), typ1.isVaryingLen(), CharInfo::ISO88591); child(1) = new (generator->wHeap()) Cast(child(1), newTyp1); child(1)->bindNode(generator->getBindWA()); child(1) = child(1)->preCodeGen(generator); } } } retExpr = this; break; default: { retExpr = this; } break; } // switch setReplacementExpr(retExpr); markAsPreCodeGenned(); return retExpr; } /* ItemExpr * Abs::preCodeGen(Generator * generator) { // The ABS function has the distinction of being the sole BuiltinFunction // that a) generates a new replacementExpr tree // and b) can appear in the select-list (compExpr). // // What happens is that code is generated for the ABS replacement CASE // TWICE, once in PartitionAccess eid, once in RelRoot generateOutputExpr: // the latter fails with a GenMapTable assert failing to find info for // the column in "SELECT ABS(col) FROM t;" // ("SELECT ABS(-1) FROM t;" and "SELECT ABS(col),col FROM T;" work fine -- // but of course they generate twice as much code as necessary, // however harmless/idempotent it may be...) 
// // We therefore cannot handle this one discrepant case neatly in // preCodeGen/codeGen -- it is fixed instead by having the Binder // upstream rewrite an ABS as the equivalent CASE. // // Genesis 10-980112-5942. // GenAssert(FALSE, "Abs::preCodeGen should be unreachable code!"); return NULL; //if (nodeIsPreCodeGenned()) // return getReplacementExpr(); // //ItemExpr * newExpr = // generator->getExpGenerator()->createExprTree( // "CASE WHEN @A1 < 0 THEN - @A1 ELSE @A1 END", 0, 1, child(0)); // //newExpr->bindNode(generator->getBindWA()); //setReplacementExpr(newExpr->preCodeGen(generator)); //markAsPreCodeGenned(); //return getReplacementExpr(); } */ ItemExpr * Abs::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); if (! ItemExpr::preCodeGen(generator)) return NULL; NAType * result_type = (NAType *)(&(getValueId().getType())); NAType * type_op1 = (NAType *)(&(child(0)->castToItemExpr()->getValueId().getType())); if (! (*result_type == *type_op1)) { // Insert a cast node to convert child to a result type. child(0) = new (generator->wHeap()) Cast(child(0), result_type); child(0)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; } markAsPreCodeGenned(); return this; } // Abs::preCodeGen() ItemExpr * AggrMinMax::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); if (! ItemExpr::preCodeGen(generator)) return NULL; // if my child's attributes EXCEPT for nullability are not the // same as mine, do a conversion. NABoolean doConversion = FALSE; const NAType &myType = getValueId().getType(); const NAType &childType = child(0)->castToItemExpr()->getValueId().getType(); if (NOT (myType == childType)) // something is different { if ((myType.supportsSQLnull() && childType.supportsSQLnull()) || ((NOT myType.supportsSQLnull()) && (NOT childType.supportsSQLnull()))) doConversion = TRUE; // both nullable or not nullable, // something else is different else if (myType.supportsSQLnull() && NOT childType.supportsSQLnull()) { // create a new my type with the same null attr as child. NAType * newType = myType.newCopy(generator->wHeap()); newType->resetSQLnullFlag(); if (NOT(*newType == childType)) doConversion = TRUE; delete newType; } else { // Fix for solution ID 10-031121-1505 // I don't think the following assert is correct. // During VEG resolution a MIN/MAX() function can have a // NON-NULLABLE child replaced by a nullable child, consider // as an example the following query where i2 is not null: // // SELECT MIN(T0.i2) // FROM D12 T0 // WHERE // ?pa2 = T0.i2 // GROUP BY T0.i1; // // In the above case i2 will be replaced by ?pa2 when the VEG // (i2, ?pa2) is resolved. Therefore it is possible to get a // nullable child for a non-nullable aggregate. In the above // case the aggregate is non-nullable because i2 is non-nullable. // In such a case MIN(?pa2) would never be executed if ?pa2 is NULL // because predicate '?pa2 = T0.i2' will not select any rows when // ?pa2 is NULL (I am not sure how a parameter is set to NULL, for host // vars we can use the NULL indicator, not sure how we pass in NULL using // parameters). // // Assert on the following condition // The condition where I am not nullable and my child is nullable, // is an error case. //GenAssert(0, "AggrMinMax::preCodeGen::Should not reach here."); doConversion = TRUE; } } if (doConversion) { // Insert a cast node to convert child to a result type. 
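// For illustration, continuing the example above: with i2 NOT NULL and the VEG (i2, ?pa2) resolved to the nullable ?pa2, MIN(?pa2) is given a Cast here back to the aggregate's original, non-nullable type.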
child(0) = new (generator->wHeap()) Cast(child(0), &myType); child(0)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; } markAsPreCodeGenned(); return this; } // AggrMinMax::preCodeGen() ItemExpr * Between::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); // transform "A BETWEEN B AND C" to "A >= B AND A <= C" ItemExpr * newExpr = generator->getExpGenerator()->createExprTree( "@A1 >= @A2 AND @A1 <= @A3", 0, 3, child(0), child(1), child(2)); newExpr->bindNode(generator->getBindWA()); setReplacementExpr(newExpr->preCodeGen(generator)); markAsPreCodeGenned(); return getReplacementExpr(); } // BiArithCount::preCodeGen // // The BiArithCount executor clause requires that all of the operands // be of the same type. preCodeGen introduces cast operators on the // input operands if necessary to enforce this requirement. // ItemExpr * BiArithCount::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // Get a local handle on common generator objects. // CollHeap *wHeap = generator->wHeap(); const NAType &resultType = getValueId().getType(); const NAType &op1Type = child(0)->castToItemExpr()->getValueId().getType(); const NAType &op2Type = child(1)->castToItemExpr()->getValueId().getType(); // If the first operand type does not match that of the result, // cast it to the result type. // if(!(op1Type == resultType)) { child(0) = new(wHeap) Cast(child(0)->castToItemExpr(), resultType.newCopy(wHeap), ITM_CAST); child(0)->synthTypeAndValueId(); } // Ditto for the second operand. // if(!(op2Type == resultType)) { child(1) = new(wHeap) Cast(child(1)->castToItemExpr(), resultType.newCopy(wHeap), ITM_CAST); child(1)->synthTypeAndValueId(); } return BiArith::preCodeGen(generator); } // BiArithSum::preCodeGen // // The BiArithSum executor clause requires that all of the operands // be of the same type. preCodeGen introduces cast operators on the // input operands if necessary to enforce this requirement. // ItemExpr * BiArithSum::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // Get a local handle on common generator objects. // CollHeap *wHeap = generator->wHeap(); // Get a handle on the operand types. // const NAType &resultType = getValueId().getType(); const NAType &op1Type = child(0)->castToItemExpr()->getValueId().getType(); const NAType &op2Type = child(1)->castToItemExpr()->getValueId().getType(); // If the first operand type does not match that of the result, // cast it to the result type. // if(!(op1Type == resultType)) { child(0) = new(wHeap) Cast(child(0)->castToItemExpr(), resultType.newCopy(wHeap), ITM_CAST); child(0)->synthTypeAndValueId(); } // Ditto for the second operand. // if(!(op2Type == resultType)) { child(1) = new(wHeap) Cast(child(1)->castToItemExpr(), resultType.newCopy(wHeap), ITM_CAST); child(1)->synthTypeAndValueId(); } ItemExpr *result = BiArith::preCodeGen(generator); if (! result) return NULL; ItemExpr *outExpr = NULL; Lng32 rc = generator->getExpGenerator()->foldConstants(child(0), &outExpr); if ((rc == 0) && (outExpr)) { child(0) = outExpr->preCodeGen(generator); } rc = generator->getExpGenerator()->foldConstants(child(1), &outExpr); if ((rc == 0) && (outExpr)) { child(1) = outExpr->preCodeGen(generator); } return this; } ItemExpr * BiArith::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; if (! 
ItemExpr::preCodeGen(generator)) return NULL; NAType * result_type = (NAType *)(&(getValueId().getType())); NAType * type_op1 = (NAType *)(&(child(0)->castToItemExpr()->getValueId().getType())); NAType * type_op2 = (NAType *)(&(child(1)->castToItemExpr()->getValueId().getType())); if (result_type->isComplexType()) { if ((getOperatorType() == ITM_PLUS) || (getOperatorType() == ITM_MINUS)) { child(0) = generator->getExpGenerator()->matchScales(child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(), *result_type); } else if (getOperatorType() == ITM_DIVIDE) { // before doing the division, the numerator has to be upscaled. // Let's find out how much. // NS = numerator scale // DS = denominator scale // RS = result scale // Upscale = (RS - NS) + DS // Newscale = NS + Upscale = RS + DS Lng32 newscale = ((NumericType *)result_type)->getScale() + ((NumericType *)type_op2)->getScale(); if (newscale != ((NumericType *)type_op1)->getScale()) { NAType * new_type = result_type->newCopy(generator->wHeap()); ((NumericType *)new_type)->setScale(newscale); child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), *new_type); } } type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if (result_type->getFSDatatype() == type_op1->getFSDatatype()) { if (((getOperatorType() == ITM_PLUS) || (getOperatorType() == ITM_MINUS) || (getOperatorType() == ITM_DIVIDE)) && (result_type->getNominalSize() != type_op1->getNominalSize())) { child(0) = new(generator->wHeap()) Cast(child(0), result_type); } } else { if ((getOperatorType() == ITM_PLUS) || (getOperatorType() == ITM_MINUS) || (getOperatorType() == ITM_DIVIDE)) { child(0) = new(generator->wHeap()) Cast(child(0), result_type); } else { child(0) = new(generator->wHeap()) Cast(child(0), result_type->synthesizeType(SYNTH_RULE_PASS_THRU_NUM, *type_op1, *result_type, generator->wHeap())); } } if (result_type->getFSDatatype() == type_op2->getFSDatatype()) { if (((getOperatorType() == ITM_PLUS) || (getOperatorType() == ITM_MINUS)) && (result_type->getNominalSize() != type_op2->getNominalSize())) { child(1) = new(generator->wHeap()) Cast(child(1), result_type); } } else { if ((getOperatorType() == ITM_PLUS) || (getOperatorType() == ITM_MINUS)) { child(1) = new(generator->wHeap()) Cast(child(1), result_type); } else { child(1) = new(generator->wHeap()) Cast(child(1), result_type->synthesizeType(SYNTH_RULE_PASS_THRU_NUM, *type_op2, *result_type, generator->wHeap())); } } child(0)->bindNode(generator->getBindWA()); child(1)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // following is for simple types. SimpleType * attr_result = (SimpleType *) (ExpGenerator::convertNATypeToAttributes (getValueId().getType(), generator->wHeap())); SimpleType * attr_op1 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes (child(0)->getValueId().getType(), generator->wHeap())); SimpleType * attr_op2 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes (child(1)->getValueId().getType(), generator->wHeap())); // see if conversion needed before arithmetic operation could be done.
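// Worked example of the divide upscaling rule above (values chosen for
// illustration only): dividing a scale-2 numerator by a scale-3
// denominator with a result scale of 4 gives
// Newscale = RS + DS = 4 + 3 = 7, so the numerator is upscaled from
// scale 2 to scale 7 before the divide; the scale-7 dividend divided by
// the scale-3 divisor then yields the required scale-4 quotient.
// The same rule is applied again below for simple (non-complex) types.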
Int32 matchScale = 0; if (result_type->getTypeQualifier() == NA_NUMERIC_TYPE) { // match scales if ((getOperatorType() == ITM_PLUS) || (getOperatorType() == ITM_MINUS)) { child(0) = generator->getExpGenerator()->matchScales(child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(), *result_type); } else if (getOperatorType() == ITM_DIVIDE) { // before doing the division, the numerator has to be upscaled. // Let's find out how much. // NS = numerator scale // DS = denominator scale // RS = result scale // Upscale = (RS - NS) + DS // Newscale = NS + Upscale = RS + DS Lng32 newscale = ((NumericType *)result_type)->getScale() + ((NumericType *)type_op2)->getScale(); if (newscale != ((NumericType *)type_op1)->getScale()) { NAType * new_type = result_type->newCopy(generator->wHeap()); ((NumericType *)new_type)->setScale(newscale); child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), *new_type); matchScale = 1; } } } else if (result_type->getTypeQualifier() == NA_INTERVAL_TYPE) { switch (getOperatorType()) { case ITM_PLUS: case ITM_MINUS: if (type_op1->getTypeQualifier() == NA_DATETIME_TYPE) { #pragma nowarn(1506) // warning elimination Lng32 fp1 = ((DatetimeType *) type_op1)->getFractionPrecision(); #pragma warn(1506) // warning elimination #pragma nowarn(1506) // warning elimination Lng32 fp2 = ((DatetimeType *) type_op2)->getFractionPrecision(); #pragma warn(1506) // warning elimination if (fp1 < fp2) { child(0) = new(generator->wHeap()) Cast(child(0), type_op2); child(0)->bindNode(generator->getBindWA()); } else if (fp1 > fp2) { child(1) = new(generator->wHeap()) Cast(child(1), type_op1); child(1)->bindNode(generator->getBindWA()); } } else { child(0) = generator->getExpGenerator()->matchIntervalEndFields( child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchIntervalEndFields( child(1)->getValueId(), *result_type); child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchScales( child(1)->getValueId(), *result_type); type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if (result_type->getNominalSize() != type_op1->getNominalSize()) { child(0) = new(generator->wHeap()) Cast(child(0), result_type); child(0)->bindNode(generator->getBindWA()); } if (result_type->getNominalSize() != type_op2->getNominalSize()) { child(1) = new(generator->wHeap()) Cast(child(1), result_type); child(1)->bindNode(generator->getBindWA()); } } break; case ITM_TIMES: { // // Unfortunately, the multiply node may be the root ItemExpr node, and // we can't change the root ItemExpr node since its ValueId has already // been stored away in the parent RelExpr's ValueIdLists. We'll have to // move the expression down, e.g.
// // * <-- same root --> * // / \ / \ // I N becomes I 1 // | // * // / \ // N N // | // I // if (type_op1->getTypeQualifier() == NA_INTERVAL_TYPE) child(0) = generator->getExpGenerator()->convertIntervalToNumeric( child(0)->getValueId()); else child(1) = generator->getExpGenerator()->convertIntervalToNumeric( child(1)->getValueId()); char str[20]; strcpy(str, "@A1 * @A2"); child(0) = generator->getExpGenerator()->createExprTree(str, 0, 2, child(0), child(1)); child(0)->bindNode(generator->getBindWA()); child(0) = generator->getExpGenerator()->convertNumericToInterval( child(0)->getValueId(), *result_type); strcpy(str, "1"); child(1) = generator->getExpGenerator()->createExprTree(str, CharInfo::ISO88591); child(1)->bindNode(generator->getBindWA()); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if ((result_type->getNominalSize() != type_op2->getNominalSize()) || (type_op2->getFSDatatype() != REC_BIN16_SIGNED)) { IntervalType *interval = (IntervalType *) result_type; const Int16 DisAmbiguate = 0; child(1) = new(generator->wHeap()) Cast(child(1), new(generator->wHeap()) SQLNumeric(TRUE, /* signed */ #pragma nowarn(1506) // warning elimination interval->getTotalPrecision(), 0, DisAmbiguate, // added for 64bit proj. interval->supportsSQLnull())); #pragma warn(1506) // warning elimination child(1)->bindNode(generator->getBindWA()); } break; } case ITM_DIVIDE: { // // Unfortunately, the divide node may be the root ItemExpr node, and // we can't change the root ItemExpr node since its ValueId has already // been stored away in the parent RelExpr's ValueIdLists. We'll have to // move the expression down, e.g. // // div <-- same root --> div // / \ / \ // I N becomes I 1 // | // div // / \ // N N // | // I // child(0) = generator->getExpGenerator()->convertIntervalToNumeric( child(0)->getValueId()); char str[20]; strcpy(str, "@A1 / @A2"); child(0) = generator->getExpGenerator()->createExprTree(str, 0, 2, child(0), child(1)); child(0)->bindNode(generator->getBindWA()); child(0) = generator->getExpGenerator()->convertNumericToInterval( child(0)->getValueId(), *result_type); strcpy(str, "1"); child(1) = generator->getExpGenerator()->createExprTree(str, CharInfo::ISO88591); child(1)->bindNode(generator->getBindWA()); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if ((result_type->getNominalSize() != type_op2->getNominalSize()) || (type_op2->getFSDatatype() != REC_BIN16_SIGNED)) { IntervalType *interval = (IntervalType *) result_type; const Int16 DisAmbiguate = 0; child(1) = new(generator->wHeap()) Cast(child(1), new(generator->wHeap()) SQLNumeric(TRUE, /* signed */ #pragma nowarn(1506) // warning elimination interval->getTotalPrecision(), 0, DisAmbiguate, // added for 64bit proj. 
interval->supportsSQLnull())); #pragma warn(1506) // warning elimination child(1)->bindNode(generator->getBindWA()); } break; } default: break; } } else if (result_type->getTypeQualifier() == NA_DATETIME_TYPE) { switch (getOperatorType()) { case ITM_PLUS: case ITM_MINUS: { if ((type_op1->getTypeQualifier() == NA_INTERVAL_TYPE) && (((IntervalType*) type_op1)->getEndField() == REC_DATE_SECOND)) { #pragma nowarn(1506) // warning elimination Lng32 sourceScale = ((IntervalType *) type_op1)->getFractionPrecision(); Lng32 targetScale = ((DatetimeType *) type_op2)->getFractionPrecision(); #pragma warn(1506) // warning elimination child(0) = generator->getExpGenerator()->scaleBy10x( child(0)->getValueId(), targetScale - sourceScale); } else if ((type_op2->getTypeQualifier() == NA_INTERVAL_TYPE) && (((IntervalType*) type_op2)->getEndField() == REC_DATE_SECOND)) { #pragma nowarn(1506) // warning elimination Lng32 targetScale = ((DatetimeType *) type_op1)->getFractionPrecision(); Lng32 sourceScale = ((IntervalType *) type_op2)->getFractionPrecision(); #pragma warn(1506) // warning elimination child(1) = generator->getExpGenerator()->scaleBy10x( child(1)->getValueId(), targetScale - sourceScale); } // Extend the datetime to contain a YEAR field if needed. The // value will need to be extended if it contains a DAY field but // does not already contain a YEAR field. This is necessary // since with the introduction of non-standard SQL/MP datetime // types, it is possible to have a datetime value which has a // DAY field but not a YEAR or not a MONTH field. In this // situation, it is not possible to define a meaningful way to // do the operation. Does the DAY field wrap at 30, 31, 28, or // 29? So to make this operation meaningful, the value is // extended to the current timestamp. // if (type_op1->getTypeQualifier() == NA_DATETIME_TYPE) { if(((DatetimeType *) type_op1)->containsField(REC_DATE_DAY) && ! ((DatetimeType *) type_op1)->containsField(REC_DATE_YEAR)) { // Need to extend the given datetime value in order to be // able to do the operation. Extend the value out to the // YEAR field. // DatetimeType *extendedType = DatetimeType::constructSubtype(type_op1->supportsSQLnull(), REC_DATE_YEAR, ((DatetimeType *)type_op1)->getEndField(), ((DatetimeType *)type_op1)->getFractionPrecision(), generator->wHeap()); // Cast the given value to the extended type. // child(0) = new (generator->wHeap()) Cast(child(0), extendedType); child(0)->bindNode(generator->getBindWA()); } } else { if(((DatetimeType *) type_op2)->containsField(REC_DATE_DAY) && ! ((DatetimeType *) type_op2)->containsField(REC_DATE_YEAR)) { // Need to extend the given datetime value in order to be // able to do the operation. Extend the value out to the // YEAR field. // DatetimeType *extendedType = DatetimeType::constructSubtype(type_op2->supportsSQLnull(), REC_DATE_YEAR, ((DatetimeType *)type_op2)->getEndField(), ((DatetimeType *)type_op2)->getFractionPrecision(), generator->wHeap()); // Cast the given value to the extended type. // child(1) = new (generator->wHeap()) Cast(child(1), extendedType); child(1)->bindNode(generator->getBindWA()); } } break; } default: break; } } // NABoolean convertRoundedDivResult = FALSE; // If this arith operation is supported at runtime, then no // conversion is needed. Done for result numeric type only. if (result_type->getTypeQualifier() == NA_NUMERIC_TYPE) { child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; child(1) = child(1)->preCodeGen(generator); if (!
child(1).getPtr()) return NULL; attr_result = (SimpleType *) (ExpGenerator::convertNATypeToAttributes (getValueId().getType(), generator->wHeap())); attr_op1 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes (child(0)->getValueId().getType(), generator->wHeap())); attr_op2 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes (child(1)->getValueId().getType(), generator->wHeap())); ex_arith_clause temp_clause(getOperatorType(), NULL, NULL, getRoundingMode(), getDivToDownscale()); if (temp_clause.isArithSupported(getOperatorType(), attr_op1, attr_op2, attr_result )) { markAsPreCodeGenned(); return this; } } // if the datatype or lengths of child and this don't match, then // conversion is needed. type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if ((result_type->getTypeQualifier() != NA_INTERVAL_TYPE) && (result_type->getTypeQualifier() != NA_DATETIME_TYPE) && ((result_type->getFSDatatype() != type_op1->getFSDatatype()) || (result_type->getNominalSize() != type_op1->getNominalSize()))) { // If the result type is not a float, make sure that the following // Cast does not scale (for floats we have to do scaling). This is // done by using the result type but changing the scale to the scale // of the operand. NAType * new_type = result_type->newCopy(generator->wHeap()); if ((result_type->getFSDatatype() < REC_MIN_FLOAT) || (result_type->getFSDatatype() > REC_MAX_FLOAT)) { ((NumericType *)new_type)-> setScale(((NumericType *)type_op1)->getScale()); }; child(0) = new(generator->wHeap()) Cast(child(0), new_type, ITM_CAST, FALSE); } if ((result_type->getTypeQualifier() != NA_INTERVAL_TYPE) && (result_type->getTypeQualifier() != NA_DATETIME_TYPE) && ((result_type->getFSDatatype() != type_op2->getFSDatatype()) || (result_type->getNominalSize() != type_op2->getNominalSize()))) { NAType * new_type = result_type->newCopy(generator->wHeap()); if ((result_type->getFSDatatype() < REC_MIN_FLOAT) || (result_type->getFSDatatype() > REC_MAX_FLOAT) || matchScale) { ((NumericType *)new_type)-> setScale(((NumericType *)type_op2)->getScale()); }; child(1) = new(generator->wHeap()) Cast(child(1), new_type, ITM_CAST, FALSE); } child(0)->bindNode(generator->getBindWA()); child(1)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // BiArith::preCodeGen() ItemExpr * BiLogic::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; if (! ItemExpr::preCodeGen(generator)) return NULL; ItemExpr *result = this; ItemExpr *INlhs = NULL; if (CmpCommon::getDefault(OR_PRED_ADD_BLOCK_TO_IN_LIST) == DF_ON && createdFromINlist() && (INlhs=getINlhs())!=NULL) { // ItmBlockFunction serves like the "C/C++ comma" expression that // 1) evaluates its 1st operand, then its 2nd operand, and // 2) returns its 2nd operand as value of that expression. // ItmBlockFunction also has the codegen property that // its 1st operand is evaluated (codegen'ed) only once // even if 1st operand occurs multiple times in 2nd operand. // So, given "UPPER(n) IN ('a', 'b')" that has been converted to // ItmBlockFunction // / \ // U OR // / \ // = = // / \ / \ // U a U b // "UPPER(n)", represented as U, is evaluated once even if // it's used multiple times in the OR expression.
// Trying to add ItmBlockFunction early in the parser (i.e., in // sqlparseraux.cpp convertINvaluesToOR()) causes a lot of grief // especially in cardinality estimation code. So, we resort to // doing it late, here in precodegen. result = new(generator->wHeap()) ItmBlockFunction(INlhs, result); result->synthTypeAndValueId(); result->markAsPreCodeGenned(); return result; } markAsPreCodeGenned(); return result; } ItemExpr * BiRelat::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // transform multivalue predicates to single-value comparisons. ItemExpr * newNode = transformMultiValuePredicate(); if (newNode) { #ifdef _DEBUG // NAString unp; // unparse(unp); // cerr << "BiRelat::preCodeGen - " << unp << " needed to be transformed!" // << endl; // I don't think we should ever have an untransformed MVP at this stage! #endif // transformMultiValuePredicate() cannot do synthTypeAndValue() // because it is also called from the normalizer in places // where it needs to postpone it. newNode->synthTypeAndValueId(); return newNode->preCodeGen(generator); } if (! ItemExpr::preCodeGen(generator)) return NULL; NAType * type_op1 = (NAType *)(&(child(0)->getValueId().getType())); NAType * type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if ((type_op1->isComplexType()) || (type_op2->isComplexType())) { // find the 'super' type const NAType *result_type = type_op1->synthesizeType(SYNTH_RULE_UNION, *type_op1, *type_op2, generator->wHeap()); CMPASSERT(result_type); if (result_type->getTypeQualifier() == NA_NUMERIC_TYPE) { // match scales child(0) = generator->getExpGenerator()->matchScales(child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(), *result_type); } type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if ((result_type->getFSDatatype() != type_op1->getFSDatatype()) || (result_type->getNominalSize() != type_op1->getNominalSize())) { child(0) = new(generator->wHeap()) Cast(child(0), result_type); } if ((result_type->getFSDatatype() != type_op2->getFSDatatype()) || (result_type->getNominalSize() != type_op2->getNominalSize())) { child(1) = new(generator->wHeap()) Cast(child(1), result_type); } child(0)->bindNode(generator->getBindWA()); child(1)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; markAsPreCodeGenned(); return this; } const NAType &type1A = child(0)->castToItemExpr()->getValueId().getType(); const NAType &type2A = child(1)->castToItemExpr()->getValueId().getType(); if ((type1A.getTypeQualifier() == NA_CHARACTER_TYPE) && (type2A.getTypeQualifier() == NA_CHARACTER_TYPE)) { const CharType &cType1A = (CharType&)type1A; const CharType &cType2A = (CharType&)type2A; CharInfo::Collation cType1A_coll = cType1A.getCollation(); CharInfo::Collation cType2A_coll = cType2A.getCollation(); // // When Implicit Casting And Translation feature is enabled, it is // possible for the binder to allow a comparison between an ISO88591-type // value and a UCS2-type value to be passed through to the generator. // If that happens, we throw in a Translate node at this point.
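// Illustrative example (assumed scenario): comparing an ISO88591 column
// with a UNICODE value reaches this point heterogeneous; the code below
// picks the operand to translate (here the ISO88591 side) and wraps it
// in a Translate node, so both operands arrive at the comparison clause
// in the same character set.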
// CharInfo::CharSet cType1A_CS = cType1A.getCharSet() ; CharInfo::CharSet cType2A_CS = cType2A.getCharSet() ; if ( ( cType1A_CS != cType2A_CS ) && ( cType1A_CS != CharInfo::UnknownCharSet ) && ( cType2A_CS != CharInfo::UnknownCharSet ) ) { Int32 chld_to_trans = 0; if ( cType1A_CS != CharInfo::ISO88591 ) { if ( (cType1A_CS == CharInfo::UNICODE) ) chld_to_trans = 1; if ( (cType1A_CS == CharInfo::UTF8) && (cType2A_CS != CharInfo::UNICODE) ) chld_to_trans = 1; if ( (cType1A_CS == CharInfo::SJIS) && (cType2A_CS == CharInfo::ISO88591) ) chld_to_trans = 1; } Int32 tran_type = Translate::UNKNOWN_TRANSLATION; if ( chld_to_trans == 0 ) tran_type = find_translate_type( cType1A_CS, cType2A_CS ); else tran_type = find_translate_type( cType2A_CS, cType1A_CS ); ItemExpr * newChild = NULL; newChild = new (generator->wHeap()) Translate(child(chld_to_trans), tran_type); newChild = newChild->bindNode(generator->getBindWA()); newChild = newChild->preCodeGen(generator); if (! newChild) return NULL; setChild(chld_to_trans, newChild); } else if ( cType1A_coll != cType2A_coll && cType1A_CS == CharInfo::ISO88591 && cType1A_CS == cType2A_CS && child(1)->getOperatorType() == ITM_CONSTANT && CollationInfo::isSystemCollation(cType1A_coll)) { ItemExpr * pNewChild2 = NULL; NAType * pNewType2 = cType2A.newCopy(generator->wHeap()); CharType * pNewCType2 = NULL; if (pNewType2 != NULL) pNewCType2 = (CharType*)pNewType2; if (pNewCType2 != NULL) pNewCType2->setCollation(cType1A_coll); pNewChild2 = new (generator->wHeap()) Cast(child(1), pNewCType2); pNewChild2 = pNewChild2->bindNode(generator->getBindWA()); pNewChild2 = pNewChild2->preCodeGen(generator); if (pNewChild2 == NULL) return NULL; setChild(1, pNewChild2); } // Regenerate the types...before we continue with rest of code type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); ItemExpr * pChild1 = child(0)->castToItemExpr(); const NAType &type1 = pChild1->getValueId().getType(); const CharType &cType1 = (CharType&)type1; ItemExpr * pChild2 = child(1)->castToItemExpr(); const NAType &type2 = pChild2->getValueId().getType(); const CharType &cType2 = (CharType&)type2; CharInfo::Collation coll1 = cType1.getCollation(); CharInfo::Collation coll2 = cType2.getCollation(); CMPASSERT(coll1==coll2); //LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ if (CollationInfo::isSystemCollation(coll1)) { setCollationEncodeComp(TRUE); { ItemExpr * newIe1 = child(0); ItemExpr * newIe2 = child(1); if (!
(cType1 == cType2)) { NAType *resultType ; Lng32 len = MAXOF(cType1.getMaxLenInBytesOrNAWChars(), cType2.getMaxLenInBytesOrNAWChars()); Lng32 Prec= MAXOF(cType1.getStrCharLimit(), cType2.getStrCharLimit()); if (len != cType1.getMaxLenInBytesOrNAWChars()) { if (DFS2REC::isAnyVarChar(cType1.getFSDatatype())) { resultType = new (generator->wHeap()) SQLVarChar( CharLenInfo(Prec, len), cType1.supportsSQLnull(), cType1.isUpshifted(), cType1.isCaseinsensitive(), cType1.getCharSet(), cType1.getCollation(), cType1.getCoercibility() ); } else { resultType = new (generator->wHeap()) SQLChar( CharLenInfo(Prec, len), cType1.supportsSQLnull(), cType1.isUpshifted(), cType1.isCaseinsensitive(), FALSE, cType1.getCharSet(), cType1.getCollation(), cType1.getCoercibility() ); } newIe1 = new(generator->wHeap()) Cast(newIe1,resultType); } if (len != cType2.getMaxLenInBytesOrNAWChars()) { if (DFS2REC::isAnyVarChar(cType2.getFSDatatype())) { resultType = new (generator->wHeap()) SQLVarChar( CharLenInfo(Prec, len), cType2.supportsSQLnull(), cType2.isUpshifted(), cType2.isCaseinsensitive(), cType2.getCharSet(), cType2.getCollation(), cType2.getCoercibility() ); } else { resultType = new (generator->wHeap()) SQLChar( CharLenInfo(Prec, len), cType2.supportsSQLnull(), cType2.isUpshifted(), cType2.isCaseinsensitive(), FALSE, cType2.getCharSet(), cType2.getCollation(), cType2.getCoercibility() ); } newIe2 = new(generator->wHeap()) Cast(newIe2,resultType); } } ItemExpr * newEncode; newEncode = new(generator->wHeap()) CompEncode(newIe1,FALSE, -1, CollationInfo::Compare); newEncode->bindNode(generator->getBindWA()); newEncode = newEncode->preCodeGen(generator); if (!newEncode) return NULL; setChild(0, newEncode); newEncode = new(generator->wHeap()) CompEncode(newIe2, FALSE, -1,CollationInfo::Compare); newEncode->bindNode(generator->getBindWA()); newEncode = newEncode->preCodeGen(generator); if (!newEncode) return NULL; setChild(1, newEncode); } } //LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ else { // update both operands if case insensitive comparisons // are to be done. NABoolean doCIcomp = ((cType1.isCaseinsensitive()) && (cType2.isCaseinsensitive())); ItemExpr * newChild = NULL; if ((doCIcomp) && (NOT cType1.isUpshifted())) { newChild = child(0); // Add UPPER except if it is a NULL constant value. if (newChild->getOperatorType() != ITM_CONSTANT || !((ConstValue *)newChild)->isNull()) newChild = new (generator->wHeap()) Upper(newChild); newChild = newChild->bindNode(generator->getBindWA()); if (! newChild || generator->getBindWA()->errStatus()) return NULL; newChild = newChild->preCodeGen(generator); if (! newChild) return NULL; setChild(0, newChild); } if ((doCIcomp) && (NOT cType2.isUpshifted())) { newChild = child(1); // Add UPPER except if it is a NULL constant value. if (newChild->getOperatorType() != ITM_CONSTANT || !((ConstValue *)newChild)->isNull()) newChild = new (generator->wHeap()) Upper(newChild); newChild = newChild->bindNode(generator->getBindWA()); if (! newChild || generator->getBindWA()->errStatus()) return NULL; newChild = newChild->preCodeGen(generator); if (! newChild) return NULL; setChild(1, newChild); } } } // following is for simple types.
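// Sketch of the flow below: the operand types are converted to executor
// Attributes and probed against ex_comp_clause. If the executor has a
// direct case for the operand pair, no Cast nodes are needed; otherwise
// both operands are first cast to their common 'super' type -- e.g. a
// SMALLINT compared with a NUMERIC(9,2) would be brought to the common
// numeric type before the compare (example types are illustrative only).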
SimpleType * attr_op1 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes( child(0)->getValueId().getType(), generator->wHeap())); SimpleType * attr_op2 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes( child(1)->getValueId().getType(), generator->wHeap())); ex_comp_clause temp_clause; temp_clause.set_case_index(getOperatorType(), attr_op1, attr_op2 ); if (temp_clause.get_case_index() != ex_comp_clause::COMP_NOT_SUPPORTED) { NABoolean doConstFolding = FALSE; if ((temp_clause.get_case_index() == ex_comp_clause::ASCII_COMP) && (CmpCommon::getDefault(CONSTANT_FOLDING) == DF_ON)) { if ((((child(0)->getOperatorType() == ITM_CONSTANT) && (child(1)->getOperatorType() != ITM_CONSTANT)) || ((child(1)->getOperatorType() == ITM_CONSTANT) && (child(0)->getOperatorType() != ITM_CONSTANT))) && (type_op1->getFSDatatype() == REC_BYTE_F_ASCII) && (type_op2->getFSDatatype() == REC_BYTE_F_ASCII)) { if (((child(0)->getOperatorType() == ITM_CONSTANT) && (type_op1->getNominalSize() < type_op2->getNominalSize())) || ((child(1)->getOperatorType() == ITM_CONSTANT) && (type_op2->getNominalSize() < type_op1->getNominalSize()))) { doConstFolding = TRUE; } } } if (NOT doConstFolding) { markAsPreCodeGenned(); return this; } } // conversion needed before comparison could be done. // find the 'super' type UInt32 flags = ((CmpCommon::getDefault(LIMIT_MAX_NUMERIC_PRECISION) == DF_ON) ? NAType::LIMIT_MAX_NUMERIC_PRECISION : 0); const NAType *result_type = type_op1->synthesizeType(SYNTH_RULE_UNION, *type_op1, *type_op2, generator->wHeap(), &flags); CMPASSERT(result_type); if (result_type->getTypeQualifier() == NA_NUMERIC_TYPE) { // match scales child(0) = generator->getExpGenerator()->matchScales(child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(), *result_type); } else if (result_type->getTypeQualifier() == NA_DATETIME_TYPE) { #pragma nowarn(1506) // warning elimination Lng32 fp1 = ((DatetimeType *) type_op1)->getFractionPrecision(); #pragma warn(1506) // warning elimination #pragma nowarn(1506) // warning elimination Lng32 fp2 = ((DatetimeType *) type_op2)->getFractionPrecision(); #pragma warn(1506) // warning elimination #pragma nowarn(1506) // warning elimination Lng32 fpResult = ((DatetimeType *) result_type)->getFractionPrecision(); #pragma warn(1506) // warning elimination if (fp1 != fpResult) { child(0) = new(generator->wHeap()) Cast(child(0), result_type, ITM_CAST, FALSE); child(0)->bindNode(generator->getBindWA()); } if (fp2 != fpResult) { child(1) = new(generator->wHeap()) Cast(child(1), result_type, ITM_CAST, FALSE); child(1)->bindNode(generator->getBindWA()); } } else if (result_type->getTypeQualifier() == NA_INTERVAL_TYPE) { child(0) = generator->getExpGenerator()->matchIntervalEndFields( child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchIntervalEndFields( child(1)->getValueId(), *result_type); child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchScales( child(1)->getValueId(), *result_type); type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if (result_type->getNominalSize() != type_op1->getNominalSize()) { child(0) = new(generator->wHeap()) Cast(child(0), result_type, ITM_CAST, FALSE); child(0)->bindNode(generator->getBindWA()); } if (result_type->getNominalSize() != type_op2->getNominalSize()) { child(1) = new(generator->wHeap()) Cast(child(1),
result_type, ITM_CAST, FALSE); child(1)->bindNode(generator->getBindWA()); } } // if the datatype or lengths of child and this don't match, then // conversion is needed. type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if ((result_type->getTypeQualifier() != NA_INTERVAL_TYPE) && ((result_type->getFSDatatype() != type_op1->getFSDatatype()) || (result_type->getNominalSize() != type_op1->getNominalSize()))) { child(0) = new(generator->wHeap()) Cast(child(0), result_type, ITM_CAST, FALSE); } if ((result_type->getTypeQualifier() != NA_INTERVAL_TYPE) && ((result_type->getFSDatatype() != type_op2->getFSDatatype()) || (result_type->getNominalSize() != type_op2->getNominalSize()))) { child(1) = new(generator->wHeap()) Cast(child(1), result_type, ITM_CAST, FALSE); } // bind/type propagate the new nodes child(0)->bindNode(generator->getBindWA()); child(1)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; ItemExpr *outExpr = NULL; Lng32 rc = generator->getExpGenerator()->foldConstants(child(0), &outExpr); if ((rc == 0) && (outExpr)) { child(0) = outExpr->preCodeGen(generator); } rc = generator->getExpGenerator()->foldConstants(child(1), &outExpr); if ((rc == 0) && (outExpr)) { child(1) = outExpr->preCodeGen(generator); } markAsPreCodeGenned(); return this; } // BiRelat::preCodeGen() ItemExpr * Assign::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; child(1) = generator->getExpGenerator()->matchIntervalEndFields( child(1)->getValueId(), getValueId().getType()); child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(), getValueId().getType()); child(1)->bindNode(generator->getBindWA()); child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // Assign::preCodeGen() ItemExpr * BaseColumn::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; ItemExpr * i = convertExternalType(generator); if (i == NULL) return NULL; return i; } ItemExpr * BitOperFunc::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; if (getOperatorType() == ITM_BITEXTRACT) { // convert 2nd and 3rd operands to 32-bit unsigned integers. for (Int32 i = 1; i < getArity(); i++) { const NAType &typ = child(i)->getValueId().getType(); if (typ.getFSDatatype() != REC_BIN32_UNSIGNED) { ItemExpr * newChild = new (generator->wHeap()) Cast(child(i), new (generator->wHeap()) SQLInt(FALSE, typ.supportsSQLnullLogical())); setChild(i, newChild); child(i)->bindNode(generator->getBindWA()); child(i) = child(i)->preCodeGen(generator); if (! child(i).getPtr()) return NULL; } // if } // for } else { for (Int32 i = 0; i < getArity(); i++) { const NAType &typ = child(i)->getValueId().getType(); if (NOT (getValueId().getType() == typ)) { NAType *resultType = getValueId().getType().newCopy(generator->wHeap()); ItemExpr * newChild = new (generator->wHeap()) Cast(child(i), resultType); setChild(i, newChild); } child(i)->bindNode(generator->getBindWA()); child(i) = child(i)->preCodeGen(generator); if (! child(i).getPtr()) return NULL; } } markAsPreCodeGenned(); return this; } // BitOperFunc::preCodeGen() ItemExpr * Cast::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; child(0)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (!
child(0).getPtr()) return NULL; // if a special cast node, see if my child's data attributes are // the same as my data attributes. If they are, return pointer to // my child. if ((matchChildType()) && (child(0)->getValueId().getType() == getValueId().getType())) { markAsPreCodeGenned(); return child(0); } NABuiltInTypeEnum sourceTypeQual = child(0)->getValueId().getType().getTypeQualifier(); NABuiltInTypeEnum targetTypeQual = getValueId().getType().getTypeQualifier(); // If this is a NARROW operation but it cannot result in an error, // there is no reason to use NARROW. Convert the NARROW to the // equivalent CAST. if (getOperatorType() == ITM_NARROW) { const NAType * sourceType = &(child(0)->getValueId().getType()); const NAType * targetType = &(getValueId().getType()); if (!sourceType->errorsCanOccur(*targetType)) { ItemExpr *c = new(generator->wHeap()) Cast(child(0), targetType); c->bindNode(generator->getBindWA()); return c->preCodeGen(generator); } } // Conversion to/from a tandem float type is only supported if // the from/to type is a float type. // If the target is a tandem float type and the source is not a float, // or the target is not a float and the source is a tandem float, then // convert the source to an ieee float type (ieee double). short srcFsType = child(0)->getValueId().getType().getFSDatatype(); short tgtFsType = getValueId().getType().getFSDatatype(); if ((((tgtFsType == REC_TDM_FLOAT32) || (tgtFsType == REC_TDM_FLOAT64)) && ! ((srcFsType >= REC_MIN_FLOAT) && (srcFsType <= REC_MAX_FLOAT))) || (((srcFsType == REC_TDM_FLOAT32) || (srcFsType == REC_TDM_FLOAT64)) && ! ((tgtFsType >= REC_MIN_FLOAT) && (tgtFsType <= REC_MAX_FLOAT)))) { NAType * intermediateType = new(generator->wHeap()) SQLDoublePrecision( child(0)->getValueId().getType().supportsSQLnull(), generator->wHeap()); // Genesis case 10-040126-9823. // Match the scales of the source with that of the intermediate type. If // this is not done, the cast to the intermediate type does not get scaled // properly, leading to incorrect results. child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), *intermediateType); child(0) = new(generator->wHeap()) Cast(child(0),intermediateType); child(0)->bindNode(generator->getBindWA()); sourceTypeQual = child(0)->getValueId().getType().getTypeQualifier(); } if ((sourceTypeQual == NA_NUMERIC_TYPE) && (targetTypeQual == NA_DATETIME_TYPE)) { // binder has already verified that this is a valid conversion // in special1 mode. NumericType &sourceType = (NumericType &)(child(0)->getValueId().getType()); DatetimeType &targetType = (DatetimeType &)(getValueId().getType()); if (sourceType.getFSDatatype() != REC_BIN64_SIGNED) { // doing a numeric to date conversion // convert source to largeint. ItemExpr * newChild = new (generator->wHeap()) Cast(child(0), new (generator->wHeap()) SQLLargeInt(TRUE, child(0)->castToItemExpr()-> getValueId().getType().supportsSQLnull())); newChild = newChild->bindNode(generator->getBindWA()); newChild = newChild->preCodeGen(generator); if (! newChild) return NULL; setChild(0, newChild); } } if ((sourceTypeQual == NA_DATETIME_TYPE) && (targetTypeQual == NA_NUMERIC_TYPE)) { // binder has already verified that this is a valid conversion // in special1 mode. DatetimeType &sourceType = (DatetimeType &)(child(0)->getValueId().getType()); NumericType &targetType = (NumericType &)(getValueId().getType()); if (targetType.getFSDatatype() != REC_BIN64_SIGNED) { // doing a date to numeric conversion. // convert source to largeint.
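// Sketch of the intent (example type chosen for illustration): if the
// numeric side is, say, NUMERIC(8,0) rather than LARGEINT, the cast
// built below first converts the datetime source to a LARGEINT, so the
// datetime-to-numeric conversion proper always runs on a 64-bit signed
// operand; the Cast to the final numeric type happens afterwards.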
ItemExpr * newChild = new (generator->wHeap()) Cast(child(0), new (generator->wHeap()) SQLLargeInt(TRUE, child(0)->castToItemExpr()-> getValueId().getType().supportsSQLnull())); newChild = newChild->bindNode(generator->getBindWA()); newChild = newChild->preCodeGen(generator); if (! newChild) return NULL; setChild(0, newChild); } } // datetime to datetime conversion if ((sourceTypeQual == NA_DATETIME_TYPE) && (targetTypeQual == NA_DATETIME_TYPE)) { DatetimeType &sourceType = (DatetimeType &)(child(0)->getValueId().getType()); DatetimeType &targetType = (DatetimeType &)(getValueId().getType()); if (targetType.getStartField() < sourceType.getStartField()) { // Must provide some fields from the current time stamp // // The following code generates the current timestamp as a // string and extracts the needed leading fields and appends to // this the given value (child(0)) as a string. The result is a // string which contains the given datetime value extended to // the YEAR field with the current timestamp. // // Buffer to hold new expression string. // char str[200]; // Offset (in bytes) from the start of the current timestamp // (represented as a char. string) to the first field needed in // the extension. // // - Subtract 1 from the start field to make the value zero based. // // - Each field has at least 3 bytes (2 for the value and 1 for the // delimiter) // // - Add 1, since the substring function is 1 based. // Int32 leadFieldsOffset = ((targetType.getStartField() - 1) * 3) + 1; // - Add 2 extra for the year field if it is being skipped over // since it has 4 bytes of value. // if (leadFieldsOffset > 1) leadFieldsOffset += 2; // Size (in bytes) of the leading fields represented as a // character string taken from the current timestamp // // - Subtract 1 from the start field to make the value zero based. // // - Each field has at least 3 bytes (2 for the value and 1 for the // delimiter) // // - Add 2 extra for the year field (which will always be one of // the extended fields) since it has 4 bytes of value. // // - Subtract the leadFieldsOffset ( - 1 to make it zero based). // Int32 leadFieldsSize = ((((sourceType.getStartField() - 1) * 3) + 2) - (leadFieldsOffset - 1)); // Size (in bytes) of the source value represented as a // character string. // #pragma nowarn(1506) // warning elimination Int32 sourceFieldsSize = sourceType.getDisplayLength(); #pragma warn(1506) // warning elimination // Construct an expression (string) to concatenate the given // value with the required fields from the current timestamp as // a string, then cast this string as a datetime value that can // be cast to the desired result. // // Example : // // cast(DATETIME 'dd hh:mm:ss' DAY TO SECOND as DATETIME MONTH to MINUTE) // // current timestamp (as string) | "YYYY-MM-DD HH:MM:SS.FFFFFF" // | // leadFieldsOffset = ((2-1)*3)+1 +2 = | --6--^ // | // leadFieldsSize = (((3-1)*3)+2) - 5 =| ^3^ // | // result of substring(cts from 6 for 3)| "MM-" // | // value to be extended (as string) | "dd hh:mm:ss" // | // result of string concat. (as string)| "MM-dd hh:mm:ss" // | // Cast to a datetime MONTH TO SECOND | Mdhms // | // Original (this) cast to result | Mdhm // str_sprintf(str, "CAST((SUBSTRING(CAST(CURRENT AS CHAR(19)) " "FROM %d FOR %d) || CAST(@A1 AS CHAR(%d))) " "AS DATETIME %s TO %s)", leadFieldsOffset, leadFieldsSize, sourceFieldsSize, targetType.getFieldName(targetType.getStartField()), ((sourceType.getEndField() == REC_DATE_SECOND) ?
"FRACTION(6)" : sourceType.getFieldName(sourceType.getEndField()))); GenAssert(str_len(str) < 199,"Internal Datetime Error Cast::preCodeGen"); ItemExpr * newExpr = generator->getExpGenerator()->createExprTree(str, 0, 1, child(0)); newExpr->bindNode(generator->getBindWA()); child(0) = newExpr->preCodeGen(generator); } } // Call matchScales only if both datatypes aren't intervals. // (We make the exception for intervals because Cast is able // to match the scales of intervals itself.) // Also, we suppress the call to matchScales() for a narrow. // This is because narrow will handle the scaling differently. // Conversions from float to bignum are also not scaled here. Scaling // is done in BigNum::castFrom method. if (NOT ((getOperatorType() == ITM_NARROW) || ((sourceTypeQual == NA_INTERVAL_TYPE) && (targetTypeQual == NA_INTERVAL_TYPE)) || ((DFS2REC::isFloat(srcFsType)) && (DFS2REC::isBigNum(tgtFsType))))) { child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), getValueId().getType()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; } // For a numeric NARROW, check if scaling is needed. if (targetTypeQual == NA_NUMERIC_TYPE && getOperatorType() == ITM_NARROW) { GenAssert(sourceTypeQual == NA_NUMERIC_TYPE, "source type and target type incompatible in NARROW"); const NumericType * sourceNumType = (const NumericType *)(&child(0)->getValueId().getType()); const NumericType * targetNumType = (const NumericType *)(&getValueId().getType()); if (sourceNumType->getScale() != targetNumType->getScale()) { // We need to scale the value. We don't want to use the // usual scaling method of simply multiplying or dividing // the result because we need to capture truncations // and overflows at run time. The Narrow operator supports // scaling for the BigNum-to-any-numeric type case. // Therefore, we first cast the value to BigNum, // then narrow it down. // Soln 10-041105-1519 // Dont introduce the CAST operator if the target is already a BigNum // because NARROW does not support scaling for the BigNum-to-BigNum // case. Use the usual scaling method instead. if (targetNumType->isBigNum()) { child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), *targetNumType); } else { Lng32 intermediatePrecision = sourceNumType->getPrecision(); Lng32 intermediateScale = sourceNumType->getScale(); // SQLBigNum takes decimal precision, so if the source // has binary precision, we need to adjust. if (sourceNumType->binaryPrecision()) { // Can fit three binary digits in the space of one // decimal digit. The '+5' in the precision calculation // allows for an extra digit before and after the // radix point. intermediatePrecision = (intermediatePrecision+5)/3; } // If we need to cast an approximate, increase the length // and scale so that the number can be represented now that // it won't have an exponent. // In each of the cases below, the formula used to calculate // precision is: // // intermediatePrecision = 2 * <max exponent> // + <# significant digits in mantissa> + 1 // // We use 2 * <max exponent> to take into account the // maximum positive exponent as well as the maximum // negative exponent. // // The formula used to calculate scale is: // // intermediateScale = <max exponent> + // <# significant digits in mantissa> - 1 // // Here the exponent and digits are understood to be decimal, // not binary. 
// // For the various kinds of floats we have: // // Kind Max exponent Decimal digits in Mantissa // ----------- ------------ -------------------------- // IEEE 32 bit 38 7 // IEEE 64 bit 308 17 // Tandem 32 bit 78 7 // Tandem 64 bit 78 18 if (sourceNumType->getFSDatatype() == REC_IEEE_FLOAT32) { intermediatePrecision = 84; // (2 x 38) + 7 + 1 = 84 intermediateScale = 44; // 38 + 7 - 1 = 44 } else if (sourceNumType->getFSDatatype() == REC_IEEE_FLOAT64) { intermediatePrecision = 634; // (2 x 308) + 17 + 1 = 634 intermediateScale = 324; // 308 + 17 - 1 = 324 } else if (sourceNumType->getFSDatatype() == REC_TDM_FLOAT32) { intermediatePrecision = 164; // (2 x 78) + 7 + 1 = 164 intermediateScale = 84; // 78 + 7 - 1 = 84 } else if (sourceNumType->getFSDatatype() == REC_TDM_FLOAT64) { intermediatePrecision = 175; // (2 x 78) + 18 + 1 = 175 intermediateScale = 95; // 78 + 18 - 1 = 95 } NAType * intermediateType = new(generator->wHeap()) SQLBigNum(intermediatePrecision, intermediateScale, (sourceNumType->isBigNum() && ((SQLBigNum*)sourceNumType)->isARealBigNum()), TRUE, // make it signed sourceNumType->supportsSQLnull(), NULL); child(0) = new(generator->wHeap()) Cast(child(0),intermediateType); child(0)->bindNode(generator->getBindWA()); // To suppress insertion of multiplying/dividing, mark Cast as // already pre-code-genned. child(0)->markAsPreCodeGenned(); } } } if ((sourceTypeQual == NA_CHARACTER_TYPE) && ((tgtFsType == REC_BLOB) || (tgtFsType == REC_CLOB))) { LOBconvertHandle * lc = new(generator->wHeap()) LOBconvertHandle(child(0), LOBoper::LOB_); lc->bindNode(generator->getBindWA()); lc->preCodeGen(generator); child(0) = lc; } if (getArity() > 1) { child(1)->bindNode(generator->getBindWA()); child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; } ItemExpr *result = this; markAsPreCodeGenned(); return result; } // Cast::preCodeGen() ItemExpr * CharFunc::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; const NAType &typ1 = child(0)->getValueId().getType(); // Insert a cast node to convert child to an INT. child(0) = new (generator->wHeap()) Cast(child(0), new (generator->wHeap()) SQLInt(FALSE, typ1.supportsSQLnullLogical())); child(0)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // CharFunc::preCodeGen() ItemExpr * CompEncode::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // during key encode expr generation, no need to convert external // column types(like tandem floats) to their internal // equivalent(ieee floats). Avoid doing preCodeGen in these cases. // Do this only for child leaf nodes (columns, hostvar, params, literals). // if (NOT (child(0)->getValueId().getType().isExternalType() && child(0)->getArity() == 0)) { child(0) = child(0)->preCodeGen(generator); } markAsPreCodeGenned(); return this; } // CompEncode::preCodeGen() ItemExpr * CompDecode::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; return CompEncode::preCodeGen(generator); } // CompDecode::preCodeGen() ItemExpr * Convert::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // Since this CONVERT will convert its child to the original // ExternalType, no need to ask it to first be cast to an internal // type. So, do not call precodegen in these cases. // Do this only for child leaf nodes (columns, hostvar, params, literals). 
// if (NOT (child(0)->getValueId().getType().isExternalType() && child(0)->getArity() == 0)) { child(0) = child(0)->preCodeGen(generator); } markAsPreCodeGenned(); return this; } // Convert::preCodeGen() ItemExpr * ConvertTimestamp::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // // If the operand is not a largeint with a scale of 0, convert it to one. // NumericType *numeric = (NumericType *)(&(child(0)->getValueId().getType())); if ((numeric->getFSDatatype() != REC_BIN64_SIGNED) || (numeric->getScale() != 0)) { child(0) = new(generator->wHeap()) Cast(child(0), new(generator->wHeap()) SQLLargeInt(TRUE, numeric->supportsSQLnull())); child(0)->bindNode(generator->getBindWA()); } child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // ConvertTimestamp::preCodeGen() ItemExpr * Extract::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // // If the operand is an interval and the extract field is not the end field, // convert the interval to the units of the extract field. // Set the dataconversionerror param to Cast so conversion error // (truncation) could be ignored at runtime. // NAType * type_op1 = (NAType *)(&(child(0)->getValueId().getType())); if ((type_op1->getTypeQualifier() == NA_INTERVAL_TYPE) && (getExtractField() < ((IntervalType *) type_op1)->getEndField())) { IntervalType *interval = (IntervalType *) type_op1; ItemExpr *dataConvError = new(generator->wHeap()) ConstValue(1234567890); child(0) = new(generator->wHeap()) Cast(child(0), dataConvError, new(generator->wHeap()) SQLInterval(interval->supportsSQLnull(), interval->getStartField(), interval->getLeadingPrecision(), getExtractField()), ITM_NARROW); child(0)->bindNode(generator->getBindWA()); } child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // Extract::preCodeGen() ItemExpr * Format::preCodeGen(Generator * generator) { return BuiltinFunction::preCodeGen(generator); } ItemExpr * JulianTimestamp::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // // If the operand is not a timestamp with a fractional precision of 6, // convert it to one. // DatetimeType *dt = (DatetimeType *)(&(child(0)->getValueId().getType())); if ((dt->getSubtype() != DatetimeType::SUBTYPE_SQLTimestamp) || (dt->getFractionPrecision() != 6)) { child(0) = new(generator->wHeap()) Cast(child(0), new(generator->wHeap()) SQLTimestamp(dt->supportsSQLnull(), 6)); child(0)->bindNode(generator->getBindWA()); } child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // JulianTimestamp::preCodeGen() ItemExpr * Hash::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); ItemExpr *result = this; // --------------------------------------------------------------------- // In the optimizer, a hash function accepts a comma-separated list // of columns. In the executor, replace this with the HashComb of the hash // functions of the individual list elements. NOTE: once error handling // is in place we need to make sure that no errors are generated from // this. 
// --------------------------------------------------------------------- if (child(0)->getOperatorType() == ITM_ITEM_LIST) { // child is a multi-valued expression, transform into multiple // hash expressions ExprValueId treePtr = child(0); ItemExprTreeAsList hashValues(&treePtr, ITM_ITEM_LIST, LEFT_LINEAR_TREE); // this expression becomes the hash operator for the first // hash value child(0) = hashValues[0]; const NAType &childType = child(0)->getValueId().getType(); if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &chType = (CharType&)childType; CharInfo::Collation coll = chType.getCollation(); //LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ if (CollationInfo::isSystemCollation(coll)) { child(0) = new(generator->wHeap()) CompEncode(child(0),FALSE, -1, CollationInfo::Compare); child(0) = child(0)->bindNode(generator->getBindWA()); } //LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ else { //-------------------------- if ((chType.isCaseinsensitive()) && (NOT casesensitiveHash()) && (NOT chType.isUpshifted())) { child(0) = new (generator->wHeap()) Upper(child(0)); child(0) = child(0)->bindNode(generator->getBindWA()); } } } // add hash expressions for all other hash values and HashComb // them together CollIndex nc = hashValues.entries(); for (CollIndex i = 1; i < nc; i++) { ItemExpr *hi = hashValues[i]; const NAType &childType = hi->getValueId().getType(); if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &chType = (CharType&)childType; CharInfo::Collation coll = chType.getCollation(); //LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ if (CollationInfo::isSystemCollation(coll)) { hi = new(generator->wHeap()) CompEncode(hi,FALSE, -1, CollationInfo::Compare); hi = hi->bindNode(generator->getBindWA()); } //LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ else { //----------------------------- if ((chType.isCaseinsensitive()) && (NOT casesensitiveHash()) && (NOT chType.isUpshifted())) { hi = new (generator->wHeap()) Upper(hi); hi = hi->bindNode(generator->getBindWA()); } //----------------------- } } ItemExpr *hv = new(generator->wHeap()) Hash(hi); result = new(generator->wHeap()) HashComb(result,hv); } result->bindNode(generator->getBindWA()); } else { const NAType &childType = child(0)->getValueId().getType(); if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &chType = (CharType&)childType; CharInfo::Collation coll = chType.getCollation(); //LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ if (CollationInfo::isSystemCollation(coll)) { child(0) = new (generator->wHeap()) CompEncode(child(0), FALSE, -1, CollationInfo::Compare); child(0) = child(0)->bindNode(generator->getBindWA()); } //LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ else { if ((chType.isCaseinsensitive()) && (NOT casesensitiveHash()) && (NOT chType.isUpshifted())) { child(0) = new (generator->wHeap()) Upper(child(0)); child(0) = child(0)->bindNode(generator->getBindWA()); } } } } // do generic tasks for pre-code generation (e.g. 
recurse to the children) setReplacementExpr(result->ItemExpr::preCodeGen(generator)); markAsPreCodeGenned(); return getReplacementExpr(); } // Hash::preCodeGen() ItemExpr * HiveHash::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); ItemExpr *result = this; // --------------------------------------------------------------------- // In the optimizer, a hash function accepts a comma-separated list // of columns. In the executor, replace this with the HashComb of the hash // functions of the individual list elements. NOTE: once error handling // is in place we need to make sure that no errors are generated from // this. // --------------------------------------------------------------------- if (child(0)->getOperatorType() == ITM_ITEM_LIST) { // child is a multi-valued expression, transform into multiple // hash expressions ExprValueId treePtr = child(0); ItemExprTreeAsList hivehashValues(&treePtr, ITM_ITEM_LIST, LEFT_LINEAR_TREE); // this expression becomes the hash operator for the first // hash value child(0) = hivehashValues[0]; const NAType &childType = child(0)->getValueId().getType(); if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &chType = (CharType&)childType; CharInfo::Collation coll = chType.getCollation(); //LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ if (CollationInfo::isSystemCollation(coll)) { child(0) = new(generator->wHeap()) CompEncode(child(0),FALSE, -1, CollationInfo::Compare); child(0) = child(0)->bindNode(generator->getBindWA()); } //LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ else { //-------------------------- if ((chType.isCaseinsensitive()) && (NOT casesensitiveHash()) && (NOT chType.isUpshifted())) { child(0) = new (generator->wHeap()) Upper(child(0)); child(0) = child(0)->bindNode(generator->getBindWA()); } } } // add hash expressions for all other hash values and HiveHashComb // them together CollIndex nc = hivehashValues.entries(); for (CollIndex i = 1; i < nc; i++) { ItemExpr *hi = hivehashValues[i]; const NAType &childType = hi->getValueId().getType(); if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &chType = (CharType&)childType; CharInfo::Collation coll = chType.getCollation(); //LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ if (CollationInfo::isSystemCollation(coll)) { hi = new(generator->wHeap()) CompEncode(hi,FALSE, -1, CollationInfo::Compare); hi = hi->bindNode(generator->getBindWA()); } //LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ else { //----------------------------- if ((chType.isCaseinsensitive()) && (NOT casesensitiveHash()) && (NOT chType.isUpshifted())) { hi = new (generator->wHeap()) Upper(hi); hi = hi->bindNode(generator->getBindWA()); } //----------------------- } } ItemExpr *hv = new(generator->wHeap()) HiveHash(hi); result = new(generator->wHeap()) HiveHashComb(result,hv); } result->bindNode(generator->getBindWA()); } else { const NAType &childType = child(0)->getValueId().getType(); if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &chType = (CharType&)childType; CharInfo::Collation coll = chType.getCollation(); //LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ if (CollationInfo::isSystemCollation(coll)) { child(0) = new (generator->wHeap()) 
CompEncode(child(0), FALSE, -1, CollationInfo::Compare); child(0) = child(0)->bindNode(generator->getBindWA()); } //LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ else { if ((chType.isCaseinsensitive()) && (NOT casesensitiveHash()) && (NOT chType.isUpshifted())) { child(0) = new (generator->wHeap()) Upper(child(0)); child(0) = child(0)->bindNode(generator->getBindWA()); } } } } // do generic tasks for pre-code generation (e.g. recurse to the children) setReplacementExpr(result->ItemExpr::preCodeGen(generator)); markAsPreCodeGenned(); return getReplacementExpr(); } // HiveHash::preCodeGen() // -------------------------------------------------------------- // member functions for HashDistPartHash operator // Hash Function used by Hash Partitioning. This function cannot change // once Hash Partitioning is released! Defined for all data types, // returns a 32-bit non-nullable hash value for the data item. //-------------------------------------------------------------- ItemExpr * HashDistPartHash::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); ItemExpr *result = this; // --------------------------------------------------------------------- // In the optimizer, a hash function accepts a comma-separated list // of columns. Replace this with the HashComb of the hash functions // of the individual list elements. // --------------------------------------------------------------------- if (child(0)->getOperatorType() == ITM_ITEM_LIST) { // child is a multi-valued expression, transform into multiple // hash expressions ExprValueId treePtr = child(0); ItemExprTreeAsList hashValues(&treePtr, ITM_ITEM_LIST, LEFT_LINEAR_TREE); // this expression becomes the hash operator for the first // hash value child(0) = hashValues[0]; const NAType &childType = child(0)->getValueId().getType(); if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &chType = (CharType&)childType; CharInfo::Collation coll = chType.getCollation(); //LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ if (CollationInfo::isSystemCollation(coll)) { if (child(0)->getOperatorType() == ITM_NARROW) { ItemExpr* narrowsChild = child(0)->child(0); const NAType &narrowsChildType= narrowsChild->getValueId().getType(); CMPASSERT(narrowsChildType.getTypeQualifier() == NA_CHARACTER_TYPE); NAType *newType= narrowsChildType.newCopy(generator->wHeap()); CharType * newCharType = (CharType *) newType; newCharType->setDataStorageSize(chType.getDataStorageSize()); child(0)->getValueId().changeType(newCharType); } child(0) = new(generator->wHeap()) CompEncode(child(0),FALSE, -1, CollationInfo::Compare); child(0) = child(0)->bindNode(generator->getBindWA()); } //LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ else { if ((chType.isCaseinsensitive()) && (NOT chType.isUpshifted())) { child(0) = new (generator->wHeap()) Upper(child(0)); child(0) = child(0)->bindNode(generator->getBindWA()); } } } // add hash expressions for all other hash values and HashComb // them together CollIndex nc = hashValues.entries(); for (CollIndex i = 1; i < nc; i++) { ItemExpr *hi = hashValues[i]; const NAType &childType = hi->getValueId().getType(); if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &chType = (CharType&)childType; CharInfo::Collation coll = chType.getCollation(); //LCOV_EXCL_START : cnu - Should not count in Code Coverage until we
support non-binary collation in SQ if (CollationInfo::isSystemCollation(coll)) { //Solution 10-081216-8006 if (hi->getOperatorType() == ITM_NARROW) { ItemExpr* narrowsChild = hi->child(0); const NAType &narrowsChildType= narrowsChild->getValueId().getType(); CMPASSERT(narrowsChildType.getTypeQualifier() == NA_CHARACTER_TYPE); NAType *newType= narrowsChildType.newCopy(generator->wHeap()); CharType * newCharType = (CharType *) newType; newCharType->setDataStorageSize(chType.getDataStorageSize()); hi->getValueId().changeType(newCharType); } hi = new(generator->wHeap()) CompEncode(hi,FALSE, -1, CollationInfo::Compare); hi = hi->bindNode(generator->getBindWA()); } //LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ else { if ((chType.isCaseinsensitive()) && (NOT chType.isUpshifted())) { hi = new (generator->wHeap()) Upper(hi); hi = hi->bindNode(generator->getBindWA()); } } } ItemExpr *hv = new(generator->wHeap()) HashDistPartHash(hi); result = new(generator->wHeap()) HashDistPartHashComb(result,hv); } result->bindNode(generator->getBindWA()); } else { const NAType &childType = child(0)->getValueId().getType(); if(childType.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &chType = (CharType&)childType; CharInfo::Collation coll = chType.getCollation(); //LCOV_EXCL_START : cnu - Should not count in Code Coverage until we support non-binary collation in SQ if (CollationInfo::isSystemCollation(coll)) { //Solution 10-081216-8006 if (child(0)->getOperatorType() == ITM_NARROW) { ItemExpr* narrowsChild = child(0)->child(0); const NAType &narrowsChildType= narrowsChild->getValueId().getType(); CMPASSERT(narrowsChildType.getTypeQualifier() == NA_CHARACTER_TYPE); NAType *newType= narrowsChildType.newCopy(generator->wHeap()); CharType * newCharType = (CharType *) newType; newCharType->setDataStorageSize(chType.getDataStorageSize()); child(0)->getValueId().changeType(newCharType); } child(0) = new(generator->wHeap()) CompEncode(child(0),FALSE, -1, CollationInfo::Compare); child(0) = child(0)->bindNode(generator->getBindWA()); } //LCOV_EXCL_STOP : cnu - Should not count in Code Coverage until we support non-binary collation in SQ else { if ((chType.isCaseinsensitive()) && (NOT chType.isUpshifted())) { child(0) = new (generator->wHeap()) Upper(child(0)); child(0) = child(0)->bindNode(generator->getBindWA()); } } } } // do generic tasks for pre-code generation (e.g. recurse to the children) setReplacementExpr(result->ItemExpr::preCodeGen(generator)); markAsPreCodeGenned(); return getReplacementExpr(); } // HashDistPartHash::preCodeGen() ItemExpr * HostVar::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; ItemExpr * i = convertExternalType(generator); if (i == NULL) return NULL; return i; } ItemExpr * IndexColumn::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; ItemExpr * i = convertExternalType(generator); if (i == NULL) return NULL; return i; } ItemExpr * Generator::addCompDecodeForDerialization(ItemExpr * ie, NABoolean isAlignedFormat) { if (!ie) return NULL; if ((ie->getOperatorType() == ITM_BASECOLUMN) || (ie->getOperatorType() == ITM_INDEXCOLUMN)) { if (! isAlignedFormat && HbaseAccess::isEncodingNeededForSerialization(ie)) { ItemExpr * newNode = new(wHeap()) CompDecode (ie, &ie->getValueId().getType(), FALSE, TRUE); newNode->bindNode(getBindWA()); if (getBindWA()->errStatus()) return NULL; newNode = newNode->preCodeGen(this); if (! 
newNode) return NULL; return newNode; } else return ie; } for (Lng32 i = 0; i < ie->getArity(); i++) { ItemExpr * nie = addCompDecodeForDerialization(ie->child(i), isAlignedFormat); if (nie) ie->setChild(i, nie); } return ie; } ItemExpr * HbaseTimestamp::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); if (! ItemExpr::preCodeGen(generator)) return NULL; markAsPreCodeGenned(); return this; } ItemExpr * HbaseVersion::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); if (! ItemExpr::preCodeGen(generator)) return NULL; markAsPreCodeGenned(); return this; } ItemExpr * LOBoper::preCodeGen(Generator * generator) { generator->setProcessLOB(TRUE); return BuiltinFunction::preCodeGen(generator); } ItemExpr * LOBconvert::preCodeGen(Generator * generator) { NAColumn * col = child(0)->getValueId().getNAColumn(TRUE); if (col) { lobNum() = col->lobNum(); lobStorageType() = col->lobStorageType(); lobStorageLocation() = col->lobStorageLocation(); } return LOBoper::preCodeGen(generator); } ItemExpr * LOBupdate::preCodeGen(Generator * generator) { return LOBoper::preCodeGen(generator); } ItemExpr * MathFunc::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; for (Int32 i = 0; i < getArity(); i++) { const NAType &typ = child(i)->getValueId().getType(); // Insert a cast node to convert child to a double precision. child(i) = new (generator->wHeap()) Cast(child(i), new (generator->wHeap()) SQLDoublePrecision( typ.supportsSQLnullLogical())); child(i)->bindNode(generator->getBindWA()); child(i) = child(i)->preCodeGen(generator); if (! child(i).getPtr()) return NULL; } markAsPreCodeGenned(); return this; } // MathFunc::preCodeGen() ItemExpr * Modulus::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; for (Int32 i = 0; i < 2; i++) { const NumericType &typ = (NumericType&)child(i)->getValueId().getType(); if (typ.isDecimal()) { // Insert a cast node to convert child to an LARGEINT. child(i) = new (generator->wHeap()) Cast(child(i), new (generator->wHeap()) SQLLargeInt(TRUE, typ.supportsSQLnullLogical())); } child(i)->bindNode(generator->getBindWA()); child(i) = child(i)->preCodeGen(generator); if (! child(i).getPtr()) return NULL; } markAsPreCodeGenned(); return this; } // Modulus::preCodeGen() ItemExpr * ItemExpr::convertExternalType(Generator * generator) { BindWA * bindWA = generator->getBindWA(); if (getValueId().getType().isExternalType()) { // this type is not supported internally. // Convert it to an equivalent internal type. ItemExpr * c = new (bindWA->wHeap()) Cast(this, getValueId().getType().equivalentType(bindWA->wHeap())); c->synthTypeAndValueId(); // mark 'this' as precodegenned so we don't go thru // this path again. markAsPreCodeGenned(); c = c->preCodeGen(generator); unmarkAsPreCodeGenned(); return c; } else return this; } ItemExpr * Parameter::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; ItemExpr * i = convertExternalType(generator); if (i == NULL) return NULL; return i; } ItemExpr * PivotGroup::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); if (! 
ItemExpr::preCodeGen(generator)) return NULL; ItemExpr * childExpr = child(0)->castToItemExpr(); const NAType &type1 = childExpr->getValueId().getType(); if (type1.getTypeQualifier() != NA_CHARACTER_TYPE) { Lng32 displayLen = type1.getDisplayLength( type1.getFSDatatype(), type1.getNominalSize(), type1.getPrecision(), type1.getScale(), 0); NAType * newType = new(generator->getBindWA()->wHeap()) SQLVarChar(displayLen, type1.supportsSQLnull()); childExpr = new (generator->getBindWA()->wHeap()) Cast(childExpr, newType); childExpr = childExpr->bindNode(generator->getBindWA()); if (! childExpr || generator->getBindWA()->errStatus()) return NULL; childExpr = childExpr->preCodeGen(generator); if (! childExpr) return NULL; child(0) = childExpr; } markAsPreCodeGenned(); return this; }
ItemExpr * RandomNum::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; if (child(0)) { const NAType &typ1 = child(0)->getValueId().getType(); // Insert a cast node to convert child to an INT. child(0) = new (generator->wHeap()) Cast(child(0), new (generator->wHeap()) SQLInt(FALSE, typ1.supportsSQLnullLogical())); child(0)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; } markAsPreCodeGenned(); return this; } // RandomNum::preCodeGen()
ItemExpr * Repeat::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; const NAType &typ2 = child(1)->getValueId().getType(); // Insert a cast node to convert child 2 to an INT. child(1) = new (generator->wHeap()) Cast(child(1), new (generator->wHeap()) SQLInt(FALSE, typ2.supportsSQLnullLogical())); child(1)->bindNode(generator->getBindWA()); for (Int32 i = 0; i < getArity(); i++) { if (child(i)) { child(i) = child(i)->preCodeGen(generator); if (! child(i).getPtr()) return NULL; } } markAsPreCodeGenned(); return this; } // Repeat::preCodeGen()
ItemExpr *ReplaceNull::preCodeGen(Generator *generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); NAType *dstAType = getValueId().getType().newCopy(generator->wHeap()); const NAType& dstBType = getValueId().getType(); if(child(0) == child(1)) { dstAType->setNullable(TRUE); } child(1) = new(generator->wHeap()) Cast(child(1), dstAType); child(2) = new(generator->wHeap()) Cast(child(2), &dstBType); child(1)->bindNode(generator->getBindWA()); child(2)->bindNode(generator->getBindWA()); setReplacementExpr(ItemExpr::preCodeGen(generator)); markAsPreCodeGenned(); return getReplacementExpr(); }
ItemExpr * TriRelational::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); // --------------------------------------------------------------------- // The executor does not handle tri-relational operators. It either // handles key exclusion expressions if the operator is part of a key // predicate, or the tri-relational operator gets converted into // a case statement (see comment in file ItemFunc.h).
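// For illustration (a sketch of the rewrite done below, not part of the
// original comment): LESS_OR_LE(a, b, excl) is turned into
//   CASE WHEN excl THEN a < b ELSE a <= b END
// so that only ordinary two-way comparisons reach the executor.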
// --------------------------------------------------------------------- NABoolean lessOrLe = (getOperatorType() == ITM_LESS_OR_LE); BiRelat *exclusive = new(generator->wHeap()) BiRelat( (IFX lessOrLe THENX ITM_LESS ELSEX ITM_GREATER), child(0), child(1)); BiRelat *inclusive = new(generator->wHeap()) BiRelat( (IFX lessOrLe THENX ITM_LESS_EQ ELSEX ITM_GREATER_EQ), child(0), child(1)); exclusive->setSpecialNulls(getSpecialNulls()); inclusive->setSpecialNulls(getSpecialNulls()); ItemExpr * result = new(generator->wHeap()) Case( NULL, new(generator->wHeap()) IfThenElse( child(2), exclusive, inclusive)); result->bindNode(generator->getBindWA()); // do generic tasks for pre-code generation (e.g. recurse to the children) setReplacementExpr(result->preCodeGen(generator)); markAsPreCodeGenned(); return getReplacementExpr(); } // TriRelational::preCodeGen() ItemExpr * HashDistrib::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; if (! ItemExpr::preCodeGen(generator)) return NULL; // Assert that the operands are unsigned int. // NumericType *numeric = (NumericType *)(&(child(0)->getValueId().getType())); GenAssert(numeric->getFSDatatype()==REC_BIN32_UNSIGNED && numeric->getScale()==0, "invalid first operand type to function HashDistrib"); numeric = (NumericType *)(&(child(1)->getValueId().getType())); GenAssert(numeric->getFSDatatype()==REC_BIN32_UNSIGNED && numeric->getScale()==0, "invalid second operand type to function HashDistrib"); markAsPreCodeGenned(); return this; } ItemExpr * ProgDistribKey::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // Assert that all operands are of type unsigned int. // for (Int32 i=0; i<3; i++) { NumericType *numeric = (NumericType *)(&(child(i)->getValueId().getType())); GenAssert(numeric->getFSDatatype()==REC_BIN32_UNSIGNED && numeric->getScale()==0, "invalid operand type to function ProgDistribKey"); } markAsPreCodeGenned(); return this; } ItemExpr * PAGroup::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; if (! ItemExpr::preCodeGen(generator)) return NULL; // Assert that the operands are unsigned int. // NumericType *numeric = (NumericType *)(&(child(0)->getValueId().getType())); GenAssert(numeric->getFSDatatype()==REC_BIN32_UNSIGNED && numeric->getScale()==0, "invalid first operand type to function PAGroup"); numeric = (NumericType *)(&(child(1)->getValueId().getType())); GenAssert(numeric->getFSDatatype()==REC_BIN32_UNSIGNED && numeric->getScale()==0, "invalid second operand type to function PAGroup"); numeric = (NumericType *)(&(child(2)->getValueId().getType())); GenAssert(numeric->getFSDatatype()==REC_BIN32_UNSIGNED && numeric->getScale()==0, "invalid third operand type to function PAGroup"); markAsPreCodeGenned(); return this; } ItemExpr * ScalarVariance::preCodeGen(Generator *generator) { if (nodeIsPreCodeGenned()) return this; if (! 
ItemExpr::preCodeGen(generator)) return NULL; NumericType *result_type = (NumericType *)(&(getValueId().getType())); NumericType *type_op1 = (NumericType *)(&(child(0)->castToItemExpr()->getValueId().getType())); NumericType *type_op2 = (NumericType *)(&(child(1)->castToItemExpr()->getValueId().getType())); NumericType *type_op3 = (NumericType *)(&(child(2)->castToItemExpr()->getValueId().getType())); GenAssert(result_type->getTypeQualifier() == NA_NUMERIC_TYPE && type_op1->getTypeQualifier() == NA_NUMERIC_TYPE && type_op2->getTypeQualifier() == NA_NUMERIC_TYPE && type_op3->getTypeQualifier() == NA_NUMERIC_TYPE && !result_type->isExact() && !type_op1->isExact() && !type_op2->isExact() && !type_op3->isExact() && result_type->getBinaryPrecision() == SQL_DOUBLE_PRECISION && type_op1->getBinaryPrecision() == SQL_DOUBLE_PRECISION && type_op2->getBinaryPrecision() == SQL_DOUBLE_PRECISION && type_op3->getBinaryPrecision() == SQL_DOUBLE_PRECISION, "ScalarVariance: Invalid Inputs"); markAsPreCodeGenned(); return this; }
ItemExpr * Substring::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; for (Int32 i = 1; i < getArity(); i++) { if (child(i)) { const NAType &typ1 = child(i)->getValueId().getType(); // Insert a cast node to convert child to an INT. child(i) = new (generator->wHeap()) Cast(child(i), new (generator->wHeap()) SQLInt(TRUE, typ1.supportsSQLnullLogical())); child(i)->bindNode(generator->getBindWA()); child(i) = child(i)->preCodeGen(generator); if (! child(i).getPtr()) return NULL; } } markAsPreCodeGenned(); return this; } // Substring::preCodeGen()
ItemExpr * ItemExpr::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; Lng32 nc = (Lng32)getArity(); for (Lng32 index = 0; index < nc; index++) { child(index) = child(index)->preCodeGen(generator); if (! child(index).getPtr()) return NULL; } markAsPreCodeGenned(); return this; } // ItemExpr::preCodeGen()
// ---------------------------------------------------------
// Methods for class VEGRewritePairs
// ---------------------------------------------------------
VEGRewritePairs::VEGRewritePairs(CollHeap* heap) : heap_(heap), vegRewritePairs_(&valueIdHashFunc, 1009, TRUE, heap) { } ULng32 VEGRewritePairs::valueIdHashFunc(const CollIndex & v) { return (ULng32)v; } const VEGRewritePairs::VEGRewritePair * VEGRewritePairs::getPair( const ValueId& original) const { CollIndex k(original); return vegRewritePairs_.getFirstValue(&k); } // getPair(..)
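// A minimal usage sketch for this rewrite cache (a hypothetical caller, for
// illustration only; 'pairs' and 'rewriteOnce' are made-up names):
//
//   ValueId rewritten;
//   if (NOT pairs.getRewritten(rewritten, original))
//   {
//     rewritten = rewriteOnce(original);  // rewrite 'original' the first time
//     pairs.insert(original, rewritten);  // cache it; insert() asserts no duplicate
//   }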
NABoolean VEGRewritePairs:: getRewritten(ValueId& rewritten, const ValueId& original) const { NABoolean found = FALSE; const VEGRewritePairs::VEGRewritePair * vrPairPtr = NULL; if ((vrPairPtr = getPair(original)) != NULL){ rewritten = vrPairPtr->getRewritten(); found = TRUE; } return found; } // getRewritten
VEGRewritePairs::~VEGRewritePairs() { clear(); } // VEGRewritePairs::~VEGRewritePairs()
void VEGRewritePairs::insert(const ValueId& original, const ValueId& rewritten) { // Precondition: // original must not have been rewritten before: CMPASSERT(getPair(original) == NULL); VEGRewritePairs::VEGRewritePair * vrPairPtr = new (heap_) VEGRewritePairs::VEGRewritePair(original,rewritten); CMPASSERT(vrPairPtr != NULL); CollIndex* key = (CollIndex*) new (heap_) CollIndex(original); vegRewritePairs_.insert(key, vrPairPtr); }
void VEGRewritePairs::VEGRewritePair::print(FILE *ofd) const { #pragma nowarn(1506) // warning elimination Lng32 orId = CollIndex(original_), #pragma warn(1506) // warning elimination #pragma nowarn(1506) // warning elimination reId = CollIndex(rewritten_); #pragma warn(1506) // warning elimination fprintf(ofd,"<%d, %d>",orId,reId); }
void VEGRewritePairs::print( FILE* ofd, const char* indent, const char* title) const { #pragma nowarn(1506) // warning elimination BUMP_INDENT(indent); #pragma warn(1506) // warning elimination fprintf(ofd,"%s %s\n%s",NEW_INDENT,title,NEW_INDENT); CollIndex *key; VEGRewritePair *value; NAHashDictionaryIterator<CollIndex, VEGRewritePair> iter(vegRewritePairs_); for (CollIndex i=0; i < iter.entries(); i++) { iter.getNext(key, value); value->print(ofd); } }
// PhysTranspose::preCodeGen() ------------------------------------------- // Perform local query rewrites such as for the creation and // population of intermediate tables, for accessing partitioned // data. Rewrite the value expressions after minimizing the dataflow // using the transitive closure of equality predicates. // // PhysTranspose::preCodeGen() - is basically the same as the RelExpr:: // preCodeGen() except that here we replace the VEG references in the // transUnionVals() as well as the selectionPred(). // // Parameters: // // Generator *generator // IN/OUT : A pointer to the generator object which contains the state, // and tools (e.g. expression generator) to generate code for // this node. // // ValueIdSet &externalInputs // IN : The set of external Inputs available to this node. // //
RelExpr * PhysTranspose::preCodeGen(Generator * generator, const ValueIdSet &externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Check if the pivs of this operator and its child are the same. // If they are not, make them the same. replacePivs(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. // getGroupAttr()->resolveCharacteristicInputs(externalInputs); // My Characteristic Inputs become the external inputs for my children. // Int32 nc = getArity(); for (Int32 index = 0; index < nc; index++) { ValueIdSet childPulledInputs; child(index) = child(index)->preCodeGen(generator, externalInputs, pulledNewInputs); if (! child(index).getPtr()) return NULL; // process additional input value ids the child wants getGroupAttr()->addCharacteristicInputs(childPulledInputs); pulledNewInputs += childPulledInputs; } // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. // ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); // The transUnionVals have access to only the Input Values. // These can come from the parent or be the outputs of the child. // for(CollIndex v = 0; v < transUnionVectorSize(); v++) { ValueIdList valIdList = transUnionVector()[v]; valIdList.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); } // The selectionPred has access to the output values generated by transpose, // as well as any input values from the parent or child. // getInputAndPotentialOutputValues(availableValues); // Rewrite the selection predicates. // NABoolean replicatePredicates = TRUE; selectionPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no key predicates here 0 /* no need for idempotence here */, replicatePredicates ); // Replace VEG references in the outputs and remove redundant // outputs. // getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); generator->oltOptInfo()->setMultipleRowsReturned(TRUE); markAsPreCodeGenned(); return this; } // PhysTranspose::preCodeGen
// ----------------------------------------------------------------------- // PhyPack::preCodeGen() is basically the same as RelExpr::preCodeGen(). // It replaces the VEG's in its packingExpr_ as well as selectionPred_. // -----------------------------------------------------------------------
RelExpr* PhyPack::preCodeGen(Generator* generator, const ValueIdSet& externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Check if the pivs of this operator and its child are the same. // If they are not, make them the same. replacePivs(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. // getGroupAttr()->resolveCharacteristicInputs(externalInputs); // My Characteristic Inputs become the external inputs for my children. // Int32 nc = getArity(); for(Int32 index = 0; index < nc; index++) { ValueIdSet childPulledInputs; child(index) = child(index)->preCodeGen(generator, externalInputs, pulledNewInputs); if(! child(index).getPtr()) return NULL; // process additional input value ids the child wants getGroupAttr()->addCharacteristicInputs(childPulledInputs); pulledNewInputs += childPulledInputs; } if (getFirstNRows() != -1) { RelExpr * firstn = new(generator->wHeap()) FirstN(child(0), getFirstNRows()); // move my child's attributes to the firstN node. // Estimated rows will be mine. firstn->setEstRowsUsed(getEstRowsUsed()); firstn->setMaxCardEst(getMaxCardEst()); firstn->setInputCardinality(child(0)->getInputCardinality()); firstn->setPhysicalProperty(child(0)->getPhysicalProperty()); firstn->setGroupAttr(child(0)->getGroupAttr()); //10-060516-6532 -Begin //When FIRSTN node is created after optimization phase, the cost //of that node does not matter. But display_explain and explain //show zero operator costs and rollup cost which confuses the user. //Also, the VQP crashes when cost tab for FIRSTN node is selected. //So, creating a cost object will fix this. //The operator cost is zero and rollup cost is same as its child's. Cost* firstnNodecost = new HEAP Cost(); firstn->setOperatorCost(firstnNodecost); Cost* rollupcost = (Cost *)(child(0)->getRollUpCost()); *rollupcost += *firstnNodecost; firstn->setRollUpCost(rollupcost); //10-060516-6532 -End firstn = firstn->preCodeGen(generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); if (! firstn) return NULL; setChild(0, firstn); } // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. // ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); const ValueIdSet& inputValues = getGroupAttr()->getCharacteristicInputs(); // Replace VEG's in both the packing expression and the packing factor. // packingFactor().replaceVEGExpressions(availableValues,inputValues); packingExpr().replaceVEGExpressions(availableValues,inputValues); // The selectionPred has access to the output values generated by Pack. // getInputAndPotentialOutputValues(availableValues); // Rewrite the selection predicates. // NABoolean replicatePredicates = TRUE; selectionPred().replaceVEGExpressions(availableValues, inputValues, FALSE, // no key predicates here 0 /* no need for idempotence here */, replicatePredicates ); // Replace VEG references in the outputs and remove redundant outputs. // getGroupAttr()->resolveCharacteristicOutputs(availableValues,inputValues); markAsPreCodeGenned(); return this; } // PhyPack::preCodeGen()
// //PrecodeGen method for class PhysicalTuple list //This was put in as a fix for cr 10-010327-1947. //Before the fix the RelExpr was getting to the generator //with a VEGRef still in it, because the VEGRef from the //tupleExpr had not been removed and resolved correctly.
RelExpr * PhysicalTuple::preCodeGen(Generator * generator, const ValueIdSet& externalInputs, ValueIdSet& pulledNewInputs_) { ValueIdSet availableValues = externalInputs; tupleExpr().replaceVEGExpressions (availableValues, externalInputs); return (RelExpr::preCodeGen(generator, availableValues, pulledNewInputs_)); } // PhysicalTuple::preCodeGen() //
RelExpr * PhysicalTupleList::preCodeGen(Generator * generator, const ValueIdSet& externalInputs, ValueIdSet& pulledNewInputs_) { ValueIdSet availableValues = externalInputs; tupleExpr().replaceVEGExpressions (availableValues, externalInputs); generator->oltOptInfo()->setMultipleRowsReturned(TRUE); return (RelExpr::preCodeGen(generator, availableValues, pulledNewInputs_)); } // PhysicalTupleList::preCodeGen()
RelExpr * CompoundStmt::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Check if the pivs of this operator and its child are the same. // If they are not, make them the same. replacePivs(); ValueIdSet availableValues; ValueIdSet childPulledInputs; // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. getGroupAttr()->resolveCharacteristicInputs(externalInputs); availableValues = getGroupAttr()->getCharacteristicInputs(); // This is similar to what is done in Join::precodeGen when we have a TSJ.
// A compound statement node behaves in a similar way to a TSJ node since // it flows values from left to right. // My Characteristic Inputs become the external inputs for my left child. child(0) = child(0)->preCodeGen(generator,availableValues,childPulledInputs); if (! child(0).getPtr()) return NULL; // process additional input value ids the child wants // (see RelExpr::preCodeGen()) getGroupAttr()->addCharacteristicInputs(childPulledInputs); pulledNewInputs += childPulledInputs; availableValues += childPulledInputs; childPulledInputs.clear(); // The values produced as output by my left child can be used as // "external" inputs by my right child. availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs(); // Process the right child child(1) = child(1)->preCodeGen(generator,availableValues,childPulledInputs); if (! child(1).getPtr()) return NULL; // process additional input value ids the child wants // (see RelExpr::preCodeGen()) getGroupAttr()->addCharacteristicInputs(childPulledInputs); pulledNewInputs += childPulledInputs; // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. getInputValuesFromParentAndChildren(availableValues); // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. getInputAndPotentialOutputValues(availableValues); // Rewrite the selection predicates. NABoolean replicatePredicates = TRUE; selectionPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need to generate key predicates here 0 /* no need for idempotence here */, replicatePredicates ); getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); // Xn will be aborted if there is any IUD stmt within this CS and // an error occurs at runtime. if (generator->foundAnUpdate()) { //generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); generator->setUpdErrorOnError(FALSE); //generator->setUpdPartialOnError(FALSE); } generator->setAqrEnabled(FALSE); markAsPreCodeGenned(); return this; } // CompoundStmt::preCodeGen RelExpr * FirstN::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; markAsPreCodeGenned(); return this; } // FirstN::preCodeGen RelExpr * RelRoutine::preCodeGen (Generator * generator, const ValueIdSet &externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (!RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. 
// ValueIdSet availableValues; availableValues = getGroupAttr()->getCharacteristicInputs(); const ValueIdSet &inputValues = getGroupAttr()->getCharacteristicInputs(); getProcInputParamsVids().replaceVEGExpressions(availableValues, inputValues); generator->setAqrEnabled(FALSE); markAsPreCodeGenned(); return this; }
RelExpr * IsolatedNonTableUDR::preCodeGen (Generator * generator, const ValueIdSet &externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (!RelRoutine::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. // ValueIdSet availableValues; availableValues = getGroupAttr()->getCharacteristicInputs(); const ValueIdSet &inputValues = getGroupAttr()->getCharacteristicInputs(); getNeededValueIds().replaceVEGExpressions(availableValues, inputValues); markAsPreCodeGenned(); return this; }
RelExpr * PhysicalTableMappingUDF::preCodeGen(Generator * generator, const ValueIdSet &externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (!RelRoutine::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); for(Int32 i = 0; i < getArity(); i++) { getChildInfo(i)->getOutputIds().replaceVEGExpressions( availableValues, getGroupAttr()->getCharacteristicInputs()); } planInfo_ = getPhysicalProperty()->getUDRPlanInfo(); if (!getDllInteraction()->finalizePlan(this, planInfo_)) return NULL; markAsPreCodeGenned(); return this; }
RelExpr * PhysicalFastExtract::preCodeGen (Generator * generator, const ValueIdSet &externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; generator->setIsFastExtract(TRUE); if (!RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); getSelectListIds().replaceVEGExpressions(availableValues, externalInputs); if (isAppend()) generator->setAqrEnabled(FALSE); // This relation is a linear fit to cpu consumption data observed during a // performance run, while extracting data from the LINEITEM table. CPU Usage // can go from 0% to 50% according to this relation. CPU Usage is determined // by 2 factors (a) bytes of data extracted and (b) % non-character // (termed numeric below) columns in each row (computed based on total max // row size and total non-char column size). Both factors have equal weight, // i.e. they can contribute at most 25% towards Cpu usage. For up to 50 GB of // extracted data, the bytes-extracted factor increases linearly from 0% to // 25%. After 50 GB (total volume across all partitions), the contribution to // cpu usage from bytes extracted does not increase. Similarly, a table with // all non-char columns can contribute up to 25% towards cpu usage. The numeric // factor is also weighted by the volume of data extracted.
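// A worked example of the formula below (illustrative numbers only, not from
// the original comment): extracting 25 GB where non-char columns make up half
// of the max row length gives bytesExtractedRatio = 25/50 = 0.5 and
// numericRowLengthRatio = 0.5, so maxCpuUsage = 50 * 0.5 * (0.5 + 0.5 * 0.5),
// i.e. 18 after integer truncation of 18.75.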
const Int32 plateauTabSizeInGB = 50; const float weightOfBaseExtract = 0.5; const float weightOfNumericExpressionEval = 0.5; const Int32 maxPossibleCpuUsage = 50 ; // in percentage units Int32 rowLength = child(0).getGroupAttr()->getCharacteristicOutputs().getRowLength(); Int32 numericRowLength = child(0).getGroupAttr()-> getCharacteristicOutputs().getRowLengthOfNumericCols(); float numericRowLengthRatio = ((float) numericRowLength)/rowLength ; double bytesExtractedInGB = (getEstRowsUsed().value()*rowLength)/(1024*1024*1024); double bytesExtractedRatio = bytesExtractedInGB/plateauTabSizeInGB ; if (bytesExtractedRatio > 1) bytesExtractedRatio = 1; Int32 maxCpuUsage = (Int32) (maxPossibleCpuUsage*bytesExtractedRatio*(weightOfBaseExtract + (weightOfNumericExpressionEval*numericRowLengthRatio))); generator->setMaxCpuUsage(maxCpuUsage); markAsPreCodeGenned(); return this; }
RelExpr * RelLock::preCodeGen (Generator * generator, const ValueIdSet &externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Since the newExch node is added as the parent // to the SequenceGenerator node, this method gets // called again during the preCodeGen of the // newExch. if(parallelExecution_) { // Add an exchange node here so this could be executed in ESP. RelExpr * exchange = new(generator->wHeap()) Exchange (this); exchange->setPhysicalProperty(this->getPhysicalProperty()); exchange->setGroupAttr(this->getGroupAttr()); markAsPreCodeGenned(); exchange = exchange->preCodeGen(generator, externalInputs, pulledNewInputs); // Done. return exchange; /* RelExpr *newExch = generator->insertEspExchange(this, getPhysicalProperty()); ((Exchange *)newExch)->makeAnESPAccess(); markAsPreCodeGenned(); RelExpr * exch = newExch->preCodeGen(generator, externalInputs, pulledNewInputs); return exch; */ } if (!RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; markAsPreCodeGenned(); return this; }
RelExpr * StatisticsFunc::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // don't collect stats for stats func itself generator->setComputeStats(FALSE); markAsPreCodeGenned(); // Done. return this; }
RelExpr * ExeUtilGetStatistics::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // don't collect stats for stats func itself generator->setComputeStats(FALSE); markAsPreCodeGenned(); // Done. return this; }
RelExpr * ExeUtilWnrInsert::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; markAsPreCodeGenned(); return this; }
ItemExpr * PositionFunc::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; if (! BuiltinFunction::preCodeGen(generator)) return NULL; const NAType &type1 = child(0)->castToItemExpr()->getValueId().getType(); const NAType &type2 = child(1)->castToItemExpr()->getValueId().getType(); CMPASSERT( (type1.getTypeQualifier() == NA_CHARACTER_TYPE) && (type2.getTypeQualifier() == NA_CHARACTER_TYPE)); const CharType &cType1 = (CharType&)type1; const CharType &cType2 = (CharType&)type2; CharInfo::Collation coll1 = cType1.getCollation(); CharInfo::Collation coll2 = cType2.getCollation(); CMPASSERT(coll1==coll2); setCollation(coll1); if (CollationInfo::isSystemCollation(coll1)) { { ItemExpr * newEncode = new(generator->wHeap()) CompEncode(child(0),FALSE, -1, CollationInfo::Search); newEncode = newEncode->bindNode(generator->getBindWA()); newEncode = newEncode->preCodeGen(generator); if (!newEncode) return NULL; setChild(0, newEncode); newEncode = new(generator->wHeap()) CompEncode(child(1), FALSE, -1,CollationInfo::Search); newEncode->bindNode(generator->getBindWA()); newEncode = newEncode->preCodeGen(generator); if (!newEncode) return NULL; setChild(1, newEncode); } } markAsPreCodeGenned(); return this; } // PositionFunc::preCodeGen()
ItemExpr * Trim::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; if (! BuiltinFunction::preCodeGen(generator)) return NULL; const NAType &type1 = child(0)->castToItemExpr()->getValueId().getType(); const NAType &type2 = child(1)->castToItemExpr()->getValueId().getType(); CMPASSERT( (type1.getTypeQualifier() == NA_CHARACTER_TYPE) && (type2.getTypeQualifier() == NA_CHARACTER_TYPE)); const CharType &cType1 = (CharType&)type1; const CharType &cType2 = (CharType&)type2; CharInfo::Collation coll1 = cType1.getCollation(); CharInfo::Collation coll2 = cType2.getCollation(); CMPASSERT(coll1==coll2); setCollation(coll1); markAsPreCodeGenned(); return this; } // Trim::preCodeGen()
ItemExpr * NotIn::preCodeGen(Generator * generator) { if (child(0)->getOperatorType() == ITM_ITEM_LIST) {//Multicolumn NotIn should not reach this far GenAssert(FALSE,"Multicolumn NotIn should not have reached this far"); return NULL; } if (nodeIsPreCodeGenned()) { return getReplacementExpr(); } // if single column NOT IN reaches pre-code generation, then replace it with // non equi-predicate form (NE) // An example of cases where NotIn reaches this far is a query like // select * from ta where (select sum(a2) from ta) not in (select b2 from tb); // where the NotIn predicate gets pushed down and is not caught at optimization // time ValueId vid = createEquivNonEquiPredicate(); ItemExpr * newPred = vid.getItemExpr(); setReplacementExpr(newPred->preCodeGen(generator)); markAsPreCodeGenned(); return getReplacementExpr(); } // NotIn::preCodeGen()
short HbaseAccess::processSQHbaseKeyPreds(Generator * generator, NAList<HbaseSearchKey*>& searchKeys, ListOfUniqueRows &listOfUniqueRows, ListOfRangeRows &listOfRangeRows) { Int32 ct = 0; HbaseUniqueRows getSpec; getSpec.rowTS_ = -1; for (CollIndex i = 0; i<searchKeys.entries(); i++ ) { HbaseSearchKey* searchKey = searchKeys[i]; ValueIdSet newSelectionPreds; if ( searchKey->isUnique() ) { // Since we fill one rowId per entry, we will be using the getRow() form of Get. if ( (ct=searchKey->getCoveredLeadingKeys()) > 0 ) { NAString result; ValueIdList keyValues = searchKey->getBeginKeyValues(); keyValues.convertToTextKey(searchKey->getKeyColumns(), result); getSpec.rowIds_.insert(result); } // getSpec.addColumnNames(searchKey->getRequiredOutputColumns()); } else { // Multiple rows.
Do Scan HbaseRangeRows scanSpec; scanSpec.beginKeyExclusive_ = FALSE; scanSpec.endKeyExclusive_ = FALSE; scanSpec.rowTS_ = -1; if ( !searchKey->areAllBeginKeysMissing() ) { if ( (ct=searchKey->getCoveredLeadingKeys()) > 0 ) { ValueIdList beginKeyValues = searchKey->getBeginKeyValues(); beginKeyValues.convertToTextKey(searchKey->getKeyColumns(), scanSpec.beginRowId_); scanSpec.beginKeyExclusive_ = searchKey->isBeginKeyExclusive(); } } if ( !searchKey->areAllEndKeysMissing() ) { if ( (ct=searchKey->getCoveredLeadingKeys()) ) { ValueIdList endKeyValues = searchKey->getEndKeyValues(); endKeyValues.convertToTextKey(searchKey->getKeyColumns(), scanSpec.endRowId_); scanSpec.endKeyExclusive_ = searchKey->isEndKeyExclusive(); } } // scanSpec.addColumnNames(searchKey->getRequiredOutputColumns()); listOfRangeRows.insertAt(listOfRangeRows.entries(), scanSpec); } } // for if (getSpec.rowIds_.entries() > 0) listOfUniqueRows.insert(getSpec); return 0; } short HbaseAccess::processNonSQHbaseKeyPreds(Generator * generator, ValueIdSet &preds, ListOfUniqueRows &listOfUniqueRows, ListOfRangeRows &listOfRangeRows) { ValueId vid; ValueId eqRowIdValVid; ValueId eqColNameValVid; ItemExpr * ie = NULL; NABoolean rowIdFound = FALSE; NABoolean colNameFound = FALSE; NABoolean isConstParam = FALSE; ValueIdList newPredList; NABoolean addToNewPredList; HbaseUniqueRows hg; HbaseRangeRows hs; for (vid = preds.init(); (preds.next(vid)); preds.advance(vid)) { ie = vid.getItemExpr(); addToNewPredList = TRUE; ConstValue * constVal = NULL; if ((NOT rowIdFound) && (isEqGetExpr(ie, eqRowIdValVid, isConstParam, "ROW_ID"))) { rowIdFound = TRUE; if (isConstParam) { ConstantParameter*cp = (ConstantParameter*)eqRowIdValVid.getItemExpr(); constVal = cp->getConstVal(); } else constVal = (ConstValue*)eqRowIdValVid.getItemExpr(); NAString rid = *constVal->getRawText(); hg.rowIds_.insert(rid); hg.rowTS_ = -1; addToNewPredList = FALSE; } if (isEqGetExpr(ie, eqColNameValVid, isConstParam, "COL_NAME")) { colNameFound = TRUE; if (isConstParam) { ConstantParameter*cp = (ConstantParameter*)eqColNameValVid.getItemExpr(); constVal = cp->getConstVal(); } else constVal = (ConstValue*)eqColNameValVid.getItemExpr(); NAString col = *constVal->getRawText(); hg.colNames_.insert(col); hs.colNames_.insert(col); addToNewPredList = FALSE; } if (addToNewPredList) newPredList.insert(vid); } // for if ((rowIdFound) || (colNameFound)) { preds.clear(); preds.insertList(newPredList); } if (rowIdFound) { listOfUniqueRows.insert(hg); } else { hs.rowTS_ = -1; listOfRangeRows.insert(hs); } // markAsPreCodeGenned(); // Done. return 0; } //////////////////////////////////////////////////////////////////////////// // To push down, the predicate must have the following form: // <column> <op> <value-expr> // // and all of the following conditions must be met: // // <column>: a base table or index column which can be serialized. // serialized: either the column doesn't need encoding, like // an unsigned integer, or the column // was declared with the SERIALIZED option. // <op>: eq, ne, gt, ge, lt, le // <value-expr>: an expression that only contains const or param values, and // <value-expr>'s datatype is not a superset of <column>'s datatype. 
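// For illustration, some hypothetical examples (not from the original comment):
//   WHERE salary >= 50000        -- pushable: column vs. constant
//   WHERE salary >= :hv + 1000   -- pushable: value-expr built only from consts/params
//   WHERE salary >= bonus        -- not pushable: column references on both sides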
// ///////////////////////////////////////////////////////////////////////////// NABoolean HbaseAccess::isHbaseFilterPred(Generator * generator, ItemExpr * ie, ValueId &colVID, ValueId &valueVID, NAString &op, NABoolean &removeFromOrigList) { NABoolean found = FALSE; removeFromOrigList = FALSE; NABoolean hbaseLookupPred = FALSE; NABoolean flipOp = FALSE; // set to TRUE when column is child(1) if (ie && ((ie->getOperatorType() >= ITM_EQUAL) && (ie->getOperatorType() <= ITM_GREATER_EQ))) { ItemExpr * child0 = ie->child(0)->castToItemExpr(); ItemExpr * child1 = ie->child(1)->castToItemExpr(); if ((ie->child(0)->getOperatorType() == ITM_BASECOLUMN) && (NOT hasColReference(ie->child(1)))) { found = TRUE; colVID = ie->child(0)->getValueId(); valueVID = ie->child(1)->getValueId(); } else if ((ie->child(1)->getOperatorType() == ITM_BASECOLUMN) && (NOT hasColReference(ie->child(0)))) { found = TRUE; flipOp = TRUE; colVID = ie->child(1)->getValueId(); valueVID = ie->child(0)->getValueId(); } else if ((ie->child(0)->getOperatorType() == ITM_INDEXCOLUMN) && (NOT hasColReference(ie->child(1)))) { found = TRUE; colVID = ie->child(0)->getValueId(); valueVID = ie->child(1)->getValueId(); } else if ((ie->child(1)->getOperatorType() == ITM_INDEXCOLUMN) && (NOT hasColReference(ie->child(0)))) { found = TRUE; flipOp = TRUE; colVID = ie->child(1)->getValueId(); valueVID = ie->child(0)->getValueId(); } else if ((ie->child(0)->getOperatorType() == ITM_REFERENCE) && (NOT hasColReference(ie->child(1)))) { found = TRUE; colVID = ie->child(0)->getValueId(); valueVID = ie->child(1)->getValueId(); } else if ((ie->child(1)->getOperatorType() == ITM_REFERENCE) && (NOT hasColReference(ie->child(0)))) { found = TRUE; flipOp = TRUE; colVID = ie->child(1)->getValueId(); valueVID = ie->child(0)->getValueId(); } else if ((ie->child(0)->getOperatorType() == ITM_HBASE_COLUMN_LOOKUP) && (NOT hasColReference(ie->child(1)))) { HbaseColumnLookup * hcl = (HbaseColumnLookup*)ie->child(0)->castToItemExpr(); if (hcl->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE) { hbaseLookupPred = TRUE; ItemExpr * newCV = new(generator->wHeap()) ConstValue(hcl->hbaseCol()); newCV = newCV->bindNode(generator->getBindWA()); newCV = newCV->preCodeGen(generator); found = TRUE; colVID = newCV->getValueId(); valueVID = ie->child(1)->getValueId(); } } else if ((ie->child(1)->getOperatorType() == ITM_HBASE_COLUMN_LOOKUP) && (NOT hasColReference(ie->child(0)))) { HbaseColumnLookup * hcl = (HbaseColumnLookup*)ie->child(1)->castToItemExpr(); if (hcl->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE) { hbaseLookupPred = TRUE; ItemExpr * newCV = new(generator->wHeap()) ConstValue(hcl->hbaseCol()); newCV = newCV->bindNode(generator->getBindWA()); newCV = newCV->preCodeGen(generator); found = TRUE; flipOp = TRUE; colVID = newCV->getValueId(); valueVID = ie->child(0)->getValueId(); } } } if (found) { const NAType &colType = colVID.getType(); const NAType &valueType = valueVID.getType(); NABoolean generateNarrow = FALSE; if (NOT hbaseLookupPred) { generateNarrow = valueType.errorsCanOccur(colType); if ((generateNarrow) || // value not a superset of column (NOT columnEnabledForSerialization(colVID.getItemExpr()))) found = FALSE; } if (found) { if (colType.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &charColType = (CharType&)colType; const CharType &charValType = (CharType&)valueType; if ((charColType.isCaseinsensitive() || charValType.isCaseinsensitive()) || (charColType.isUpshifted() || charValType.isUpshifted())) found = 
FALSE; } else if (colType.getTypeQualifier() == NA_NUMERIC_TYPE) { const NumericType &numType = (NumericType&)colType; const NumericType &valType = (NumericType&)valueType; if (numType.isBigNum() || valType.isBigNum()) found = FALSE; } } if (found) { if ((ie) && (((BiRelat*)ie)->addedForLikePred()) && (valueVID.getItemExpr()->getOperatorType() == ITM_CONSTANT)) { // remove trailing '\0' characters since this is being pushed down to hbase. ConstValue * cv = (ConstValue*)(valueVID.getItemExpr()); char * cvv = (char*)cv->getConstValue(); Lng32 len = cv->getStorageSize() - 1; while ((len > 0) && (cvv[len] == '\0')) len--; NAString newCVV(cvv, len+1); ItemExpr * newCV = new(generator->wHeap()) ConstValue(newCVV); newCV = newCV->bindNode(generator->getBindWA()); newCV = newCV->preCodeGen(generator); valueVID = newCV->getValueId(); } ItemExpr * castValue = NULL; if (NOT hbaseLookupPred) castValue = new(generator->wHeap()) Cast(valueVID.getItemExpr(), &colType); else { castValue = new(generator->wHeap()) Cast(valueVID.getItemExpr(), &valueVID.getType()); } if ((NOT hbaseLookupPred) && (isEncodingNeededForSerialization(colVID.getItemExpr()))) { castValue = new(generator->wHeap()) CompEncode (castValue, FALSE, -1, CollationInfo::Sort, TRUE, FALSE); } castValue = castValue->bindNode(generator->getBindWA()); castValue = castValue->preCodeGen(generator); valueVID = castValue->getValueId(); // hbase pred evaluation compares the column byte string with the // value byte string. It doesn't have a notion of nullability. // For a nullable value stored in the database, the first byte represents // if the value is a null value. // During pred evaluation in hbase, a null value could either get filtered // out due to byte string comparison, or it may get returned back. // For ex, <col> <gt> <value> // will return TRUE if the first byte of <col> is a null value. // Similarly, <col> <lt> <value> // will return FALSE if the first byte of <col> is a null value. // If a null value gets filtered out, then that is correct semantics. // But if the null value gets returned to the executor, then it still needs to be // filtered out. To do that, the predicate needs to be evaluated in the executor // with proper null semantics. // // Long story short, do not remove the original pred if the col or value is // nullable.
// if ((colType.supportsSQLnull()) || (valueType.supportsSQLnull())) { removeFromOrigList = FALSE; } else { removeFromOrigList = TRUE; } if (ie->getOperatorType() == ITM_EQUAL) op = "EQUAL"; else if (ie->getOperatorType() == ITM_NOT_EQUAL) op = "NOT_EQUAL"; else if (ie->getOperatorType() == ITM_LESS) { if (flipOp) op = "GREATER"; else op = "LESS"; } else if (ie->getOperatorType() == ITM_LESS_EQ) { if (flipOp) op = "GREATER_OR_EQUAL"; else op = "LESS_OR_EQUAL"; } else if (ie->getOperatorType() == ITM_GREATER) { if (flipOp) op = "LESS"; else op = "GREATER"; } else if (ie->getOperatorType() == ITM_GREATER_EQ) { if (flipOp) op = "LESS_OR_EQUAL"; else op = "GREATER_OR_EQUAL"; } else op = "NO_OP"; } } return found; }
short HbaseAccess::extractHbaseFilterPreds(Generator * generator, ValueIdSet &preds, ValueIdSet &newExePreds) { if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_OFF) return 0; // cannot push preds for aligned format row NABoolean isAlignedFormat = getTableDesc()->getNATable()->isAlignedFormat(getIndexDesc()); if (isAlignedFormat) return 0; for (ValueId vid = preds.init(); (preds.next(vid)); preds.advance(vid)) { ItemExpr * ie = vid.getItemExpr(); // if it is AND operation, recurse through left and right children if (ie->getOperatorType() == ITM_AND) { ValueIdSet leftPreds; ValueIdSet rightPreds; leftPreds += ie->child(0)->castToItemExpr()->getValueId(); rightPreds += ie->child(1)->castToItemExpr()->getValueId(); extractHbaseFilterPreds(generator, leftPreds, newExePreds); extractHbaseFilterPreds(generator, rightPreds, newExePreds); continue; } ValueId colVID; ValueId valueVID; NABoolean removeFromOrigList = FALSE; NAString op; NABoolean isHFP = isHbaseFilterPred(generator, ie, colVID, valueVID, op, removeFromOrigList); if (isHFP) { hbaseFilterColVIDlist_.insert(colVID); hbaseFilterValueVIDlist_.insert(valueVID); opList_.insert(op); if (NOT removeFromOrigList) newExePreds.insert(vid); } else { newExePreds.insert(vid); } } // end for return 0; }
//////////////////////////////////////////////////////////////////////////// // To push down, the predicate must have the following form: // xp:= <column> <op> <value-expr> // xp:= <column> is not null (no support for hbase lookup) // xp:= <column> is null (no support for hbase lookup) // (xp:=<column> like <value-expr> not yet implemented) // xp:=<xp> OR <xp> (not evaluated in isHbaseFilterPredV2, but by extractHbaseFilterPredsV2) // xp:=<xp> AND <xp> (not evaluated in isHbaseFilterPredV2, but by extractHbaseFilterPredsV2) // // and all of the following conditions must be met: // // <column>: a base table or index column which can be serialized and belongs to the table being scanned. // serialized: either the column doesn't need encoding, like // an unsigned integer, or the column // was declared with the SERIALIZED option. // it also must not be an added column with a non-null default. // <op>: eq, ne, gt, ge, lt, le // <value-expr>: an expression that only contains const or param values, and // <value-expr>'s datatype is not a superset of <column>'s datatype. // // colVID, valueVID and op are output parameters.
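// For illustration, some hypothetical examples (not from the original comment):
//   WHERE c1 > 10 OR c2 IS NULL   -- pushable: both children of the OR qualify
//   WHERE c1 > c2                 -- not pushable: column references on both sides
//   WHERE c1 > 10 OR c1 < c2      -- not pushable: one child of the OR fails, so
//                                    the whole OR is left to the executor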
///////////////////////////////////////////////////////////////////////////// NABoolean HbaseAccess::isHbaseFilterPredV2(Generator * generator, ItemExpr * ie, ValueId &colVID, ValueId &valueVID, NAString &op) { NABoolean foundBinary = FALSE; NABoolean foundUnary = FALSE; NABoolean hbaseLookupPred = FALSE; NABoolean flipOp = FALSE; // set to TRUE when column is child(1) if (ie && ((ie->getOperatorType() >= ITM_EQUAL) && (ie->getOperatorType() <= ITM_GREATER_EQ))) //binary operator case {//begin expression ItemExpr * child0 = ie->child(0)->castToItemExpr(); ItemExpr * child1 = ie->child(1)->castToItemExpr(); if ((ie->child(0)->getOperatorType() == ITM_BASECOLUMN) && (NOT hasColReference(ie->child(1)))) { foundBinary = TRUE; colVID = ie->child(0)->getValueId(); valueVID = ie->child(1)->getValueId(); } else if ((ie->child(1)->getOperatorType() == ITM_BASECOLUMN) && (NOT hasColReference(ie->child(0)))) { foundBinary = TRUE; flipOp = TRUE; colVID = ie->child(1)->getValueId(); valueVID = ie->child(0)->getValueId(); } else if ((ie->child(0)->getOperatorType() == ITM_INDEXCOLUMN) && (NOT hasColReference(ie->child(1)))) { foundBinary = TRUE; colVID = ie->child(0)->getValueId(); valueVID = ie->child(1)->getValueId(); } else if ((ie->child(1)->getOperatorType() == ITM_INDEXCOLUMN) && (NOT hasColReference(ie->child(0)))) { foundBinary = TRUE; flipOp = TRUE; colVID = ie->child(1)->getValueId(); valueVID = ie->child(0)->getValueId(); } else if ((ie->child(0)->getOperatorType() == ITM_HBASE_COLUMN_LOOKUP) && (NOT hasColReference(ie->child(1)))) { HbaseColumnLookup * hcl = (HbaseColumnLookup*)ie->child(0)->castToItemExpr(); if (hcl->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE) { hbaseLookupPred = TRUE; ItemExpr * newCV = new(generator->wHeap()) ConstValue(hcl->hbaseCol()); newCV = newCV->bindNode(generator->getBindWA()); newCV = newCV->preCodeGen(generator); foundBinary = TRUE; colVID = newCV->getValueId(); valueVID = ie->child(1)->getValueId(); } } else if ((ie->child(1)->getOperatorType() == ITM_HBASE_COLUMN_LOOKUP) && (NOT hasColReference(ie->child(0)))) { HbaseColumnLookup * hcl = (HbaseColumnLookup*)ie->child(1)->castToItemExpr(); if (hcl->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE) { hbaseLookupPred = TRUE; ItemExpr * newCV = new(generator->wHeap()) ConstValue(hcl->hbaseCol()); newCV = newCV->bindNode(generator->getBindWA()); newCV = newCV->preCodeGen(generator); foundBinary = TRUE; flipOp = TRUE; colVID = newCV->getValueId(); valueVID = ie->child(0)->getValueId(); } } }//end binary operators else if (ie && ((ie->getOperatorType() == ITM_IS_NULL)||(ie->getOperatorType() == ITM_IS_NOT_NULL))){//check for unary operators ItemExpr * child0 = ie->child(0)->castToItemExpr(); if ((ie->child(0)->getOperatorType() == ITM_BASECOLUMN) || (ie->child(0)->getOperatorType() == ITM_INDEXCOLUMN)){ foundUnary = TRUE; colVID = ie->child(0)->getValueId(); valueVID = NULL_VALUE_ID; } }//end unary operators //check if found columns belong to table being scanned (so is not an input to the scan node) if (foundBinary || foundUnary){ ValueId dummyValueId; if (getGroupAttr()->getCharacteristicInputs().referencesTheGivenValue(colVID,dummyValueId)){ foundBinary=FALSE; foundUnary=FALSE; } } //check if not an added column with default non null if ((foundBinary || foundUnary)&& (NOT hbaseLookupPred)){ if (colVID.isAddedColumnWithNonNullDefault()){ foundBinary=FALSE; foundUnary=FALSE; } } if (foundBinary) { const NAType &colType = colVID.getType(); const NAType &valueType = valueVID.getType(); 
NABoolean generateNarrow = FALSE; if (NOT hbaseLookupPred) { generateNarrow = valueType.errorsCanOccur(colType); if ((generateNarrow) || // value not a superset of column (NOT columnEnabledForSerialization(colVID.getItemExpr()))) foundBinary = FALSE; } if (foundBinary) { if (colType.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &charColType = (CharType&)colType; const CharType &charValType = (CharType&)valueType; if ((charColType.isCaseinsensitive() || charValType.isCaseinsensitive()) || (charColType.isUpshifted() || charValType.isUpshifted())) foundBinary = FALSE; } else if (colType.getTypeQualifier() == NA_NUMERIC_TYPE) { const NumericType &numType = (NumericType&)colType; const NumericType &valType = (NumericType&)valueType; if (numType.isBigNum() || valType.isBigNum()) foundBinary = FALSE; } } if (foundBinary) { if ((ie) && (((BiRelat*)ie)->addedForLikePred()) && (valueVID.getItemExpr()->getOperatorType() == ITM_CONSTANT)) { // remove trailing '\0' characters since this is being pushed down to hbase. ConstValue * cv = (ConstValue*)(valueVID.getItemExpr()); char * cvv = (char*)cv->getConstValue(); Lng32 len = cv->getStorageSize() - 1; while ((len > 0) && (cvv[len] == '\0')) len--; NAString newCVV(cvv, len+1); ItemExpr * newCV = new(generator->wHeap()) ConstValue(newCVV); newCV = newCV->bindNode(generator->getBindWA()); newCV = newCV->preCodeGen(generator); valueVID = newCV->getValueId(); } ItemExpr * castValue = NULL; if (NOT hbaseLookupPred) castValue = new(generator->wHeap()) Cast(valueVID.getItemExpr(), &colType); else { castValue = new(generator->wHeap()) Cast(valueVID.getItemExpr(), &valueVID.getType()); } if ((NOT hbaseLookupPred) && (isEncodingNeededForSerialization(colVID.getItemExpr()))) { castValue = new(generator->wHeap()) CompEncode (castValue, FALSE, -1, CollationInfo::Sort, TRUE, FALSE); } castValue = castValue->bindNode(generator->getBindWA()); castValue = castValue->preCodeGen(generator); valueVID = castValue->getValueId(); NAString nullType; if ((colType.supportsSQLnull()) || (valueType.supportsSQLnull())) { nullType = "_NULL"; } else { nullType = ""; } // append _NULL to the operator to signal to the Java code generating the pushdown filters that NULL semantics must be handled if (ie->getOperatorType() == ITM_EQUAL) op = "EQUAL"+nullType; else if (ie->getOperatorType() == ITM_NOT_EQUAL) op = "NOT_EQUAL"+nullType; else if (ie->getOperatorType() == ITM_LESS){ if (flipOp) op = "GREATER"+nullType; else op = "LESS"+nullType; } else if (ie->getOperatorType() == ITM_LESS_EQ){ if (flipOp) op = "GREATER_OR_EQUAL"+nullType; else op = "LESS_OR_EQUAL"+nullType; }else if (ie->getOperatorType() == ITM_GREATER){ if (flipOp) op = "LESS"+nullType; else op = "GREATER"+nullType; }else if (ie->getOperatorType() == ITM_GREATER_EQ){ if (flipOp) op = "LESS_OR_EQUAL"+nullType; else op = "GREATER_OR_EQUAL"+nullType; }else op = "NO_OP"+nullType; } } if (foundUnary){ const NAType &colType = colVID.getType(); NAString nullType; if (colType.supportsSQLnull()) { nullType = "_NULL"; } else { nullType = ""; } if (ie->getOperatorType() == ITM_IS_NULL) op = "IS_NULL"+nullType; else if (ie->getOperatorType() == ITM_IS_NOT_NULL) op = "IS_NOT_NULL"+nullType; } return foundBinary || foundUnary; }
short HbaseAccess::extractHbaseFilterPredsVX(Generator * generator, ValueIdSet &preds, ValueIdSet &newExePreds){ // separate out the code that does not belong in the recursive function if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_OFF) return 0; // check if initial (version 1) implementation if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_MINIMUM) return extractHbaseFilterPreds(generator,preds,newExePreds); // if here, we are DF_MEDIUM // cannot push preds for aligned format row NABoolean isAlignedFormat = getTableDesc()->getNATable()->isAlignedFormat(getIndexDesc()); if (isAlignedFormat) return 0; //recursive function call opList_.insert("V2");// add a "V2" marker to instruct the java side that we are dealing with predicate pushdown V2 semantics extractHbaseFilterPredsV2(generator,preds,newExePreds,FALSE); return 0; }
// return true if successful push-down of node NABoolean HbaseAccess::extractHbaseFilterPredsV2(Generator * generator, ValueIdSet &preds, ValueIdSet &newExePreds, NABoolean checkOnly) { // checkOnly is used to test whether a subtree is fully pushable before actually generating its pushed-down form, so a top level predicate that is not can still be passed to the executor int addedNode=0; for (ValueId vid = preds.init(); (preds.next(vid)); preds.advance(vid)) { ItemExpr * ie = vid.getItemExpr(); // if it is AND operation, recurse through left and right children if (ie->getOperatorType() == ITM_AND){ ValueIdSet leftPreds; ValueIdSet rightPreds; leftPreds += ie->child(0)->castToItemExpr()->getValueId(); rightPreds += ie->child(1)->castToItemExpr()->getValueId(); //cannot be first AND layer, both left and right must be pushable to get anything pushed if(extractHbaseFilterPredsV2(generator, leftPreds, newExePreds, TRUE) && extractHbaseFilterPredsV2(generator, rightPreds, newExePreds, TRUE)){// both left and right child must match if(!checkOnly){ extractHbaseFilterPredsV2(generator, leftPreds, newExePreds, FALSE);//generate tree extractHbaseFilterPredsV2(generator, rightPreds, newExePreds, FALSE);//generate tree opList_.insert("AND"); } if (preds.entries()==1) return TRUE; } else{ if(!checkOnly){ newExePreds.insert(vid); } if (preds.entries()==1) return FALSE; } continue; // the OR case is easier, as we don't have the case of a top level expression that can still be pushed to the executor }//end if AND else if(ie->getOperatorType() == ITM_OR){ ValueIdSet leftPreds; ValueIdSet rightPreds; leftPreds += ie->child(0)->castToItemExpr()->getValueId(); rightPreds += ie->child(1)->castToItemExpr()->getValueId(); //both left and right must be pushable to get anything pushed if(extractHbaseFilterPredsV2(generator, leftPreds, newExePreds, TRUE) && extractHbaseFilterPredsV2(generator, rightPreds, newExePreds, TRUE)){// both left and right child must match if(!checkOnly){ extractHbaseFilterPredsV2(generator, leftPreds, newExePreds, FALSE);//generate tree extractHbaseFilterPredsV2(generator, rightPreds, newExePreds, FALSE);//generate tree opList_.insert("OR"); if (addedNode>0) opList_.insert("AND"); // if it is not the first node added to the push-down, AND it with the rest addedNode++; // we just pushed it down, so increase the node count pushed down. } if (preds.entries()==1) return TRUE; } else{// if predicate cannot be pushed down if(!checkOnly){ newExePreds.insert(vid); } if (preds.entries()==1) return FALSE; } continue; }//end if OR ValueId colVID; ValueId valueVID; NAString op; NABoolean isHFP = isHbaseFilterPredV2(generator, ie, colVID, valueVID, op); if (isHFP && !checkOnly){// if pushable, push it hbaseFilterColVIDlist_.insert(colVID); if (valueVID != NULL_VALUE_ID) hbaseFilterValueVIDlist_.insert(valueVID);// don't insert valueVID for unary operators. opList_.insert(op); if (addedNode>0) opList_.insert("AND"); // if it is not the first node added to the push-down, AND it with the rest addedNode++; // we just pushed it down, so increase the node count pushed down. } else if (!checkOnly){ // if not pushable, pass it for executor evaluation. newExePreds.insert(vid); } if (preds.entries()==1){ return isHFP; // if we are not on the first call level, where we can have multiple preds, exit returning the pushability } } // end for return TRUE;//don't really care, means we are top level. }
void HbaseAccess::computeRetrievedCols() { GroupAttributes fakeGA; ValueIdSet requiredValueIds(getGroupAttr()-> getCharacteristicOutputs()); ValueIdSet coveredExprs; // --------------------------------------------------------------------- // Make fake group attributes with all inputs that are available to // the file scan node and with no "native" values. // Then call the "coverTest" method, offering it all the index columns // as additional inputs. "coverTest" will mark those index columns that // it actually needs to satisfy the required value ids, and that is // what we actually want. The actual cover test should always succeed, // otherwise the FileScan node would have been inconsistent. // --------------------------------------------------------------------- fakeGA.addCharacteristicInputs(getGroupAttr()->getCharacteristicInputs()); requiredValueIds += selectionPred(); requiredValueIds += executorPred(); fakeGA.coverTest(requiredValueIds, // char outputs + preds getIndexDesc()->getIndexColumns(), // all index columns coveredExprs, // dummy parameter retrievedCols()); // needed index cols // // *** This CMPASSERT goes off sometimes, indicating an actual problem. // Hans has agreed to look into it (10/18/96) but I (brass) am // commenting it out for now, for the sake of my time in doing a check-in. // // CMPASSERT(coveredExprs == requiredValueIds); }
RelExpr * HbaseAccess::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; const PartitioningFunction* myPartFunc = getPartFunc(); // use const HBase keys only if we don't have to add // partitioning key predicates if ( myPartFunc == NULL || !myPartFunc->isPartitioned() || myPartFunc->isAReplicationPartitioningFunction()) if (!processConstHBaseKeys( generator, this, getSearchKey(), getIndexDesc(), executorPred(), getHbaseSearchKeys(), listOfUniqueRows_, listOfRangeRows_)) return NULL; if (! FileScan::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; //compute isUnique: NABoolean isUnique = FALSE; if (listOfRangeRows_.entries() == 0) { if ((searchKey() && searchKey()->isUnique()) && (listOfUniqueRows_.entries() == 0)) isUnique = TRUE; else if ((NOT (searchKey() && searchKey()->isUnique())) && (listOfUniqueRows_.entries() == 1) && (listOfUniqueRows_[0].rowIds_.entries() == 1)) isUnique = TRUE; } // executorPred() contains an ANDed list of predicates. // if hbase filter preds are enabled, then extract those preds from executorPred() // which could be pushed down to hbase. // Do this only for non-unique scan access. ValueIdSet newExePreds; ValueIdSet* originExePreds = new (generator->wHeap()) ValueIdSet(executorPred()); // saved for future nullable column check if (CmpCommon::getDefault(HBASE_FILTER_PREDS) != DF_MINIMUM){ // the check for V2 and above is moved up before calculating retrieved columns if ((NOT isUnique) && (extractHbaseFilterPredsVX(generator, executorPred(), newExePreds))) return this; // if some filter preds were found, then initialize executor preds with new exe preds. // newExePreds may be empty which means that all predicates were changed into // hbase preds. In this case, nuke existing exe preds.
if (hbaseFilterColVIDlist_.entries() > 0) setExecutorPredicates(newExePreds); } ValueIdSet colRefSet; computeRetrievedCols(); for (ValueId valId = retrievedCols().init(); retrievedCols().next(valId); retrievedCols().advance(valId)) { ValueId dummyValId; if ((valId.getItemExpr()->getOperatorType() != ITM_CONSTANT) && (getGroupAttr()->getCharacteristicOutputs().referencesTheGivenValue(valId, dummyValId))) colRefSet.insert(valId); } if (getTableDesc()->getNATable()->isHbaseCellTable()) { for (Lng32 i = 0; i < getIndexDesc()->getIndexColumns().entries(); i++) { // retColRefSet_.insert(getIndexDesc()->getIndexColumns()[i]); } } else if (getTableDesc()->getNATable()->isHbaseRowTable()) { NASet<NAString> * hbaseColNameSet = generator->getBindWA()->hbaseColUsageInfo()->hbaseColNameSet ((QualifiedName*)&getTableDesc()->getNATable()->getTableName()); NABoolean starFound = FALSE; for (Lng32 ij = 0; ij < hbaseColNameSet->entries(); ij++) { NAString &colName = (*hbaseColNameSet)[ij]; retHbaseColRefSet_.insert(colName); if (colName == "*") starFound = TRUE; } if (starFound) retHbaseColRefSet_.clear(); } else { // create the list of columns that need to be retrieved from hbase . // first add all columns referenced in the executor pred. HbaseAccess::addReferenceFromVIDset(executorPred(), TRUE, TRUE, colRefSet); HbaseAccess::addReferenceFromVIDset (getGroupAttr()->getCharacteristicOutputs(), TRUE, TRUE, colRefSet); for (ValueId valId = colRefSet.init(); colRefSet.next(valId); colRefSet.advance(valId)) { ValueId dummyValId; if (NOT getGroupAttr()->getCharacteristicInputs().referencesTheGivenValue(valId, dummyValId)) { retColRefSet_.insert(valId); if (valId.getItemExpr()->getOperatorType() == ITM_HBASE_TIMESTAMP) { Lng32 colNumber = ((BaseColumn*)((HbaseTimestamp*)valId.getItemExpr())->col())->getColNumber(); ValueId colVID = getIndexDesc()->getIndexColumns()[colNumber]; retColRefSet_.insert(colVID); } if (valId.getItemExpr()->getOperatorType() == ITM_HBASE_VERSION) { Lng32 colNumber = ((BaseColumn*)((HbaseVersion*)valId.getItemExpr())->col())->getColNumber(); ValueId colVID = getIndexDesc()->getIndexColumns()[colNumber]; retColRefSet_.insert(colVID); } } } // add key columns. If values are missing in hbase, then atleast the key // value is needed to retrieve a row. //only if needed. If there is already a non nullable non added non nullable with default columns in the set, we should not need to add //any other columns. if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_MEDIUM && getMdamKeyPtr() == NULL){ //only enable column retrieval optimization with DF_MEDIUM and not for MDAM scan bool needAddingNonNullableColumn = true; //assume we need to add one non nullable column for (ValueId vid = retColRefSet_.init();// look for each column in th eresult set if one match the criteria non null non added non nullable with default retColRefSet_.next(vid); retColRefSet_.advance(vid)) { if (originExePreds->isNotNullable(vid)){// it is non nullable OperatorTypeEnum operatorType = vid.getItemExpr()->getOperatorType(); if ((operatorType == ITM_BASECOLUMN || operatorType == ITM_INDEXCOLUMN) && !vid.isAddedColumnWithNonNullDefault()){//check if added and with default... 
notgood needAddingNonNullableColumn = false; // we found one column meeting all criteria break; } } } if (needAddingNonNullableColumn){ // ok now we need to add one key column that is not nullable bool foundAtLeastOneKeyColumnNotNullable = false; for(int i=getIndexDesc()->getIndexKey().entries()-1; i>=0;i--)// doing reverse search is making sure we are trying to avoid to use _SALT_ column // because _SALT_ is physicaly the last column therefore we don't skip columns optimally if using _SALT_ column { ValueId vaId = getIndexDesc()->getIndexKey()[i]; if ( (vaId.getItemExpr()->getOperatorType() == ITM_BASECOLUMN && !((BaseColumn*)vaId.getItemExpr())->getNAColumn()->getType()->supportsSQLnullPhysical())|| (vaId.getItemExpr()->getOperatorType() == ITM_INDEXCOLUMN && !((IndexColumn*)vaId.getItemExpr())->getNAColumn()->getType()->supportsSQLnullPhysical()) ){ //found good key column candidate? HbaseAccess::addReferenceFromItemExprTree(vaId.getItemExpr(),TRUE,FALSE,retColRefSet_); // add it foundAtLeastOneKeyColumnNotNullable = true; //tag we found it break; // no need to look further } } if (!foundAtLeastOneKeyColumnNotNullable){//oh well, did not find any key column non nullable, let s add all key columns HbaseAccess::addColReferenceFromVIDlist(getIndexDesc()->getIndexKey(), retColRefSet_); } } }else //end if DF_MEDIUM HbaseAccess::addColReferenceFromVIDlist(getIndexDesc()->getIndexKey(), retColRefSet_); } if ((getMdamKeyPtr()) && ((listOfRangeRows_.entries() > 0) || (listOfUniqueRows_.entries() > 0))) { GenAssert(0, "listOfRange/Unique cannot be used if mdam is chosen."); return NULL; } // flag for both hive and hbase tables generator->setHdfsAccess(TRUE); if (!isUnique) generator->oltOptInfo()->setMultipleRowsReturned(TRUE) ; // Do not allow cancel of unique queries but allow cancel of queries // that are part of a rowset operation. if ((isUnique) && (NOT generator->oltOptInfo()->multipleRowsReturned())) { generator->setMayNotCancel(TRUE); uniqueHbaseOper() = TRUE; } else { generator->oltOptInfo()->setOltCliOpt(FALSE); if (isUnique) { if ((CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON) && (NOT generator->isRIinliningForTrafIUD()) && (searchKey() && searchKey()->isUnique())) { uniqueRowsetHbaseOper() = TRUE; } } } // executorPred() contains an ANDed list of predicates. // if hbase filter preds are enabled, then extracts those preds from executorPred() // which could be pushed down to hbase. // Do this only for non-unique scan access. if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_MINIMUM){ //keep the check for pushdown after column retrieval for pushdown V1. if ((NOT isUnique) && (extractHbaseFilterPreds(generator, executorPred(), newExePreds))) return this; // if some filter preds were found, then initialize executor preds with new exe preds. // newExePreds may be empty which means that all predicates were changed into // hbase preds. In this case, nuke existing exe preds. if (hbaseFilterColVIDlist_.entries() > 0) setExecutorPredicates(newExePreds); }//DF_MINIMUM snpType_ = SNP_NONE; DefaultToken tok = CmpCommon::getDefault(TRAF_TABLE_SNAPSHOT_SCAN); if (tok == DF_LATEST) //latest snapshot -- new way used with scan independent from bulk unload snpType_= SNP_LATEST; else if (tok == DF_SUFFIX) //the exsiting where snapshot scan is used with bulk unload snpType_ = SNP_SUFFIX; markAsPreCodeGenned(); // Done. 
return this; } RelExpr * HbaseAccessCoProcAggr::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! HbaseAccess::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // Rebuild the aggregate expressions tree ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); aggregateExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); markAsPreCodeGenned(); // Done. return this; } RelExpr * ExeUtilHbaseCoProcAggr::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // Rebuild the aggregate expressions tree ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); aggregateExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); markAsPreCodeGenned(); // Done. return this; } RelExpr * ExeUtilOrcFastAggr::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // Rebuild the aggregate expressions tree ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); aggregateExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); markAsPreCodeGenned(); // Done. return this; }
1
10,685
Would a check for added columns with non-null default be sufficient for aligned format?
apache-trafodion
cpp
@@ -69,6 +69,7 @@ var ( hostname = flag.String(ovfimportparams.HostnameFlagKey, "", "Specify the hostname of the instance to be created. The specified hostname must be RFC1035 compliant.") machineImageStorageLocation = flag.String(ovfimportparams.MachineImageStorageLocationFlagKey, "", "GCS bucket storage location of the machine image being imported (regional or multi-regional)") buildID = flag.String("build-id", "", "Cloud Build ID override. This flag should be used if auto-generated or build ID provided by Cloud Build is not appropriate. For example, if running multiple imports in parallel in a single Cloud Build run, sharing build ID could cause premature temporary resource clean-up resulting in import failures.") + computeServiceAccount = flag.String("compute_service_account", "", "Compute service account to be used by importer. When empty, the Compute Engine default service account is used.") nodeAffinityLabelsFlag flags.StringArrayFlag currentExecutablePath string
1
// Copyright 2019 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // GCE OVF import tool package main import ( "flag" "fmt" "os" "strings" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/daisy" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/flags" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/logging/service" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/ovf_import_params" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/ovf_importer" ) var ( instanceNames = flag.String(ovfimportparams.InstanceNameFlagKey, "", "VM Instance names to be created, separated by commas.") machineImageName = flag.String(ovfimportparams.MachineImageNameFlagKey, "", "Name of the machine image to create.") clientID = flag.String(ovfimportparams.ClientIDFlagKey, "", "Identifies the client of the importer, e.g. `gcloud` or `pantheon`") clientVersion = flag.String("client-version", "", "Identifies the version of the client of the importer") ovfOvaGcsPath = flag.String(ovfimportparams.OvfGcsPathFlagKey, "", " Google Cloud Storage URI of the OVF or OVA file to import. For example: gs://my-bucket/my-vm.ovf.") noGuestEnvironment = flag.Bool("no-guest-environment", false, "Google Guest Environment will not be installed on the image.") canIPForward = flag.Bool("can-ip-forward", false, "If provided, allows the instances to send and receive packets with non-matching destination or source IP addresses.") deletionProtection = flag.Bool("deletion-protection", false, "Enables deletion protection for the instance.") description = flag.String("description", "", "Specifies a textual description of the instances.") labels = flag.String("labels", "", "List of label KEY=VALUE pairs to add. Keys must start with a lowercase character and contain only hyphens (-), underscores (_), lowercase characters, and numbers. Values must contain only hyphens (-), underscores (_), lowercase characters, and numbers.") machineType = flag.String("machine-type", "", "Specifies the machine type used for the instances. To get a list of available machine types, run 'gcloud compute machine-types list'. If unspecified, the default type is n1-standard-1.") network = flag.String("network", "", "Name of the network in your project to use for the image import. The network must have access to Google Cloud Storage. If not specified, the network named default is used. If -subnet is also specified subnet must be a subnetwork of network specified by -network.") networkTier = flag.String("network-tier", "", "Specifies the network tier that will be used to configure the instance. NETWORK_TIER must be one of: PREMIUM, STANDARD. The default value is PREMIUM.") subnet = flag.String("subnet", "", "Name of the subnetwork in your project to use for the image import. If the network resource is in legacy mode, do not provide this property. 
If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. Zone should be specified if this field is specified.") privateNetworkIP = flag.String("private-network-ip", "", "Specifies the RFC1918 IP to assign to the instance. The IP should be in the subnet or legacy network IP range.") noExternalIP = flag.Bool("no-external-ip", false, "Specifies that VPC into which instances is being imported doesn't allow external IPs.") noRestartOnFailure = flag.Bool("no-restart-on-failure", false, "the instance will not be restarted if it’s terminated by Compute Engine. This does not affect terminations performed by the user.") osID = flag.String("os", "", "Specifies the OS of the image being imported. OS must be one of: "+strings.Join(daisy.GetSortedOSIDs(), ", ")+".") shieldedIntegrityMonitoring = flag.Bool("shielded-integrity-monitoring", false, "Enables monitoring and attestation of the boot integrity of the instance. The attestation is performed against the integrity policy baseline. This baseline is initially derived from the implicitly trusted boot image when the instance is created. This baseline can be updated by using --shielded-vm-learn-integrity-policy.") shieldedSecureBoot = flag.Bool("shielded-secure-boot", false, "The instance will boot with secure boot enabled.") shieldedVtpm = flag.Bool("shielded-vtpm", false, "The instance will boot with the TPM (Trusted Platform Module) enabled. A TPM is a hardware module that can be used for different security operations such as remote attestation, encryption and sealing of keys.") tags = flag.String("tags", "", "Specifies a list of tags to apply to the instance. These tags allow network firewall rules and routes to be applied to specified VM instances. See `gcloud compute firewall-rules create` for more details.") zoneFlag = flag.String("zone", "", "Zone of the image to import. The zone in which to do the work of importing the image. Overrides the default compute/zone property value for this command invocation") bootDiskKmskey = flag.String("boot-disk-kms-key", "", "The Cloud KMS (Key Management Service) cryptokey that will be used to protect the disk. The arguments in this group can be used to specify the attributes of this resource. ID of the key or fully qualified identifier for the key. This flag must be specified if any of the other arguments in this group are specified.") bootDiskKmsKeyring = flag.String("boot-disk-kms-keyring", "", "The KMS keyring of the key.") bootDiskKmsLocation = flag.String("boot-disk-kms-location", "", "The Cloud location for the key.") bootDiskKmsProject = flag.String("boot-disk-kms-project", "", "The Cloud project for the key.") timeout = flag.String("timeout", "", "Maximum time a build can last before it is failed as TIMEOUT. For example, specifying 2h will fail the process after 2 hours. 
See `gcloud topic datetimes` for information on duration formats") project = flag.String("project", "", "project to run in, overrides what is set in workflow") scratchBucketGcsPath = flag.String("scratch-bucket-gcs-path", "", "GCS scratch bucket to use, overrides what is set in workflow") oauth = flag.String("oauth", "", "path to oauth json file, overrides what is set in workflow") ce = flag.String("compute-endpoint-override", "", "API endpoint to override default") gcsLogsDisabled = flag.Bool("disable-gcs-logging", false, "do not stream logs to GCS") cloudLogsDisabled = flag.Bool("disable-cloud-logging", false, "do not stream logs to Cloud Logging") stdoutLogsDisabled = flag.Bool("disable-stdout-logging", false, "do not display individual workflow logs on stdout") releaseTrack = flag.String("release-track", ovfimporter.GA, fmt.Sprintf("Release track of OVF import. One of: %s, %s or %s. Impacts which compute API release track is used by the import tool.", ovfimporter.Alpha, ovfimporter.Beta, ovfimporter.GA)) uefiCompatible = flag.Bool("uefi-compatible", false, "Enables UEFI booting, which is an alternative system boot method. Most public images use the GRUB bootloader as their primary boot method.") hostname = flag.String(ovfimportparams.HostnameFlagKey, "", "Specify the hostname of the instance to be created. The specified hostname must be RFC1035 compliant.") machineImageStorageLocation = flag.String(ovfimportparams.MachineImageStorageLocationFlagKey, "", "GCS bucket storage location of the machine image being imported (regional or multi-regional)") buildID = flag.String("build-id", "", "Cloud Build ID override. This flag should be used if auto-generated or build ID provided by Cloud Build is not appropriate. For example, if running multiple imports in parallel in a single Cloud Build run, sharing build ID could cause premature temporary resource clean-up resulting in import failures.") nodeAffinityLabelsFlag flags.StringArrayFlag currentExecutablePath string ) func init() { currentExecutablePath = string(os.Args[0]) flag.Var(&nodeAffinityLabelsFlag, "node-affinity-label", "Node affinity label used to determine sole tenant node to schedule this instance on. Label is of the format: <key>,<operator>,<value>,<value2>... where <operator> can be one of: IN, NOT. For example: workload,IN,prod,test is a label with key 'workload' and values 'prod' and 'test'. 
This flag can be specified multiple times for multiple labels.") } func buildOVFImportParams() *ovfimportparams.OVFImportParams { flag.Parse() return &ovfimportparams.OVFImportParams{InstanceNames: *instanceNames, MachineImageName: *machineImageName, ClientID: *clientID, OvfOvaGcsPath: *ovfOvaGcsPath, NoGuestEnvironment: *noGuestEnvironment, CanIPForward: *canIPForward, DeletionProtection: *deletionProtection, Description: *description, Labels: *labels, MachineType: *machineType, Network: *network, NetworkTier: *networkTier, Subnet: *subnet, PrivateNetworkIP: *privateNetworkIP, NoExternalIP: *noExternalIP, NoRestartOnFailure: *noRestartOnFailure, OsID: *osID, ShieldedIntegrityMonitoring: *shieldedIntegrityMonitoring, ShieldedSecureBoot: *shieldedSecureBoot, ShieldedVtpm: *shieldedVtpm, Tags: *tags, Zone: *zoneFlag, BootDiskKmskey: *bootDiskKmskey, BootDiskKmsKeyring: *bootDiskKmsKeyring, BootDiskKmsLocation: *bootDiskKmsLocation, BootDiskKmsProject: *bootDiskKmsProject, Timeout: *timeout, Project: project, ScratchBucketGcsPath: *scratchBucketGcsPath, Oauth: *oauth, Ce: *ce, GcsLogsDisabled: *gcsLogsDisabled, CloudLogsDisabled: *cloudLogsDisabled, StdoutLogsDisabled: *stdoutLogsDisabled, NodeAffinityLabelsFlag: nodeAffinityLabelsFlag, CurrentExecutablePath: currentExecutablePath, ReleaseTrack: *releaseTrack, UefiCompatible: *uefiCompatible, Hostname: *hostname, MachineImageStorageLocation: *machineImageStorageLocation, BuildID: *buildID, } } func runImport() (service.Loggable, error) { var ovfImporter *ovfimporter.OVFImporter var err error defer func() { if ovfImporter != nil { ovfImporter.CleanUp() } }() if ovfImporter, err = ovfimporter.NewOVFImporter(buildOVFImportParams()); err != nil { return nil, err } wf, err := ovfImporter.Import() return service.NewLoggableFromWorkflow(wf), err } func main() { flag.Parse() var paramLog service.InputParams var action string isInstanceImport := *instanceNames != "" if isInstanceImport { paramLog = createInstanceImportInputParams() action = service.InstanceImportAction } else { paramLog = createMachineImageImportInputParams() action = service.MachineImageImportAction } if err := service.RunWithServerLogging(action, paramLog, project, runImport); err != nil { os.Exit(1) } } func createInstanceImportInputParams() service.InputParams { return service.InputParams{ InstanceImportParams: &service.InstanceImportParams{ CommonParams: createCommonInputParams(), InstanceName: *instanceNames, OvfGcsPath: *ovfOvaGcsPath, CanIPForward: *canIPForward, DeletionProtection: *deletionProtection, MachineType: *machineType, NetworkInterface: *network, NetworkTier: *networkTier, PrivateNetworkIP: *privateNetworkIP, NoExternalIP: *noExternalIP, NoRestartOnFailure: *noRestartOnFailure, OS: *osID, ShieldedIntegrityMonitoring: *shieldedIntegrityMonitoring, ShieldedSecureBoot: *shieldedSecureBoot, ShieldedVtpm: *shieldedVtpm, Tags: *tags, HasBootDiskKmsKey: *bootDiskKmskey != "", HasBootDiskKmsKeyring: *bootDiskKmsKeyring != "", HasBootDiskKmsLocation: *bootDiskKmsLocation != "", HasBootDiskKmsProject: *bootDiskKmsProject != "", NoGuestEnvironment: *noGuestEnvironment, NodeAffinityLabel: nodeAffinityLabelsFlag.String(), }, } } func createMachineImageImportInputParams() service.InputParams { return service.InputParams{ MachineImageImportParams: &service.MachineImageImportParams{ CommonParams: createCommonInputParams(), MachineImageName: *machineImageName, OvfGcsPath: *ovfOvaGcsPath, CanIPForward: *canIPForward, DeletionProtection: *deletionProtection, MachineType: *machineType, 
NetworkInterface: *network, NetworkTier: *networkTier, PrivateNetworkIP: *privateNetworkIP, NoExternalIP: *noExternalIP, NoRestartOnFailure: *noRestartOnFailure, OS: *osID, ShieldedIntegrityMonitoring: *shieldedIntegrityMonitoring, ShieldedSecureBoot: *shieldedSecureBoot, ShieldedVtpm: *shieldedVtpm, Tags: *tags, HasBootDiskKmsKey: *bootDiskKmskey != "", HasBootDiskKmsKeyring: *bootDiskKmsKeyring != "", HasBootDiskKmsLocation: *bootDiskKmsLocation != "", HasBootDiskKmsProject: *bootDiskKmsProject != "", NoGuestEnvironment: *noGuestEnvironment, NodeAffinityLabel: nodeAffinityLabelsFlag.String(), Hostname: *hostname, MachineImageStorageLocation: *machineImageStorageLocation, }, } } func createCommonInputParams() *service.CommonParams { return &service.CommonParams{ ClientID: *clientID, ClientVersion: *clientVersion, Network: *network, Subnet: *subnet, Zone: *zoneFlag, Timeout: *timeout, Project: *project, ObfuscatedProject: service.Hash(*project), Labels: *labels, ScratchBucketGcsPath: *scratchBucketGcsPath, Oauth: *oauth, ComputeEndpointOverride: *ce, DisableGcsLogging: *gcsLogsDisabled, DisableCloudLogging: *cloudLogsDisabled, DisableStdoutLogging: *stdoutLogsDisabled, } }
1
12,925
Is this PR implementing the compute service account for OVF import as well? I thought it was only for image/one-step import. Btw, the OVF importer uses "-" instead of "_" in flags (so this should be `compute-service-account`).
GoogleCloudPlatform-compute-image-tools
go
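To make the reviewer's naming point concrete: the OVF importer's existing flags ("no-guest-environment", "machine-type", and so on) are hyphenated, so a service-account flag added there would presumably follow suit. A minimal, self-contained Go sketch assuming a hyphenated flag name; the variable name and wiring are illustrative, not the tool's actual code:

package main

import (
	"flag"
	"fmt"
)

// Hypothetical flag definition; hyphenated to match this tool's existing
// flags such as "no-guest-environment" and "machine-type".
var computeServiceAccount = flag.String("compute-service-account", "",
	"Compute service account to be used by importer. When empty, the Compute Engine default service account is used.")

func main() {
	flag.Parse()
	fmt.Println("compute service account:", *computeServiceAccount)
}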
@@ -23,7 +23,7 @@ use Symfony\Component\Routing\Annotation\Route; /** * @Route( * name="ergonode_product_collection_element_delete", - * path="/collections/{collection}/elements/{product}", + * path="/collections/{collection}/element/{product}", * methods={"DELETE"}, * requirements={ * "collection"="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}",
1
<?php /** * Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved. * See LICENSE.txt for license details. */ declare(strict_types = 1); namespace Ergonode\ProductCollection\Application\Controller\Api\Element; use Ergonode\Api\Application\Response\EmptyResponse; use Ergonode\EventSourcing\Infrastructure\Bus\CommandBusInterface; use Ergonode\Product\Domain\Entity\AbstractProduct; use Ergonode\ProductCollection\Domain\Command\DeleteProductCollectionElementCommand; use Ergonode\ProductCollection\Domain\Entity\ProductCollection; use Sensio\Bundle\FrameworkExtraBundle\Configuration\IsGranted; use Sensio\Bundle\FrameworkExtraBundle\Configuration\ParamConverter; use Swagger\Annotations as SWG; use Symfony\Component\HttpFoundation\Response; use Symfony\Component\Routing\Annotation\Route; /** * @Route( * name="ergonode_product_collection_element_delete", * path="/collections/{collection}/elements/{product}", * methods={"DELETE"}, * requirements={ * "collection"="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", * "product"="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" * }, * ) */ class ProductCollectionElementDeleteAction { /** * @var CommandBusInterface */ private CommandBusInterface $commandBus; /** * @param CommandBusInterface $commandBus */ public function __construct(CommandBusInterface $commandBus) { $this->commandBus = $commandBus; } /** * @IsGranted("PRODUCT_COLLECTION_DELETE") * * @SWG\Tag(name="Product Collection") * @SWG\Parameter( * name="language", * in="path", * type="string", * description="Language code", * default="EN" * ) * @SWG\Parameter( * name="collection", * in="path", * type="string", * required=true, * description="Product Collection ID", * ) * @SWG\Parameter( * name="product", * in="path", * type="string", * required=true, * description="Product Id", * ) * @SWG\Response( * response=204, * description="Success" * ) * @SWG\Response( * response=404, * description="Not found", * ) * @SWG\Response( * response="409", * description="Existing relationships" * ) * * @ParamConverter(class="Ergonode\ProductCollection\Domain\Entity\ProductCollection") * @ParamConverter(class="Ergonode\Product\Domain\Entity\AbstractProduct") * * @param ProductCollection $productCollection * @param AbstractProduct $product * * @return Response */ public function __invoke(ProductCollection $productCollection, AbstractProduct $product): Response { $command = new DeleteProductCollectionElementCommand($productCollection->getId(), $product->getId()); $this->commandBus->dispatch($command); return new EmptyResponse(); } }
1
8,564
Why the change to `element`? The whole application uses the plural convention.
ergonode-backend
php
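For contrast with the diff above, which switches a single DELETE route from /elements/ to /element/: naming collection segments in the plural everywhere keeps the routes uniform, which is the convention the reviewer is defending. A small sketch of that consistent plural shape using Go 1.22's pattern-matching ServeMux, with hypothetical handlers standing in for the real controllers:

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	mux := http.NewServeMux()
	// Plural segment throughout, matching the rest of the application's routes.
	mux.HandleFunc("GET /collections/{collection}/elements",
		func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintf(w, "list elements of %s\n", r.PathValue("collection"))
		})
	mux.HandleFunc("DELETE /collections/{collection}/elements/{product}",
		func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNoContent) // 204, as in the Swagger annotations
		})
	log.Fatal(http.ListenAndServe(":8080", mux))
}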
@@ -23,7 +23,7 @@ const renderInnerPanel = (props) => { } const poweredByUppy = (props) => { - return <a href="https://uppy.io" rel="noreferrer noopener" target="_blank" class="uppy-Dashboard-poweredBy">Powered by <svg aria-hidden="true" class="UppyIcon uppy-Dashboard-poweredByIcon" width="11" height="11" viewBox="0 0 11 11" xmlns="http://www.w3.org/2000/svg"> + return <a tabindex="-1" href="https://uppy.io" rel="noreferrer noopener" target="_blank" class="uppy-Dashboard-poweredBy">Powered by <svg aria-hidden="true" class="UppyIcon uppy-Dashboard-poweredByIcon" width="11" height="11" viewBox="0 0 11 11" xmlns="http://www.w3.org/2000/svg"> <path d="M7.365 10.5l-.01-4.045h2.612L5.5.806l-4.467 5.65h2.604l.01 4.044h3.718z" fill-rule="evenodd" /> </svg><span class="uppy-Dashboard-poweredByUppy">Uppy</span></a> }
1
const FileList = require('./FileList') const Tabs = require('./Tabs') const FileCard = require('./FileCard') const classNames = require('classnames') const { isTouchDevice } = require('../../core/Utils') const { h } = require('preact') // http://dev.edenspiekermann.com/2016/02/11/introducing-accessible-modal-dialog // https://github.com/ghosh/micromodal const renderInnerPanel = (props) => { return <div style={{ width: '100%', height: '100%' }}> <div class="uppy-DashboardContent-bar"> <div class="uppy-DashboardContent-title"> {props.i18n('importFrom')} {props.activePanel ? props.activePanel.name : null} </div> <button class="uppy-DashboardContent-back" type="button" onclick={props.hideAllPanels}>{props.i18n('done')}</button> </div> {props.getPlugin(props.activePanel.id).render(props.state)} </div> } const poweredByUppy = (props) => { return <a href="https://uppy.io" rel="noreferrer noopener" target="_blank" class="uppy-Dashboard-poweredBy">Powered by <svg aria-hidden="true" class="UppyIcon uppy-Dashboard-poweredByIcon" width="11" height="11" viewBox="0 0 11 11" xmlns="http://www.w3.org/2000/svg"> <path d="M7.365 10.5l-.01-4.045h2.612L5.5.806l-4.467 5.65h2.604l.01 4.044h3.718z" fill-rule="evenodd" /> </svg><span class="uppy-Dashboard-poweredByUppy">Uppy</span></a> } module.exports = function Dashboard (props) { const dashboardClassName = classNames( { 'uppy-Root': props.isTargetDOMEl }, 'uppy-Dashboard', { 'Uppy--isTouchDevice': isTouchDevice() }, { 'uppy-Dashboard--modal': !props.inline }, { 'uppy-Dashboard--wide': props.isWide } ) return ( <div class={dashboardClassName} aria-hidden={props.inline ? 'false' : props.modal.isHidden} aria-label={!props.inline ? props.i18n('dashboardWindowTitle') : props.i18n('dashboardTitle')} onpaste={props.handlePaste}> <div class="uppy-Dashboard-overlay" tabindex={-1} onclick={props.handleClickOutside} /> <div class="uppy-Dashboard-inner" aria-modal={!props.inline && 'true'} role={!props.inline && 'dialog'} style={{ width: props.inline && props.width ? props.width : '', height: props.inline && props.height ? props.height : '' }}> <button class="uppy-Dashboard-close" type="button" aria-label={props.i18n('closeModal')} title={props.i18n('closeModal')} onclick={props.closeModal}> <span aria-hidden="true">&times;</span> </button> <div class="uppy-Dashboard-innerWrap"> <Tabs {...props} /> <FileCard {...props} /> <div class="uppy-Dashboard-filesContainer"> <FileList {...props} /> </div> <div class="uppy-DashboardContent-panel" role="tabpanel" id={props.activePanel && `uppy-DashboardContent-panel--${props.activePanel.id}`} aria-hidden={props.activePanel ? 'false' : 'true'}> {props.activePanel && renderInnerPanel(props)} </div> <div class="uppy-Dashboard-progressindicators"> {props.progressindicators.map((target) => { return props.getPlugin(target.id).render(props.state) })} </div> </div> { props.proudlyDisplayPoweredByUppy && poweredByUppy(props) } </div> </div> ) }
1
10,835
Tiniest nit but this can be `tabindex={-1}`, `width={11}`, `height={11}`
transloadit-uppy
js
@@ -344,6 +344,16 @@ try: cast = image.astype(np.float32) assert cast.dtype == np.float32 + # Test .astype for conversion between vector-like pixel types. + components = 3 + numpyImage = np.random.randint(0, 256, (12,8,components)).astype(np.uint8) + input_image = itk.image_from_array(numpyImage, is_vector=True) + if (type(input_image) == itk.Image[itk.RGBPixel[itk.UC],2] and + hasattr(itk.CastImageFilter, 'IRGBUC2IVF32')): + output_pixel_type = itk.Vector[itk.F,components] + output_image = input_image.astype(output_pixel_type) + assert type(output_image) == itk.Image[output_pixel_type, 2] + except ImportError: print("NumPy not imported. Skipping BridgeNumPy tests") # Numpy is not available, do not run the Bridge NumPy tests
1
#========================================================================== # # Copyright NumFOCUS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #==========================================================================*/ # also test the import callback feature import itk import sys import os def custom_callback(name, progress): if progress == 0: print("Loading %s..." % name, file=sys.stderr) if progress == 1: print("done", file=sys.stderr) import itkConfig itkConfig.ImportCallback = custom_callback # test setting the number of threads itk.set_nthreads(4) assert itk.get_nthreads() == 4 # test the force load function itk.force_load() filename = sys.argv[1] mesh_filename = sys.argv[2] PixelType = itk.UC dim = 2 ImageType = itk.Image[PixelType, dim] ReaderType = itk.ImageFileReader[ImageType] reader = ReaderType.New(FileName=filename) # test snake_case keyword arguments reader = ReaderType.New(file_name=filename) # test echo itk.echo(reader) itk.echo(reader, sys.stdout) # test class_ assert itk.class_(reader) == ReaderType assert itk.class_("dummy") == str # test template assert itk.template(ReaderType) == (itk.ImageFileReader, (ImageType,)) assert itk.template(reader) == (itk.ImageFileReader, (ImageType,)) try: itk.template(str) raise Exception("unknown class should send an exception") except KeyError: pass # test ctype assert itk.ctype("unsigned short") == itk.US assert itk.ctype(" unsigned \n short \t ") == itk.US assert itk.ctype("signed short") == itk.SS assert itk.ctype("short") == itk.SS try: itk.ctype("dummy") raise Exception("unknown C type should send an exception") except KeyError: pass # test output assert itk.output(reader) == reader.GetOutput() assert itk.output(1) == 1 # test the deprecated image assert itk.image(reader) == reader.GetOutput() assert itk.image(1) == 1 # test size s = itk.size(reader) assert s[0] == s[1] == 256 s = itk.size(reader.GetOutput()) assert s[0] == s[1] == 256 # test physical size s = itk.physical_size(reader) assert s[0] == s[1] == 256.0 s = itk.physical_size(reader.GetOutput()) assert s[0] == s[1] == 256.0 # test spacing s = itk.spacing(reader) assert s[0] == s[1] == 1.0 s = itk.spacing(reader.GetOutput()) assert s[0] == s[1] == 1.0 # test origin s = itk.origin(reader) assert s[0] == s[1] == 0.0 s = itk.origin(reader.GetOutput()) assert s[0] == s[1] == 0.0 # test index s = itk.index(reader) assert s[0] == s[1] == 0 s = itk.index(reader.GetOutput()) assert s[0] == s[1] == 0 # test region s = itk.region(reader) assert s.GetIndex()[0] == s.GetIndex()[1] == 0 assert s.GetSize()[0] == s.GetSize()[1] == 256 s = itk.region(reader.GetOutput()) assert s.GetIndex()[0] == s.GetIndex()[1] == 0 assert s.GetSize()[0] == s.GetSize()[1] == 256 # test range assert itk.range(reader) == (0, 255) assert itk.range(reader.GetOutput()) == (0, 255) # test write itk.imwrite(reader, sys.argv[3]) itk.imwrite(reader, sys.argv[3], True) # test read image = itk.imread(filename) assert type(image) == itk.Image[itk.RGBPixel[itk.UC],2] image = itk.imread(filename, itk.F) assert type(image) 
== itk.Image[itk.F,2] image = itk.imread(filename, itk.F, fallback_only=True) assert type(image) == itk.Image[itk.RGBPixel[itk.UC],2] try: image = itk.imread(filename, fallback_only=True) # Should never reach this point if test passes since an exception # is expected. raise Exception('`itk.imread()` fallback_only should have failed') except Exception as e: if str(e) == "pixel_type must be set when using the fallback_only option": pass else: raise e # test mesh read / write mesh = itk.meshread(mesh_filename) assert type(mesh) == itk.Mesh[itk.F, 3] mesh = itk.meshread(mesh_filename, itk.UC) assert type(mesh) == itk.Mesh[itk.UC, 3] mesh = itk.meshread(mesh_filename, itk.UC, fallback_only=True) assert type(mesh) == itk.Mesh[itk.F, 3] itk.meshwrite(mesh, sys.argv[4]) itk.meshwrite(mesh, sys.argv[4], compression=True) # test search res = itk.search("Index") assert res[0] == "Index" assert res[1] == "index" assert "ContinuousIndex" in res res = itk.search("index", True) assert "Index" not in res # test down_cast obj = itk.Object.cast(reader) # be sure that the reader is casted to itk::Object assert obj.__class__ == itk.Object down_casted = itk.down_cast(obj) assert down_casted == reader assert down_casted.__class__ == ReaderType # test setting the IO manually png_io = itk.PNGImageIO.New() assert png_io.GetFileName() == '' reader=itk.ImageFileReader.New(FileName=filename, ImageIO=png_io) reader.Update() assert png_io.GetFileName() == filename # test reading image series series_reader = itk.ImageSeriesReader.New(FileNames=[filename,filename]) series_reader.Update() assert series_reader.GetOutput().GetImageDimension() == 3 assert series_reader.GetOutput().GetLargestPossibleRegion().GetSize()[2] == 2 # test reading image series and check that dimension is not increased if # last dimension is 1. image_series = itk.Image[itk.UC, 3].New() image_series.SetRegions([10, 7, 1]) image_series.Allocate() image_series.FillBuffer(0) image_series3d_filename = os.path.join( sys.argv[5], "image_series_extras_py.mha") itk.imwrite(image_series, image_series3d_filename) series_reader = itk.ImageSeriesReader.New( FileNames=[image_series3d_filename, image_series3d_filename]) series_reader.Update() assert series_reader.GetOutput().GetImageDimension() == 3 # test reading image series with itk.imread() image_series = itk.imread([filename, filename]) assert image_series.GetImageDimension() == 3 # Numeric series filename generation without any integer index. It is # only to produce an ITK object that users could set as an input to # `itk.ImageSeriesReader.New()` or `itk.imread()` and test that it works. numeric_series_filename = itk.NumericSeriesFileNames.New() numeric_series_filename.SetStartIndex(0) numeric_series_filename.SetEndIndex(3) numeric_series_filename.SetIncrementIndex(1) numeric_series_filename.SetSeriesFormat(filename) image_series = itk.imread(numeric_series_filename.GetFileNames()) number_of_files = len(numeric_series_filename.GetFileNames()) assert image_series.GetImageDimension() == 3 assert image_series.GetLargestPossibleRegion().GetSize()[2] == number_of_files # test reading image series with `itk.imread()` and check that dimension is # not increased if last dimension is 1. 
image_series = itk.imread([image_series3d_filename, image_series3d_filename]) assert image_series.GetImageDimension() == 3 # pipeline, auto_pipeline and templated class are tested in other files # BridgeNumPy try: # Images import numpy as np image = itk.imread(filename) arr = itk.GetArrayFromImage(image) arr.fill(1) assert np.any(arr != itk.GetArrayFromImage(image)) arr = itk.array_from_image(image) arr.fill(1) assert np.any(arr != itk.GetArrayFromImage(image)) view = itk.GetArrayViewFromImage(image) view.fill(1) assert np.all(view == itk.GetArrayFromImage(image)) image = itk.GetImageFromArray(arr) image.FillBuffer(2) assert np.any(arr != itk.GetArrayFromImage(image)) image = itk.GetImageViewFromArray(arr) image.FillBuffer(2) assert np.all(arr == itk.GetArrayFromImage(image)) image = itk.GetImageFromArray(arr, is_vector=True) assert image.GetImageDimension() == 2 image = itk.GetImageViewFromArray(arr, is_vector=True) assert image.GetImageDimension() == 2 arr = np.array([[1,2,3],[4,5,6]]).astype(np.uint8) assert arr.shape[0] == 2 assert arr.shape[1] == 3 assert arr[1,1] == 5 image = itk.GetImageFromArray(arr) arrKeepAxes = itk.GetArrayFromImage(image, keep_axes=True) assert arrKeepAxes.shape[0] == 3 assert arrKeepAxes.shape[1] == 2 assert arrKeepAxes[1,1] == 4 arr = itk.GetArrayFromImage(image, keep_axes=False) assert arr.shape[0] == 2 assert arr.shape[1] == 3 assert arr[1,1] == 5 arrKeepAxes = itk.GetArrayViewFromImage(image, keep_axes=True) assert arrKeepAxes.shape[0] == 3 assert arrKeepAxes.shape[1] == 2 assert arrKeepAxes[1,1] == 4 arr = itk.GetArrayViewFromImage(image, keep_axes=False) assert arr.shape[0] == 2 assert arr.shape[1] == 3 assert arr[1,1] == 5 arr = arr.copy() image = itk.GetImageFromArray(arr) image2 = type(image).New() image2.Graft(image) del image # Delete image but pixel data should be kept in img2 image = itk.GetImageFromArray(arr+1) # Fill former memory if wrongly released assert np.array_equal(arr, itk.GetArrayViewFromImage(image2)) image2.SetPixel([0]*image2.GetImageDimension(), 3) # For mem check in dynamic analysis # VNL Vectors v1 = itk.vnl_vector.D(2) v1.fill(1) v_np = itk.GetArrayFromVnlVector(v1) assert v1.get(0) == v_np[0] v_np[0] = 0 assert v1.get(0) != v_np[0] view = itk.GetArrayViewFromVnlVector(v1) assert v1.get(0) == view[0] view[0] = 0 assert v1.get(0) == view[0] # VNL Matrices m1 = itk.vnl_matrix.D(2,2) m1.fill(1) m_np = itk.GetArrayFromVnlMatrix(m1) assert m1.get(0,0) == m_np[0,0] m_np[0,0] = 0 assert m1.get(0,0) != m_np[0,0] view = itk.GetArrayViewFromVnlMatrix(m1) assert m1.get(0,0) == view[0,0] view[0,0] = 0 assert m1.get(0,0) == view[0,0] arr = np.zeros([3,3]) m_vnl = itk.GetVnlMatrixFromArray(arr) assert m_vnl(0,0) == 0 m_vnl.put(0,0,3) assert m_vnl(0,0) == 3 assert arr[0,0] == 0 # ITK Matrix arr = np.zeros([3,3],float) m_itk = itk.GetMatrixFromArray(arr) # Test snake case function m_itk = itk.matrix_from_array(arr) m_itk.SetIdentity() # Test that the numpy array has not changed,... assert arr[0,0] == 0 # but that the ITK matrix has the correct value. assert m_itk(0,0) == 1 arr2 = itk.GetArrayFromMatrix(m_itk) # Check that snake case function also works arr2 = itk.array_from_matrix(m_itk) # Check that the new array has the new value. assert arr2[0,0] == 1 arr2[0,0]=2 # Change the array value,... assert arr2[0,0] == 2 # and make sure that the matrix hasn't changed. 
assert m_itk(0,0) == 1 # test .astype image = itk.imread(filename, itk.UC) cast = image.astype(PixelType) assert cast == image cast = image.astype(itk.F) assert cast.dtype == np.float32 cast = image.astype(itk.SS) assert cast.dtype == np.int16 cast = image.astype(np.float32) assert cast.dtype == np.float32 except ImportError: print("NumPy not imported. Skipping BridgeNumPy tests") # Numpy is not available, do not run the Bridge NumPy tests pass # xarray conversion try: import xarray as xr import numpy as np print('Testing xarray conversion') image = itk.imread(filename) image.SetSpacing((0.1, 0.2)) image.SetOrigin((30., 44.)) theta = np.radians(30) cosine = np.cos(theta) sine = np.sin(theta) rotation = np.array(((cosine, -sine), (sine, cosine))) image.SetDirection(rotation) data_array = itk.xarray_from_image(image) assert data_array.dims[0] == 'y' assert data_array.dims[1] == 'x' assert data_array.dims[2] == 'c' assert np.array_equal(data_array.values, itk.array_from_image(image)) assert len(data_array.coords['x']) == 256 assert len(data_array.coords['y']) == 256 assert len(data_array.coords['c']) == 3 assert data_array.coords['x'][0] == 30.0 assert data_array.coords['x'][1] == 30.1 assert data_array.coords['y'][0] == 44.0 assert data_array.coords['y'][1] == 44.2 assert data_array.coords['c'][0] == 0 assert data_array.coords['c'][1] == 1 assert data_array.attrs['direction'][0,0] == cosine assert data_array.attrs['direction'][0,1] == sine assert data_array.attrs['direction'][1,0] == -sine assert data_array.attrs['direction'][1,1] == cosine round_trip = itk.image_from_xarray(data_array) assert np.array_equal(itk.array_from_image(round_trip), itk.array_from_image(image)) spacing = round_trip.GetSpacing() assert np.isclose(spacing[0], 0.1) assert np.isclose(spacing[1], 0.2) origin = round_trip.GetOrigin() assert np.isclose(origin[0], 30.0) assert np.isclose(origin[1], 44.0) direction = round_trip.GetDirection() assert np.isclose(direction(0,0), cosine) assert np.isclose(direction(0,1), -sine) assert np.isclose(direction(1,0), sine) assert np.isclose(direction(1,1), cosine) wrong_order = data_array.swap_dims({'y':'z'}) try: round_trip = itk.image_from_xarray(wrong_order) assert False except ValueError: pass empty_array = np.array([], dtype=np.uint8) empty_array.shape = (0,0,0) empty_image = itk.image_from_array(empty_array) empty_da = itk.xarray_from_image(empty_image) empty_image_round = itk.image_from_xarray(empty_da) except ImportError: print('xarray not imported. 
Skipping xarray conversion tests') pass # vtk conversion try: import vtk import numpy as np print('Testing vtk conversion') image = itk.image_from_array(np.random.rand(2,3,4)) vtk_image = itk.vtk_image_from_image(image) image_round = itk.image_from_vtk_image(vtk_image) assert(np.array_equal(itk.origin(image), itk.origin(image_round))) assert(np.array_equal(itk.spacing(image), itk.spacing(image_round))) assert(np.array_equal(itk.size(image), itk.size(image_round))) assert(np.array_equal(itk.array_view_from_image(image), itk.array_view_from_image(image_round))) image = itk.image_from_array(np.random.rand(5,4,2).astype(np.float32), is_vector=True) vtk_image = itk.vtk_image_from_image(image) image_round = itk.image_from_vtk_image(vtk_image) assert(np.array_equal(itk.origin(image), itk.origin(image_round))) assert(np.array_equal(itk.spacing(image), itk.spacing(image_round))) assert(np.array_equal(itk.size(image), itk.size(image_round))) assert(np.array_equal(itk.array_view_from_image(image), itk.array_view_from_image(image_round))) except ImportError: print('vtk not imported. Skipping vtk conversion tests') pass
1
14,045
Unrelated to this PR, we should remove this exception. We now require `numpy`.
InsightSoftwareConsortium-ITK
py
@@ -29,7 +29,7 @@ type SubWorkflow struct { func (s *SubWorkflow) populate(ctx context.Context, st *Step) DError { if s.Path != "" { var err error - if s.Workflow, err = st.w.NewSubWorkflowFromFile(s.Path); err != nil { + if s.Workflow, err = st.w.NewSubWorkflowFromFile(s.Path, s.Vars); err != nil { return ToDError(err) } }
1
// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package daisy import ( "context" "fmt" ) // SubWorkflow defines a Daisy sub workflow. type SubWorkflow struct { Path string Vars map[string]string `json:",omitempty"` Workflow *Workflow `json:",omitempty"` } func (s *SubWorkflow) populate(ctx context.Context, st *Step) DError { if s.Path != "" { var err error if s.Workflow, err = st.w.NewSubWorkflowFromFile(s.Path); err != nil { return ToDError(err) } } if s.Workflow == nil { return Errf("SubWorkflow %q does not have a workflow", st.name) } s.Workflow.parent = st.w s.Workflow.GCSPath = fmt.Sprintf("gs://%s/%s", s.Workflow.parent.bucket, s.Workflow.parent.scratchPath) s.Workflow.Name = st.name s.Workflow.Project = s.Workflow.parent.Project s.Workflow.Zone = s.Workflow.parent.Zone s.Workflow.OAuthPath = s.Workflow.parent.OAuthPath s.Workflow.ComputeClient = s.Workflow.parent.ComputeClient s.Workflow.StorageClient = s.Workflow.parent.StorageClient s.Workflow.Logger = s.Workflow.parent.Logger s.Workflow.DefaultTimeout = st.Timeout var errs DError Loop: for k, v := range s.Vars { for wv := range s.Workflow.Vars { if k == wv { s.Workflow.AddVar(k, v) continue Loop } } errs = addErrs(errs, Errf("unknown workflow Var %q passed to SubWorkflow %q", k, st.name)) } if errs != nil { return errs } return s.Workflow.populate(ctx) } func (s *SubWorkflow) validate(ctx context.Context, st *Step) DError { return s.Workflow.validate(ctx) } func (s *SubWorkflow) run(ctx context.Context, st *Step) DError { if err := s.Workflow.uploadSources(ctx); err != nil { return err } swCleanup := func() { s.Workflow.LogWorkflowInfo("SubWorkflow %q cleaning up (this may take up to 2 minutes).", s.Workflow.Name) for _, hook := range s.Workflow.cleanupHooks { if err := hook(); err != nil { s.Workflow.LogWorkflowInfo("Error returned from SubWorkflow cleanup hook: %s", err) } } } defer swCleanup() // If the workflow fails before the subworkflow completes, the previous // "defer" cleanup won't happen. Add a failsafe here, have the workflow // also call this subworkflow's cleanup. st.w.addCleanupHook(func() DError { swCleanup() return nil }) // Prerun work has already been done. Just run(), not Run(). st.w.LogStepInfo(st.name, "SubWorkflow", "Running subworkflow %q", s.Workflow.Name) if err := s.Workflow.run(ctx); err != nil { s.Workflow.LogStepInfo(st.name, "SubWorkflow", "Error running subworkflow %q: %v", s.Workflow.Name, err) return err } return nil }
1
10,391
Is the loop over vars and adding them to the subworkflow below in this file needed, since it's already done by NewSubWorkflowFromFile?
GoogleCloudPlatform-compute-image-tools
go
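The reviewer's question, whether the Vars loop in populate() is still needed, comes down to whether NewSubWorkflowFromFile(s.Path, s.Vars) already merges the vars. A toy, self-contained sketch of that assumption; the real daisy types are stood in by minimal ones here:

package main

import "fmt"

// Toy stand-in for daisy's Workflow type, only to make the question concrete.
type Workflow struct{ Vars map[string]string }

// Assumed behavior after the diff: the file-based constructor takes vars and
// merges them while building the sub-workflow.
func NewSubWorkflowFromFile(path string, vars map[string]string) (*Workflow, error) {
	w := &Workflow{Vars: map[string]string{}}
	for k, v := range vars { // vars applied here once...
		w.Vars[k] = v
	}
	return w, nil
}

func main() {
	vars := map[string]string{"source_image": "projects/debian-cloud/global/images/family/debian-9"}
	w, err := NewSubWorkflowFromFile("import_image.wf.json", vars)
	if err != nil {
		panic(err)
	}
	// ...so looping over vars again in populate() and calling AddVar(k, v)
	// would repeat the same work, unless that loop is kept only for its
	// "unknown workflow Var" error reporting.
	fmt.Println(w.Vars["source_image"])
}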
@@ -0,0 +1,19 @@ +module LicensesHelper + def license_date_range(license) + formatted_date_range(license.starts_on, license.ends_on) + end + + def formatted_date_range(starts_on, ends_on) + if starts_on.nil? || ends_on.nil? + nil + elsif starts_on == ends_on + starts_on.to_s :simple + elsif starts_on.year != ends_on.year + "#{starts_on.to_s(:simple)}-#{ends_on.to_s(:simple)}" + elsif starts_on.month != ends_on.month + "#{starts_on.strftime("%B %d")}-#{ends_on.to_s(:simple)}" + else + "#{starts_on.strftime("%B %d")}-#{ends_on.strftime("%d, %Y")}" + end + end +end
1
1
10,348
Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping.
thoughtbot-upcase
rb
@@ -0,0 +1,5 @@ +import dagster.pandas_kernel as dagster_pd + + +def simple_csv_input(name): + return dagster_pd.dataframe_input(name, sources=[dagster_pd.csv_dataframe_source()])
1
1
11,602
Having to write this util makes me think that maybe we should have kept the csv_input stuff. I don't know.
dagster-io-dagster
py
@@ -21,9 +21,10 @@ var _ BlockOps = (*BlockOpsStandard)(nil) // NewBlockOpsStandard creates a new BlockOpsStandard func NewBlockOpsStandard(config Config, queueSize int) *BlockOpsStandard { + q := newBlockRetrievalQueue(queueSize, config.Codec(), config.BlockCache()) bops := &BlockOpsStandard{ config: config, - queue: newBlockRetrievalQueue(queueSize, config.Codec()), + queue: q, workers: make([]*blockRetrievalWorker, 0, queueSize), } bg := &realBlockGetter{config: config}
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "github.com/keybase/kbfs/tlf" "golang.org/x/net/context" ) // BlockOpsStandard implements the BlockOps interface by relaying // requests to the block server. type BlockOpsStandard struct { config Config queue *blockRetrievalQueue workers []*blockRetrievalWorker } var _ BlockOps = (*BlockOpsStandard)(nil) // NewBlockOpsStandard creates a new BlockOpsStandard func NewBlockOpsStandard(config Config, queueSize int) *BlockOpsStandard { bops := &BlockOpsStandard{ config: config, queue: newBlockRetrievalQueue(queueSize, config.Codec()), workers: make([]*blockRetrievalWorker, 0, queueSize), } bg := &realBlockGetter{config: config} for i := 0; i < queueSize; i++ { bops.workers = append(bops.workers, newBlockRetrievalWorker(bg, bops.queue)) } return bops } // Get implements the BlockOps interface for BlockOpsStandard. func (b *BlockOpsStandard) Get(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer, block Block) error { errCh := b.queue.Request(ctx, defaultOnDemandRequestPriority, kmd, blockPtr, block) return <-errCh } // Ready implements the BlockOps interface for BlockOpsStandard. func (b *BlockOpsStandard) Ready(ctx context.Context, kmd KeyMetadata, block Block) (id BlockID, plainSize int, readyBlockData ReadyBlockData, err error) { defer func() { if err != nil { id = BlockID{} plainSize = 0 readyBlockData = ReadyBlockData{} } }() crypto := b.config.Crypto() tlfCryptKey, err := b.config.KeyManager(). GetTLFCryptKeyForEncryption(ctx, kmd) if err != nil { return } // New server key half for the block. serverHalf, err := crypto.MakeRandomBlockCryptKeyServerHalf() if err != nil { return } blockKey, err := crypto.UnmaskBlockCryptKey(serverHalf, tlfCryptKey) if err != nil { return } plainSize, encryptedBlock, err := crypto.EncryptBlock(block, blockKey) if err != nil { return } buf, err := b.config.Codec().Encode(encryptedBlock) if err != nil { return } readyBlockData = ReadyBlockData{ buf: buf, serverHalf: serverHalf, } encodedSize := readyBlockData.GetEncodedSize() if encodedSize < plainSize { err = TooLowByteCountError{ ExpectedMinByteCount: plainSize, ByteCount: encodedSize, } return } id, err = crypto.MakePermanentBlockID(buf) if err != nil { return } // Cache the encoded size. block.SetEncodedSize(uint32(encodedSize)) return } // Delete implements the BlockOps interface for BlockOpsStandard. func (b *BlockOpsStandard) Delete(ctx context.Context, tlfID tlf.ID, ptrs []BlockPointer) (liveCounts map[BlockID]int, err error) { contexts := make(map[BlockID][]BlockContext) for _, ptr := range ptrs { contexts[ptr.ID] = append(contexts[ptr.ID], ptr.BlockContext) } return b.config.BlockServer().RemoveBlockReferences(ctx, tlfID, contexts) } // Archive implements the BlockOps interface for BlockOpsStandard. func (b *BlockOpsStandard) Archive(ctx context.Context, tlfID tlf.ID, ptrs []BlockPointer) error { contexts := make(map[BlockID][]BlockContext) for _, ptr := range ptrs { contexts[ptr.ID] = append(contexts[ptr.ID], ptr.BlockContext) } return b.config.BlockServer().ArchiveBlockReferences(ctx, tlfID, contexts) } // Shutdown implements the BlockOps interface for BlockOpsStandard. func (b *BlockOpsStandard) Shutdown() { b.queue.Shutdown() for _, w := range b.workers { w.Shutdown() } }
1
14,729
Passing in and saving a reference to the `BlockCache` at init time is going to break if something calls `config.ResetCaches()`, because it replaces the `BlockCache` instance completely. This happens on user logout or on a manual write to `.kbfs_reset_caches`. So you probably want to give it the whole `config`, or maybe just the `config.BlockCache` function pointer. A sketch of the function-pointer approach follows below.
keybase-kbfs
go
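A minimal, runnable sketch of the function-pointer approach suggested in the comment above. All names here (`blockCache`, `cacheFunc`, the `gen` field) are hypothetical stand-ins for the kbfs types, not the real API:

```go
package main

import "fmt"

// blockCache and config are hypothetical stand-ins for the kbfs types.
type blockCache struct{ gen int }

type config struct{ cache *blockCache }

func (c *config) BlockCache() *blockCache { return c.cache }

// ResetCaches replaces the BlockCache instance completely, as happens
// on user logout or a manual write to .kbfs_reset_caches.
func (c *config) ResetCaches() { c.cache = &blockCache{gen: c.cache.gen + 1} }

// blockRetrievalQueue stores the accessor function rather than the
// cache instance, so every lookup sees the current cache.
type blockRetrievalQueue struct {
	cacheFunc func() *blockCache
}

func main() {
	cfg := &config{cache: &blockCache{gen: 1}}
	q := &blockRetrievalQueue{cacheFunc: cfg.BlockCache} // method value, bound to cfg

	fmt.Println(q.cacheFunc().gen) // 1
	cfg.ResetCaches()
	fmt.Println(q.cacheFunc().gen) // 2: the queue is not left holding a stale cache
}
```

Passing the whole `config` works the same way, since the `BlockCache()` lookup then happens at call time rather than at init time.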
@@ -462,6 +462,9 @@ func (s *Service) newStreamForPeerID(ctx context.Context, peerID libp2ppeer.ID, swarmStreamName := p2p.NewSwarmStreamName(protocolName, protocolVersion, streamName) st, err := s.host.NewStream(ctx, peerID, protocol.ID(swarmStreamName)) if err != nil { + if st != nil { + _ = st.Close() + } if err == multistream.ErrNotSupported || err == multistream.ErrIncorrectVersion { return nil, p2p.NewIncompatibleStreamError(err) }
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package libp2p import ( "context" "crypto/ecdsa" "errors" "fmt" "net" "github.com/ethersphere/bee/pkg/addressbook" "github.com/ethersphere/bee/pkg/bzz" beecrypto "github.com/ethersphere/bee/pkg/crypto" "github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/p2p/libp2p/internal/breaker" handshake "github.com/ethersphere/bee/pkg/p2p/libp2p/internal/handshake" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/topology" "github.com/ethersphere/bee/pkg/tracing" "github.com/libp2p/go-libp2p" autonat "github.com/libp2p/go-libp2p-autonat-svc" crypto "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/helpers" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/network" libp2ppeer "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" protocol "github.com/libp2p/go-libp2p-core/protocol" "github.com/libp2p/go-libp2p-peerstore/pstoremem" libp2pquic "github.com/libp2p/go-libp2p-quic-transport" basichost "github.com/libp2p/go-libp2p/p2p/host/basic" "github.com/libp2p/go-tcp-transport" ws "github.com/libp2p/go-ws-transport" ma "github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multistream" ) var ( _ p2p.Service = (*Service)(nil) ) type Service struct { ctx context.Context host host.Host natManager basichost.NATManager libp2pPeerstore peerstore.Peerstore metrics metrics networkID uint64 handshakeService *handshake.Service addressbook addressbook.Putter peers *peerRegistry topologyNotifier topology.Notifier connectionBreaker breaker.Interface logger logging.Logger tracer *tracing.Tracer } type Options struct { PrivateKey *ecdsa.PrivateKey NATAddr string DisableWS bool DisableQUIC bool LightNode bool WelcomeMessage string Addressbook addressbook.Putter Logger logging.Logger Tracer *tracing.Tracer } func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay swarm.Address, addr string, o Options) (*Service, error) { host, port, err := net.SplitHostPort(addr) if err != nil { return nil, fmt.Errorf("address: %w", err) } ip4Addr := "0.0.0.0" ip6Addr := "::1" if host != "" { ip := net.ParseIP(host) if ip4 := ip.To4(); ip4 != nil { ip4Addr = ip4.String() ip6Addr = "" } else if ip6 := ip.To16(); ip6 != nil { ip6Addr = ip6.String() ip4Addr = "" } } var listenAddrs []string if ip4Addr != "" { listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s", ip4Addr, port)) if !o.DisableWS { listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s/ws", ip4Addr, port)) } if !o.DisableQUIC { listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/udp/%s/quic", ip4Addr, port)) } } if ip6Addr != "" { listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s", ip6Addr, port)) if !o.DisableWS { listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s/ws", ip6Addr, port)) } if !o.DisableQUIC { listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/udp/%s/quic", ip6Addr, port)) } } security := libp2p.DefaultSecurity libp2pPeerstore := pstoremem.NewPeerstore() var natManager basichost.NATManager opts := []libp2p.Option{ libp2p.ListenAddrStrings(listenAddrs...), security, // Use dedicated peerstore instead the global DefaultPeerstore libp2p.Peerstore(libp2pPeerstore), } if o.NATAddr == "" { opts = append(opts, libp2p.NATManager(func(n network.Network) basichost.NATManager 
{ natManager = basichost.NewNATManager(n) return natManager }), ) } if o.PrivateKey != nil { opts = append(opts, libp2p.Identity((*crypto.Secp256k1PrivateKey)(o.PrivateKey)), ) } transports := []libp2p.Option{ libp2p.Transport(tcp.NewTCPTransport), } if !o.DisableWS { transports = append(transports, libp2p.Transport(ws.New)) } if !o.DisableQUIC { transports = append(transports, libp2p.Transport(libp2pquic.NewTransport)) } opts = append(opts, transports...) h, err := libp2p.New(ctx, opts...) if err != nil { return nil, err } // If you want to help other peers to figure out if they are behind // NATs, you can launch the server-side of AutoNAT too (AutoRelay // already runs the client) if _, err = autonat.NewAutoNATService(ctx, h, // Support same non default security and transport options as // original host. append(transports, security)..., ); err != nil { return nil, fmt.Errorf("autonat: %w", err) } var advertisableAddresser handshake.AdvertisableAddressResolver if o.NATAddr == "" { advertisableAddresser = &UpnpAddressResolver{ host: h, } } else { advertisableAddresser, err = newStaticAddressResolver(o.NATAddr) if err != nil { return nil, fmt.Errorf("static nat: %w", err) } } handshakeService, err := handshake.New(signer, advertisableAddresser, overlay, networkID, o.LightNode, o.WelcomeMessage, o.Logger) if err != nil { return nil, fmt.Errorf("handshake service: %w", err) } peerRegistry := newPeerRegistry() s := &Service{ ctx: ctx, host: h, natManager: natManager, handshakeService: handshakeService, libp2pPeerstore: libp2pPeerstore, metrics: newMetrics(), networkID: networkID, peers: peerRegistry, addressbook: o.Addressbook, logger: o.Logger, tracer: o.Tracer, connectionBreaker: breaker.NewBreaker(breaker.Options{}), // use default options } // Construct protocols. 
id := protocol.ID(p2p.NewSwarmStreamName(handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName)) matcher, err := s.protocolSemverMatcher(id) if err != nil { return nil, fmt.Errorf("protocol version match %s: %w", id, err) } // handshake s.host.SetStreamHandlerMatch(id, matcher, func(stream network.Stream) { peerID := stream.Conn().RemotePeer() i, err := s.handshakeService.Handle(NewStream(stream), stream.Conn().RemoteMultiaddr(), peerID) if err != nil { s.logger.Debugf("handshake: handle %s: %v", peerID, err) s.logger.Errorf("unable to handshake with peer %v", peerID) _ = s.disconnect(peerID) return } if exists := s.peers.addIfNotExists(stream.Conn(), i.BzzAddress.Overlay); exists { if err = helpers.FullClose(stream); err != nil { s.logger.Debugf("handshake: could not close stream %s: %v", peerID, err) s.logger.Errorf("unable to handshake with peer %v", peerID) _ = s.disconnect(peerID) } return } if err = helpers.FullClose(stream); err != nil { s.logger.Debugf("handshake: could not close stream %s: %v", peerID, err) s.logger.Errorf("unable to handshake with peer %v", peerID) _ = s.disconnect(peerID) } err = s.addressbook.Put(i.BzzAddress.Overlay, *i.BzzAddress) if err != nil { s.logger.Debugf("handshake: addressbook put error %s: %v", peerID, err) s.logger.Errorf("unable to persist peer %v", peerID) _ = s.disconnect(peerID) return } if s.topologyNotifier != nil { if err := s.topologyNotifier.Connected(ctx, i.BzzAddress.Overlay); err != nil { s.logger.Debugf("topology notifier: %s: %v", peerID, err) } } s.metrics.HandledStreamCount.Inc() s.logger.Infof("successfully connected to peer (inbound) %s", i.BzzAddress.ShortString()) }) h.Network().SetConnHandler(func(_ network.Conn) { s.metrics.HandledConnectionCount.Inc() }) h.Network().Notify(peerRegistry) // update peer registry on network events h.Network().Notify(s.handshakeService) // update handshake service on network events return s, nil } func (s *Service) AddProtocol(p p2p.ProtocolSpec) (err error) { for _, ss := range p.StreamSpecs { ss := ss id := protocol.ID(p2p.NewSwarmStreamName(p.Name, p.Version, ss.Name)) matcher, err := s.protocolSemverMatcher(id) if err != nil { return fmt.Errorf("protocol version match %s: %w", id, err) } s.host.SetStreamHandlerMatch(id, matcher, func(streamlibp2p network.Stream) { peerID := streamlibp2p.Conn().RemotePeer() overlay, found := s.peers.overlay(peerID) if !found { // todo: this should never happen _ = s.disconnect(peerID) s.logger.Errorf("overlay address for peer %q not found", peerID) return } stream := newStream(streamlibp2p) // exchange headers if err := handleHeaders(ss.Headler, stream); err != nil { s.logger.Debugf("handle protocol %s/%s: stream %s: peer %s: handle headers: %v", p.Name, p.Version, ss.Name, overlay, err) if err := stream.Close(); err != nil { s.logger.Debugf("handle protocol %s/%s: stream %s: peer %s: handle headers close stream: %v", p.Name, p.Version, ss.Name, overlay, err) } return } ctx, cancel := context.WithCancel(s.ctx) s.peers.addStream(peerID, streamlibp2p, cancel) defer s.peers.removeStream(peerID, streamlibp2p) // tracing: get span tracing context and add it to the context // silently ignore if the peer is not providing tracing ctx, err := s.tracer.WithContextFromHeaders(ctx, stream.Headers()) if err != nil && !errors.Is(err, tracing.ErrContextNotFound) { s.logger.Debugf("handle protocol %s/%s: stream %s: peer %s: get tracing context: %v", p.Name, p.Version, ss.Name, overlay, err) return } logger := tracing.NewLoggerWithTraceID(ctx, s.logger) 
s.metrics.HandledStreamCount.Inc() if err := ss.Handler(ctx, p2p.Peer{Address: overlay}, stream); err != nil { var e *p2p.DisconnectError if errors.As(err, &e) { _ = s.Disconnect(overlay) } logger.Debugf("error handle protocol %s/%s: stream %s: peer %s: error: %v", p.Name, p.Version, ss.Name, overlay, err) return } }) } return nil } func (s *Service) Addresses() (addreses []ma.Multiaddr, err error) { for _, addr := range s.host.Addrs() { a, err := buildUnderlayAddress(addr, s.host.ID()) if err != nil { return nil, err } addreses = append(addreses, a) } return addreses, nil } func (s *Service) NATManager() basichost.NATManager { return s.natManager } func buildUnderlayAddress(addr ma.Multiaddr, peerID libp2ppeer.ID) (ma.Multiaddr, error) { // Build host multiaddress hostAddr, err := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", peerID.Pretty())) if err != nil { return nil, err } return addr.Encapsulate(hostAddr), nil } func (s *Service) Connect(ctx context.Context, addr ma.Multiaddr) (address *bzz.Address, err error) { // Extract the peer ID from the multiaddr. info, err := libp2ppeer.AddrInfoFromP2pAddr(addr) if err != nil { return nil, fmt.Errorf("addr from p2p: %w", err) } if _, found := s.peers.overlay(info.ID); found { return nil, p2p.ErrAlreadyConnected } if err := s.connectionBreaker.Execute(func() error { return s.host.Connect(ctx, *info) }); err != nil { if errors.Is(err, breaker.ErrClosed) { return nil, p2p.NewConnectionBackoffError(err, s.connectionBreaker.ClosedUntil()) } return nil, err } stream, err := s.newStreamForPeerID(ctx, info.ID, handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName) if err != nil { _ = s.disconnect(info.ID) return nil, fmt.Errorf("connect new stream: %w", err) } i, err := s.handshakeService.Handshake(NewStream(stream), stream.Conn().RemoteMultiaddr(), stream.Conn().RemotePeer()) if err != nil { _ = s.disconnect(info.ID) return nil, fmt.Errorf("handshake: %w", err) } if exists := s.peers.addIfNotExists(stream.Conn(), i.BzzAddress.Overlay); exists { if err := helpers.FullClose(stream); err != nil { _ = s.disconnect(info.ID) return nil, fmt.Errorf("peer exists, full close: %w", err) } return i.BzzAddress, nil } if err := helpers.FullClose(stream); err != nil { _ = s.disconnect(info.ID) return nil, fmt.Errorf("connect full close %w", err) } s.metrics.CreatedConnectionCount.Inc() s.logger.Infof("successfully connected to peer (outbound) %s", i.BzzAddress.ShortString()) return i.BzzAddress, nil } func (s *Service) Disconnect(overlay swarm.Address) error { peerID, found := s.peers.peerID(overlay) if !found { s.peers.disconnecter.Disconnected(overlay) return p2p.ErrPeerNotFound } return s.disconnect(peerID) } func (s *Service) disconnect(peerID libp2ppeer.ID) error { if err := s.host.Network().ClosePeer(peerID); err != nil { return err } s.peers.remove(peerID) return nil } func (s *Service) Peers() []p2p.Peer { return s.peers.peers() } func (s *Service) SetNotifier(n topology.Notifier) { s.topologyNotifier = n s.peers.setDisconnecter(n) } func (s *Service) NewStream(ctx context.Context, overlay swarm.Address, headers p2p.Headers, protocolName, protocolVersion, streamName string) (p2p.Stream, error) { peerID, found := s.peers.peerID(overlay) if !found { s.peers.disconnecter.Disconnected(overlay) return nil, p2p.ErrPeerNotFound } streamlibp2p, err := s.newStreamForPeerID(ctx, peerID, protocolName, protocolVersion, streamName) if err != nil { return nil, fmt.Errorf("new stream for peerid: %w", err) } stream := newStream(streamlibp2p) // tracing: add 
span context header if headers == nil { headers = make(p2p.Headers) } if err := s.tracer.AddContextHeader(ctx, headers); err != nil && !errors.Is(err, tracing.ErrContextNotFound) { return nil, err } // exchange headers if err := sendHeaders(ctx, headers, stream); err != nil { if err := stream.Close(); err != nil { s.logger.Debugf("send headers %s: close stream %v", peerID, err) } return nil, fmt.Errorf("send headers: %w", err) } return stream, nil } func (s *Service) newStreamForPeerID(ctx context.Context, peerID libp2ppeer.ID, protocolName, protocolVersion, streamName string) (network.Stream, error) { swarmStreamName := p2p.NewSwarmStreamName(protocolName, protocolVersion, streamName) st, err := s.host.NewStream(ctx, peerID, protocol.ID(swarmStreamName)) if err != nil { if err == multistream.ErrNotSupported || err == multistream.ErrIncorrectVersion { return nil, p2p.NewIncompatibleStreamError(err) } return nil, fmt.Errorf("create stream %q to %q: %w", swarmStreamName, peerID, err) } s.metrics.CreatedStreamCount.Inc() return st, nil } func (s *Service) Close() error { if err := s.libp2pPeerstore.Close(); err != nil { return err } return s.host.Close() }
1
10,740
I am not sure this could happen: `st` should be nil if the error occurs. If it does happen, we could also do `st.Reset()`, since it is an erroneous state, and that closes both sides of the stream. A sketch of the `Reset` alternative follows below.
ethersphere-bee
go
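A runnable sketch of the `Reset` alternative mentioned above. The `stream` interface models only the two libp2p `network.Stream` methods the comment contrasts, and `cleanupOnError` is a hypothetical helper, not part of the bee codebase:

```go
package main

import (
	"errors"
	"fmt"
)

// stream models the relevant slice of libp2p's network.Stream:
// Close ends the stream gracefully, Reset aborts both sides.
type stream interface {
	Close() error
	Reset() error
}

// cleanupOnError mirrors the comment: if NewStream ever returned a
// non-nil stream together with an error, Reset would fit the erroneous
// state better than a graceful Close.
func cleanupOnError(st stream, err error) error {
	if err != nil && st != nil {
		_ = st.Reset()
	}
	return err
}

type fakeStream struct{}

func (fakeStream) Close() error { return nil }
func (fakeStream) Reset() error { fmt.Println("reset: both sides torn down"); return nil }

func main() {
	err := cleanupOnError(fakeStream{}, errors.New("protocol not supported"))
	fmt.Println(err)
}
```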
@@ -239,6 +239,7 @@ var requirejs, require, define; } function delayedError(e) { + console.log(e.stack); return setTimeout(function() { e.dynaId && trackedErrors[e.dynaId] || (trackedErrors[e.dynaId] = !0, req.onError(e)) }), e
1
var requirejs, require, define; ! function(global, Promise, undef) { function commentReplace(match, singlePrefix) { return singlePrefix || "" } function hasProp(obj, prop) { return hasOwn.call(obj, prop) } function getOwn(obj, prop) { return obj && hasProp(obj, prop) && obj[prop] } function obj() { return Object.create(null) } function eachProp(obj, func) { var prop; for (prop in obj) if (hasProp(obj, prop) && func(obj[prop], prop)) break } function mixin(target, source, force, deepStringMixin) { return source && eachProp(source, function(value, prop) { !force && hasProp(target, prop) || (!deepStringMixin || "object" != typeof value || !value || Array.isArray(value) || "function" == typeof value || value instanceof RegExp ? target[prop] = value : (target[prop] || (target[prop] = {}), mixin(target[prop], value, force, deepStringMixin))) }), target } function getGlobal(value) { if (!value) return value; var g = global; return value.split(".").forEach(function(part) { g = g[part] }), g } function newContext(contextName) { function trimDots(ary) { var i, part, length = ary.length; for (i = 0; i < length; i++) if ("." === (part = ary[i])) ary.splice(i, 1), i -= 1; else if (".." === part) { if (0 === i || 1 === i && ".." === ary[2] || ".." === ary[i - 1]) continue; i > 0 && (ary.splice(i - 1, 2), i -= 2) } } function normalize(name, baseName, applyMap) { var mapValue, nameParts, i, j, nameSegment, lastIndex, foundMap, foundI, foundStarMap, starI, baseParts = baseName && baseName.split("/"), normalizedBaseParts = baseParts, map = config.map, starMap = map && map["*"]; if (name && (name = name.split("/"), lastIndex = name.length - 1, config.nodeIdCompat && jsSuffixRegExp.test(name[lastIndex]) && (name[lastIndex] = name[lastIndex].replace(jsSuffixRegExp, "")), "." === name[0].charAt(0) && baseParts && (normalizedBaseParts = baseParts.slice(0, baseParts.length - 1), name = normalizedBaseParts.concat(name)), trimDots(name), name = name.join("/")), applyMap && map && (baseParts || starMap)) { nameParts = name.split("/"); outerLoop: for (i = nameParts.length; i > 0; i -= 1) { if (nameSegment = nameParts.slice(0, i).join("/"), baseParts) for (j = baseParts.length; j > 0; j -= 1) if ((mapValue = getOwn(map, baseParts.slice(0, j).join("/"))) && (mapValue = getOwn(mapValue, nameSegment))) { foundMap = mapValue, foundI = i; break outerLoop }! foundStarMap && starMap && getOwn(starMap, nameSegment) && (foundStarMap = getOwn(starMap, nameSegment), starI = i) }!foundMap && foundStarMap && (foundMap = foundStarMap, foundI = starI), foundMap && (nameParts.splice(0, foundI, foundMap), name = nameParts.join("/")) } return getOwn(config.pkgs, name) || name } function makeShimExports(value) { function fn() { var ret; return value.init && (ret = value.init.apply(global, arguments)), ret || value.exports && getGlobal(value.exports) } return fn } function takeQueue(anonId) { var i, id, args, shim; for (i = 0; i < queue.length; i += 1) { if ("string" != typeof queue[i][0]) { if (!anonId) break; queue[i].unshift(anonId), anonId = undef } args = queue.shift(), id = args[0], i -= 1, id in defined || id in waiting || (id in deferreds ? 
main.apply(undef, args) : waiting[id] = args) } anonId && (shim = getOwn(config.shim, anonId) || {}, main(anonId, shim.deps || [], shim.exportsFn)) } function makeRequire(relName, topLevel) { var req = function(deps, callback, errback, alt) { var name, cfg; if (topLevel && takeQueue(), "string" == typeof deps) { if (handlers[deps]) return handlers[deps](relName); if (!((name = makeMap(deps, relName, !0).id) in defined)) throw new Error("Not loaded: " + name); return defined[name] } return deps && !Array.isArray(deps) && (cfg = deps, deps = undef, Array.isArray(callback) && (deps = callback, callback = errback, errback = alt), topLevel) ? req.config(cfg)(deps, callback, errback) : (callback = callback || function() { return slice.call(arguments, 0) }, asyncResolve.then(function() { return takeQueue(), main(undef, deps || [], callback, errback, relName) })) }; return req.isBrowser = "undefined" != typeof document && "undefined" != typeof navigator, req.nameToUrl = function(moduleName, ext, skipExt) { var paths, syms, i, parentModule, url, parentPath, bundleId, pkgMain = getOwn(config.pkgs, moduleName); if (pkgMain && (moduleName = pkgMain), bundleId = getOwn(bundlesMap, moduleName)) return req.nameToUrl(bundleId, ext, skipExt); if (urlRegExp.test(moduleName)) url = moduleName + (ext || ""); else { for (paths = config.paths, syms = moduleName.split("/"), i = syms.length; i > 0; i -= 1) if (parentModule = syms.slice(0, i).join("/"), parentPath = getOwn(paths, parentModule)) { Array.isArray(parentPath) && (parentPath = parentPath[0]), syms.splice(0, i, parentPath); break } url = syms.join("/"), url += ext || (/^data\:|^blob\:|\?/.test(url) || skipExt ? "" : ".js"), url = ("/" === url.charAt(0) || url.match(/^[\w\+\.\-]+:/) ? "" : config.baseUrl) + url } return config.urlArgs && !/^blob\:/.test(url) ? url + config.urlArgs(moduleName, url) : url }, req.toUrl = function(moduleNamePlusExt) { var ext, index = moduleNamePlusExt.lastIndexOf("."), segment = moduleNamePlusExt.split("/")[0], isRelative = "." === segment || ".." === segment; return -1 !== index && (!isRelative || index > 1) && (ext = moduleNamePlusExt.substring(index, moduleNamePlusExt.length), moduleNamePlusExt = moduleNamePlusExt.substring(0, index)), req.nameToUrl(normalize(moduleNamePlusExt, relName), ext, !0) }, req.defined = function(id) { return makeMap(id, relName, !0).id in defined }, req.specified = function(id) { return (id = makeMap(id, relName, !0).id) in defined || id in deferreds }, req } function resolve(name, d, value) { name && (defined[name] = value, requirejs.onResourceLoad && requirejs.onResourceLoad(context, d.map, d.deps)), d.finished = !0, d.resolve(value) } function reject(d, err) { d.finished = !0, d.rejected = !0, d.reject(err) } function makeNormalize(relName) { return function(name) { return normalize(name, relName, !0) } } function defineModule(d) { d.factoryCalled = !0; var ret, name = d.map.id; try { ret = context.execCb(name, d.factory, d.values, defined[name]) } catch (err) { return reject(d, err) } name ? ret === undef && (d.cjsModule ? 
ret = d.cjsModule.exports : d.usingExports && (ret = defined[name])) : requireDeferreds.splice(requireDeferreds.indexOf(d), 1), resolve(name, d, ret) } function depFinished(val, i) { this.rejected || this.depDefined[i] || (this.depDefined[i] = !0, this.depCount += 1, this.values[i] = val, this.depending || this.depCount !== this.depMax || defineModule(this)) } function makeDefer(name, calculatedMap) { var d = {}; return d.promise = new Promise(function(resolve, reject) { d.resolve = resolve, d.reject = function(err) { name || requireDeferreds.splice(requireDeferreds.indexOf(d), 1), reject(err) } }), d.map = name ? calculatedMap || makeMap(name) : {}, d.depCount = 0, d.depMax = 0, d.values = [], d.depDefined = [], d.depFinished = depFinished, d.map.pr && (d.deps = [makeMap(d.map.pr)]), d } function getDefer(name, calculatedMap) { var d; return name ? (d = name in deferreds && deferreds[name]) || (d = deferreds[name] = makeDefer(name, calculatedMap)) : (d = makeDefer(), requireDeferreds.push(d)), d } function makeErrback(d, name) { return function(err) { d.rejected || (err.dynaId || (err.dynaId = "id" + (errCount += 1), err.requireModules = [name]), reject(d, err)) } } function waitForDep(depMap, relName, d, i) { d.depMax += 1, callDep(depMap, relName).then(function(val) { d.depFinished(val, i) }, makeErrback(d, depMap.id)).catch(makeErrback(d, d.map.id)) } function makeLoad(id) { function load(value) { fromTextCalled || resolve(id, getDefer(id), value) } var fromTextCalled; return load.error = function(err) { reject(getDefer(id), err) }, load.fromText = function(text, textAlt) { var execError, d = getDefer(id), map = makeMap(makeMap(id).n), plainId = map.id; fromTextCalled = !0, d.factory = function(p, val) { return val }, textAlt && (text = textAlt), hasProp(config.config, id) && (config.config[plainId] = config.config[id]); try { req.exec(text) } catch (e) { execError = new Error("fromText eval for " + plainId + " failed: " + e), execError.requireType = "fromtexteval", reject(d, execError) } takeQueue(plainId), d.deps = [map], waitForDep(map, null, d, d.deps.length) }, load } function callPlugin(plugin, map, relName) { plugin.load(map.n, makeRequire(relName), makeLoad(map.id), config) } function splitPrefix(name) { var prefix, index = name ? name.indexOf("!") : -1; return index > -1 && (prefix = name.substring(0, index), name = name.substring(index + 1, name.length)), [prefix, name] } function breakCycle(d, traced, processed) { var id = d.map.id; traced[id] = !0, !d.finished && d.deps && d.deps.forEach(function(depMap) { var depId = depMap.id, dep = !hasProp(handlers, depId) && getDefer(depId, depMap); !dep || dep.finished || processed[depId] || (hasProp(traced, depId) ? d.deps.forEach(function(depMap, i) { depMap.id === depId && d.depFinished(defined[depId], i) }) : breakCycle(dep, traced, processed)) }), processed[id] = !0 } function check(d) { var err, mid, dfd, notFinished = [], waitInterval = 1e3 * config.waitSeconds, expired = waitInterval && startTime + waitInterval < (new Date).getTime(); if (0 === loadCount && (d ? 
d.finished || breakCycle(d, {}, {}) : requireDeferreds.length && requireDeferreds.forEach(function(d) { breakCycle(d, {}, {}) })), expired) { for (mid in deferreds) dfd = deferreds[mid], dfd.finished || notFinished.push(dfd.map.id); err = new Error("Timeout for modules: " + notFinished), err.requireModules = notFinished, err.requireType = "timeout", notFinished.forEach(function(id) { reject(getDefer(id), err) }) } else(loadCount || requireDeferreds.length) && (checkingLater || (checkingLater = !0, setTimeout(function() { checkingLater = !1, check() }, 70))) } function delayedError(e) { return setTimeout(function() { e.dynaId && trackedErrors[e.dynaId] || (trackedErrors[e.dynaId] = !0, req.onError(e)) }), e } var req, main, makeMap, callDep, handlers, checkingLater, load, context, defined = obj(), waiting = obj(), config = { waitSeconds: 7, baseUrl: "./", paths: {}, bundles: {}, pkgs: {}, shim: {}, config: {} }, mapCache = obj(), requireDeferreds = [], deferreds = obj(), calledDefine = obj(), calledPlugin = obj(), loadCount = 0, startTime = (new Date).getTime(), errCount = 0, trackedErrors = obj(), urlFetched = obj(), bundlesMap = obj(), asyncResolve = Promise.resolve(); return load = "function" == typeof importScripts ? function(map) { var url = map.url; urlFetched[url] || (urlFetched[url] = !0, getDefer(map.id), importScripts(url), takeQueue(map.id)) } : function(map) { var script, id = map.id, url = map.url; urlFetched[url] || (urlFetched[url] = !0, script = document.createElement("script"), script.setAttribute("data-requiremodule", id), script.type = config.scriptType || "text/javascript", script.charset = "utf-8", script.async = !0, loadCount += 1, script.addEventListener("load", function() { loadCount -= 1, takeQueue(id) }, !1), script.addEventListener("error", function() { loadCount -= 1; var err, pathConfig = getOwn(config.paths, id); if (pathConfig && Array.isArray(pathConfig) && pathConfig.length > 1) { script.parentNode.removeChild(script), pathConfig.shift(); var d = getDefer(id); d.map = makeMap(id), d.map.url = req.nameToUrl(id), load(d.map) } else err = new Error("Load failed: " + id + ": " + script.src), err.requireModules = [id], err.requireType = "scripterror", reject(getDefer(id), err) }, !1), script.src = url, 10 === document.documentMode ? asap.then(function() { document.head.appendChild(script) }) : document.head.appendChild(script)) }, callDep = function(map, relName) { var args, bundleId, name = map.id, shim = config.shim[name]; if (name in waiting) args = waiting[name], delete waiting[name], main.apply(undef, args); else if (!(name in deferreds)) if (map.pr) { if (!(bundleId = getOwn(bundlesMap, name))) return callDep(makeMap(map.pr)).then(function(plugin) { var newMap = map.prn ? map : makeMap(name, relName, !0), newId = newMap.id, shim = getOwn(config.shim, newId); return newId in calledPlugin || (calledPlugin[newId] = !0, shim && shim.deps ? req(shim.deps, function() { callPlugin(plugin, newMap, relName) }) : callPlugin(plugin, newMap, relName)), getDefer(newId).promise }); map.url = req.nameToUrl(bundleId), load(map) } else shim && shim.deps ? req(shim.deps, function() { load(map) }) : load(map); return getDefer(name).promise }, makeMap = function(name, relName, applyMap) { if ("string" != typeof name) return name; var plugin, url, parts, prefix, result, prefixNormalized, cacheKey = name + " & " + (relName || "") + " & " + !!applyMap; return parts = splitPrefix(name), prefix = parts[0], name = parts[1], !prefix && cacheKey in mapCache ? 
mapCache[cacheKey] : (prefix && (prefix = normalize(prefix, relName, applyMap), plugin = prefix in defined && defined[prefix]), prefix ? plugin && plugin.normalize ? (name = plugin.normalize(name, makeNormalize(relName)), prefixNormalized = !0) : name = -1 === name.indexOf("!") ? normalize(name, relName, applyMap) : name : (name = normalize(name, relName, applyMap), parts = splitPrefix(name), prefix = parts[0], name = parts[1], url = req.nameToUrl(name)), result = { id: prefix ? prefix + "!" + name : name, n: name, pr: prefix, url: url, prn: prefix && prefixNormalized }, prefix || (mapCache[cacheKey] = result), result) }, handlers = { require: function(name) { return makeRequire(name) }, exports: function(name) { var e = defined[name]; return void 0 !== e ? e : defined[name] = {} }, module: function(name) { return { id: name, uri: "", exports: handlers.exports(name), config: function() { return getOwn(config.config, name) || {} } } } }, main = function(name, deps, factory, errback, relName) { if (name) { if (name in calledDefine) return; calledDefine[name] = !0 } var d = getDefer(name); return deps && !Array.isArray(deps) && (factory = deps, deps = []), deps = deps ? slice.call(deps, 0) : null, errback || (hasProp(config, "defaultErrback") ? config.defaultErrback && (errback = config.defaultErrback) : errback = delayedError), errback && d.promise.catch(errback), relName = relName || name, "function" == typeof factory ? (!deps.length && factory.length && (factory.toString().replace(commentRegExp, commentReplace).replace(cjsRequireRegExp, function(match, dep) { deps.push(dep) }), deps = (1 === factory.length ? ["require"] : ["require", "exports", "module"]).concat(deps)), d.factory = factory, d.deps = deps, d.depending = !0, deps.forEach(function(depName, i) { var depMap; deps[i] = depMap = makeMap(depName, relName, !0), depName = depMap.id, "require" === depName ? d.values[i] = handlers.require(name) : "exports" === depName ? (d.values[i] = handlers.exports(name), d.usingExports = !0) : "module" === depName ? d.values[i] = d.cjsModule = handlers.module(name) : void 0 === depName ? d.values[i] = void 0 : waitForDep(depMap, relName, d, i) }), d.depending = !1, d.depCount === d.depMax && defineModule(d)) : name && resolve(name, d, factory), startTime = (new Date).getTime(), name || check(d), d.promise }, req = makeRequire(null, !0), req.config = function(cfg) { if (cfg.context && cfg.context !== contextName) { var existingContext = getOwn(contexts, cfg.context); return existingContext ? existingContext.req.config(cfg) : newContext(cfg.context).config(cfg) } if (mapCache = obj(), cfg.baseUrl && "/" !== cfg.baseUrl.charAt(cfg.baseUrl.length - 1) && (cfg.baseUrl += "/"), "string" == typeof cfg.urlArgs) { var urlArgs = cfg.urlArgs; cfg.urlArgs = function(id, url) { return (-1 === url.indexOf("?") ? "?" : "&") + urlArgs } } var shim = config.shim, objs = { paths: !0, bundles: !0, config: !0, map: !0 }; return eachProp(cfg, function(value, prop) { objs[prop] ? 
(config[prop] || (config[prop] = {}), mixin(config[prop], value, !0, !0)) : config[prop] = value }), cfg.bundles && eachProp(cfg.bundles, function(value, prop) { value.forEach(function(v) { v !== prop && (bundlesMap[v] = prop) }) }), cfg.shim && (eachProp(cfg.shim, function(value, id) { Array.isArray(value) && (value = { deps: value }), !value.exports && !value.init || value.exportsFn || (value.exportsFn = makeShimExports(value)), shim[id] = value }), config.shim = shim), cfg.packages && cfg.packages.forEach(function(pkgObj) { var location, name; pkgObj = "string" == typeof pkgObj ? { name: pkgObj } : pkgObj, name = pkgObj.name, location = pkgObj.location, location && (config.paths[name] = pkgObj.location), config.pkgs[name] = pkgObj.name + "/" + (pkgObj.main || "main").replace(currDirRegExp, "").replace(jsSuffixRegExp, "") }), (cfg.deps || cfg.callback) && req(cfg.deps, cfg.callback), req }, req.onError = function(err) { throw err }, context = { id: contextName, defined: defined, waiting: waiting, config: config, deferreds: deferreds, req: req, execCb: function(name, callback, args, exports) { return callback.apply(exports, args) } }, contexts[contextName] = context, req } if (!Promise) throw new Error("No Promise implementation available"); var topReq, dataMain, src, subPath, bootstrapConfig = requirejs || require, hasOwn = Object.prototype.hasOwnProperty, contexts = {}, queue = [], currDirRegExp = /^\.\//, urlRegExp = /^\/|\:|\?|\.js$/, commentRegExp = /\/\*[\s\S]*?\*\/|([^:"'=]|^)\/\/.*$/gm, cjsRequireRegExp = /[^.]\s*require\s*\(\s*["']([^'"\s]+)["']\s*\)/g, jsSuffixRegExp = /\.js$/, slice = Array.prototype.slice; if ("function" != typeof requirejs) { var asap = Promise.resolve(void 0); requirejs = topReq = newContext("_"), "function" != typeof require && (require = topReq), topReq.exec = function(text) { return eval(text) }, topReq.contexts = contexts, define = function() { queue.push(slice.call(arguments, 0)) }, define.amd = { jQuery: !0 }, bootstrapConfig && topReq.config(bootstrapConfig), topReq.isBrowser && !contexts._.config.skipDataMain && (dataMain = document.querySelectorAll("script[data-main]")[0], (dataMain = dataMain && dataMain.getAttribute("data-main")) && (dataMain = dataMain.replace(jsSuffixRegExp, ""), bootstrapConfig && bootstrapConfig.baseUrl || -1 !== dataMain.indexOf("!") || (src = dataMain.split("/"), dataMain = src.pop(), subPath = src.length ? src.join("/") + "/" : "./", topReq.config({ baseUrl: subPath })), topReq([dataMain]))) } }(this, "undefined" != typeof Promise ? Promise : void 0);
1
11,303
I'm guessing this was just added for debugging? We should probably remove it so we aren't modifying third-party libs.
jellyfin-jellyfin-web
js
@@ -15,10 +15,17 @@ module Bolt @object_open = true end + def print_event(node, event) + case event[:type] + when :node_result + print_result(node, event[:result]) + end + end + def print_result(node, result) item = { name: node.uri, - status: result.is_a?(Bolt::ErrorResult) ? 'failure' : 'success', + status: result.success? ? 'success' : 'failure', result: result.to_result }
1
module Bolt class Outputter class JSON < Bolt::Outputter def initialize(stream = $stdout) @items_open = false @object_open = false @preceding_item = false super(stream) end def print_head @stream.puts '{ "items": [' @preceding_item = false @items_open = true @object_open = true end def print_result(node, result) item = { name: node.uri, status: result.is_a?(Bolt::ErrorResult) ? 'failure' : 'success', result: result.to_result } @stream.puts ',' if @preceding_item @stream.puts item.to_json @preceding_item = true end def print_summary(results, elapsed_time) @stream.puts "],\n" @preceding_item = false @items_open = false @stream.puts format('"node_count": %d, "elapsed_time": %d }', results.size, elapsed_time) end def print_plan(result) @stream.puts result.to_json end def fatal_error(e) @stream.puts "],\n" if @items_open @stream.puts '"_error": ' if @object_open @stream.puts e.to_json @stream.puts '}' if @object_open end end end end
1
7,014
This seems reversed... if `success?` is true, wouldn't we use `success`? (The actual mapping is sketched below.)
puppetlabs-bolt
rb
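For reference, a runnable sketch of the mapping the patched ternary actually produces; `Result` below is a hypothetical stand-in for a Bolt result object:

```ruby
# Hypothetical stand-in for a Bolt result; only success? matters here.
Result = Struct.new(:ok) do
  def success?
    ok
  end
end

[Result.new(true), Result.new(false)].each do |result|
  status = result.success? ? 'success' : 'failure'
  puts "success?=#{result.success?} => #{status}"
end
# Output:
# success?=true => success
# success?=false => failure
```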
@@ -0,0 +1,12 @@ +namespace Datadog.Trace +{ + internal enum LogEventLevel + { + Verbose, + Debug, + Information, + Warning, + Error, + Fatal + } +}
1
1
16,494
This seemed like the easiest way to allow log-level checks; it will require an update to the vendors tool to ignore that file on update. A usage sketch follows below.
DataDog-dd-trace-dotnet
.cs
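A runnable sketch of the kind of log-level check the vendored enum enables; `MinimumLevel` and `IsEnabled` are hypothetical names, not part of the tracer's API:

```csharp
using System;

namespace LogLevelSketch
{
    // Mirrors the enum from the patch; declaration order gives Verbose
    // the lowest underlying value and Fatal the highest.
    internal enum LogEventLevel { Verbose, Debug, Information, Warning, Error, Fatal }

    internal static class Program
    {
        private static readonly LogEventLevel MinimumLevel = LogEventLevel.Information;

        // A level is enabled when it is at or above the configured minimum.
        private static bool IsEnabled(LogEventLevel level) => level >= MinimumLevel;

        private static void Main()
        {
            Console.WriteLine(IsEnabled(LogEventLevel.Debug));   // False
            Console.WriteLine(IsEnabled(LogEventLevel.Warning)); // True
        }
    }
}
```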
@@ -21,6 +21,11 @@ <%= richtext_area :diary_comment, :body, :cols => 80, :rows => 15 %> <%= submit_tag t('diary_entry.view.save_button') %> <% end %> + <% if @user and @entry.subscribers.exists?(@user.id) %> + <div style='position:relative; top: -30px; left: 130px'><%= link_to t('javascripts.changesets.show.unsubscribe'), diary_entry_unsubscribe_path(:display_name => @entry.user.display_name, :id => @entry.id), :method => :post, :class => :button %></div> + <% elsif @user %> + <div style='position:relative; top: -30px; left: 130px'><%= link_to t('javascripts.changesets.show.subscribe'), diary_entry_subscribe_path(:display_name => @entry.user.display_name, :id => @entry.id), :method => :post, :class => :button %></div> + <% end %> <% end %> <%= if_not_logged_in(:div) do %>
1
<% content_for :heading do %> <div id="userinformation" > <%= user_image @entry.user %> <h2><%= link_to t('diary_entry.view.user_title', :user => h(@entry.user.display_name)), :action => :list %></h2> <p><%= rss_link_to :action => :rss, :display_name => @entry.user.display_name %></p> </div> <% end %> <%= render :partial => 'diary_entry', :object => @entry %> <a id="comments"></a> <div class='comments'> <%= render :partial => 'diary_comment', :collection => @entry.visible_comments %> </div> <%= if_logged_in(:div) do %> <h3 id="newcomment"><%= t 'diary_entry.view.leave_a_comment' %></h3> <%= error_messages_for 'diary_comment' %> <%= form_for :diary_comment, :url => { :action => 'comment' } do |f| %> <%= richtext_area :diary_comment, :body, :cols => 80, :rows => 15 %> <%= submit_tag t('diary_entry.view.save_button') %> <% end %> <% end %> <%= if_not_logged_in(:div) do %> <h3 id="newcomment"><%= raw t("diary_entry.view.login_to_leave_a_comment", :login_link => link_to(t("diary_entry.view.login"), :controller => 'user', :action => 'login', :referer => request.fullpath)) %></h3> <% end %> <% content_for :auto_discovery_link_tag do -%> <%= auto_discovery_link_tag :rss, :action => :rss, :display_name => @entry.user.display_name %> <% end -%>
1
10,180
Should we disallow entry authors from unsubscribing from their own entries?
openstreetmap-openstreetmap-website
rb
@@ -1,7 +1,7 @@ # -*- coding: UTF-8 -*- #core.py #A part of NonVisual Desktop Access (NVDA) -#Copyright (C) 2006-2016 NV Access Limited, Aleksey Sadovoy, Christopher Toth, Joseph Lee, Peter Vágner +#Copyright (C) 2006-2016 NV Access Limited, Aleksey Sadovoy, Christopher Toth, Joseph Lee, Peter Vágner, Derek Riemer #This file is covered by the GNU General Public License. #See the file COPYING for more details.
1
# -*- coding: UTF-8 -*- #core.py #A part of NonVisual Desktop Access (NVDA) #Copyright (C) 2006-2016 NV Access Limited, Aleksey Sadovoy, Christopher Toth, Joseph Lee, Peter Vágner #This file is covered by the GNU General Public License. #See the file COPYING for more details. """NVDA core""" # Do this first to initialise comtypes.client.gen_dir and the comtypes.gen search path. import comtypes.client # Append our comInterfaces directory to the comtypes.gen search path. import comtypes.gen import comInterfaces comtypes.gen.__path__.append(comInterfaces.__path__[0]) #Apply several monky patches to comtypes import comtypesMonkeyPatches import sys import winVersion import thread import nvwave import os import time import ctypes import logHandler import globalVars from logHandler import log import addonHandler PUMP_MAX_DELAY = 10 #: The thread identifier of the main thread. mainThreadId = thread.get_ident() _pump = None _isPumpPending = False def doStartupDialogs(): import config import gui # Translators: The title of the dialog to tell users that there are erros in the configuration file. if config.conf.baseConfigError: import wx gui.messageBox( # Translators: A message informing the user that there are errors in the configuration file. _("Your configuration file contains errors. " "Your configuration has been reset to factory defaults.\n" "More details about the errors can be found in the log file."), # Translators: The title of the dialog to tell users that there are errors in the configuration file. _("Configuration File Error"), wx.OK | wx.ICON_EXCLAMATION) if config.conf["general"]["showWelcomeDialogAtStartup"]: gui.WelcomeDialog.run() if config.conf["speechViewer"]["showSpeechViewerAtStartup"]: gui.mainFrame.onToggleSpeechViewerCommand(evt=None) import inputCore if inputCore.manager.userGestureMap.lastUpdateContainedError: import wx gui.messageBox(_("Your gesture map file contains errors.\n" "More details about the errors can be found in the log file."), _("gesture map File Error"), wx.OK|wx.ICON_EXCLAMATION) def restart(disableAddons=False, debugLogging=False): """Restarts NVDA by starting a new copy with -r.""" if globalVars.appArgs.launcher: import wx globalVars.exitCode=3 wx.GetApp().ExitMainLoop() return import subprocess import winUser import shellapi options=[] if "-r" not in sys.argv: options.append("-r") try: sys.argv.remove('--disable-addons') except ValueError: pass try: sys.argv.remove('--debug-logging') except ValueError: pass if disableAddons: options.append('--disable-addons') if debugLogging: options.append('--debug-logging') try: sys.argv.remove("--ease-of-access") except ValueError: pass shellapi.ShellExecute(None, None, sys.executable.decode("mbcs"), subprocess.list2cmdline(sys.argv + options).decode("mbcs"), None, # #4475: ensure that the first window of the new process is not hidden by providing SW_SHOWNORMAL winUser.SW_SHOWNORMAL) def resetConfiguration(factoryDefaults=False): """Loads the configuration, installs the correct language support and initialises audio so that it will use the configured synth and speech settings. 
""" import config import braille import brailleInput import speech import languageHandler import inputCore log.debug("Terminating braille") braille.terminate() log.debug("Terminating brailleInput") brailleInput.terminate() log.debug("terminating speech") speech.terminate() log.debug("terminating addonHandler") addonHandler.terminate() log.debug("Reloading config") config.conf.reset(factoryDefaults=factoryDefaults) logHandler.setLogLevelFromConfig() #Language lang = config.conf["general"]["language"] log.debug("setting language to %s"%lang) languageHandler.setLanguage(lang) # Addons addonHandler.initialize() #Speech log.debug("initializing speech") speech.initialize() #braille log.debug("Initializing brailleInput") brailleInput.initialize() log.debug("Initializing braille") braille.initialize() log.debug("Reloading user and locale input gesture maps") inputCore.manager.loadUserGestureMap() inputCore.manager.loadLocaleGestureMap() import audioDucking if audioDucking.isAudioDuckingSupported(): audioDucking.handleConfigProfileSwitch() log.info("Reverted to saved configuration") def _setInitialFocus(): """Sets the initial focus if no focus event was received at startup. """ import eventHandler import api if eventHandler.lastQueuedFocusObject: # The focus has already been set or a focus event is pending. return try: focus = api.getDesktopObject().objectWithFocus() if focus: eventHandler.queueEvent('gainFocus', focus) except: log.exception("Error retrieving initial focus") def main(): """NVDA's core main loop. This initializes all modules such as audio, IAccessible, keyboard, mouse, and GUI. Then it initialises the wx application object and sets up the core pump, which checks the queues and executes functions when requested. Finally, it starts the wx main loop. """ log.debug("Core starting") try: # Windows >= Vista ctypes.windll.user32.SetProcessDPIAware() except AttributeError: pass import config if not globalVars.appArgs.configPath: globalVars.appArgs.configPath=config.getUserDefaultConfigPath(useInstalledPathIfExists=globalVars.appArgs.launcher) #Initialize the config path (make sure it exists) config.initConfigPath() log.info("Config dir: %s"%os.path.abspath(globalVars.appArgs.configPath)) log.debug("loading config") import config config.initialize() if not globalVars.appArgs.minimal and config.conf["general"]["playStartAndExitSounds"]: try: nvwave.playWaveFile("waves\\start.wav") except: pass logHandler.setLogLevelFromConfig() try: lang = config.conf["general"]["language"] import languageHandler log.debug("setting language to %s"%lang) languageHandler.setLanguage(lang) except: log.warning("Could not set language to %s"%lang) import versionInfo log.info("NVDA version %s" % versionInfo.version) log.info("Using Windows version %s" % winVersion.winVersionText) log.info("Using Python version %s"%sys.version) log.info("Using comtypes version %s"%comtypes.__version__) # Set a reasonable timeout for any socket connections NVDA makes. import socket socket.setdefaulttimeout(10) log.debug("Initializing add-ons system") addonHandler.initialize() if globalVars.appArgs.disableAddons: log.info("Add-ons are disabled. 
Restart NVDA to enable them.") import appModuleHandler log.debug("Initializing appModule Handler") appModuleHandler.initialize() import NVDAHelper log.debug("Initializing NVDAHelper") NVDAHelper.initialize() import speechDictHandler log.debug("Speech Dictionary processing") speechDictHandler.initialize() import speech log.debug("Initializing speech") speech.initialize() if not globalVars.appArgs.minimal and (time.time()-globalVars.startTime)>5: log.debugWarning("Slow starting core (%.2f sec)" % (time.time()-globalVars.startTime)) # Translators: This is spoken when NVDA is starting. speech.speakMessage(_("Loading NVDA. Please wait...")) import wx log.info("Using wx version %s"%wx.version()) class App(wx.App): def OnAssert(self,file,line,cond,msg): message="{file}, line {line}:\nassert {cond}: {msg}".format(file=file,line=line,cond=cond,msg=msg) log.debugWarning(message,codepath="WX Widgets",stack_info=True) app = App(redirect=False) # We do support QueryEndSession events, but we don't want to do anything for them. app.Bind(wx.EVT_QUERY_END_SESSION, lambda evt: None) def onEndSession(evt): # NVDA will be terminated as soon as this function returns, so save configuration if appropriate. config.saveOnExit() speech.cancelSpeech() if not globalVars.appArgs.minimal and config.conf["general"]["playStartAndExitSounds"]: try: nvwave.playWaveFile("waves\\exit.wav",async=False) except: pass log.info("Windows session ending") app.Bind(wx.EVT_END_SESSION, onEndSession) log.debug("Initializing braille input") import brailleInput brailleInput.initialize() import braille log.debug("Initializing braille") braille.initialize() import displayModel log.debug("Initializing displayModel") displayModel.initialize() log.debug("Initializing GUI") import gui gui.initialize() import audioDucking if audioDucking.isAudioDuckingSupported(): # the GUI mainloop must be running for this to work so delay it wx.CallAfter(audioDucking.initialize) # #3763: In wxPython 3, the class name of frame windows changed from wxWindowClassNR to wxWindowNR. # NVDA uses the main frame to check for and quit another instance of NVDA. # To remain compatible with older versions of NVDA, create our own wxWindowClassNR. # We don't need to do anything else because wx handles WM_QUIT for all windows. 
import windowUtils class MessageWindow(windowUtils.CustomWindow): className = u"wxWindowClassNR" messageWindow = MessageWindow(unicode(versionInfo.name)) # initialize wxpython localization support locale = wx.Locale() lang=languageHandler.getLanguage() wxLang=locale.FindLanguageInfo(lang) if not wxLang and '_' in lang: wxLang=locale.FindLanguageInfo(lang.split('_')[0]) if hasattr(sys,'frozen'): locale.AddCatalogLookupPathPrefix(os.path.join(os.getcwdu(),"locale")) if wxLang: try: locale.Init(wxLang.Language) except: log.error("Failed to initialize wx locale",exc_info=True) else: log.debugWarning("wx does not support language %s" % lang) import api import winUser import NVDAObjects.window desktopObject=NVDAObjects.window.Window(windowHandle=winUser.getDesktopWindow()) api.setDesktopObject(desktopObject) api.setFocusObject(desktopObject) api.setNavigatorObject(desktopObject) api.setMouseObject(desktopObject) import JABHandler log.debug("initializing Java Access Bridge support") try: JABHandler.initialize() except NotImplementedError: log.warning("Java Access Bridge not available") except: log.error("Error initializing Java Access Bridge support", exc_info=True) import winConsoleHandler log.debug("Initializing winConsole support") winConsoleHandler.initialize() import UIAHandler log.debug("Initializing UIA support") try: UIAHandler.initialize() except NotImplementedError: log.warning("UIA not available") except: log.error("Error initializing UIA support", exc_info=True) import IAccessibleHandler log.debug("Initializing IAccessible support") IAccessibleHandler.initialize() log.debug("Initializing input core") import inputCore inputCore.initialize() import keyboardHandler log.debug("Initializing keyboard handler") keyboardHandler.initialize() import mouseHandler log.debug("initializing mouse handler") mouseHandler.initialize() import touchHandler log.debug("Initializing touchHandler") try: touchHandler.initialize() except NotImplementedError: pass import globalPluginHandler log.debug("Initializing global plugin handler") globalPluginHandler.initialize() if globalVars.appArgs.install or globalVars.appArgs.installSilent: import wx import gui.installerGui wx.CallAfter(gui.installerGui.doSilentInstall,startAfterInstall=not globalVars.appArgs.installSilent) elif not globalVars.appArgs.minimal: try: # Translators: This is shown on a braille display (if one is connected) when NVDA starts. braille.handler.message(_("NVDA started")) except: log.error("", exc_info=True) if globalVars.appArgs.launcher: gui.LauncherDialog.run() # LauncherDialog will call doStartupDialogs() afterwards if required. else: wx.CallAfter(doStartupDialogs) import queueHandler # Queue the handling of initial focus, # as API handlers might need to be pumped to get the first focus event. queueHandler.queueFunction(queueHandler.eventQueue, _setInitialFocus) import watchdog import baseObject # Doing this here is a bit ugly, but we don't want these modules imported # at module level, including wx. log.debug("Initializing core pump") class CorePump(wx.Timer): "Checks the queues and executes functions." 
def Notify(self): global _isPumpPending _isPumpPending = False watchdog.alive() try: if touchHandler.handler: touchHandler.handler.pump() JABHandler.pumpAll() IAccessibleHandler.pumpAll() queueHandler.pumpAll() mouseHandler.pumpAll() braille.pumpAll() except: log.exception("errors in this core pump cycle") baseObject.AutoPropertyObject.invalidateCaches() watchdog.asleep() if _isPumpPending and not _pump.IsRunning(): # #3803: A pump was requested, but the timer was ignored by a modal loop # because timers aren't re-entrant. # Therefore, schedule another pump. _pump.Start(PUMP_MAX_DELAY, True) global _pump _pump = CorePump() requestPump() log.debug("Initializing watchdog") watchdog.initialize() try: import updateCheck except RuntimeError: updateCheck=None log.debug("Update checking not supported") else: log.debug("initializing updateCheck") updateCheck.initialize() log.info("NVDA initialized") log.debug("entering wx application main loop") app.MainLoop() log.info("Exiting") if updateCheck: _terminate(updateCheck) _terminate(watchdog) _terminate(globalPluginHandler, name="global plugin handler") _terminate(gui) config.saveOnExit() try: if globalVars.focusObject and hasattr(globalVars.focusObject,"event_loseFocus"): log.debug("calling lose focus on object with focus") globalVars.focusObject.event_loseFocus() except: log.exception("Lose focus error") try: speech.cancelSpeech() except: pass import treeInterceptorHandler _terminate(treeInterceptorHandler) _terminate(IAccessibleHandler, name="IAccessible support") _terminate(UIAHandler, name="UIA support") _terminate(winConsoleHandler, name="winConsole support") _terminate(JABHandler, name="Java Access Bridge support") _terminate(appModuleHandler, name="app module handler") _terminate(NVDAHelper) _terminate(touchHandler) _terminate(keyboardHandler, name="keyboard handler") _terminate(mouseHandler) _terminate(inputCore) _terminate(brailleInput) _terminate(braille) _terminate(speech) _terminate(addonHandler) if not globalVars.appArgs.minimal and config.conf["general"]["playStartAndExitSounds"]: try: nvwave.playWaveFile("waves\\exit.wav",async=False) except: pass # #5189: Destroy the message window as late as possible # so new instances of NVDA can find this one even if it freezes during exit. messageWindow.destroy() log.debug("core done") def _terminate(module, name=None): if name is None: name = module.__name__ log.debug("Terminating %s" % name) try: module.terminate() except: log.exception("Error terminating %s" % name) def requestPump(): """Request a core pump. This will perform any queued activity. It is delayed slightly so that queues can implement rate limiting, filter extraneous events, etc. """ global _isPumpPending if not _pump or _isPumpPending: return _isPumpPending = True if thread.get_ident() == mainThreadId: _pump.Start(PUMP_MAX_DELAY, True) return # This isn't the main thread. wx timers cannot be run outside the main thread. # Therefore, Have wx start it in the main thread with a CallAfter. import wx wx.CallAfter(_pump.Start,PUMP_MAX_DELAY, True) def callLater(delay, callable, *args, **kwargs): """Call a callable once after the specified number of milliseconds. As the call is executed within NVDA's core queue, it is possible that execution will take place slightly after the requested time. This function should never be used to execute code that brings up a modal UI as it will cause NVDA's core to block. This function can be safely called from any thread. 
""" import wx if thread.get_ident() == mainThreadId: return wx.CallLater(delay, _callLaterExec, callable, args, kwargs) else: return wx.CallAfter(wx.CallLater,delay, _callLaterExec, callable, args, kwargs) def _callLaterExec(callable, args, kwargs): import queueHandler queueHandler.queueFunction(queueHandler.eventQueue,callable,*args, **kwargs)
1
18,529
2016 should be 2017.
nvaccess-nvda
py
@@ -155,7 +155,7 @@ public class MicroserviceRegisterTask extends AbstractRegisterTask { String curSchemaSumary = existSchema.getSummary(); String schemaSummary = Hashing.sha256().newHasher().putString(content, Charsets.UTF_8).hash().toString(); if (!schemaSummary.equals(curSchemaSumary)) { - if (microservice.getInstance().getEnvironment().equalsIgnoreCase("development")) { + if (curSchemaSumary == null || microservice.getInstance().getEnvironment().equalsIgnoreCase("development")) { LOGGER.info( "schemaId [{}]'s content changes and the current enviroment is development, so re-register it!", schemaId);
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.servicecomb.serviceregistry.task; import java.util.HashSet; import java.util.List; import java.util.Map.Entry; import java.util.Set; import org.apache.servicecomb.serviceregistry.api.registry.Microservice; import org.apache.servicecomb.serviceregistry.api.response.GetSchemaResponse; import org.apache.servicecomb.serviceregistry.client.ServiceRegistryClient; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.util.StringUtils; import com.google.common.base.Charsets; import com.google.common.eventbus.EventBus; import com.google.common.eventbus.Subscribe; import com.google.common.hash.Hashing; public class MicroserviceRegisterTask extends AbstractRegisterTask { private static final Logger LOGGER = LoggerFactory.getLogger(MicroserviceRegisterTask.class); private boolean schemaIdSetMatch; public MicroserviceRegisterTask(EventBus eventBus, ServiceRegistryClient srClient, Microservice microservice) { super(eventBus, srClient, microservice); this.taskStatus = TaskStatus.READY; } public boolean isSchemaIdSetMatch() { return schemaIdSetMatch; } @Subscribe public void onMicroserviceInstanceHeartbeatTask(MicroserviceInstanceHeartbeatTask task) { if (task.getHeartbeatResult() != HeartbeatResult.SUCCESS && isSameMicroservice(task.getMicroservice())) { LOGGER.info("read MicroserviceInstanceHeartbeatTask status is {}", task.taskStatus); this.taskStatus = TaskStatus.READY; this.registered = false; } } @Override protected boolean doRegister() { LOGGER.info("running microservice register task."); String serviceId = srClient.getMicroserviceId(microservice.getAppId(), microservice.getServiceName(), microservice.getVersion(), microservice.getEnvironment()); if (!StringUtils.isEmpty(serviceId)) { // 已经注册过了,不需要重新注册 microservice.setServiceId(serviceId); LOGGER.info( "Microservice exists in service center, no need to register. id={} appId={}, name={}, version={}", serviceId, microservice.getAppId(), microservice.getServiceName(), microservice.getVersion()); if (!checkSchemaIdSet()) { return false; } } else { serviceId = srClient.registerMicroservice(microservice); if (StringUtils.isEmpty(serviceId)) { LOGGER.error( "Registry microservice failed. appId={}, name={}, version={}", microservice.getAppId(), microservice.getServiceName(), microservice.getVersion()); return false; } schemaIdSetMatch = true; // 重新注册服务场景下,instanceId不应该缓存 microservice.getInstance().setInstanceId(null); LOGGER.info( "Registry Microservice successfully. 
id={} appId={}, name={}, version={}, schemaIds={}", serviceId, microservice.getAppId(), microservice.getServiceName(), microservice.getVersion(), microservice.getSchemas()); } microservice.setServiceId(serviceId); microservice.getInstance().setServiceId(microservice.getServiceId()); return registerSchemas(); } private boolean checkSchemaIdSet() { Microservice existMicroservice = srClient.getMicroservice(microservice.getServiceId()); if (existMicroservice == null) { LOGGER.error("Error to get microservice from service center when check schema set"); return false; } Set<String> existSchemas = new HashSet<>(existMicroservice.getSchemas()); Set<String> localSchemas = new HashSet<>(microservice.getSchemas()); schemaIdSetMatch = existSchemas.equals(localSchemas); if (!schemaIdSetMatch) { LOGGER.error( "SchemaIds is different between local and service center. Please change microservice version. " + "id={} appId={}, name={}, version={}, local schemaIds={}, service center schemaIds={}", microservice.getServiceId(), microservice.getAppId(), microservice.getServiceName(), microservice.getVersion(), localSchemas, existSchemas); return true; } LOGGER.info( "SchemaIds is equals to service center. id={} appId={}, name={}, version={}, schemaIds={}", microservice.getServiceId(), microservice.getAppId(), microservice.getServiceName(), microservice.getVersion(), localSchemas); return true; } private boolean registerSchemas() { List<GetSchemaResponse> existSchemas = srClient.getSchemas(microservice.getServiceId()); for (Entry<String, String> entry : microservice.getSchemaMap().entrySet()) { String schemaId = entry.getKey(); String content = entry.getValue(); GetSchemaResponse existSchema = extractSchema(schemaId, existSchemas); boolean exists = existSchema != null; LOGGER.info("schemaId [{}] exists {}", schemaId, exists); if (!exists) { if (!srClient.registerSchema(microservice.getServiceId(), schemaId, content)) { return false; } } else { String curSchemaSumary = existSchema.getSummary(); String schemaSummary = Hashing.sha256().newHasher().putString(content, Charsets.UTF_8).hash().toString(); if (!schemaSummary.equals(curSchemaSumary)) { if (microservice.getInstance().getEnvironment().equalsIgnoreCase("development")) { LOGGER.info( "schemaId [{}]'s content changes and the current enviroment is development, so re-register it!", schemaId); if (!srClient.registerSchema(microservice.getServiceId(), schemaId, content)) { return false; } } else { throw new IllegalStateException("schemaId [" + schemaId + "] exists in service center, but the content does not match the local content that means there are interface change " + "and you need to increment microservice version before deploying. " + "Or you can configure instance_description.environment=development to work in development enviroment and ignore this error"); } } } } return true; } private GetSchemaResponse extractSchema(String schemaId, List<GetSchemaResponse> schemas) { if (schemas == null || schemas.isEmpty()) { return null; } GetSchemaResponse schema = null; for (GetSchemaResponse tempSchema : schemas) { if (tempSchema.getSchemaId().equals(schemaId)) { schema = tempSchema; break; } } return schema; } }
1
9,551
`boolean exists = (existSchema != null) && (existSchema.getSummary() != null);` (expanded into a runnable sketch below)
apache-servicecomb-java-chassis
java
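Expanding the one-line suggestion above into a runnable sketch; the nested `GetSchemaResponse` is a minimal stand-in for the real service-center response class:

```java
public class SchemaExistsSketch {
    // Minimal stand-in for the service-center GetSchemaResponse.
    static final class GetSchemaResponse {
        private final String summary;

        GetSchemaResponse(String summary) { this.summary = summary; }

        String getSummary() { return summary; }
    }

    // The suggested check: a schema whose summary was never recorded is
    // treated as not registered, so it is re-registered instead of
    // reaching the summary comparison with a null value.
    static boolean exists(GetSchemaResponse existSchema) {
        return existSchema != null && existSchema.getSummary() != null;
    }

    public static void main(String[] args) {
        System.out.println(exists(null));                         // false
        System.out.println(exists(new GetSchemaResponse(null)));  // false
        System.out.println(exists(new GetSchemaResponse("sha"))); // true
    }
}
```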