| patch (string, lengths 17-31.2k) | y (int64, 1-1) | oldf (string, lengths 0-2.21M) | idx (int64, 1-1) | id (int64, 4.29k-68.4k) | msg (string, lengths 8-843) | proj (212 classes) | lang (9 classes) |
|---|---|---|---|---|---|---|---|
@@ -453,7 +453,7 @@ func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) {
blocksFetched := uint64(1) // we already got the first block in the previous step.
var blk *bookkeeping.Block
var client FetcherClient
- for attemptsCount := uint64(1); blocksFetched <= lookback; attemptsCount++ {
+ for attemptsCount := uint64(1); blocksFetched <= lookback; {
if err := cs.ctx.Err(); err != nil {
return cs.stopOrAbort()
} | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package catchup
import (
"context"
"fmt"
"sync"
"time"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
)
// CatchpointCatchupNodeServices defines the external node support needed
// for the catchpoint service to switch the node between "regular" operational mode and catchup mode.
type CatchpointCatchupNodeServices interface {
SetCatchpointCatchupMode(bool) (newContextCh <-chan context.Context)
}
// CatchpointCatchupStats is used for querying and reporting the current state of the catchpoint catchup process
type CatchpointCatchupStats struct {
CatchpointLabel string
TotalAccounts uint64
ProcessedAccounts uint64
VerifiedAccounts uint64
TotalBlocks uint64
AcquiredBlocks uint64
VerifiedBlocks uint64
ProcessedBytes uint64
StartTime time.Time
}
// CatchpointCatchupService represents the catchpoint catchup service.
type CatchpointCatchupService struct {
// stats is the statistics object, updated async while downloading the ledger
stats CatchpointCatchupStats
// statsMu synchronizes access to stats, as we could attempt to update it while querying for its current state
statsMu deadlock.Mutex
node CatchpointCatchupNodeServices
// ctx is the node cancelation context, used when the node is being stopped.
ctx context.Context
cancelCtxFunc context.CancelFunc
// running is a waitgroup counting the running goroutine(1), and allows us to exit cleanly.
running sync.WaitGroup
// ledgerAccessor is the ledger accessor used to perform ledger-level operation on the database
ledgerAccessor ledger.CatchpointCatchupAccessor
// stage is the current stage of the catchpoint catchup process
stage ledger.CatchpointCatchupState
// log is the logger object
log logging.Logger
// newService indicates whether this service was created after the node was running ( i.e. true ) or the node just started to find that it was previously performing catchup
newService bool
// net is the underlying network module
net network.GossipNode
// ledger points to the ledger object
ledger *ledger.Ledger
// lastBlockHeader is the latest block we have before going into catchpoint catchup mode. We use it to serve the node status requests instead of going to the ledger.
lastBlockHeader bookkeeping.BlockHeader
// config is a copy of the node configuration
config config.Local
// abortCtx is used as a synchronized flag to let us know when the user asked us to abort the catchpoint catchup process. note that it's not being used when we decided to abort
// the catchup due to an internal issue ( such as exceeding number of retries )
abortCtx context.Context
abortCtxFunc context.CancelFunc
}
// MakeResumedCatchpointCatchupService creates a catchpoint catchup service for a node that is already in catchpoint catchup mode
func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCatchupNodeServices, log logging.Logger, net network.GossipNode, l *ledger.Ledger, cfg config.Local) (service *CatchpointCatchupService, err error) {
service = &CatchpointCatchupService{
stats: CatchpointCatchupStats{
StartTime: time.Now(),
},
node: node,
ledgerAccessor: ledger.MakeCatchpointCatchupAccessor(l, log),
log: log,
newService: false,
net: net,
ledger: l,
config: cfg,
}
service.lastBlockHeader, err = l.BlockHdr(l.Latest())
if err != nil {
return nil, err
}
err = service.loadStateVariables(ctx)
if err != nil {
return nil, err
}
return service, nil
}
// MakeNewCatchpointCatchupService creates a new catchpoint catchup service for a node that is not in catchpoint catchup mode
func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNodeServices, log logging.Logger, net network.GossipNode, l *ledger.Ledger, cfg config.Local) (service *CatchpointCatchupService, err error) {
if catchpoint == "" {
return nil, fmt.Errorf("MakeNewCatchpointCatchupService: catchpoint is invalid")
}
service = &CatchpointCatchupService{
stats: CatchpointCatchupStats{
CatchpointLabel: catchpoint,
StartTime: time.Now(),
},
node: node,
ledgerAccessor: ledger.MakeCatchpointCatchupAccessor(l, log),
stage: ledger.CatchpointCatchupStateInactive,
log: log,
newService: true,
net: net,
ledger: l,
config: cfg,
}
service.lastBlockHeader, err = l.BlockHdr(l.Latest())
if err != nil {
return nil, err
}
return service, nil
}
// Start starts the catchpoint catchup service ( continue in the process )
func (cs *CatchpointCatchupService) Start(ctx context.Context) {
cs.ctx, cs.cancelCtxFunc = context.WithCancel(ctx)
cs.abortCtx, cs.abortCtxFunc = context.WithCancel(context.Background())
cs.running.Add(1)
go cs.run()
}
// Abort aborts the catchpoint catchup process
func (cs *CatchpointCatchupService) Abort() {
// In order to abort the catchpoint catchup process, we need to first set the flag of abortCtxFunc, and follow that by canceling the main context.
// The order of these calls is crucial : The various stages are blocked on the main context. When that one expires, it uses the abort context to determine
// whether the cancellation means that we want to shut down the process, or abort the catchpoint catchup completely.
cs.abortCtxFunc()
cs.cancelCtxFunc()
}
// Stop stops the catchpoint catchup service - unlike Abort, this is not intended to abort the process but rather to allow
// cleanup of in-memory resources for the purpose of clean shutdown.
func (cs *CatchpointCatchupService) Stop() {
// signal the running goroutine that we want to stop
cs.cancelCtxFunc()
// wait for the running goroutine to terminate.
cs.running.Wait()
// cancel the abort context, just to release its goroutine.
cs.abortCtxFunc()
}
// GetLatestBlockHeader returns the last block header that was available at the time the catchpoint catchup service started
func (cs *CatchpointCatchupService) GetLatestBlockHeader() bookkeeping.BlockHeader {
return cs.lastBlockHeader
}
// run is the main stage-switching background service function. It switches the current stage into the correct stage handler.
func (cs *CatchpointCatchupService) run() {
defer cs.running.Done()
var err error
for {
// check if we need to abort.
select {
case <-cs.ctx.Done():
return
default:
}
switch cs.stage {
case ledger.CatchpointCatchupStateInactive:
err = cs.processStageInactive()
case ledger.CatchpointCatchupStateLedgerDownload:
err = cs.processStageLedgerDownload()
case ledger.CatchpointCatchupStateLastestBlockDownload:
err = cs.processStageLastestBlockDownload()
case ledger.CatchpointCatchupStateBlocksDownload:
err = cs.processStageBlocksDownload()
case ledger.CatchpointCatchupStateSwitch:
err = cs.processStageSwitch()
default:
err = cs.abort(fmt.Errorf("unexpected catchpoint catchup stage encountered : %v", cs.stage))
}
if cs.ctx.Err() != nil {
if err != nil {
cs.log.Warnf("catchpoint catchup stage error : %v", err)
}
continue
}
if err != nil {
cs.log.Warnf("catchpoint catchup stage error : %v", err)
time.Sleep(200 * time.Millisecond)
}
}
}
// loadStateVariables loads the current stage and catchpoint label from disk. It's used only in the case of catchpoint catchup recovery.
// ( i.e. the node never completed the catchup, and the node was shutdown )
func (cs *CatchpointCatchupService) loadStateVariables(ctx context.Context) (err error) {
var label string
label, err = cs.ledgerAccessor.GetLabel(ctx)
if err != nil {
return err
}
cs.statsMu.Lock()
cs.stats.CatchpointLabel = label
cs.statsMu.Unlock()
cs.stage, err = cs.ledgerAccessor.GetState(ctx)
if err != nil {
return err
}
return nil
}
// processStageInactive is the first catchpoint stage. It stores the desired label for catching up, so that if the catchpoint catchup is interrupted
// it could be resumed from that point.
func (cs *CatchpointCatchupService) processStageInactive() (err error) {
cs.statsMu.Lock()
label := cs.stats.CatchpointLabel
cs.statsMu.Unlock()
err = cs.ledgerAccessor.SetLabel(cs.ctx, label)
if err != nil {
return cs.abort(fmt.Errorf("processStageInactive failed to set a catchpoint label : %v", err))
}
err = cs.updateStage(ledger.CatchpointCatchupStateLedgerDownload)
if err != nil {
return cs.abort(fmt.Errorf("processStageInactive failed to update stage : %v", err))
}
if cs.newService {
// we need to let the node know that it should shut down all the unneeded services to avoid clashes.
cs.updateNodeCatchupMode(true)
}
return nil
}
// processStageLedgerDownload is the second catchpoint catchup stage. It downloads the ledger.
func (cs *CatchpointCatchupService) processStageLedgerDownload() (err error) {
cs.statsMu.Lock()
label := cs.stats.CatchpointLabel
cs.statsMu.Unlock()
round, _, err0 := ledger.ParseCatchpointLabel(label)
if err0 != nil {
return cs.abort(fmt.Errorf("processStageLedgerDownload failed to patse label : %v", err0))
}
// download balances file.
ledgerFetcher := makeLedgerFetcher(cs.net, cs.ledgerAccessor, cs.log, cs, cs.config)
attemptsCount := 0
for {
attemptsCount++
err = cs.ledgerAccessor.ResetStagingBalances(cs.ctx, true)
if err != nil {
if cs.ctx.Err() != nil {
return cs.stopOrAbort()
}
return cs.abort(fmt.Errorf("processStageLedgerDownload failed to reset staging balances : %v", err))
}
err = ledgerFetcher.downloadLedger(cs.ctx, round)
if err == nil {
err = cs.ledgerAccessor.BuildMerkleTrie(cs.ctx, cs.updateVerifiedAccounts)
if err == nil {
break
}
// failed to build the merkle trie for the above catchpoint file.
}
// instead of testing for err == cs.ctx.Err() , we'll check on the context itself.
// this is more robust, as the http client library sometimes wrap the context canceled
// error with other errors.
if cs.ctx.Err() != nil {
return cs.stopOrAbort()
}
if attemptsCount >= cs.config.CatchupLedgerDownloadRetryAttempts {
err = fmt.Errorf("catchpoint catchup exceeded number of attempts to retrieve ledger")
return cs.abort(err)
}
cs.log.Warnf("unable to download ledger : %v", err)
}
err = cs.updateStage(ledger.CatchpointCatchupStateLastestBlockDownload)
if err != nil {
return cs.abort(fmt.Errorf("processStageLedgerDownload failed to update stage to CatchpointCatchupStateLastestBlockDownload : %v", err))
}
return nil
}
// updateVerifiedAccounts updates the user's statistics for the given verified accounts
func (cs *CatchpointCatchupService) updateVerifiedAccounts(verifiedAccounts uint64) {
cs.statsMu.Lock()
defer cs.statsMu.Unlock()
cs.stats.VerifiedAccounts = verifiedAccounts
}
// processStageLastestBlockDownload is the third catchpoint catchup stage. It downloads the latest block and verifies it against the previously downloaded ledger.
func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err error) {
blockRound, err := cs.ledgerAccessor.GetCatchupBlockRound(cs.ctx)
if err != nil {
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed to retrieve catchup block round : %v", err))
}
fetcherFactory := MakeNetworkFetcherFactory(cs.net, 10, nil, &cs.config)
attemptsCount := 0
var blk *bookkeeping.Block
var client FetcherClient
// check to see if the current ledger might have this block. If so, we should try this first instead of downloading anything.
if ledgerBlock, err := cs.ledger.Block(blockRound); err == nil {
blk = &ledgerBlock
}
for {
attemptsCount++
if blk == nil {
fetcher := fetcherFactory.New()
blk, _, client, err = fetcher.FetchBlock(cs.ctx, blockRound)
if err != nil {
if cs.ctx.Err() != nil {
return cs.stopOrAbort()
}
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
cs.log.Infof("processStageLastestBlockDownload: block %d download failed, another attempt will be made; err = %v", blockRound, err)
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed to get block %d : %v", blockRound, err))
}
// success
client.Close()
}
// check block protocol version support.
if _, ok := config.Consensus[blk.BlockHeader.CurrentProtocol]; !ok {
cs.log.Warnf("processStageLastestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol)
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol))
}
// check to see that the block header and the block payset aligns
if !blk.ContentsMatchHeader() {
cs.log.Warnf("processStageLastestBlockDownload: downloaded block content does not match downloaded block header")
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload: downloaded block content does not match downloaded block header"))
}
// verify that the catchpoint is valid.
err = cs.ledgerAccessor.VerifyCatchpoint(cs.ctx, blk)
if err != nil {
if cs.ctx.Err() != nil {
return cs.stopOrAbort()
}
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
cs.log.Infof("processStageLastestBlockDownload: block %d verification against catchpoint failed, another attempt will be made; err = %v", blockRound, err)
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling VerifyCatchpoint : %v", err))
}
err = cs.ledgerAccessor.StoreBalancesRound(cs.ctx, blk)
if err != nil {
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling StoreBalancesRound : %v", err))
}
err = cs.ledgerAccessor.StoreFirstBlock(cs.ctx, blk)
if err != nil {
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling StoreFirstBlock : %v", err))
}
err = cs.updateStage(ledger.CatchpointCatchupStateBlocksDownload)
if err != nil {
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
continue
}
return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed to update stage : %v", err))
}
// great ! everything is ready for next stage.
break
}
return nil
}
// processStageBlocksDownload is the fourth catchpoint catchup stage. It downloads all the remaining blocks, verifying each one of them against its predecessor.
func (cs *CatchpointCatchupService) processStageBlocksDownload() (err error) {
topBlock, err := cs.ledgerAccessor.EnsureFirstBlock(cs.ctx)
if err != nil {
return cs.abort(fmt.Errorf("processStageBlocksDownload failed, unable to ensure first block : %v", err))
}
// pick the lookback with the greater of either MaxTxnLife or MaxBalLookback
lookback := config.Consensus[topBlock.CurrentProtocol].MaxTxnLife
if lookback < config.Consensus[topBlock.CurrentProtocol].MaxBalLookback {
lookback = config.Consensus[topBlock.CurrentProtocol].MaxBalLookback
}
// in case the effective lookback is going before our rounds count, trim it there.
// ( a catchpoint is generated starting round MaxBalLookback, and this is possible in any round in the range of MaxBalLookback..MaxTxnLife)
if lookback >= uint64(topBlock.Round()) {
lookback = uint64(topBlock.Round() - 1)
}
cs.statsMu.Lock()
cs.stats.TotalBlocks = uint64(lookback)
cs.stats.AcquiredBlocks = 0
cs.stats.VerifiedBlocks = 0
cs.statsMu.Unlock()
prevBlock := &topBlock
fetcherFactory := MakeNetworkFetcherFactory(cs.net, 10, nil, &cs.config)
blocksFetched := uint64(1) // we already got the first block in the previous step.
var blk *bookkeeping.Block
var client FetcherClient
for attemptsCount := uint64(1); blocksFetched <= lookback; attemptsCount++ {
if err := cs.ctx.Err(); err != nil {
return cs.stopOrAbort()
}
blk = nil
// check to see if the current ledger might have this block. If so, we should try this first instead of downloading anything.
if ledgerBlock, err := cs.ledger.Block(topBlock.Round() - basics.Round(blocksFetched)); err == nil {
blk = &ledgerBlock
} else {
switch err.(type) {
case ledger.ErrNoEntry:
// this is expected, ignore this one.
default:
cs.log.Warnf("processStageBlocksDownload encountered the following error when attempting to retrieve the block for round %d : %v", topBlock.Round()-basics.Round(blocksFetched), err)
}
}
if blk == nil {
fetcher := fetcherFactory.New()
blk, _, client, err = fetcher.FetchBlock(cs.ctx, topBlock.Round()-basics.Round(blocksFetched))
if err != nil {
if cs.ctx.Err() != nil {
return cs.stopOrAbort()
}
if attemptsCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
cs.log.Infof("Failed to download block %d on attempt %d out of %d. %v", topBlock.Round()-basics.Round(blocksFetched), attemptsCount, cs.config.CatchupBlockDownloadRetryAttempts, err)
continue
}
return cs.abort(fmt.Errorf("processStageBlocksDownload failed after multiple blocks download attempts"))
}
// success
client.Close()
}
cs.updateBlockRetrievalStatistics(1, 0)
// validate :
if prevBlock.BlockHeader.Branch != blk.Hash() {
// not identical, retry download.
cs.log.Warnf("processStageBlocksDownload downloaded block(%d) did not match it's successor(%d) block hash %v != %v", blk.Round(), prevBlock.Round(), blk.Hash(), prevBlock.BlockHeader.Branch)
cs.updateBlockRetrievalStatistics(-1, 0)
if attemptsCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
continue
}
return cs.abort(fmt.Errorf("processStageBlocksDownload downloaded block(%d) did not match it's successor(%d) block hash %v != %v", blk.Round(), prevBlock.Round(), blk.Hash(), prevBlock.BlockHeader.Branch))
}
// check block protocol version support.
if _, ok := config.Consensus[blk.BlockHeader.CurrentProtocol]; !ok {
cs.log.Warnf("processStageBlocksDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol)
cs.updateBlockRetrievalStatistics(-1, 0)
if attemptsCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
continue
}
return cs.abort(fmt.Errorf("processStageBlocksDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol))
}
// check to see that the block header and the block payset aligns
if !blk.ContentsMatchHeader() {
cs.log.Warnf("processStageBlocksDownload: downloaded block content does not match downloaded block header")
// try again.
cs.updateBlockRetrievalStatistics(-1, 0)
if attemptsCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
continue
}
return cs.abort(fmt.Errorf("processStageBlocksDownload: downloaded block content does not match downloaded block header"))
}
cs.updateBlockRetrievalStatistics(0, 1)
// all good, persist and move on.
err = cs.ledgerAccessor.StoreBlock(cs.ctx, blk)
if err != nil {
cs.log.Warnf("processStageBlocksDownload failed to store downloaded staging block for round %d", blk.Round())
cs.updateBlockRetrievalStatistics(-1, -1)
if attemptsCount <= uint64(cs.config.CatchupBlockDownloadRetryAttempts) {
// try again.
continue
}
return cs.abort(fmt.Errorf("processStageBlocksDownload failed to store downloaded staging block for round %d", blk.Round()))
}
prevBlock = blk
blocksFetched++
}
err = cs.updateStage(ledger.CatchpointCatchupStateSwitch)
if err != nil {
return cs.abort(fmt.Errorf("processStageBlocksDownload failed to update stage : %v", err))
}
return nil
}
// processStageSwitch is the fifth catchpoint catchup stage. It completes the catchup process, swaps in the new tables and restarts the node functionality.
func (cs *CatchpointCatchupService) processStageSwitch() (err error) {
err = cs.ledgerAccessor.CompleteCatchup(cs.ctx)
if err != nil {
return cs.abort(fmt.Errorf("processStageSwitch failed to complete catchup : %v", err))
}
err = cs.updateStage(ledger.CatchpointCatchupStateInactive)
if err != nil {
return cs.abort(fmt.Errorf("processStageSwitch failed to update stage : %v", err))
}
cs.updateNodeCatchupMode(false)
// we've completed the catchup, so we want to cancel the context so that the
// run function would exit.
cs.cancelCtxFunc()
return nil
}
// stopOrAbort is called when any of the stage processing function sees that cs.ctx has been canceled. It can be
// due to the end user attempting to abort the current catchpoint catchup operation or due to a node shutdown.
func (cs *CatchpointCatchupService) stopOrAbort() error {
if cs.abortCtx.Err() == context.Canceled {
return cs.abort(context.Canceled)
}
return nil
}
// abort aborts the current catchpoint catchup process, reverting the node to standard operation.
func (cs *CatchpointCatchupService) abort(originatingErr error) error {
outError := originatingErr
err0 := cs.ledgerAccessor.ResetStagingBalances(cs.ctx, false)
if err0 != nil {
outError = fmt.Errorf("unable to reset staging balances : %v; %v", err0, outError)
}
cs.updateNodeCatchupMode(false)
// we want to abort the catchpoint catchup process, and the node already reverted to normal operation.
// as part of returning to normal operation, we've re-created our context. This context needs to be
// canceled so that when we go back to run(), we would exit from there right away.
cs.cancelCtxFunc()
return outError
}
// updateStage updates the current catchpoint catchup stage to the provided new stage.
func (cs *CatchpointCatchupService) updateStage(newStage ledger.CatchpointCatchupState) (err error) {
err = cs.ledgerAccessor.SetState(cs.ctx, newStage)
if err != nil {
return err
}
cs.stage = newStage
return nil
}
// updateNodeCatchupMode requests the node to change its operational mode from
// catchup mode to normal mode and vice versa.
func (cs *CatchpointCatchupService) updateNodeCatchupMode(catchupModeEnabled bool) {
newCtxCh := cs.node.SetCatchpointCatchupMode(catchupModeEnabled)
select {
case newCtx, open := <-newCtxCh:
if open {
cs.ctx, cs.cancelCtxFunc = context.WithCancel(newCtx)
} else {
// channel is closed, this means that the node is stopping
}
case <-cs.ctx.Done():
// the node context was canceled before the SetCatchpointCatchupMode goroutine had
// the chance of completing. At this point, the service is shutting down. However,
// we don't know how long it would take until the node mutex becomes available.
// given that SetCatchpointCatchupMode gave us a non-buffered channel, it might get blocked
// if we don't drain that channel. To resolve that, we will create another goroutine here
// which would drain that channel.
go func() {
// We'll wait here for the above goroutine to complete :
<-newCtxCh
}()
}
}
func (cs *CatchpointCatchupService) updateLedgerFetcherProgress(fetcherStats *ledger.CatchpointCatchupAccessorProgress) {
cs.statsMu.Lock()
defer cs.statsMu.Unlock()
cs.stats.TotalAccounts = fetcherStats.TotalAccounts
cs.stats.ProcessedAccounts = fetcherStats.ProcessedAccounts
cs.stats.ProcessedBytes = fetcherStats.ProcessedBytes
}
// GetStatistics returns a copy of the current catchpoint catchup statistics
func (cs *CatchpointCatchupService) GetStatistics() (out CatchpointCatchupStats) {
cs.statsMu.Lock()
defer cs.statsMu.Unlock()
out = cs.stats
return
}
// updateBlockRetrievalStatistics updates the blocks retrieval statistics by applying the provided deltas
func (cs *CatchpointCatchupService) updateBlockRetrievalStatistics(aquiredBlocksDelta, verifiedBlocksDelta int64) {
cs.statsMu.Lock()
defer cs.statsMu.Unlock()
cs.stats.AcquiredBlocks = uint64(int64(cs.stats.AcquiredBlocks) + aquiredBlocksDelta)
cs.stats.VerifiedBlocks = uint64(int64(cs.stats.VerifiedBlocks) + verifiedBlocksDelta)
}
| 1 | 41,290 | Since this is only counting the failed attempts, can you call it failedAttemptsCount or retryCount instead of attemptsCount? | algorand-go-algorand | go |
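The review above asks for the loop counter in the patch to reflect that it only tracks failed attempts. Below is a minimal, self-contained sketch of that naming pattern; the fetchBlock helper, the retry limit, and the counts are hypothetical stand-ins for illustration, not the actual go-algorand code.

```go
package main

import (
	"errors"
	"fmt"
)

// fetchBlock is a hypothetical stand-in for the real block download; it fails a
// configurable number of times before succeeding, so the retry counter is exercised.
func fetchBlock(round uint64, failuresLeft *int) error {
	if *failuresLeft > 0 {
		*failuresLeft--
		return errors.New("transient download error")
	}
	return nil
}

func main() {
	const maxRetries = 5
	lookback := uint64(3)
	failuresLeft := 2

	blocksFetched := uint64(1) // the first block was already fetched in the previous stage.
	// retryCount counts only failed attempts, matching the name suggested in the review.
	for retryCount := uint64(0); blocksFetched <= lookback; {
		if err := fetchBlock(blocksFetched, &failuresLeft); err != nil {
			retryCount++
			if retryCount > maxRetries {
				fmt.Println("giving up after", retryCount, "failed attempts")
				return
			}
			continue // retry the same block.
		}
		fmt.Println("fetched block", blocksFetched)
		blocksFetched++
	}
}
```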
@@ -21,7 +21,7 @@
"""Installation script for Python nupic package."""
-import os
+import OOOOOOOOOOOos
import pkg_resources
import sys
| 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Installation script for Python nupic package."""
import os
import pkg_resources
import sys
from setuptools import setup, find_packages, Extension
from setuptools.command.test import test as BaseTestCommand
REPO_DIR = os.path.dirname(os.path.realpath(__file__))
def getVersion():
"""
Get version from local file.
"""
with open(os.path.join(REPO_DIR, "VERSION"), "r") as versionFile:
return versionFile.read().strip()
def nupicBindingsPrereleaseInstalled():
"""
Make an attempt to determine if a pre-release version of nupic.bindings is
installed already.
@return: boolean
"""
try:
nupicDistribution = pkg_resources.get_distribution("nupic.bindings")
if pkg_resources.parse_version(nupicDistribution.version).is_prerelease:
# A pre-release dev version of nupic.bindings is installed.
return True
except pkg_resources.DistributionNotFound:
pass # Silently ignore. The absence of nupic.bindings will be handled by
# setuptools by default
return False
def parse_file(requirementFile):
try:
return [
line.strip()
for line in open(requirementFile).readlines()
if not line.startswith("#")
]
except IOError:
return []
class TestCommand(BaseTestCommand):
user_options = [("pytest-args=", "a", "Arguments to pass to py.test")]
def initialize_options(self):
BaseTestCommand.initialize_options(self)
self.pytest_args = ["unit"] # pylint: disable=W0201
def finalize_options(self):
BaseTestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
cwd = os.getcwd()
try:
os.chdir("tests")
errno = pytest.main(self.pytest_args)
finally:
os.chdir(cwd)
sys.exit(errno)
def findRequirements():
"""
Read the requirements.txt file and parse into requirements for setup's
install_requirements option.
"""
requirementsPath = os.path.join(REPO_DIR, "requirements.txt")
requirements = parse_file(requirementsPath)
if nupicBindingsPrereleaseInstalled():
# User has a pre-release version of nupic.bindings installed, which is only
# possible if the user installed and built nupic.bindings from source and
# it is up to the user to decide when to update nupic.bindings. We'll
# quietly remove the entry in requirements.txt so as to not conflate the
# two.
requirements = [req for req in requirements if "nupic.bindings" not in req]
return requirements
if __name__ == "__main__":
requirements = findRequirements()
setup(
name="nupic",
version=getVersion(),
install_requires=requirements,
package_dir = {"": "src"},
packages=find_packages("src"),
namespace_packages = ["nupic"],
package_data={
"nupic.support": ["nupic-default.xml",
"nupic-logging.conf"],
"nupic": ["README.md", "LICENSE.txt"],
"nupic.data": ["*.json"],
"nupic.frameworks.opf.exp_generator": ["*.json", "*.tpl"],
"nupic.frameworks.opf.jsonschema": ["*.json"],
"nupic.swarming.exp_generator": ["*.json", "*.tpl"],
"nupic.swarming.jsonschema": ["*.json"],
"nupic.datafiles": ["*.csv", "*.txt"],
},
cmdclass = {"test": TestCommand},
include_package_data=True,
zip_safe=False,
extras_require = {
# Default requirement based on system type
":platform_system=='Linux' or platform_system=='Darwin'":
["pycapnp==0.5.8"],
# Superseded by platform_system-conditional requirement, but keeping
# empty extra for compatibility as recommended by setuptools doc.
"capnp": [],
},
description="Numenta Platform for Intelligent Computing",
author="Numenta",
author_email="[email protected]",
url="https://github.com/numenta/nupic",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
# It has to be "5 - Production/Stable" or else pypi rejects it!
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
long_description=(
"Numenta Platform for Intelligent Computing: a machine intelligence "
"platform that implements the HTM learning algorithms. HTM is a "
"detailed computational theory of the neocortex. At the core of HTM "
"are time-based continuous learning algorithms that store and recall "
"spatial and temporal patterns. NuPIC is suited to a variety of "
"problems, particularly anomaly detection and prediction of streaming "
"data sources.\n\n"
"For more information, see http://numenta.org or the NuPIC wiki at "
"https://github.com/numenta/nupic/wiki.")
)
| 1 | 21,617 | I don't think this will work. | numenta-nupic | py |
@@ -0,0 +1,11 @@
+using System.IO.Pipelines;
+
+namespace Microsoft.AspNetCore.Server.Kestrel.Internal.Http
+{
+ public interface IHttpParser
+ {
+ bool ParseRequestLine<T>(T handler, ReadableBuffer buffer, out ReadCursor consumed, out ReadCursor examined) where T : IHttpRequestLineHandler;
+
+ bool ParseHeaders<T>(T handler, ReadableBuffer buffer, out ReadCursor consumed, out ReadCursor examined, out int consumedBytes) where T : IHttpHeadersHandler;
+ }
+} | 1 | 1 | 11,709 | Since http2 is coming do we want to call this something more specific? | aspnet-KestrelHttpServer | .cs |
|
@@ -11,8 +11,8 @@
</div>
</nav>
-<%= content_tag :div, class: 'navbar-search navbar navbar-light bg-light', role: 'navigation', aria: { label: t('blacklight.search.header') } do %>
+<div class="navbar-search navbar navbar-light bg-light" role="navigation">
<div class="<%= container_classes %>">
<%= render_search_bar %>
</div>
-<% end %>
+</div> | 1 | <nav class="navbar navbar-expand-md navbar-dark bg-dark topbar" role="navigation">
<div class="<%= container_classes %>">
<%= link_to application_name, root_path, class: 'mb-0 navbar-brand navbar-logo' %>
<button class="navbar-toggler navbar-toggler-right" type="button" data-toggle="collapse" data-bs-toggle="collapse" data-target="#user-util-collapse" data-bs-target="#user-util-collapse" aria-controls="user-util-collapse" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse justify-content-md-end" id="user-util-collapse">
<%= render 'shared/user_util_links' %>
</div>
</div>
</nav>
<%= content_tag :div, class: 'navbar-search navbar navbar-light bg-light', role: 'navigation', aria: { label: t('blacklight.search.header') } do %>
<div class="<%= container_classes %>">
<%= render_search_bar %>
</div>
<% end %>
| 1 | 8,873 | I don't think we want to revert the accessibility issues the tool caught, do we? | projectblacklight-blacklight | rb |
@@ -285,8 +285,11 @@ func (rt *RequestTracker) Accept() (conn net.Conn, err error) {
rt.hostRequests.pruneRequests(rateLimitingWindowStartTime)
originConnections := rt.hostRequests.countOriginConnections(trackerRequest.remoteHost, rateLimitingWindowStartTime)
+ remoteHostIsNonLocal := (!rt.config.DisableLocalhostConnectionRateLimit) || (!isLocalhost(trackerRequest.remoteHost))
+ connectionLimitEnabled := rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0
+
// check the number of connections
- if originConnections > rt.config.ConnectionsRateLimitingCount && rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0 {
+ if originConnections > rt.config.ConnectionsRateLimitingCount && connectionLimitEnabled && remoteHostIsNonLocal {
rt.hostRequestsMu.Unlock()
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_per_ip_tcp_rate_limit"})
rt.log.With("connection", "tcp").With("count", originConnections).Debugf("Rejected connection due to excessive connections attempt rate") | 1 | // Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package network
import (
"fmt"
"net"
"net/http"
"sort"
"time"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
)
const (
// maxHeaderReadTimeout is the time limit where items would remain in the acceptedConnections cache before being pruned.
// certain malicious connections would never get to the http handler, and therefore must be pruned every so often.
maxHeaderReadTimeout = 30 * time.Second
)
// TrackerRequest hold the tracking data associated with a single request.
type TrackerRequest struct {
created time.Time
remoteHost string
remotePort string
remoteAddr string
request *http.Request
otherTelemetryGUID string
otherInstanceName string
otherPublicAddr string
connection net.Conn
noPrune bool
}
// makeTrackerRequest creates a new TrackerRequest.
func makeTrackerRequest(remoteAddr, remoteHost, remotePort string, createTime time.Time, conn net.Conn) *TrackerRequest {
if remoteHost == "" {
remoteHost, remotePort, _ = net.SplitHostPort(remoteAddr)
}
return &TrackerRequest{
created: createTime,
remoteAddr: remoteAddr,
remoteHost: remoteHost,
remotePort: remotePort,
connection: conn,
}
}
// hostIncomingRequests holds all the requests that are originating from a single host.
type hostIncomingRequests struct {
remoteHost string
requests []*TrackerRequest // this is an ordered list, according to the requestsHistory.created
additionalHostRequests map[*TrackerRequest]struct{} // additional requests that aren't included in the "requests", and always assumed to be "alive".
}
// findTimestampIndex finds the first index (i) in the sorted requests array where requests[i].created is greater than t.
// if no such item exists, it returns the index where the item should be inserted.
func (ard *hostIncomingRequests) findTimestampIndex(t time.Time) int {
if len(ard.requests) == 0 {
return 0
}
i := sort.Search(len(ard.requests), func(i int) bool {
return ard.requests[i].created.After(t)
})
return i
}
// convertToAdditionalRequest converts the given trackerRequest into an "additional request".
// unlike regular tracker requests, additional requests do not get pruned.
func (ard *hostIncomingRequests) convertToAdditionalRequest(trackerRequest *TrackerRequest) {
if _, has := ard.additionalHostRequests[trackerRequest]; has {
return
}
i := sort.Search(len(ard.requests), func(i int) bool {
return ard.requests[i].created.After(trackerRequest.created)
})
i--
if i < 0 {
return
}
// we could have several entries with the same timestamp, so we need to consider all of them.
for ; i >= 0; i-- {
if ard.requests[i] == trackerRequest {
break
}
if ard.requests[i].created != trackerRequest.created {
// we can't find the item in the list.
return
}
}
if i < 0 {
return
}
// ok, item was found at index i.
copy(ard.requests[i:], ard.requests[i+1:])
ard.requests[len(ard.requests)-1] = nil
ard.requests = ard.requests[:len(ard.requests)-1]
ard.additionalHostRequests[trackerRequest] = struct{}{}
}
// removeTrackedConnection removes a trackerRequest from the additional requests map
func (ard *hostIncomingRequests) removeTrackedConnection(trackerRequest *TrackerRequest) {
delete(ard.additionalHostRequests, trackerRequest)
}
// add adds the trackerRequest at the correct index within the sorted array.
func (ard *hostIncomingRequests) add(trackerRequest *TrackerRequest) {
// find the new item index.
itemIdx := ard.findTimestampIndex(trackerRequest.created)
if itemIdx >= len(ard.requests) {
// it's going to be added as the last item on the list.
ard.requests = append(ard.requests, trackerRequest)
return
}
if itemIdx == 0 {
// it's going to be added as the first item on the list.
ard.requests = append([]*TrackerRequest{trackerRequest}, ard.requests...)
return
}
// it's going to be added somewhere in the middle.
ard.requests = append(ard.requests[:itemIdx], append([]*TrackerRequest{trackerRequest}, ard.requests[itemIdx:]...)...)
return
}
// countConnections counts the number of connections that occurred after the specified time
func (ard *hostIncomingRequests) countConnections(rateLimitingWindowStartTime time.Time) (count uint) {
i := ard.findTimestampIndex(rateLimitingWindowStartTime)
return uint(len(ard.requests) - i + len(ard.additionalHostRequests))
}
type hostsIncomingMap map[string]*hostIncomingRequests
// pruneRequests cleans stale items from the hostRequests maps
func (him *hostsIncomingMap) pruneRequests(rateLimitingWindowStartTime time.Time) {
// try to eliminate as many entries from a *single* connection. the goal here is not to wipe it clean
// but rather to make a progressive cleanup.
var removeHost string
for host, requestData := range *him {
i := requestData.findTimestampIndex(rateLimitingWindowStartTime)
if i == 0 {
continue
}
requestData.requests = requestData.requests[i:]
if len(requestData.requests) == 0 {
// remove the entire key.
removeHost = host
}
break
}
if removeHost != "" {
delete(*him, removeHost)
}
}
// addRequest adds an entry to the hostRequests map, or updates the item within the map
func (him *hostsIncomingMap) addRequest(trackerRequest *TrackerRequest) {
requestData, has := (*him)[trackerRequest.remoteHost]
if !has {
requestData = &hostIncomingRequests{
remoteHost: trackerRequest.remoteHost,
requests: make([]*TrackerRequest, 0, 1),
additionalHostRequests: make(map[*TrackerRequest]struct{}),
}
(*him)[trackerRequest.remoteHost] = requestData
}
requestData.add(trackerRequest)
}
// countOriginConnections counts the number of connections seen since rateLimitingWindowStartTime coming from the host remoteHost
func (him *hostsIncomingMap) countOriginConnections(remoteHost string, rateLimitingWindowStartTime time.Time) uint {
if requestData, has := (*him)[remoteHost]; has {
return requestData.countConnections(rateLimitingWindowStartTime)
}
return 0
}
// convertToAdditionalRequest converts the given trackerRequest into an "additional request".
func (him *hostsIncomingMap) convertToAdditionalRequest(trackerRequest *TrackerRequest) {
requestData, has := (*him)[trackerRequest.remoteHost]
if !has {
return
}
requestData.convertToAdditionalRequest(trackerRequest)
}
// removeTrackedConnection removes a trackerRequest from the additional requests map
func (him *hostsIncomingMap) removeTrackedConnection(trackerRequest *TrackerRequest) {
requestData, has := (*him)[trackerRequest.remoteHost]
if !has {
return
}
requestData.removeTrackedConnection(trackerRequest)
}
// RequestTracker tracks the incoming request connections
type RequestTracker struct {
downstreamHandler http.Handler
log logging.Logger
config config.Local
// once we detect that we have a misconfigured UseForwardedForAddress, we set this and write a warning message.
misconfiguredUseForwardedForAddress bool
listener net.Listener // this is the downstream listener
hostRequests hostsIncomingMap // maps a request host to a request data (i.e. "1.2.3.4" -> *hostIncomingRequests )
acceptedConnections map[net.Addr]*TrackerRequest // maps a local address interface to a tracked request data (i.e. "1.2.3.4:1560" -> *TrackerRequest ); used to associate connection between the Accept and the ServeHTTP
hostRequestsMu deadlock.Mutex // used to synchronize access to the hostRequests and acceptedConnections variables
httpHostRequests hostsIncomingMap // maps a request host to a request data (i.e. "1.2.3.4" -> *hostIncomingRequests )
httpConnections map[net.Addr]*TrackerRequest // maps a local address interface to a tracked request data (i.e. "1.2.3.4:1560" -> *TrackerRequest ); used to associate connection between the Accept and the ServeHTTP
httpConnectionsMu deadlock.Mutex // used to synchronize access to the httpHostRequests and httpConnections variables
}
// makeRequestsTracker creates a request tracker object.
func makeRequestsTracker(downstreamHandler http.Handler, log logging.Logger, config config.Local) *RequestTracker {
return &RequestTracker{
downstreamHandler: downstreamHandler,
log: log,
config: config,
hostRequests: make(map[string]*hostIncomingRequests, 0),
acceptedConnections: make(map[net.Addr]*TrackerRequest, 0),
httpConnections: make(map[net.Addr]*TrackerRequest, 0),
httpHostRequests: make(map[string]*hostIncomingRequests, 0),
}
}
// requestTrackedConnection is used to track the active connections. In particular, it is used to remove the
// tracked connection entry from the RequestTracker once a connection is closed.
type requestTrackedConnection struct {
net.Conn
tracker *RequestTracker
}
// Close removes the connection from the tracker's connections map and calls the underlying Close function.
func (c *requestTrackedConnection) Close() error {
c.tracker.hostRequestsMu.Lock()
trackerRequest := c.tracker.acceptedConnections[c.Conn.LocalAddr()]
delete(c.tracker.acceptedConnections, c.Conn.LocalAddr())
if trackerRequest != nil {
c.tracker.hostRequests.removeTrackedConnection(trackerRequest)
}
c.tracker.hostRequestsMu.Unlock()
return c.Conn.Close()
}
// Accept waits for and returns the next connection to the listener.
func (rt *RequestTracker) Accept() (conn net.Conn, err error) {
// the following for loop is a bit tricky :
// in the normal use case, we accept the connection and exit right away.
// the only case where the for loop is being iterated is when we are rejecting a connection.
for {
conn, err = rt.listener.Accept()
if err != nil || conn == nil {
return
}
trackerRequest := makeTrackerRequest(conn.RemoteAddr().String(), "", "", time.Now(), conn)
rateLimitingWindowStartTime := trackerRequest.created.Add(-time.Duration(rt.config.ConnectionsRateLimitingWindowSeconds) * time.Second)
rt.hostRequestsMu.Lock()
rt.hostRequests.addRequest(trackerRequest)
rt.hostRequests.pruneRequests(rateLimitingWindowStartTime)
originConnections := rt.hostRequests.countOriginConnections(trackerRequest.remoteHost, rateLimitingWindowStartTime)
// check the number of connections
if originConnections > rt.config.ConnectionsRateLimitingCount && rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0 {
rt.hostRequestsMu.Unlock()
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_per_ip_tcp_rate_limit"})
rt.log.With("connection", "tcp").With("count", originConnections).Debugf("Rejected connection due to excessive connections attempt rate")
rt.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
telemetryspec.ConnectPeerFailEventDetails{
Address: trackerRequest.remoteHost,
Incoming: true,
Reason: "Remote IP Connection TCP Rate Limit",
})
// we've already *doubled* the amount of allowed connections; disconnect right away.
// we don't want to create more go routines beyond this point.
if originConnections > rt.config.ConnectionsRateLimitingCount*2 {
err := conn.Close()
if err != nil {
rt.log.With("connection", "tcp").With("count", originConnections).Debugf("Failed to close connection : %v", err)
}
} else {
// we want to make an attempt to read the connection request and send a response, but not within this go routine -
// this go routine is used single-threaded and should not get blocked.
go rt.sendBlockedConnectionResponse(conn, trackerRequest.created)
}
continue
}
rt.pruneAcceptedConnections(trackerRequest.created.Add(-maxHeaderReadTimeout))
// add an entry to the acceptedConnections so that the ServeHTTP could find the connection quickly.
rt.acceptedConnections[conn.LocalAddr()] = trackerRequest
rt.hostRequestsMu.Unlock()
conn = &requestTrackedConnection{Conn: conn, tracker: rt}
return
}
}
// sendBlockedConnectionResponse reads the incoming connection request followed by sending a "too many requests" response.
func (rt *RequestTracker) sendBlockedConnectionResponse(conn net.Conn, requestTime time.Time) {
defer func() {
err := conn.Close()
if err != nil {
rt.log.With("connection", "tcp").Debugf("Failed to close connection of blocked connection response: %v", err)
}
}()
err := conn.SetReadDeadline(requestTime.Add(500 * time.Millisecond))
if err != nil {
rt.log.With("connection", "tcp").Debugf("Failed to set a read deadline of blocked connection response: %v", err)
return
}
err = conn.SetWriteDeadline(requestTime.Add(500 * time.Millisecond))
if err != nil {
rt.log.With("connection", "tcp").Debugf("Failed to set a write deadline of blocked connection response: %v", err)
return
}
var dummyBuffer [1024]byte
var readingErr error
for readingErr == nil {
_, readingErr = conn.Read(dummyBuffer[:])
}
// this is not normal - usually we want to wait for the HTTP handler to give the response; however, it seems that we're either getting requests faster than the
// http handler can handle, or getting requests that fail before the header retrieval is complete.
// in this case, we want to send our response right away and disconnect. If the client is currently still sending its request, it might not know how to handle
// this correctly. This use case is similar to the issue handled by the go-server in the same manner. ( see "431 Request Header Fields Too Large" in the server.go )
_, err = conn.Write([]byte(
fmt.Sprintf("HTTP/1.1 %d %s\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n%s: %d\r\n\r\n", http.StatusTooManyRequests, http.StatusText(http.StatusTooManyRequests), TooManyRequestsRetryAfterHeader, rt.config.ConnectionsRateLimitingWindowSeconds)))
if err != nil {
rt.log.With("connection", "tcp").Debugf("Failed to write response to a blocked connection response: %v", err)
return
}
}
// pruneAcceptedConnections cleans stale items from the acceptedConnections map; it's synchronized via the acceptedConnectionsMu mutex which is expected to be taken by the caller.
// in case the created is 0, the pruning is disabled for this connection. The HTTP handlers would call Close to have this entry cleared out.
func (rt *RequestTracker) pruneAcceptedConnections(pruneStartDate time.Time) {
localAddrToRemove := []net.Addr{}
for localAddr, request := range rt.acceptedConnections {
if request.noPrune == false && request.created.Before(pruneStartDate) {
localAddrToRemove = append(localAddrToRemove, localAddr)
}
}
for _, localAddr := range localAddrToRemove {
delete(rt.acceptedConnections, localAddr)
}
}
// Close closes the listener.
// Any blocked Accept operations will be unblocked and return errors.
func (rt *RequestTracker) Close() error {
return rt.listener.Close()
}
// Addr returns the listener's network address.
func (rt *RequestTracker) Addr() net.Addr {
return rt.listener.Addr()
}
// Listener initializes the underlying listener, and returns the request tracker wrapping listener
func (rt *RequestTracker) Listener(listener net.Listener) net.Listener {
rt.listener = listener
return rt
}
// GetTrackedRequest returns the tracked request
func (rt *RequestTracker) GetTrackedRequest(request *http.Request) (trackedRequest *TrackerRequest) {
rt.httpConnectionsMu.Lock()
defer rt.httpConnectionsMu.Unlock()
localAddr := request.Context().Value(http.LocalAddrContextKey).(net.Addr)
return rt.httpConnections[localAddr]
}
// GetRequestConnection returns the underlying connection for the given request
func (rt *RequestTracker) GetRequestConnection(request *http.Request) net.Conn {
rt.httpConnectionsMu.Lock()
defer rt.httpConnectionsMu.Unlock()
localAddr := request.Context().Value(http.LocalAddrContextKey).(net.Addr)
return rt.httpConnections[localAddr].connection
}
func (rt *RequestTracker) ServeHTTP(response http.ResponseWriter, request *http.Request) {
// this function is called only after we've fetched all the headers. on some malicious clients, this could get delayed, so we can't rely on the
// tcp-connection established time to align with current time.
rateLimitingWindowStartTime := time.Now().Add(-time.Duration(rt.config.ConnectionsRateLimitingWindowSeconds) * time.Second)
// get the connection local address. Note that it's the interface of an immutable object, so it will be unique and matching the original connection interface.
localAddr := request.Context().Value(http.LocalAddrContextKey).(net.Addr)
rt.hostRequestsMu.Lock()
trackedRequest := rt.acceptedConnections[localAddr]
if trackedRequest != nil {
// update the original tracker request so that it won't get pruned.
if trackedRequest.noPrune == false {
trackedRequest.noPrune = true
rt.hostRequests.convertToAdditionalRequest(trackedRequest)
}
// create a copy, so we can unlock
trackedRequest = makeTrackerRequest(trackedRequest.remoteAddr, trackedRequest.remoteHost, trackedRequest.remotePort, trackedRequest.created, trackedRequest.connection)
}
rt.hostRequestsMu.Unlock()
// we have no request tracker ? no problem; create one on the fly.
if trackedRequest == nil {
trackedRequest = makeTrackerRequest(request.RemoteAddr, "", "", time.Now(), nil)
}
// update the origin address.
rt.updateRequestRemoteAddr(trackedRequest, request)
rt.httpConnectionsMu.Lock()
trackedRequest.request = request
trackedRequest.otherTelemetryGUID, trackedRequest.otherInstanceName, trackedRequest.otherPublicAddr = getCommonHeaders(request.Header)
rt.httpHostRequests.addRequest(trackedRequest)
rt.httpHostRequests.pruneRequests(rateLimitingWindowStartTime)
originConnections := rt.httpHostRequests.countOriginConnections(trackedRequest.remoteHost, rateLimitingWindowStartTime)
rt.httpConnections[localAddr] = trackedRequest
rt.httpConnectionsMu.Unlock()
defer func() {
rt.httpConnectionsMu.Lock()
defer rt.httpConnectionsMu.Unlock()
// now that we're done with it, we can remove the trackedRequest from the httpConnections.
delete(rt.httpConnections, localAddr)
}()
if originConnections > rt.config.ConnectionsRateLimitingCount && rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0 {
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_per_ip_rate_limit"})
rt.log.With("connection", "http").With("count", originConnections).Debugf("Rejected connection due to excessive connections attempt rate")
rt.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
telemetryspec.ConnectPeerFailEventDetails{
Address: trackedRequest.remoteHost,
HostName: trackedRequest.otherTelemetryGUID,
Incoming: true,
InstanceName: trackedRequest.otherInstanceName,
Reason: "Remote IP Connection Rate Limit",
})
response.Header().Add(TooManyRequestsRetryAfterHeader, fmt.Sprintf("%d", rt.config.ConnectionsRateLimitingWindowSeconds))
response.WriteHeader(http.StatusTooManyRequests)
return
}
// send the request downstream; in our case, it would go to the router.
rt.downstreamHandler.ServeHTTP(response, request)
}
// updateRequestRemoteAddr updates the origin IP address in both the trackedRequest as well as in the request.RemoteAddr string
func (rt *RequestTracker) updateRequestRemoteAddr(trackedRequest *TrackerRequest, request *http.Request) {
originIP := rt.getForwardedConnectionAddress(request.Header)
if originIP == nil {
return
}
request.RemoteAddr = originIP.String() + ":" + trackedRequest.remotePort
trackedRequest.remoteHost = originIP.String()
}
// retrieve the origin ip address from the http header, if such exists and it's a valid ip address.
func (rt *RequestTracker) getForwardedConnectionAddress(header http.Header) (ip net.IP) {
if rt.config.UseXForwardedForAddressField == "" {
return
}
forwardedForString := header.Get(rt.config.UseXForwardedForAddressField)
if forwardedForString == "" {
rt.httpConnectionsMu.Lock()
defer rt.httpConnectionsMu.Unlock()
if !rt.misconfiguredUseForwardedForAddress {
rt.log.Warnf("UseForwardedForAddressField is configured as '%s', but no value was retrieved from header", rt.config.UseXForwardedForAddressField)
rt.misconfiguredUseForwardedForAddress = true
}
return
}
ip = net.ParseIP(forwardedForString)
if ip == nil {
// if origin isn't a valid IP address, log this.
rt.log.Warnf("unable to parse origin address: '%s'", forwardedForString)
}
return
}
| 1 | 42,060 | I prefer naming this rateLimitedRemoteHost This can be local host but reads remote Host Is Non Local. | algorand-go-algorand | go |
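The review above suggests renaming remoteHostIsNonLocal to rateLimitedRemoteHost, since the flag really means "this host is subject to the rate limit" rather than "this host is non-local". Below is a small, self-contained sketch of the renamed condition from the patch; the config values and the isLocalhost helper are simplified stand-ins for illustration, not the actual network package implementation.

```go
package main

import "fmt"

// isLocalhost is a simplified stand-in for the helper of the same name in the
// network package; here it only recognises common loopback literals.
func isLocalhost(host string) bool {
	return host == "127.0.0.1" || host == "::1" || host == "localhost"
}

func main() {
	// Hypothetical values standing in for the config.Local fields used in the patch.
	disableLocalhostConnectionRateLimit := true
	connectionsRateLimitingCount := uint64(10)
	connectionsRateLimitingWindowSeconds := uint64(1)

	for _, remoteHost := range []string{"127.0.0.1", "203.0.113.7"} {
		// rateLimitedRemoteHost is the name suggested in the review: it is true when
		// this origin should be counted against the per-IP connection limit.
		rateLimitedRemoteHost := !disableLocalhostConnectionRateLimit || !isLocalhost(remoteHost)
		connectionLimitEnabled := connectionsRateLimitingWindowSeconds > 0 && connectionsRateLimitingCount > 0

		originConnections := uint64(25) // pretend we already counted this many connections from the host.
		if originConnections > connectionsRateLimitingCount && connectionLimitEnabled && rateLimitedRemoteHost {
			fmt.Println(remoteHost, "would be rejected")
		} else {
			fmt.Println(remoteHost, "would be accepted")
		}
	}
}
```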
@@ -105,7 +105,7 @@ export function initDebug() {
`\n\n${getOwnerStack(vnode)}`
);
} else if (type != null && typeof type === 'object') {
- if (type._lastDomChild !== undefined && type._dom !== undefined) {
+ if (type._lastDomChildSibling !== undefined && type._dom !== undefined) {
throw new Error(
`Invalid type passed to createElement(): ${type}\n\n` +
'Did you accidentally pass a JSX literal as JSX twice?\n\n' + | 1 | import { checkPropTypes } from './check-props';
import { options, Component } from 'preact';
import {
ELEMENT_NODE,
DOCUMENT_NODE,
DOCUMENT_FRAGMENT_NODE
} from './constants';
import {
getOwnerStack,
setupComponentStack,
getCurrentVNode,
getDisplayName
} from './component-stack';
const isWeakMapSupported = typeof WeakMap === 'function';
function getClosestDomNodeParent(parent) {
if (!parent) return {};
if (typeof parent.type === 'function') {
return getClosestDomNodeParent(parent._parent);
}
return parent;
}
export function initDebug() {
setupComponentStack();
/* eslint-disable no-console */
let oldBeforeDiff = options._diff;
let oldDiffed = options.diffed;
let oldVnode = options.vnode;
let oldCatchError = options._catchError;
let oldRoot = options._root;
let oldHook = options._hook;
const warnedComponents = !isWeakMapSupported
? null
: {
useEffect: new WeakMap(),
useLayoutEffect: new WeakMap(),
lazyPropTypes: new WeakMap()
};
options._catchError = (error, vnode, oldVNode) => {
let component = vnode && vnode._component;
if (component && typeof error.then === 'function') {
const promise = error;
error = new Error(
`Missing Suspense. The throwing component was: ${getDisplayName(vnode)}`
);
let parent = vnode;
for (; parent; parent = parent._parent) {
if (parent._component && parent._component._childDidSuspend) {
error = promise;
break;
}
}
// We haven't recovered and we know at this point that there is no
// Suspense component higher up in the tree
if (error instanceof Error) {
throw error;
}
}
oldCatchError(error, vnode, oldVNode);
};
options._root = (vnode, parentNode) => {
if (!parentNode) {
throw new Error(
'Undefined parent passed to render(), this is the second argument.\n' +
'Check if the element is available in the DOM/has the correct id.'
);
}
let isValid;
switch (parentNode.nodeType) {
case ELEMENT_NODE:
case DOCUMENT_FRAGMENT_NODE:
case DOCUMENT_NODE:
isValid = true;
break;
default:
isValid = false;
}
if (!isValid) {
let componentName = getDisplayName(vnode);
throw new Error(
`Expected a valid HTML node as a second argument to render. Received ${parentNode} instead: render(<${componentName} />, ${parentNode});`
);
}
if (oldRoot) oldRoot(vnode, parentNode);
};
options._diff = vnode => {
let { type, _parent: parent } = vnode;
let parentVNode = getClosestDomNodeParent(parent);
if (type === undefined) {
throw new Error(
'Undefined component passed to createElement()\n\n' +
'You likely forgot to export your component or might have mixed up default and named imports' +
serializeVNode(vnode) +
`\n\n${getOwnerStack(vnode)}`
);
} else if (type != null && typeof type === 'object') {
if (type._lastDomChild !== undefined && type._dom !== undefined) {
throw new Error(
`Invalid type passed to createElement(): ${type}\n\n` +
'Did you accidentally pass a JSX literal as JSX twice?\n\n' +
` let My${getDisplayName(vnode)} = ${serializeVNode(type)};\n` +
` let vnode = <My${getDisplayName(vnode)} />;\n\n` +
'This usually happens when you export a JSX literal and not the component.' +
`\n\n${getOwnerStack(vnode)}`
);
}
throw new Error(
'Invalid type passed to createElement(): ' +
(Array.isArray(type) ? 'array' : type)
);
}
if (
(type === 'thead' || type === 'tfoot' || type === 'tbody') &&
parentVNode.type !== 'table'
) {
console.error(
'Improper nesting of table. Your <thead/tbody/tfoot> should have a <table> parent.' +
serializeVNode(vnode) +
`\n\n${getOwnerStack(vnode)}`
);
} else if (
type === 'tr' &&
(parentVNode.type !== 'thead' &&
parentVNode.type !== 'tfoot' &&
parentVNode.type !== 'tbody' &&
parentVNode.type !== 'table')
) {
console.error(
'Improper nesting of table. Your <tr> should have a <thead/tbody/tfoot/table> parent.' +
serializeVNode(vnode) +
`\n\n${getOwnerStack(vnode)}`
);
} else if (type === 'td' && parentVNode.type !== 'tr') {
console.error(
'Improper nesting of table. Your <td> should have a <tr> parent.' +
serializeVNode(vnode) +
`\n\n${getOwnerStack(vnode)}`
);
} else if (type === 'th' && parentVNode.type !== 'tr') {
console.error(
'Improper nesting of table. Your <th> should have a <tr>.' +
serializeVNode(vnode) +
`\n\n${getOwnerStack(vnode)}`
);
}
if (
vnode.ref !== undefined &&
typeof vnode.ref !== 'function' &&
typeof vnode.ref !== 'object' &&
!('$$typeof' in vnode) // allow string refs when preact-compat is installed
) {
throw new Error(
`Component's "ref" property should be a function, or an object created ` +
`by createRef(), but got [${typeof vnode.ref}] instead\n` +
serializeVNode(vnode) +
`\n\n${getOwnerStack(vnode)}`
);
}
if (typeof vnode.type === 'string') {
for (const key in vnode.props) {
if (
key[0] === 'o' &&
key[1] === 'n' &&
typeof vnode.props[key] !== 'function' &&
vnode.props[key] != null
) {
throw new Error(
`Component's "${key}" property should be a function, ` +
`but got [${typeof vnode.props[key]}] instead\n` +
serializeVNode(vnode) +
`\n\n${getOwnerStack(vnode)}`
);
}
}
}
// Check prop-types if available
if (typeof vnode.type === 'function' && vnode.type.propTypes) {
if (
vnode.type.displayName === 'Lazy' &&
warnedComponents &&
!warnedComponents.lazyPropTypes.has(vnode.type)
) {
const m =
'PropTypes are not supported on lazy(). Use propTypes on the wrapped component itself. ';
try {
const lazyVNode = vnode.type();
warnedComponents.lazyPropTypes.set(vnode.type, true);
console.warn(
m + `Component wrapped in lazy() is ${getDisplayName(lazyVNode)}`
);
} catch (promise) {
console.warn(
m + "We will log the wrapped component's name once it is loaded."
);
}
}
checkPropTypes(
vnode.type.propTypes,
vnode.props,
getDisplayName(vnode),
serializeVNode(vnode)
);
}
if (oldBeforeDiff) oldBeforeDiff(vnode);
};
options._hook = comp => {
if (!comp) {
throw new Error('Hook can only be invoked from render methods.');
}
if (oldHook) oldHook(comp);
};
const warn = (property, err) => ({
get() {
throw new Error(`getting vnode.${property} is deprecated, ${err}`);
},
set() {
throw new Error(`setting vnode.${property} is not allowed, ${err}`);
}
});
const deprecatedAttributes = {
nodeName: warn('nodeName', 'use vnode.type'),
attributes: warn('attributes', 'use vnode.props'),
children: warn('children', 'use vnode.props.children')
};
options.vnode = vnode => {
let source, self;
if (vnode.props && vnode.props.__source) {
source = vnode.props.__source;
delete vnode.props.__source;
}
if (vnode.props && vnode.props.__self) {
self = vnode.props.__self;
delete vnode.props.__self;
}
vnode.__self = self;
vnode.__source = source;
Object.defineProperties(vnode, deprecatedAttributes);
if (oldVnode) oldVnode(vnode);
};
options.diffed = vnode => {
// Check if the user passed plain objects as children. Note that we cannot
// move this check into `options.vnode` because components can receive
// children in any shape they want (e.g.
// `<MyJSONFormatter>{{ foo: 123, bar: "abc" }}</MyJSONFormatter>`).
// Putting this check in `options.diffed` ensures that
// `vnode._children` is set and that we only validate the children
// that were actually rendered.
if (vnode._children) {
vnode._children.forEach(child => {
if (child && child.type === undefined) {
// Remove internal vnode keys that will always be patched
delete child._parent;
delete child._depth;
const keys = Object.keys(child).join(',');
throw new Error(
`Objects are not valid as a child. Encountered an object with the keys {${keys}}.`
);
}
});
}
/** @type {import('./internal').Component} */
const component = vnode._component;
if (component && component.__hooks) {
let hooks = component.__hooks;
if (Array.isArray(hooks._list)) {
hooks._list.forEach(hook => {
if (hook._factory && (!hook._args || !Array.isArray(hook._args))) {
let componentName = getDisplayName(vnode);
console.warn(
`In ${componentName} you are calling useMemo/useCallback without passing arguments.\n` +
`This is a noop since it will not be able to memoize, it will execute it every render.` +
`\n\n${getOwnerStack(vnode)}`
);
}
});
}
// After paint effects
if (Array.isArray(hooks._pendingEffects)) {
hooks._pendingEffects.forEach(effect => {
if (
!Array.isArray(effect._args) &&
warnedComponents &&
!warnedComponents.useEffect.has(vnode.type)
) {
warnedComponents.useEffect.set(vnode.type, true);
let componentName = getDisplayName(vnode);
console.warn(
'You should provide an array of arguments as the second argument to the "useEffect" hook.\n\n' +
'Not doing so will invoke this effect on every render.\n\n' +
`This effect can be found in the render of ${componentName}.` +
`\n\n${getOwnerStack(vnode)}`
);
}
});
}
// Layout Effects
component._renderCallbacks.forEach(possibleEffect => {
if (
possibleEffect._value &&
!Array.isArray(possibleEffect._args) &&
warnedComponents &&
!warnedComponents.useLayoutEffect.has(vnode.type)
) {
warnedComponents.useLayoutEffect.set(vnode.type, true);
let componentName = getDisplayName(vnode);
console.warn(
'You should provide an array of arguments as the second argument to the "useLayoutEffect" hook.\n\n' +
'Not doing so will invoke this effect on every render.\n\n' +
`This effect can be found in the render of ${componentName}.` +
`\n\n${getOwnerStack(vnode)}`
);
}
});
}
if (oldDiffed) oldDiffed(vnode);
if (vnode._children != null) {
const keys = [];
for (let i = 0; i < vnode._children.length; i++) {
const child = vnode._children[i];
if (!child || child.key == null) continue;
const key = child.key;
if (keys.indexOf(key) !== -1) {
console.error(
'Following component has two or more children with the ' +
`same key attribute: "${key}". This may cause glitches and misbehavior ` +
'in rendering process. Component: \n\n' +
serializeVNode(vnode) +
`\n\n${getOwnerStack(vnode)}`
);
// Break early to not spam the console
break;
}
keys.push(key);
}
}
};
}
const setState = Component.prototype.setState;
Component.prototype.setState = function(update, callback) {
if (this._vnode == null) {
console.warn(
`Calling "this.setState" inside the constructor of a component is a ` +
`no-op and might be a bug in your application. Instead, set ` +
`"this.state = {}" directly.\n\n${getOwnerStack(getCurrentVNode())}`
);
} else if (this._parentDom == null) {
console.warn(
`Can't call "this.setState" on an unmounted component. This is a no-op, ` +
`but it indicates a memory leak in your application. To fix, cancel all ` +
`subscriptions and asynchronous tasks in the componentWillUnmount method.` +
`\n\n${getOwnerStack(this._vnode)}`
);
}
return setState.call(this, update, callback);
};
const forceUpdate = Component.prototype.forceUpdate;
Component.prototype.forceUpdate = function(callback) {
if (this._vnode == null) {
console.warn(
`Calling "this.forceUpdate" inside the constructor of a component is a ` +
`no-op and might be a bug in your application.\n\n${getOwnerStack(
getCurrentVNode()
)}`
);
} else if (this._parentDom == null) {
console.warn(
`Can't call "this.setState" on an unmounted component. This is a no-op, ` +
`but it indicates a memory leak in your application. To fix, cancel all ` +
`subscriptions and asynchronous tasks in the componentWillUnmount method.` +
`\n\n${getOwnerStack(this._vnode)}`
);
}
return forceUpdate.call(this, callback);
};
/**
* Serialize a vnode tree to a string
* @param {import('./internal').VNode} vnode
* @returns {string}
*/
export function serializeVNode(vnode) {
let { props } = vnode;
let name = getDisplayName(vnode);
let attrs = '';
for (let prop in props) {
if (props.hasOwnProperty(prop) && prop !== 'children') {
let value = props[prop];
// If it is an object but doesn't have toString(), use Object.toString
if (typeof value === 'function') {
value = `function ${value.displayName || value.name}() {}`;
}
value =
Object(value) === value && !value.toString
? Object.prototype.toString.call(value)
: value + '';
attrs += ` ${prop}=${JSON.stringify(value)}`;
}
}
let children = props.children;
return `<${name}${attrs}${
children && children.length ? '>..</' + name + '>' : ' />'
}`;
}
| 1 | 15,135 | Hmm I think I might've messed up this condition here... Need to look more at it | preactjs-preact | js |
@@ -118,11 +118,9 @@ class DocumentQueue:
if doc_type == DocTypes.PACKAGE:
if not handle or not package_hash or not pointer_file:
raise ValueError("missing required argument for package document")
- if (
- package_stats
- and not isinstance(package_stats, dict)
- or isinstance(package_stats, dict)
- and any(k not in package_stats for k in ['total_files', 'total_bytes'])
+ if not (
+ package_stats is None
+ or isinstance(package_stats, dict) and {'total_files', 'total_bytes'}.issubset(package_stats)
):
raise ValueError("Malformed package_stats")
body.update({ | 1 | """ core logic for fetching documents from S3 and queueing them locally before
sending to elastic search in memory-limited batches"""
from datetime import datetime
from enum import Enum
from math import floor
from typing import Dict, List
import os
from aws_requests_auth.aws_auth import AWSRequestsAuth
import boto3
from elasticsearch import Elasticsearch, RequestsHttpConnection
from elasticsearch.helpers import bulk
from t4_lambda_shared.utils import separated_env_to_iter
from t4_lambda_shared.preview import ELASTIC_LIMIT_BYTES
CONTENT_INDEX_EXTS = separated_env_to_iter("CONTENT_INDEX_EXTS") or {
".csv",
".ipynb",
".json",
".md",
".parquet",
".rmd",
".tsv",
".txt"
}
EVENT_PREFIX = {
"Created": "ObjectCreated:",
"Removed": "ObjectRemoved:"
}
# See https://amzn.to/2xJpngN for chunk size as a function of container size
CHUNK_LIMIT_BYTES = int(os.getenv('CHUNK_LIMIT_BYTES') or 9_500_000)
ELASTIC_TIMEOUT = 30
MAX_BACKOFF = 360 # seconds
MAX_RETRY = 4 # prevent long-running lambdas due to malformed calls
# signifies that the object is truly deleted, not to be confused with
# s3:ObjectRemoved:DeleteMarkerCreated, which we may see in versioned buckets
# see https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
QUEUE_LIMIT_BYTES = 100_000_000 # 100MB
RETRY_429 = 5
# pylint: disable=super-init-not-called
class RetryError(Exception):
"""Fatal and final error if docs fail after multiple retries"""
def __init__(self, message):
pass
class DocTypes(Enum):
OBJECT = 1 # S3 objects
PACKAGE = 2 # Quilt packages
class DocumentQueue:
"""transient in-memory queue for documents to be indexed"""
def __init__(self, context):
"""constructor"""
self.queue = []
self.size = 0
self.context = context
def append(
self,
event_type: str,
doc_type: DocTypes,
# properties unique to a document type are non-required kwargs
ext: str = '',
handle: str = '',
metadata: str = '',
pointer_file: str = '',
package_hash: str = '',
package_stats: Dict[str, int] = None,
tags: List[str] = (),
text: str = '',
version_id=None,
*,
# common properties are required kwargs
bucket: str,
comment: str = '',
key: str,
etag: str,
last_modified: datetime,
size: int = 0
):
"""format event as a document and then queue the document"""
if not bucket or not key:
raise ValueError(f"bucket={bucket} or key={key} required but missing")
if event_type.startswith(EVENT_PREFIX["Created"]):
_op_type = "index"
elif event_type.startswith(EVENT_PREFIX["Removed"]):
_op_type = "delete"
else:
print("Skipping unrecognized event type {event_type}")
return
# On types and fields, see
# https://www.elastic.co/guide/en/elasticsearch/reference/master/mapping.html
# Set common properties on the document
# BE CAREFUL changing these values, as type changes or missing fields
# can cause exceptions from ES
index_name = bucket
if doc_type == DocTypes.PACKAGE:
index_name += "_packages"
if not index_name:
raise ValueError(f"Can't infer index name; bucket={bucket}, doc_type={doc_type}")
body = {
"_index": index_name,
"_type": "_doc",
"comment": comment,
"etag": etag,
"key": key,
"last_modified": last_modified.isoformat(),
"size": size,
}
if doc_type == DocTypes.PACKAGE:
if not handle or not package_hash or not pointer_file:
raise ValueError("missing required argument for package document")
if (
package_stats
and not isinstance(package_stats, dict)
or isinstance(package_stats, dict)
and any(k not in package_stats for k in ['total_files', 'total_bytes'])
):
raise ValueError("Malformed package_stats")
body.update({
"_id": f"{handle}:{package_hash}",
"handle": handle,
"hash": package_hash,
"metadata": metadata,
"pointer_file": pointer_file,
"tags": ",".join(tags)
})
if package_stats:
body.update({
"package_stats": package_stats,
})
elif doc_type == DocTypes.OBJECT:
body.update({
# Elastic native keys
"_id": f"{key}:{version_id}",
# TODO: remove this field from ES in /enterprise (now deprecated and unused)
# here we explicitly drop the comment
"comment": "",
"content": text, # field for full-text search
"event": event_type,
"ext": ext,
# TODO: remove this field from ES in /enterprise (now deprecated and unused)
"meta_text": "",
"target": "",
"updated": datetime.utcnow().isoformat(),
"version_id": version_id
})
else:
print(f"Skipping unhandled document type: {doc_type}")
self._append_document(body)
if self.size >= QUEUE_LIMIT_BYTES:
self.send_all()
def _append_document(self, doc):
"""append well-formed documents (used for retry or by append())"""
if doc.get("content"):
# document text dominates memory footprint; OK to neglect the
# small fixed size for the JSON metadata
self.size += min(doc["size"], ELASTIC_LIMIT_BYTES)
self.queue.append(doc)
def send_all(self):
"""flush self.queue in 1-2 bulk calls"""
if not self.queue:
return
elastic_host = os.environ["ES_HOST"]
session = boto3.session.Session()
credentials = session.get_credentials().get_frozen_credentials()
awsauth = AWSRequestsAuth(
# These environment variables are automatically set by Lambda
aws_access_key=credentials.access_key,
aws_secret_access_key=credentials.secret_key,
aws_token=credentials.token,
aws_host=elastic_host,
aws_region=session.region_name,
aws_service="es"
)
elastic = Elasticsearch(
hosts=[{"host": elastic_host, "port": 443}],
http_auth=awsauth,
max_backoff=get_time_remaining(self.context) if self.context else MAX_BACKOFF,
# Give ES time to respond when under load
timeout=ELASTIC_TIMEOUT,
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection
)
# For response format see
# https://www.elastic.co/guide/en/elasticsearch/reference/6.7/docs-bulk.html
# (We currently use Elastic 6.7 per quiltdata/deployment search.py)
# note that `elasticsearch` post-processes this response
_, errors = bulk_send(elastic, self.queue)
if errors:
id_to_doc = {d["_id"]: d for d in self.queue}
send_again = []
for error in errors:
# retry index and delete errors
if "index" in error or "delete" in error:
if "index" in error:
inner = error["index"]
if "delete" in error:
inner = error["delete"]
if "_id" in inner:
doc = id_to_doc[inner["_id"]]
# Always retry the source document if we can identify it.
# This catches temporary 403 on index write blocks & other
# transient issues.
send_again.append(doc)
# retry the entire batch
else:
# Unclear what would cause an error that's neither index nor delete
# but if there's an unknown error we need to assume it applies to
# the batch.
send_again = self.queue
            # Last retry (though Elasticsearch itself may retry 429s)
if send_again:
_, errors = bulk_send(elastic, send_again)
if errors:
raise RetryError(
"Failed to load messages into Elastic on second retry.\n"
f"{_}\nErrors: {errors}\nTo resend:{send_again}"
)
# empty the queue
self.size = 0
self.queue = []
def get_time_remaining(context):
"""returns time remaining in seconds before lambda context is shut down"""
time_remaining = floor(context.get_remaining_time_in_millis()/1000)
if time_remaining < 30:
print(
f"Warning: Lambda function has less than {time_remaining} seconds."
" Consider reducing bulk batch size."
)
return time_remaining
def bulk_send(elastic, list_):
"""make a bulk() call to elastic"""
return bulk(
elastic,
list_,
# Some magic numbers to reduce memory pressure
# e.g. see https://github.com/wagtail/wagtail/issues/4554
chunk_size=100, # max number of documents sent in one chunk
# The stated default is max_chunk_bytes=10485760, but with default
# ES will still return an exception stating that the very
# same request size limit has been exceeded
max_chunk_bytes=CHUNK_LIMIT_BYTES,
# number of retries for 429 (too many requests only)
# all other errors handled by our code
max_retries=RETRY_429,
# we'll process errors on our own
raise_on_error=False,
raise_on_exception=False
)
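
A small sketch (separate from the file above) contrasting the package_stats validation as written in append() with the rewritten predicate from this row's patch. As the review comment notes, the only behavioral change is that falsy non-dict values such as 0 or '' now raise as well. The function names and test values below are illustrative only.

    def malformed_old(package_stats):
        # predicate as it appears in append() above
        return (
            package_stats
            and not isinstance(package_stats, dict)
            or isinstance(package_stats, dict)
            and any(k not in package_stats for k in ['total_files', 'total_bytes'])
        )

    def malformed_new(package_stats):
        # predicate as rewritten in the patch
        return not (
            package_stats is None
            or isinstance(package_stats, dict)
            and {'total_files', 'total_bytes'}.issubset(package_stats)
        )

    for stats in [None, {}, {'total_files': 1, 'total_bytes': 2}, 'oops', 0, '']:
        print(repr(stats), bool(malformed_old(stats)), malformed_new(stats))
    # Only the falsy non-dict inputs (0 and '') differ: old -> False, new -> True.
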
| 1 | 19,251 | This also makes the exception to be raised on non-dict falsy values which I think was expected. | quiltdata-quilt | py |
@@ -173,7 +173,7 @@ class Dataset(Element):
# In the 1D case the interfaces should not automatically add x-values
# to supplied data
- _auto_indexable_1d = False
+ _auto_indexable_1d = True
# Define a class used to transform Datasets into other Element types
_conversion_interface = DataConversion | 1 | from __future__ import absolute_import
try:
import itertools.izip as zip
except ImportError:
pass
import numpy as np
import param
from ..dimension import redim
from ..util import dimension_range, unique_iterator
from .interface import Interface, iloc, ndloc
from .array import ArrayInterface
from .dictionary import DictInterface
from .grid import GridInterface
from .multipath import MultiInterface # noqa (API import)
from .image import ImageInterface # noqa (API import)
datatypes = ['dictionary', 'grid']
try:
import pandas as pd # noqa (Availability import)
from .pandas import PandasInterface
datatypes = ['dataframe', 'dictionary', 'grid', 'ndelement', 'array']
DFColumns = PandasInterface
except ImportError:
pass
except Exception as e:
param.main.warning('Pandas interface failed to import with '
'following error: %s' % e)
try:
import iris # noqa (Availability import)
from .iris import CubeInterface # noqa (Conditional API import)
datatypes.append('cube')
except ImportError:
pass
except Exception as e:
param.main.warning('Iris interface failed to import with '
'following error: %s' % e)
try:
import xarray # noqa (Availability import)
from .xarray import XArrayInterface # noqa (Conditional API import)
datatypes.append('xarray')
except ImportError:
pass
try:
from .dask import DaskInterface # noqa (Conditional API import)
datatypes.append('dask')
except ImportError:
pass
if 'array' not in datatypes:
datatypes.append('array')
from ..dimension import Dimension, process_dimensions
from ..element import Element
from ..ndmapping import OrderedDict
from ..spaces import HoloMap, DynamicMap
from .. import util
class DataConversion(object):
"""
DataConversion is a very simple container object which can be
given an existing Dataset Element and provides methods to convert
the Dataset into most other Element types.
"""
def __init__(self, element):
self._element = element
def __call__(self, new_type, kdims=None, vdims=None, groupby=None,
sort=False, **kwargs):
"""
Generic conversion method for Dataset based Element
types. Supply the Dataset Element type to convert to and
optionally the key dimensions (kdims), value dimensions
        (vdims) and the dimensions to group over. Converted Columns
can be automatically sorted via the sort option and kwargs can
be passed through.
"""
if 'mdims' in kwargs:
if groupby:
raise ValueError('Cannot supply both mdims and groupby')
else:
self._element.warning("'mdims' keyword has been renamed "
"to 'groupby'; the name mdims is "
"deprecated and will be removed "
"after version 1.7.")
groupby = kwargs.pop('mdims')
element_params = new_type.params()
kdim_param = element_params['kdims']
vdim_param = element_params['vdims']
if isinstance(kdim_param.bounds[1], int):
ndim = min([kdim_param.bounds[1], len(kdim_param.default)])
else:
ndim = None
nvdim = vdim_param.bounds[1] if isinstance(vdim_param.bounds[1], int) else None
if kdims is None:
kd_filter = groupby or []
if not isinstance(kd_filter, list):
kd_filter = [groupby]
kdims = [kd for kd in self._element.kdims if kd not in kd_filter][:ndim]
elif kdims and not isinstance(kdims, list): kdims = [kdims]
if vdims is None:
vdims = [d for d in self._element.vdims if d not in kdims][:nvdim]
if vdims and not isinstance(vdims, list): vdims = [vdims]
# Checks Element type supports dimensionality
type_name = new_type.__name__
for dim_type, dims in (('kdims', kdims), ('vdims', vdims)):
min_d, max_d = new_type.params(dim_type).bounds
if ((min_d is not None and len(dims) < min_d) or
(max_d is not None and len(dims) > max_d)):
raise ValueError("%s %s must be between length %s and %s." %
(type_name, dim_type, min_d, max_d))
if groupby is None:
groupby = [d for d in self._element.kdims if d not in kdims+vdims]
elif groupby and not isinstance(groupby, list):
groupby = [groupby]
if self._element.interface.gridded:
dropped_kdims = [kd for kd in self._element.kdims if kd not in groupby+kdims]
if dropped_kdims:
selected = self._element.reindex(groupby+kdims, vdims)
else:
selected = self._element
else:
selected = self._element.reindex(groupby+kdims, vdims)
params = {'kdims': [selected.get_dimension(kd, strict=True) for kd in kdims],
'vdims': [selected.get_dimension(vd, strict=True) for vd in vdims],
'label': selected.label}
if selected.group != selected.params()['group'].default:
params['group'] = selected.group
params.update(kwargs)
if len(kdims) == selected.ndims or not groupby:
element = new_type(selected, **params)
return element.sort() if sort else element
group = selected.groupby(groupby, container_type=HoloMap,
group_type=new_type, **params)
if sort:
return group.map(lambda x: x.sort(), [new_type])
else:
return group
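
A hedged usage sketch of the conversion interface documented in __call__ above (reached through the Dataset.to property further down in this file). It assumes holoviews and numpy are installed and roughly this vintage of the API; the column names and element choice are illustrative.

    import numpy as np
    import holoviews as hv

    table = hv.Dataset(
        {'x': np.arange(10), 'cat': ['a', 'b'] * 5, 'y': np.random.rand(10)},
        kdims=['x', 'cat'], vdims=['y'])

    # Convert to Curve elements of y over x, one per value of 'cat',
    # collected in a HoloMap (the default container_type):
    curves = table.to(hv.Curve, kdims='x', vdims='y', groupby='cat')
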
class Dataset(Element):
"""
Dataset provides a general baseclass for Element types that
contain structured data and supports a range of data formats.
The Dataset class supports various methods offering a consistent way
of working with the stored data regardless of the storage format
used. These operations include indexing, selection and various ways
of aggregating or collapsing the data with a supplied function.
"""
datatype = param.List(datatypes,
doc=""" A priority list of the data types to be used for storage
on the .data attribute. If the input supplied to the element
constructor cannot be put into the requested format, the next
format listed will be used until a suitable format is found (or
the data fails to be understood).""")
group = param.String(default='Dataset', constant=True)
# In the 1D case the interfaces should not automatically add x-values
# to supplied data
_auto_indexable_1d = False
# Define a class used to transform Datasets into other Element types
_conversion_interface = DataConversion
# Whether the key dimensions are specified as bins
_binned = False
_vdim_reductions = {}
_kdim_reductions = {}
def __init__(self, data, kdims=None, vdims=None, **kwargs):
if isinstance(data, Element):
pvals = util.get_param_values(data)
kwargs.update([(l, pvals[l]) for l in ['group', 'label']
if l in pvals and l not in kwargs])
kwargs.update(process_dimensions(kdims, vdims))
kdims, vdims = kwargs.get('kdims'), kwargs.get('vdims')
validate_vdims = kwargs.pop('_validate_vdims', True)
initialized = Interface.initialize(type(self), data, kdims, vdims,
datatype=kwargs.get('datatype'))
(data, self.interface, dims, extra_kws) = initialized
super(Dataset, self).__init__(data, **dict(kwargs, **dict(dims, **extra_kws)))
self.interface.validate(self, validate_vdims)
self.redim = redim(self, mode='dataset')
def closest(self, coords=[], **kwargs):
"""
Given a single coordinate or multiple coordinates as
a tuple or list of tuples or keyword arguments matching
        the dimension, closest will find the closest actual x/y
coordinates. Different Element types should implement this
appropriately depending on the space they represent, if the
Element does not support snapping raise NotImplementedError.
"""
if self.ndims > 1:
raise NotImplementedError("Closest method currently only "
"implemented for 1D Elements")
if kwargs:
if len(kwargs) > 1:
raise NotImplementedError("Closest method currently only "
"supports 1D indexes")
samples = list(kwargs.values())[0]
coords = samples if isinstance(samples, list) else [samples]
xs = self.dimension_values(0)
if xs.dtype.kind in 'SO':
raise NotImplementedError("Closest only supported for numeric types")
idxs = [np.argmin(np.abs(xs-coord)) for coord in coords]
return [xs[idx] for idx in idxs]
def sort(self, by=[], reverse=False):
"""
Sorts the data by the values along the supplied dimensions.
"""
if not by: by = self.kdims
if not isinstance(by, list): by = [by]
sorted_columns = self.interface.sort(self, by, reverse)
return self.clone(sorted_columns)
def range(self, dim, data_range=True):
"""
Computes the range of values along a supplied dimension, taking
into account the range and soft_range defined on the Dimension
object.
"""
dim = self.get_dimension(dim)
if dim is None:
return (None, None)
elif all(v is not None and np.isfinite(v) for v in dim.range):
return dim.range
elif dim in self.dimensions() and data_range and len(self):
lower, upper = self.interface.range(self, dim)
else:
lower, upper = (np.NaN, np.NaN)
return dimension_range(lower, upper, dim)
def add_dimension(self, dimension, dim_pos, dim_val, vdim=False, **kwargs):
"""
        Create a new object with an additional key dimension. Requires
the dimension name or object, the desired position in the key
dimensions and a key value scalar or sequence of the same length
as the existing keys.
"""
if isinstance(dimension, (util.basestring, tuple)):
dimension = Dimension(dimension)
if dimension.name in self.kdims:
raise Exception('{dim} dimension already defined'.format(dim=dimension.name))
if vdim:
dims = self.vdims[:]
dims.insert(dim_pos, dimension)
dimensions = dict(vdims=dims)
dim_pos += self.ndims
else:
dims = self.kdims[:]
dims.insert(dim_pos, dimension)
dimensions = dict(kdims=dims)
if issubclass(self.interface, ArrayInterface) and np.asarray(dim_val).dtype != self.data.dtype:
element = self.clone(datatype=['pandas', 'dictionary'])
data = element.interface.add_dimension(element, dimension, dim_pos, dim_val, vdim)
else:
data = self.interface.add_dimension(self, dimension, dim_pos, dim_val, vdim)
return self.clone(data, **dimensions)
def select(self, selection_specs=None, **selection):
"""
Allows selecting data by the slices, sets and scalar values
along a particular dimension. The indices should be supplied as
keywords mapping between the selected dimension and
value. Additionally selection_specs (taking the form of a list
of type.group.label strings, types or functions) may be
supplied, which will ensure the selection is only applied if the
specs match the selected object.
"""
selection = {dim: sel for dim, sel in selection.items()
if dim in self.dimensions()+['selection_mask']}
if (selection_specs and not any(self.matches(sp) for sp in selection_specs)
or not selection):
return self
data = self.interface.select(self, **selection)
if np.isscalar(data):
return data
else:
return self.clone(data)
def reindex(self, kdims=None, vdims=None):
"""
Create a new object with a re-ordered set of dimensions. Allows
converting key dimensions to value dimensions and vice versa.
"""
if kdims is None:
key_dims = [d for d in self.kdims if not vdims or d not in vdims]
else:
key_dims = [self.get_dimension(k, strict=True) for k in kdims]
new_type = None
if vdims is None:
val_dims = [d for d in self.vdims if not kdims or d not in kdims]
else:
val_dims = [self.get_dimension(v, strict=True) for v in vdims]
new_type = self._vdim_reductions.get(len(val_dims), type(self))
data = self.interface.reindex(self, key_dims, val_dims)
return self.clone(data, kdims=key_dims, vdims=val_dims,
new_type=new_type)
def __getitem__(self, slices):
"""
Allows slicing and selecting values in the Dataset object.
Supports multiple indexing modes:
(1) Slicing and indexing along the values of each dimension
in the columns object using either scalars, slices or
sets of values.
(2) Supplying the name of a dimension as the first argument
will return the values along that dimension as a numpy
array.
(3) Slicing of all key dimensions and selecting a single
value dimension by name.
(4) A boolean array index matching the length of the Dataset
object.
"""
slices = util.process_ellipses(self, slices, vdim_selection=True)
if isinstance(slices, np.ndarray) and slices.dtype.kind == 'b':
if not len(slices) == len(self):
raise IndexError("Boolean index must match length of sliced object")
return self.clone(self.select(selection_mask=slices))
elif slices in [(), Ellipsis]:
return self
if not isinstance(slices, tuple): slices = (slices,)
value_select = None
if len(slices) == 1 and slices[0] in self.dimensions():
return self.dimension_values(slices[0])
elif len(slices) == self.ndims+1 and slices[self.ndims] in self.dimensions():
selection = dict(zip(self.dimensions('key', label=True), slices))
value_select = slices[self.ndims]
elif len(slices) == self.ndims+1 and isinstance(slices[self.ndims],
(Dimension,str)):
raise IndexError("%r is not an available value dimension" % slices[self.ndims])
else:
selection = dict(zip(self.dimensions(label=True), slices))
data = self.select(**selection)
if value_select:
if data.shape[0] == 1:
return data[value_select][0]
else:
return data.reindex(vdims=[value_select])
return data
def sample(self, samples=[], closest=True, **kwargs):
"""
Allows sampling of Dataset as an iterator of coordinates
matching the key dimensions, returning a new object containing
just the selected samples. Alternatively may supply kwargs
to sample a coordinate on an object. By default it will attempt
to snap to the nearest coordinate if the Element supports it,
snapping may be disabled with the closest argument.
"""
if kwargs and samples:
raise Exception('Supply explicit list of samples or kwargs, not both.')
elif kwargs:
sample = [slice(None) for _ in range(self.ndims)]
for dim, val in kwargs.items():
sample[self.get_dimension_index(dim)] = val
samples = [tuple(sample)]
# Note: Special handling sampling of gridded 2D data as Curve
# may be replaced with more general handling
# see https://github.com/ioam/holoviews/issues/1173
from ...element import Table, Curve
if len(samples) == 1:
sel = {kd.name: s for kd, s in zip(self.kdims, samples[0])}
dims = [kd for kd, v in sel.items() if not np.isscalar(v)]
selection = self.select(**sel)
# If a 1D cross-section of 2D space return Curve
if self.interface.gridded and self.ndims == 2 and len(dims) == 1:
new_type = Curve
kdims = [self.get_dimension(kd) for kd in dims]
else:
new_type = Table
kdims = self.kdims
if np.isscalar(selection):
selection = [samples[0]+(selection,)]
else:
selection = tuple(selection.columns(kdims+self.vdims).values())
datatype = list(util.unique_iterator(self.datatype+['dataframe', 'dict']))
return self.clone(selection, kdims=kdims, new_type=new_type,
datatype=datatype)
lens = set(len(util.wrap_tuple(s)) for s in samples)
if len(lens) > 1:
raise IndexError('Sample coordinates must all be of the same length.')
if closest:
try:
samples = self.closest(samples)
except NotImplementedError:
pass
samples = [util.wrap_tuple(s) for s in samples]
return self.clone(self.interface.sample(self, samples), new_type=Table)
def reduce(self, dimensions=[], function=None, spreadfn=None, **reduce_map):
"""
Allows reducing the values along one or more key dimension with
the supplied function. The dimensions may be supplied as a list
and a function to apply or a mapping between the dimensions and
functions to apply along each dimension.
"""
if any(dim in self.vdims for dim in dimensions):
raise Exception("Reduce cannot be applied to value dimensions")
function, dims = self._reduce_map(dimensions, function, reduce_map)
dims = [d for d in self.kdims if d not in dims]
return self.aggregate(dims, function, spreadfn)
def aggregate(self, dimensions=None, function=None, spreadfn=None, **kwargs):
"""
Aggregates over the supplied key dimensions with the defined
function.
"""
if function is None:
raise ValueError("The aggregate method requires a function to be specified")
if dimensions is None: dimensions = self.kdims
elif not isinstance(dimensions, list): dimensions = [dimensions]
kdims = [self.get_dimension(d, strict=True) for d in dimensions]
if not len(self):
if spreadfn:
spread_name = spreadfn.__name__
vdims = [d for vd in self.vdims for d in [vd, vd('_'.join([vd.name, spread_name]))]]
else:
vdims = self.vdims
return self.clone([], kdims=kdims, vdims=vdims)
aggregated = self.interface.aggregate(self, kdims, function, **kwargs)
aggregated = self.interface.unpack_scalar(self, aggregated)
ndims = len(dimensions)
min_d, max_d = self.params('kdims').bounds
generic_type = (min_d is not None and ndims < min_d) or (max_d is not None and ndims > max_d)
vdims = self.vdims
if spreadfn:
error = self.interface.aggregate(self, dimensions, spreadfn)
spread_name = spreadfn.__name__
ndims = len(vdims)
error = self.clone(error, kdims=kdims, new_type=Dataset)
combined = self.clone(aggregated, kdims=kdims, new_type=Dataset)
for i, d in enumerate(vdims):
dim = d('_'.join([d.name, spread_name]))
dvals = error.dimension_values(d, flat=False)
combined = combined.add_dimension(dim, ndims+i, dvals, True)
return combined.clone(new_type=Dataset if generic_type else type(self))
if np.isscalar(aggregated):
return aggregated
else:
try:
# Should be checking the dimensions declared on the element are compatible
return self.clone(aggregated, kdims=kdims, vdims=vdims)
except:
datatype = self.params('datatype').default
return self.clone(aggregated, kdims=kdims, vdims=vdims,
new_type=Dataset if generic_type else None,
datatype=datatype)
def groupby(self, dimensions=[], container_type=HoloMap, group_type=None,
dynamic=False, **kwargs):
"""Return the results of a groupby operation over the specified
dimensions as an object of type container_type (expected to be
dictionary-like).
Keys vary over the columns (dimensions) and the corresponding
values are collections of group_type (e.g an Element, list, tuple)
constructed with kwargs (if supplied).
If dynamic is requested container_type is automatically set to
a DynamicMap, allowing dynamic exploration of large
datasets. If the data does not represent a full cartesian grid
of the requested dimensions some Elements will be empty.
"""
if not isinstance(dimensions, list): dimensions = [dimensions]
if not len(dimensions): dimensions = self.dimensions('key', True)
if group_type is None: group_type = type(self)
dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
dim_names = [d.name for d in dimensions]
if dynamic:
group_dims = [kd for kd in self.kdims if kd not in dimensions]
kdims = [self.get_dimension(d) for d in kwargs.pop('kdims', group_dims)]
drop_dim = len(group_dims) != len(kdims)
group_kwargs = dict(util.get_param_values(self), kdims=kdims)
group_kwargs.update(kwargs)
def load_subset(*args):
constraint = dict(zip(dim_names, args))
group = self.select(**constraint)
if np.isscalar(group):
return group_type(([group],), group=self.group,
label=self.label, vdims=self.vdims)
data = group.reindex(kdims)
if drop_dim and self.interface.gridded:
data = data.columns()
return group_type(data, **group_kwargs)
dynamic_dims = [d(values=list(self.interface.values(self, d.name, False)))
for d in dimensions]
return DynamicMap(load_subset, kdims=dynamic_dims)
return self.interface.groupby(self, dim_names, container_type,
group_type, **kwargs)
def __len__(self):
"""
Returns the number of rows in the Dataset object.
"""
return self.interface.length(self)
def __nonzero__(self):
return self.interface.nonzero(self)
__bool__ = __nonzero__
@property
def shape(self):
"Returns the shape of the data."
return self.interface.shape(self)
def dimension_values(self, dim, expanded=True, flat=True):
"""
Returns the values along a particular dimension. If unique
        values are requested, only the unique values will be returned.
"""
dim = self.get_dimension(dim, strict=True)
return self.interface.values(self, dim, expanded, flat)
def get_dimension_type(self, dim):
"""
        Returns the specified Dimension type if specified, or the
        type of the dimension values if they are consistent;
        otherwise None is returned.
"""
dim_obj = self.get_dimension(dim)
if dim_obj and dim_obj.type is not None:
return dim_obj.type
return self.interface.dimension_type(self, dim_obj)
def dframe(self, dimensions=None):
"""
Returns the data in the form of a DataFrame. Supplying a list
of dimensions filters the dataframe. If the data is already
a DataFrame a copy is returned.
"""
if dimensions:
dimensions = [self.get_dimension(d, strict=True).name for d in dimensions]
return self.interface.dframe(self, dimensions)
def columns(self, dimensions=None):
if dimensions is None:
dimensions = self.dimensions()
else:
dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
return OrderedDict([(d.name, self.dimension_values(d)) for d in dimensions])
@property
def to(self):
"""
Property to create a conversion interface with methods to
convert to other Element types.
"""
return self._conversion_interface(self)
def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):
"""
Returns a clone of the object with matching parameter values
containing the specified args and kwargs.
If shared_data is set to True and no data explicitly supplied,
the clone will share data with the original. May also supply
a new_type, which will inherit all shared parameters.
"""
if 'datatype' not in overrides:
datatypes = [self.interface.datatype] + self.datatype
overrides['datatype'] = list(unique_iterator(datatypes))
return super(Dataset, self).clone(data, shared_data, new_type, *args, **overrides)
@property
def iloc(self):
"""
Returns an iloc object providing a convenient interface to
slice and index into the Dataset using row and column indices.
Allow selection by integer index, slice and list of integer
indices and boolean arrays.
Examples:
* Index the first row and column:
dataset.iloc[0, 0]
* Select rows 1 and 2 with a slice:
dataset.iloc[1:3, :]
* Select with a list of integer coordinates:
dataset.iloc[[0, 2, 3]]
"""
return iloc(self)
@property
def ndloc(self):
"""
Returns an ndloc object providing nd-array like indexing for
gridded datasets. Follows NumPy array indexing conventions,
allowing for indexing, slicing and selecting a list of indices
on multi-dimensional arrays using integer indices. The order
of array indices is inverted relative to the Dataset key
dimensions, e.g. an Image with key dimensions 'x' and 'y' can
be indexed with ``image.ndloc[iy, ix]``, where ``iy`` and
``ix`` are integer indices along the y and x dimensions.
Examples:
* Index value in 2D array:
dataset.ndloc[3, 1]
* Slice along y-axis of 2D array:
dataset.ndloc[2:5, :]
* Vectorized (non-orthogonal) indexing along x- and y-axes:
dataset.ndloc[[1, 2, 3], [0, 2, 3]]
"""
return ndloc(self)
# Aliases for pickle backward compatibility
Columns = Dataset
ArrayColumns = ArrayInterface
DictColumns = DictInterface
GridColumns = GridInterface
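
A short sketch of what the _auto_indexable_1d flag touched by this row's patch controls (the review comment questions flipping the Dataset default). On 1D chart elements, where auto-indexing is enabled, a bare list of values receives an implicit integer key dimension. This assumes holoviews is installed; the values are illustrative.

    import holoviews as hv

    curve = hv.Curve([3.0, 1.0, 4.0, 1.5])      # only y-values supplied
    print(curve.dimension_values('x'))          # [0 1 2 3], added automatically
    print(curve.dimension_values('y'))          # [3.  1.  4.  1.5]
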
| 1 | 20,077 | Is this correct? The auto indexing behavior caused problems fixed in a recently merged PR... | holoviz-holoviews | py |
@@ -6,9 +6,9 @@ setup(
py_modules=["rtool"],
install_requires=[
"click>=6.2, <7.0",
- "twine>=1.6.5, <1.9",
+ "twine>=1.6.5, <1.10",
"pysftp==0.2.8",
- "cryptography>=1.6, <1.7",
+ "cryptography>=1.6, <1.9",
],
entry_points={
"console_scripts": [ | 1 | from setuptools import setup
setup(
name='mitmproxy-rtool',
version="1.0",
py_modules=["rtool"],
install_requires=[
"click>=6.2, <7.0",
"twine>=1.6.5, <1.9",
"pysftp==0.2.8",
"cryptography>=1.6, <1.7",
],
entry_points={
"console_scripts": [
"rtool=rtool:cli",
],
},
)
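
A quick check (separate from the setup.py above) of what the pinned ranges accept and what bumping the minimum, as the review comment suggests, would exclude. It uses the 'packaging' library; the sample version numbers are illustrative.

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    old_pin = SpecifierSet(">=1.6, <1.7")        # cryptography pin before the patch
    new_pin = SpecifierSet(">=1.6, <1.9")        # pin after the patch
    floor_bumped = SpecifierSet(">=1.7, <1.9")   # what raising the minimum would look like

    for v in ["1.6.0", "1.7.2", "1.8.1"]:
        print(v, Version(v) in old_pin, Version(v) in new_pin, Version(v) in floor_bumped)
    # 1.6.0: True  True  False  <- still allowed after the patch, excluded if the floor moves
    # 1.7.2: False True  True
    # 1.8.1: False True  True
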
| 1 | 13,258 | Hmm, didn't we just find out that 1.6 *does NOT* work? We should bump the minimum as well. | mitmproxy-mitmproxy | py |
@@ -0,0 +1,13 @@
+#include <iostream>
+#include <rai/lib/utility.hpp>
+
+void release_assert_internal (bool check, const char * check_expr, const char * file, unsigned int line)
+{
+ if (check)
+ {
+ return;
+ }
+
+ std::cerr << "Assertion (" << check_expr << ") failed " << file << ":" << line << std::endl;
+ abort();
+} | 1 | 1 | 14,071 | Will calling `abort()` cause problems for our test suite? | nanocurrency-nano-node | cpp |
|
@@ -292,6 +292,11 @@ ast_result_t pass_expr(ast_t** astp, pass_opt_t* options)
case TK_ADDRESS: r = expr_addressof(options, ast); break;
case TK_DIGESTOF: r = expr_digestof(options, ast); break;
+ case TK_OBJECT:
+ if(!expr_object(options, astp))
+ return AST_FATAL;
+ break;
+
case TK_LAMBDA:
if(!expr_lambda(options, astp))
return AST_FATAL; | 1 | #include "expr.h"
#include "../expr/literal.h"
#include "../expr/reference.h"
#include "../expr/operator.h"
#include "../expr/postfix.h"
#include "../expr/call.h"
#include "../expr/control.h"
#include "../expr/match.h"
#include "../expr/array.h"
#include "../expr/ffi.h"
#include "../expr/lambda.h"
#include "ponyassert.h"
static bool is_numeric_primitive(const char* name)
{
if(name == stringtab("U8") ||
name == stringtab("I8") ||
name == stringtab("U16") ||
name == stringtab("I16") ||
name == stringtab("U32") ||
name == stringtab("I32") ||
name == stringtab("U64") ||
name == stringtab("I64") ||
name == stringtab("U128") ||
name == stringtab("I128") ||
name == stringtab("ULong") ||
name == stringtab("ILong") ||
name == stringtab("USize") ||
name == stringtab("ISize") ||
name == stringtab("F32") ||
name == stringtab("F64"))
return true;
return false;
}
bool is_result_needed(ast_t* ast)
{
ast_t* parent = ast_parent(ast);
switch(ast_id(parent))
{
case TK_SEQ:
// If we're not the last element, we don't need the result.
if(ast_sibling(ast) != NULL)
return false;
return is_result_needed(parent);
case TK_IF:
case TK_IFDEF:
case TK_WHILE:
case TK_MATCH:
// Condition needed, body/else needed only if parent needed.
if(ast_child(parent) == ast)
return true;
return is_result_needed(parent);
case TK_REPEAT:
// Cond needed, body/else needed only if parent needed.
if(ast_childidx(parent, 1) == ast)
return true;
return is_result_needed(parent);
case TK_CASE:
// Pattern, guard needed, body needed only if parent needed
if(ast_childidx(parent, 2) != ast)
return true;
return is_result_needed(parent);
case TK_CASES:
case TK_TRY:
case TK_TRY_NO_CHECK:
case TK_RECOVER:
// Only if parent needed.
return is_result_needed(parent);
case TK_NEW:
{
// Only if it is a numeric primitive constructor.
ast_t* type = ast_childidx(parent, 4);
pony_assert(ast_id(type) == TK_NOMINAL);
const char* pkg_name = ast_name(ast_child(type));
const char* type_name = ast_name(ast_childidx(type, 1));
if(pkg_name == stringtab("$0")) // Builtin package.
return is_numeric_primitive(type_name);
return false;
}
case TK_BE:
// Result of a behaviour isn't needed.
return false;
case TK_BECHAIN:
case TK_FUNCHAIN:
// Result of a chained method isn't needed.
return false;
default: {}
}
// All others needed.
return true;
}
bool is_method_result(typecheck_t* t, ast_t* ast)
{
if(ast == t->frame->method_body)
return true;
ast_t* parent = ast_parent(ast);
switch(ast_id(parent))
{
case TK_SEQ:
// More expressions in a sequence means we're not the result.
if(ast_sibling(ast) != NULL)
return false;
break;
case TK_IF:
case TK_WHILE:
case TK_MATCH:
// The condition is not the result.
if(ast_child(parent) == ast)
return false;
break;
case TK_REPEAT:
// The condition is not the result.
if(ast_childidx(parent, 1) == ast)
return false;
break;
case TK_CASE:
// The pattern and the guard are not the result.
if(ast_childidx(parent, 2) != ast)
return false;
break;
case TK_CASES:
case TK_RECOVER:
// These can be results.
break;
case TK_TRY:
case TK_TRY_NO_CHECK:
// The finally block is not the result.
if(ast_childidx(parent, 2) == ast)
return false;
break;
default:
// Other expressions are not results.
return false;
}
return is_method_result(t, parent);
}
bool is_method_return(typecheck_t* t, ast_t* ast)
{
ast_t* parent = ast_parent(ast);
if(ast_id(parent) == TK_SEQ)
{
parent = ast_parent(parent);
if(ast_id(parent) == TK_RETURN)
return true;
}
return is_method_result(t, ast);
}
bool is_typecheck_error(ast_t* type)
{
if(type == NULL)
return true;
if(ast_id(type) == TK_INFERTYPE || ast_id(type) == TK_ERRORTYPE)
return true;
return false;
}
bool is_control_type(ast_t* type)
{
if(type == NULL)
return true;
switch(ast_id(type))
{
case TK_IF:
case TK_TRY:
case TK_MATCH:
case TK_CASES:
case TK_WHILE:
case TK_REPEAT:
case TK_BREAK:
case TK_CONTINUE:
case TK_RETURN:
case TK_ERROR:
case TK_COMPILE_ERROR:
return true;
default: {}
}
return false;
}
ast_result_t pass_pre_expr(ast_t** astp, pass_opt_t* options)
{
(void)options;
ast_t* ast = *astp;
switch(ast_id(ast))
{
case TK_USE:
// Don't look in use commands to avoid false type errors from the guard
return AST_IGNORE;
default: {}
}
return AST_OK;
}
ast_result_t pass_expr(ast_t** astp, pass_opt_t* options)
{
ast_t* ast = *astp;
bool r = true;
switch(ast_id(ast))
{
case TK_PRIMITIVE:
case TK_STRUCT:
case TK_CLASS:
case TK_ACTOR:
case TK_TRAIT:
case TK_INTERFACE: r = expr_provides(options, ast); break;
case TK_NOMINAL: r = expr_nominal(options, astp); break;
case TK_FVAR:
case TK_FLET:
case TK_EMBED: r = expr_field(options, ast); break;
case TK_PARAM: r = expr_param(options, ast); break;
case TK_NEW:
case TK_BE:
case TK_FUN: r = expr_fun(options, ast); break;
case TK_SEQ: r = expr_seq(options, ast); break;
case TK_VAR:
case TK_LET: r = expr_local(options, ast); break;
case TK_BREAK: r = expr_break(options, ast); break;
case TK_CONTINUE: r = expr_continue(options, ast); break;
case TK_RETURN: r = expr_return(options, ast); break;
case TK_IS:
case TK_ISNT: r = expr_identity(options, ast); break;
case TK_ASSIGN: r = expr_assign(options, ast); break;
case TK_CONSUME: r = expr_consume(options, ast); break;
case TK_RECOVER: r = expr_recover(options, ast); break;
case TK_DOT: r = expr_dot(options, astp); break;
case TK_TILDE: r = expr_tilde(options, astp); break;
case TK_CHAIN: r = expr_chain(options, astp); break;
case TK_QUALIFY: r = expr_qualify(options, astp); break;
case TK_CALL: r = expr_call(options, astp); break;
case TK_IFDEF:
case TK_IF: r = expr_if(options, ast); break;
case TK_WHILE: r = expr_while(options, ast); break;
case TK_REPEAT: r = expr_repeat(options, ast); break;
case TK_TRY_NO_CHECK:
case TK_TRY: r = expr_try(options, ast); break;
case TK_MATCH: r = expr_match(options, ast); break;
case TK_CASES: r = expr_cases(options, ast); break;
case TK_CASE: r = expr_case(options, ast); break;
case TK_MATCH_CAPTURE:
r = expr_match_capture(options, ast); break;
case TK_TUPLE: r = expr_tuple(options, ast); break;
case TK_ARRAY: r = expr_array(options, astp); break;
case TK_REFERENCE: r = expr_reference(options, astp); break;
case TK_THIS: r = expr_this(options, ast); break;
case TK_TRUE:
case TK_FALSE: r = expr_literal(options, ast, "Bool"); break;
case TK_ERROR: r = expr_error(options, ast); break;
case TK_COMPILE_ERROR:
r = expr_compile_error(options, ast); break;
case TK_COMPILE_INTRINSIC:
r = expr_compile_intrinsic(options, ast); break;
case TK_LOCATION: r = expr_location(options, ast); break;
case TK_ADDRESS: r = expr_addressof(options, ast); break;
case TK_DIGESTOF: r = expr_digestof(options, ast); break;
case TK_LAMBDA:
if(!expr_lambda(options, astp))
return AST_FATAL;
break;
case TK_INT:
// Integer literals can be integers or floats
make_literal_type(ast);
break;
case TK_FLOAT:
make_literal_type(ast);
break;
case TK_STRING:
if(ast_id(ast_parent(ast)) == TK_PACKAGE)
return AST_OK;
r = expr_literal(options, ast, "String");
break;
case TK_FFICALL:
r = expr_ffi(options, ast);
default: {}
}
if(!r)
{
pony_assert(errors_get_count(options->check.errors) > 0);
return AST_ERROR;
}
// Can't use ast here, it might have changed
symtab_t* symtab = ast_get_symtab(*astp);
if(symtab != NULL && !symtab_check_all_defined(symtab, options->check.errors))
return AST_ERROR;
return AST_OK;
}
| 1 | 10,042 | for my own edificaton, what's this? | ponylang-ponyc | c |
@@ -248,13 +248,14 @@ func (sf *factory) NewWorkingSet() (WorkingSet, error) {
// Commit persists all changes in RunActions() into the DB
func (sf *factory) Commit(ws WorkingSet) error {
+ if ws == nil {
+ return nil
+ }
sf.mutex.Lock()
defer sf.mutex.Unlock()
- if ws != nil {
- if sf.currentChainHeight != ws.Version() {
- // another working set with correct version already committed, do nothing
- return nil
- }
+ if sf.currentChainHeight != ws.Version() {
+ // another working set with correct version already committed, do nothing
+ return nil
}
if err := ws.Commit(); err != nil {
return errors.Wrap(err, "failed to commit working set") | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package state
import (
"context"
"math/big"
"sync"
"github.com/boltdb/bolt"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/iotxaddress"
"github.com/iotexproject/iotex-core/logger"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/trie"
)
var (
// ErrNotEnoughBalance is the error that the balance is not enough
ErrNotEnoughBalance = errors.New("not enough balance")
	// ErrStateNotExist is the error that the state does not exist
ErrStateNotExist = errors.New("state does not exist")
// ErrAccountCollision is the error that the account already exists
ErrAccountCollision = errors.New("account already exists")
)
const (
// CurrentHeightKey indicates the key of current factory height in underlying DB
CurrentHeightKey = "currentHeight"
// AccountTrieRootKey indicates the key of accountTrie root hash in underlying DB
AccountTrieRootKey = "accountTrieRoot"
)
type (
// Factory defines an interface for managing states
Factory interface {
lifecycle.StartStopper
// Accounts
Balance(string) (*big.Int, error)
Nonce(string) (uint64, error) // Note that Nonce starts with 1.
AccountState(string) (*Account, error)
RootHash() hash.Hash32B
Height() (uint64, error)
NewWorkingSet() (WorkingSet, error)
Commit(WorkingSet) error
// Candidate pool
CandidatesByHeight(uint64) ([]*Candidate, error)
State(hash.PKHash, State) (State, error)
AddActionHandlers(...ActionHandler)
}
// factory implements StateFactory interface, tracks changes to account/contract and batch-commits to DB
factory struct {
lifecycle lifecycle.Lifecycle
mutex sync.RWMutex
currentChainHeight uint64
numCandidates uint
rootHash hash.Hash32B // new root hash after running executions in this block
accountTrie trie.Trie // global state trie
dao db.KVStore // the underlying DB for account/contract storage
actionHandlers []ActionHandler // the handlers to handle actions
}
	// ActionHandler is the interface for the action handlers. For each incoming action, the assembled action handlers will be
// called one by one to process it. ActionHandler implementation is supposed to parse the sub-type of the action to
// decide if it wants to handle this action or not.
ActionHandler interface {
Handle(context.Context, action.Action, WorkingSet) (*action.Receipt, error)
}
)
// FactoryOption sets Factory construction parameter
type FactoryOption func(*factory, config.Config) error
// PrecreatedTrieDBOption uses pre-created trie DB for state factory
func PrecreatedTrieDBOption(kv db.KVStore) FactoryOption {
return func(sf *factory, cfg config.Config) (err error) {
if kv == nil {
return errors.New("Invalid empty trie db")
}
if err = kv.Start(context.Background()); err != nil {
return errors.Wrap(err, "failed to start trie db")
}
sf.dao = kv
// get state trie root
if sf.rootHash, err = sf.getRoot(trie.AccountKVNameSpace, AccountTrieRootKey); err != nil {
return errors.Wrap(err, "failed to get accountTrie's root hash from underlying DB")
}
if sf.accountTrie, err = trie.NewTrie(sf.dao, trie.AccountKVNameSpace, sf.rootHash); err != nil {
return errors.Wrap(err, "failed to generate accountTrie from config")
}
return nil
}
}
// DefaultTrieOption creates trie from config for state factory
func DefaultTrieOption() FactoryOption {
return func(sf *factory, cfg config.Config) (err error) {
dbPath := cfg.Chain.TrieDBPath
if len(dbPath) == 0 {
return errors.New("Invalid empty trie db path")
}
trieDB := db.NewBoltDB(dbPath, cfg.DB)
if err = trieDB.Start(context.Background()); err != nil {
return errors.Wrap(err, "failed to start trie db")
}
sf.dao = trieDB
// get state trie root
if sf.rootHash, err = sf.getRoot(trie.AccountKVNameSpace, AccountTrieRootKey); err != nil {
return errors.Wrap(err, "failed to get accountTrie's root hash from underlying DB")
}
if sf.accountTrie, err = trie.NewTrie(sf.dao, trie.AccountKVNameSpace, sf.rootHash); err != nil {
return errors.Wrap(err, "failed to generate accountTrie from config")
}
return nil
}
}
// InMemTrieOption creates in memory trie for state factory
func InMemTrieOption() FactoryOption {
return func(sf *factory, cfg config.Config) (err error) {
trieDB := db.NewMemKVStore()
if err = trieDB.Start(context.Background()); err != nil {
return errors.Wrap(err, "failed to start trie db")
}
sf.dao = trieDB
// get state trie root
if sf.rootHash, err = sf.getRoot(trie.AccountKVNameSpace, AccountTrieRootKey); err != nil {
return errors.Wrap(err, "failed to get accountTrie's root hash from underlying DB")
}
if sf.accountTrie, err = trie.NewTrie(sf.dao, trie.AccountKVNameSpace, sf.rootHash); err != nil {
return errors.Wrap(err, "failed to generate accountTrie from config")
}
return nil
}
}
// NewFactory creates a new state factory
func NewFactory(cfg config.Config, opts ...FactoryOption) (Factory, error) {
sf := &factory{
currentChainHeight: 0,
numCandidates: cfg.Chain.NumCandidates,
}
for _, opt := range opts {
if err := opt(sf, cfg); err != nil {
logger.Error().Err(err).Msgf("Failed to execute state factory creation option %p", opt)
return nil, err
}
}
if sf.accountTrie != nil {
sf.lifecycle.Add(sf.accountTrie)
}
return sf, nil
}
func (sf *factory) Start(ctx context.Context) error {
sf.mutex.Lock()
defer sf.mutex.Unlock()
return sf.lifecycle.OnStart(ctx)
}
func (sf *factory) Stop(ctx context.Context) error {
sf.mutex.Lock()
defer sf.mutex.Unlock()
return sf.lifecycle.OnStop(ctx)
}
// AddActionHandlers adds action handlers to the state factory
func (sf *factory) AddActionHandlers(actionHandlers ...ActionHandler) {
sf.mutex.Lock()
defer sf.mutex.Unlock()
sf.actionHandlers = append(sf.actionHandlers, actionHandlers...)
}
//======================================
// account functions
//======================================
// Balance returns balance
func (sf *factory) Balance(addr string) (*big.Int, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
account, err := sf.accountState(addr)
if err != nil {
return nil, err
}
return account.Balance, nil
}
// Nonce returns the Nonce if the account exists
func (sf *factory) Nonce(addr string) (uint64, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
account, err := sf.accountState(addr)
if err != nil {
return 0, err
}
return account.Nonce, nil
}
// AccountState returns the confirmed account state on the chain
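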
func (sf *factory) AccountState(addr string) (*Account, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.accountState(addr)
}
// RootHash returns the hash of the root node of the state trie
func (sf *factory) RootHash() hash.Hash32B {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.rootHash
}
// Height returns factory's height
func (sf *factory) Height() (uint64, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
height, err := sf.dao.Get(trie.AccountKVNameSpace, []byte(CurrentHeightKey))
if err != nil {
return 0, errors.Wrap(err, "failed to get factory's height from underlying DB")
}
return byteutil.BytesToUint64(height), nil
}
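// NewWorkingSet creates a fresh working set anchored at the factory's current
// chain height and trie root, wired with the registered action handlers.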
func (sf *factory) NewWorkingSet() (WorkingSet, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return NewWorkingSet(sf.currentChainHeight, sf.dao, sf.rootHash, sf.actionHandlers)
}
// Commit persists all changes in RunActions() into the DB
func (sf *factory) Commit(ws WorkingSet) error {
sf.mutex.Lock()
defer sf.mutex.Unlock()
if ws != nil {
if sf.currentChainHeight != ws.Version() {
// another working set with correct version already committed, do nothing
return nil
}
}
if err := ws.Commit(); err != nil {
return errors.Wrap(err, "failed to commit working set")
}
// Update chain height and root
sf.currentChainHeight = ws.Height()
sf.rootHash = ws.RootHash()
if err := sf.accountTrie.SetRoot(sf.rootHash); err != nil {
return errors.Wrap(err, "failed to commit working set")
}
return nil
}
//======================================
// Candidate functions
//======================================
// CandidatesByHeight returns array of Candidates in candidate pool of a given height
func (sf *factory) CandidatesByHeight(height uint64) ([]*Candidate, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
// Load Candidates on the given height from underlying db
candidatesBytes, err := sf.dao.Get(trie.CandidateKVNameSpace, byteutil.Uint64ToBytes(height))
if err != nil {
return []*Candidate{}, errors.Wrapf(err, "failed to get candidates on Height %d", height)
}
var candidates CandidateList
if err := candidates.Deserialize(candidatesBytes); err != nil {
return []*Candidate{}, errors.Wrapf(err, "failed to get candidates on height %d", height)
}
if len(candidates) > int(sf.numCandidates) {
candidates = candidates[:sf.numCandidates]
}
return candidates, nil
}
// State returns a confirmed state in the state factory
func (sf *factory) State(addr hash.PKHash, state State) (State, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.state(addr, state)
}
//======================================
// private trie constructor functions
//======================================
func (sf *factory) getRoot(nameSpace string, key string) (hash.Hash32B, error) {
var trieRoot hash.Hash32B
switch root, err := sf.dao.Get(nameSpace, []byte(key)); errors.Cause(err) {
case nil:
trieRoot = byteutil.BytesTo32B(root)
case bolt.ErrBucketNotFound:
trieRoot = trie.EmptyRoot
default:
return hash.ZeroHash32B, err
}
return trieRoot, nil
}
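// state loads the raw bytes stored under addr from the account trie and
// deserializes them into the provided State; a wrapped ErrStateNotExist is
// returned when no entry exists for addr.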
func (sf *factory) state(addr hash.PKHash, state State) (State, error) {
data, err := sf.accountTrie.Get(addr[:])
if err != nil {
if errors.Cause(err) == trie.ErrNotExist {
return nil, errors.Wrapf(ErrStateNotExist, "state of %x doesn't exist", addr)
}
return nil, errors.Wrapf(err, "error when getting the state of %x", addr)
}
if err := state.Deserialize(data); err != nil {
return nil, errors.Wrapf(err, "error when deserializing state data into %T", state)
}
return state, nil
}
func (sf *factory) accountState(addr string) (*Account, error) {
pkHash, err := iotxaddress.AddressToPKHash(addr)
if err != nil {
return nil, errors.Wrap(err, "error when getting the pubkey hash")
}
var account Account
state, err := sf.state(pkHash, &account)
if err != nil {
return nil, errors.Wrapf(err, "error when loading state of %x", pkHash)
}
accountPtr, ok := state.(*Account)
if !ok {
return nil, errors.New("error when casting state into account")
}
return accountPtr, nil
}
| 1 | 13,303 | make it harmless to commit ws == nil | iotexproject-iotex-core | go |
@@ -59,6 +59,8 @@ var locationsReplacement = regexp.MustCompile(`\$\(locations ([^\)]+)\)`)
var exeReplacement = regexp.MustCompile(`\$\(exe ([^\)]+)\)`)
var outExeReplacement = regexp.MustCompile(`\$\(out_exe ([^\)]+)\)`)
var outReplacement = regexp.MustCompile(`\$\(out_location ([^\)]+)\)`)
+var absOutExeReplacement = regexp.MustCompile(`\$\(abs_out_exe ([^\)]+)\)`)
+var absOutReplacement = regexp.MustCompile(`\$\(abs_out_location ([^\)]+)\)`)
var dirReplacement = regexp.MustCompile(`\$\(dir ([^\)]+)\)`)
var hashReplacement = regexp.MustCompile(`\$\(hash ([^\)]+)\)`)
var workerReplacement = regexp.MustCompile(`^(.*)\$\(worker ([^\)]+)\) *([^&]*)(?: *&& *(.*))?$`) | 1 | // Replacement of sequences in genrule commands.
//
// Genrules can contain certain replacement variables which Please substitutes
// with locations of the actual thing before running.
// The following replacements are currently made:
//
// $(location //path/to:target)
// Expands to the output of the given build rule. The rule can only have one
// output (use $locations if there are multiple).
//
// $(locations //path/to:target)
// Expands to all the outputs (space separated) of the given build rule.
// Equivalent to $(location ...) for rules with a single output.
//
// $(exe //path/to:target)
// Expands to a command to run the output of the given target from within a
// genrule or test directory. For example,
// java -jar path/to/target.jar.
// The rule must be tagged as 'binary'.
//
// $(out_exe //path/to:target)
// Expands to a command to run the output of the given target. For example,
// java -jar plz-out/bin/path/to/target.jar.
// The rule must be tagged as 'binary'.
//
// $(dir //path/to:target)
// Expands to the package directory containing the outputs of the given target.
// Useful for rules that have multiple outputs where you only need to know
// what directory they're in.
//
// $(out_location //path/to:target)
// Expands to a path to the output of the given target, with the preceding plz-out/gen
// or plz-out/bin etc. Useful when these things will be run by a user.
//
// $(worker //path/to:target)
// Indicates that this target will be run by a remote worker process. The following
// arguments are sent to the remote worker.
// This is subject to some additional rules: it must appear initially in the command,
// and if "&&" appears subsequently in the command, that part is run locally after
// the worker has completed. All workers must be listed as tools of the rule.
//
// In general it's a good idea to use these where possible in genrules rather than
// hardcoding specific paths.
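//
// As a purely illustrative sketch (the target and file names below are made up
// rather than taken from a real BUILD file), a genrule combining several of
// these replacements might look like:
//
//   genrule(
//       name = "rendered_docs",
//       srcs = ["template.md"],
//       outs = ["docs.md"],
//       cmd = "$(exe //tools:render) --template $(location template.md) > $OUT",
//       tools = ["//tools:render"],
//   )
//
// Here //tools:render would need to be tagged as binary (required by $(exe ...))
// and be a dependency of the rule - typically via tools - for the replacement
// to be allowed.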
package core
import (
"encoding/base64"
"fmt"
"path"
"path/filepath"
"regexp"
"runtime/debug"
"strings"
)
var locationReplacement = regexp.MustCompile(`\$\(location ([^\)]+)\)`)
var locationsReplacement = regexp.MustCompile(`\$\(locations ([^\)]+)\)`)
var exeReplacement = regexp.MustCompile(`\$\(exe ([^\)]+)\)`)
var outExeReplacement = regexp.MustCompile(`\$\(out_exe ([^\)]+)\)`)
var outReplacement = regexp.MustCompile(`\$\(out_location ([^\)]+)\)`)
var dirReplacement = regexp.MustCompile(`\$\(dir ([^\)]+)\)`)
var hashReplacement = regexp.MustCompile(`\$\(hash ([^\)]+)\)`)
var workerReplacement = regexp.MustCompile(`^(.*)\$\(worker ([^\)]+)\) *([^&]*)(?: *&& *(.*))?$`)
// ReplaceSequences replaces escape sequences in the given string.
func ReplaceSequences(state *BuildState, target *BuildTarget, command string) (string, error) {
return replaceSequencesInternal(state, target, command, false)
}
// ReplaceTestSequences replaces escape sequences in the given string when running a test.
func ReplaceTestSequences(state *BuildState, target *BuildTarget, command string) (string, error) {
if command == "" {
// An empty test command implies running the test binary.
return replaceSequencesInternal(state, target, fmt.Sprintf("$(exe :%s)", target.Label.Name), true)
} else if strings.HasPrefix(command, "$(worker") {
_, _, cmd, err := workerAndArgs(state, target, command)
return cmd, err
}
return replaceSequencesInternal(state, target, command, true)
}
// TestWorkerCommand returns the worker & its arguments (if any) for a test, and the command to run for the test itself.
func TestWorkerCommand(state *BuildState, target *BuildTarget) (string, string, string, error) {
return workerAndArgs(state, target, target.GetTestCommand(state))
}
// WorkerCommandAndArgs returns the worker & its command (if any) and subsequent local command for the rule.
func WorkerCommandAndArgs(state *BuildState, target *BuildTarget) (string, string, string, error) {
return workerAndArgs(state, target, target.GetCommand(state))
}
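// workerAndArgs matches commands of the form "$(worker //some:tool) args [&& local]";
// it returns the worker tool, the arguments passed to it, and any remaining local
// command. Commands without a $(worker) prefix come back with an empty worker and
// the fully replaced command.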
func workerAndArgs(state *BuildState, target *BuildTarget, command string) (string, string, string, error) {
match := workerReplacement.FindStringSubmatch(command)
if match == nil {
cmd, err := ReplaceSequences(state, target, command)
return "", "", cmd, err
} else if match[1] != "" {
panic("$(worker) replacements cannot have any commands preceding them.")
}
cmd1, err := replaceSequencesInternal(state, target, strings.TrimSpace(match[3]), false)
if err != nil {
return "", "", "", err
}
cmd2, err := replaceSequencesInternal(state, target, match[4], false)
return replaceWorkerSequence(state, target, ExpandHomePath(match[2]), true, false, false, true, false, false), cmd1, cmd2, err
}
func replaceSequencesInternal(state *BuildState, target *BuildTarget, command string, test bool) (cmd string, err error) {
// TODO(peterebden): should probably just get rid of all the panics and thread errors around properly.
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("%s", r)
log.Debug(string(debug.Stack()))
}
}()
cmd = locationReplacement.ReplaceAllStringFunc(command, func(in string) string {
return replaceSequence(state, target, in[11:len(in)-1], false, false, false, false, false, test)
})
cmd = locationsReplacement.ReplaceAllStringFunc(cmd, func(in string) string {
return replaceSequence(state, target, in[12:len(in)-1], false, true, false, false, false, test)
})
cmd = exeReplacement.ReplaceAllStringFunc(cmd, func(in string) string {
return replaceSequence(state, target, in[6:len(in)-1], true, false, false, false, false, test)
})
cmd = outReplacement.ReplaceAllStringFunc(cmd, func(in string) string {
return replaceSequence(state, target, in[15:len(in)-1], false, false, false, true, false, test)
})
cmd = outExeReplacement.ReplaceAllStringFunc(cmd, func(in string) string {
return replaceSequence(state, target, in[10:len(in)-1], true, false, false, true, false, test)
})
cmd = dirReplacement.ReplaceAllStringFunc(cmd, func(in string) string {
return replaceSequence(state, target, in[6:len(in)-1], false, true, true, false, false, test)
})
cmd = hashReplacement.ReplaceAllStringFunc(cmd, func(in string) string {
return replaceSequence(state, target, in[7:len(in)-1], false, true, true, false, true, test)
})
if state.Config.Bazel.Compatibility {
// Bazel allows several obscure Make-style variable expansions.
// Our replacement here is not very principled but should work better than not doing it at all.
cmd = strings.Replace(cmd, "$<", "$SRCS", -1)
cmd = strings.Replace(cmd, "$(<)", "$SRCS", -1)
cmd = strings.Replace(cmd, "$@D", "$TMP_DIR", -1)
cmd = strings.Replace(cmd, "$(@D)", "$TMP_DIR", -1)
cmd = strings.Replace(cmd, "$@", "$OUTS", -1)
cmd = strings.Replace(cmd, "$(@)", "$OUTS", -1)
// It also seemingly allows you to get away with this syntax, which means something
// fairly different in Bash, but never mind.
cmd = strings.Replace(cmd, "$(SRCS)", "$SRCS", -1)
cmd = strings.Replace(cmd, "$(OUTS)", "$OUTS", -1)
}
// We would ideally check for this when doing matches above, but not easy in
// Go since its regular expressions are actually regular and principled.
return strings.Replace(cmd, "\\$", "$", -1), nil
}
// replaceSequence replaces a single escape sequence in a command.
func replaceSequence(state *BuildState, target *BuildTarget, in string, runnable, multiple, dir, outPrefix, hash, test bool) string {
if LooksLikeABuildLabel(in) {
label, err := TryParseBuildLabel(in, target.Label.PackageName, target.Label.Subrepo)
if err != nil {
panic(err)
}
return replaceSequenceLabel(state, target, label, in, runnable, multiple, dir, outPrefix, hash, test, true)
}
for _, src := range sourcesOrTools(target, runnable) {
if label := src.Label(); label != nil && src.String() == in {
return replaceSequenceLabel(state, target, *label, in, runnable, multiple, dir, outPrefix, hash, test, false)
} else if runnable && src.String() == in {
return src.String()
}
}
if hash {
return base64.RawURLEncoding.EncodeToString(state.PathHasher.MustHash(path.Join(target.Label.PackageName, in)))
}
if strings.HasPrefix(in, "/") {
return in // Absolute path, probably on a tool or system src.
}
return quote(path.Join(target.Label.PackageName, in))
}
// replaceWorkerSequence is like replaceSequence but for worker commands, which do not
// prefix the target's directory if it's not a build label.
func replaceWorkerSequence(state *BuildState, target *BuildTarget, in string, runnable, multiple, dir, outPrefix, hash, test bool) string {
if !LooksLikeABuildLabel(in) {
return in
}
return replaceSequence(state, target, in, runnable, multiple, dir, outPrefix, hash, test)
}
// sourcesOrTools returns either the tools of a target if runnable is true, otherwise its sources.
func sourcesOrTools(target *BuildTarget, runnable bool) []BuildInput {
if runnable {
return target.Tools
}
return target.AllSources()
}
func replaceSequenceLabel(state *BuildState, target *BuildTarget, label BuildLabel, in string, runnable, multiple, dir, outPrefix, hash, test, allOutputs bool) string {
// Check this label is a dependency of the target, otherwise it's not allowed.
if label == target.Label { // targets can always use themselves.
return checkAndReplaceSequence(state, target, target, in, runnable, multiple, dir, outPrefix, hash, test, allOutputs, false)
}
deps := target.DependenciesFor(label)
if len(deps) == 0 {
panic(fmt.Sprintf("Rule %s can't use %s; doesn't depend on target %s", target.Label, in, label))
}
// TODO(pebers): this does not correctly handle the case where there are multiple deps here
// (but is better than the previous case where it never worked at all)
return checkAndReplaceSequence(state, target, deps[0], in, runnable, multiple, dir, outPrefix, hash, test, allOutputs, target.IsTool(label))
}
func checkAndReplaceSequence(state *BuildState, target, dep *BuildTarget, in string, runnable, multiple, dir, outPrefix, hash, test, allOutputs, tool bool) string {
if allOutputs && !multiple && len(dep.Outputs()) > 1 {
// Label must have only one output.
panic(fmt.Sprintf("Rule %s can't use %s; %s has multiple outputs.", target.Label, in, dep.Label))
} else if runnable && !dep.IsBinary {
panic(fmt.Sprintf("Rule %s can't $(exe %s), it's not executable", target.Label, dep.Label))
} else if runnable && len(dep.Outputs()) == 0 {
panic(fmt.Sprintf("Rule %s is tagged as binary but produces no output.", dep.Label))
} else if test && tool {
panic(fmt.Sprintf("Rule %s uses %s in its test command, but tools are not accessible at test time", target, dep))
}
if hash {
h, err := state.TargetHasher.OutputHash(dep)
if err != nil {
panic(err)
}
return base64.RawURLEncoding.EncodeToString(h)
}
output := ""
for _, out := range dep.Outputs() {
if allOutputs || out == in {
if tool && !state.WillRunRemotely(target) {
abs, err := filepath.Abs(handleDir(dep.OutDir(), out, dir))
if err != nil {
log.Fatalf("Couldn't calculate absolute path: %s", err)
}
output += quote(abs) + " "
} else {
output += quote(fileDestination(target, dep, out, dir, outPrefix, test)) + " "
}
if dir {
break
}
}
}
if runnable && dep.HasLabel("java_non_exe") {
// The target is a Java target that isn't self-executable, hence it needs something to run it.
output = "java -jar " + output
}
return strings.TrimRight(output, " ")
}
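// fileDestination works out where a dependency's output will be for this rule:
// the plz-out location when outPrefix is set, the test's working directory when a
// test refers to its own outputs, and the package-relative path otherwise.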
func fileDestination(target, dep *BuildTarget, out string, dir, outPrefix, test bool) string {
if outPrefix {
return handleDir(dep.OutDir(), out, dir)
}
if test && target == dep {
// Slightly fiddly case because tests put binaries in a possibly slightly unusual place.
return "./" + out
}
return handleDir(dep.Label.PackageName, out, dir)
}
// Encloses the given string in quotes if needed.
func quote(s string) string {
if strings.ContainsAny(s, "|&;()<>") {
return "\"" + s + "\""
}
return s
}
// handleDir chooses either the out dir or the actual output location depending on the 'dir' flag.
func handleDir(outDir, output string, dir bool) string {
if dir {
return outDir
}
return path.Join(outDir, output)
}
| 1 | 9,232 | Hmmmm, I'm not sure I like these becoming a replacement any build rule has access to. It seems like if it were used in an actual build rule it would only be doing bad things. Can we just use `filepath.Abs` on the returned path in run_step.go? | thought-machine-please | go |
@@ -24,8 +24,11 @@ import (
var _reqBody = []byte("hello")
-func yarpcEcho(ctx context.Context, reqMeta yarpc.ReqMeta, body []byte) ([]byte, yarpc.ResMeta, error) {
- return body, yarpc.NewResMeta().Headers(reqMeta.Headers()), nil
+func yarpcEcho(ctx context.Context, body []byte) ([]byte, error) {
+ for _, k := range yarpc.HeaderNames(ctx) {
+ yarpc.WriteResponseHeader(ctx, k, yarpc.Header(ctx, k))
+ }
+ return body, nil
}
func httpEcho(t testing.TB) http.HandlerFunc { | 1 | package yarpc_test
import (
"bytes"
"context"
"io"
"io/ioutil"
"net"
"net/http"
"testing"
"time"
"go.uber.org/yarpc"
"go.uber.org/yarpc/encoding/raw"
yhttp "go.uber.org/yarpc/transport/http"
ytchannel "go.uber.org/yarpc/transport/tchannel"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/uber/tchannel-go"
traw "github.com/uber/tchannel-go/raw"
ncontext "golang.org/x/net/context"
)
var _reqBody = []byte("hello")
func yarpcEcho(ctx context.Context, reqMeta yarpc.ReqMeta, body []byte) ([]byte, yarpc.ResMeta, error) {
return body, yarpc.NewResMeta().Headers(reqMeta.Headers()), nil
}
func httpEcho(t testing.TB) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
hs := w.Header()
for k, vs := range r.Header {
hs[k] = vs
}
_, err := io.Copy(w, r.Body)
assert.NoError(t, err, "failed to write HTTP response body")
}
}
type tchannelEcho struct{ t testing.TB }
func (tchannelEcho) Handle(ctx ncontext.Context, args *traw.Args) (*traw.Res, error) {
return &traw.Res{Arg2: args.Arg2, Arg3: args.Arg3}, nil
}
func (t tchannelEcho) OnError(ctx ncontext.Context, err error) {
t.t.Fatalf("request failed: %v", err)
}
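// withDispatcher starts a dispatcher for the given config, hands it to f, and
// stops it once f returns.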
func withDispatcher(t testing.TB, cfg yarpc.Config, f func(*yarpc.Dispatcher)) {
d := yarpc.NewDispatcher(cfg)
require.NoError(t, d.Start(), "failed to start server")
defer d.Stop()
f(d)
}
func withHTTPServer(t testing.TB, listenOn string, h http.Handler, f func()) {
l, err := net.Listen("tcp", listenOn)
require.NoError(t, err, "could not listen on %q", listenOn)
ch := make(chan struct{})
go func() {
http.Serve(l, h)
close(ch)
}()
f()
assert.NoError(t, l.Close(), "failed to stop listener on %q", listenOn)
<-ch // wait until server has stopped
}
func runYARPCClient(b *testing.B, c raw.Client) {
for i := 0; i < b.N; i++ {
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
_, err := c.Call(ctx, "echo", _reqBody)
require.NoError(b, err, "request %d failed", i+1)
}
}
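// runHTTPClient issues b.N raw requests straight through net/http, supplying the
// Rpc-* and Context-TTL-MS headers that the YARPC HTTP inbound expects.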
func runHTTPClient(b *testing.B, c *http.Client, url string) {
for i := 0; i < b.N; i++ {
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
req, err := http.NewRequest("POST", url, bytes.NewReader(_reqBody))
require.NoError(b, err, "failed to build request %d", i+1)
req = req.WithContext(ctx)
req.Header = http.Header{
"Context-TTL-MS": {"100"},
"Rpc-Caller": {"http-client"},
"Rpc-Encoding": {"raw"},
"Rpc-Procedure": {"echo"},
"Rpc-Service": {"server"},
}
res, err := c.Do(req)
require.NoError(b, err, "request %d failed", i+1)
_, err = ioutil.ReadAll(res.Body)
require.NoError(b, err, "failed to read response %d", i+1)
require.NoError(b, res.Body.Close(), "failed to close response body %d", i+1)
}
}
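// runTChannelClient issues b.N raw calls over a plain TChannel connection to the
// given host:port, bypassing YARPC on the client side.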
func runTChannelClient(b *testing.B, c *tchannel.Channel, hostPort string) {
headers := []byte{0x00, 0x00} // TODO: YARPC TChannel should support empty arg2
for i := 0; i < b.N; i++ {
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
call, err := c.BeginCall(ctx, hostPort, "server", "echo",
&tchannel.CallOptions{Format: tchannel.Raw})
require.NoError(b, err, "BeginCall %v failed", i+1)
_, _, _, err = traw.WriteArgs(call, headers, _reqBody)
require.NoError(b, err, "request %v failed", i+1)
}
}
func Benchmark_HTTP_YARPCToYARPC(b *testing.B) {
httpTransport := yhttp.NewTransport()
serverCfg := yarpc.Config{
Name: "server",
Inbounds: yarpc.Inbounds{httpTransport.NewInbound(":8999")},
}
clientCfg := yarpc.Config{
Name: "client",
Outbounds: yarpc.Outbounds{
"server": {
Unary: httpTransport.NewSingleOutbound("http://localhost:8999"),
},
},
}
withDispatcher(b, serverCfg, func(server *yarpc.Dispatcher) {
server.Register(raw.Procedure("echo", yarpcEcho))
withDispatcher(b, clientCfg, func(client *yarpc.Dispatcher) {
b.ResetTimer()
runYARPCClient(b, raw.New(client.ClientConfig("server")))
})
})
}
func Benchmark_HTTP_YARPCToNetHTTP(b *testing.B) {
httpTransport := yhttp.NewTransport()
clientCfg := yarpc.Config{
Name: "client",
Outbounds: yarpc.Outbounds{
"server": {
Unary: httpTransport.NewSingleOutbound("http://localhost:8998"),
},
},
}
withHTTPServer(b, ":8998", httpEcho(b), func() {
withDispatcher(b, clientCfg, func(client *yarpc.Dispatcher) {
b.ResetTimer()
runYARPCClient(b, raw.New(client.ClientConfig("server")))
})
})
}
func Benchmark_HTTP_NetHTTPToYARPC(b *testing.B) {
httpTransport := yhttp.NewTransport()
serverCfg := yarpc.Config{
Name: "server",
Inbounds: yarpc.Inbounds{httpTransport.NewInbound(":8996")},
}
withDispatcher(b, serverCfg, func(server *yarpc.Dispatcher) {
server.Register(raw.Procedure("echo", yarpcEcho))
b.ResetTimer()
runHTTPClient(b, http.DefaultClient, "http://localhost:8996")
})
}
func Benchmark_HTTP_NetHTTPToNetHTTP(b *testing.B) {
withHTTPServer(b, ":8997", httpEcho(b), func() {
b.ResetTimer()
runHTTPClient(b, http.DefaultClient, "http://localhost:8997")
})
}
func Benchmark_TChannel_YARPCToYARPC(b *testing.B) {
serverTChannel := ytchannel.NewChannelTransport(
ytchannel.ServiceName("server"),
)
serverCfg := yarpc.Config{
Name: "server",
Inbounds: yarpc.Inbounds{serverTChannel.NewInbound()},
}
clientTChannel := ytchannel.NewChannelTransport(
ytchannel.ServiceName("client"),
)
// no defer close on channels because YARPC will take care of that
withDispatcher(b, serverCfg, func(server *yarpc.Dispatcher) {
server.Register(raw.Procedure("echo", yarpcEcho))
// Need server already started to build client config
clientCfg := yarpc.Config{
Name: "client",
Outbounds: yarpc.Outbounds{
"server": {
Unary: clientTChannel.NewSingleOutbound(serverTChannel.ListenAddr()),
},
},
}
withDispatcher(b, clientCfg, func(client *yarpc.Dispatcher) {
b.ResetTimer()
runYARPCClient(b, raw.New(client.ClientConfig("server")))
})
})
}
func Benchmark_TChannel_YARPCToTChannel(b *testing.B) {
serverCh, err := tchannel.NewChannel("server", nil)
require.NoError(b, err, "failed to build server TChannel")
defer serverCh.Close()
serverCh.Register(traw.Wrap(tchannelEcho{t: b}), "echo")
require.NoError(b, serverCh.ListenAndServe(":0"), "failed to start up TChannel")
clientTChannel := ytchannel.NewChannelTransport(ytchannel.ServiceName("client"))
clientCfg := yarpc.Config{
Name: "client",
Outbounds: yarpc.Outbounds{
"server": {
Unary: clientTChannel.NewSingleOutbound(serverCh.PeerInfo().HostPort),
},
},
}
withDispatcher(b, clientCfg, func(client *yarpc.Dispatcher) {
b.ResetTimer()
runYARPCClient(b, raw.New(client.ClientConfig("server")))
})
}
func Benchmark_TChannel_TChannelToYARPC(b *testing.B) {
tchannelTransport := ytchannel.NewChannelTransport(
ytchannel.ServiceName("server"),
)
serverCfg := yarpc.Config{
Name: "server",
Inbounds: yarpc.Inbounds{tchannelTransport.NewInbound()},
}
withDispatcher(b, serverCfg, func(dispatcher *yarpc.Dispatcher) {
dispatcher.Register(raw.Procedure("echo", yarpcEcho))
clientCh, err := tchannel.NewChannel("client", nil)
require.NoError(b, err, "failed to build client TChannel")
defer clientCh.Close()
b.ResetTimer()
runTChannelClient(b, clientCh, tchannelTransport.ListenAddr())
})
}
func Benchmark_TChannel_TChannelToTChannel(b *testing.B) {
serverCh, err := tchannel.NewChannel("server", nil)
require.NoError(b, err, "failed to build server TChannel")
defer serverCh.Close()
serverCh.Register(traw.Wrap(tchannelEcho{t: b}), "echo")
require.NoError(b, serverCh.ListenAndServe(":0"), "failed to start up TChannel")
clientCh, err := tchannel.NewChannel("client", nil)
require.NoError(b, err, "failed to build client TChannel")
defer clientCh.Close()
b.ResetTimer()
runTChannelClient(b, clientCh, serverCh.PeerInfo().HostPort)
}
| 1 | 12,020 | That's actually not as bad as I thought it was going to be. | yarpc-yarpc-go | go |
@@ -198,6 +198,11 @@ public class DatasetServiceBean implements java.io.Serializable {
return em.createQuery("SELECT o.id FROM Dataset o WHERE o.indexTime IS null ORDER BY o.id DESC", Long.class).getResultList();
}
+ //Used in datasets listcurationstatus API
+ public List<Dataset> findAllUnpublished() {
+ return em.createQuery("SELECT object(o) FROM Dataset o, DvObject d WHERE d.id=o.id and d.publicationDate IS null ORDER BY o.id ASC", Dataset.class).getResultList();
+ }
+
/**
* For docs, see the equivalent method on the DataverseServiceBean.
* @param numPartitions | 1 | package edu.harvard.iq.dataverse;
import edu.harvard.iq.dataverse.authorization.AuthenticationServiceBean;
import edu.harvard.iq.dataverse.authorization.Permission;
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.authorization.users.User;
import edu.harvard.iq.dataverse.dataaccess.DataAccess;
import edu.harvard.iq.dataverse.dataaccess.ImageThumbConverter;
import edu.harvard.iq.dataverse.dataaccess.StorageIO;
import edu.harvard.iq.dataverse.dataset.DatasetUtil;
import edu.harvard.iq.dataverse.datavariable.DataVariable;
import edu.harvard.iq.dataverse.engine.command.CommandContext;
import edu.harvard.iq.dataverse.engine.command.DataverseRequest;
import edu.harvard.iq.dataverse.engine.command.exception.CommandException;
import edu.harvard.iq.dataverse.engine.command.impl.DestroyDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.FinalizeDatasetPublicationCommand;
import edu.harvard.iq.dataverse.engine.command.impl.GetDatasetStorageSizeCommand;
import edu.harvard.iq.dataverse.export.ExportService;
import edu.harvard.iq.dataverse.harvest.server.OAIRecordServiceBean;
import edu.harvard.iq.dataverse.search.IndexServiceBean;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.util.BundleUtil;
import edu.harvard.iq.dataverse.util.SystemConfig;
import edu.harvard.iq.dataverse.workflows.WorkflowComment;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.FileHandler;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.ejb.Asynchronous;
import javax.ejb.EJB;
import javax.ejb.EJBException;
import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;
import javax.inject.Named;
import javax.persistence.EntityManager;
import javax.persistence.NoResultException;
import javax.persistence.PersistenceContext;
import javax.persistence.Query;
import javax.persistence.StoredProcedureQuery;
import javax.persistence.TypedQuery;
import org.apache.commons.lang3.RandomStringUtils;
import org.ocpsoft.common.util.Strings;
/**
*
* @author skraffmiller
*/
@Stateless
@Named
public class DatasetServiceBean implements java.io.Serializable {
private static final Logger logger = Logger.getLogger(DatasetServiceBean.class.getCanonicalName());
@EJB
IndexServiceBean indexService;
@EJB
DOIEZIdServiceBean doiEZIdServiceBean;
@EJB
SettingsServiceBean settingsService;
@EJB
DatasetVersionServiceBean versionService;
@EJB
DvObjectServiceBean dvObjectService;
@EJB
AuthenticationServiceBean authentication;
@EJB
DataFileServiceBean fileService;
@EJB
PermissionServiceBean permissionService;
@EJB
OAIRecordServiceBean recordService;
@EJB
EjbDataverseEngine commandEngine;
@EJB
SystemConfig systemConfig;
private static final SimpleDateFormat logFormatter = new SimpleDateFormat("yyyy-MM-dd'T'HH-mm-ss");
@PersistenceContext(unitName = "VDCNet-ejbPU")
protected EntityManager em;
public Dataset find(Object pk) {
return em.find(Dataset.class, pk);
}
public List<Dataset> findByOwnerId(Long ownerId) {
return findByOwnerId(ownerId, false);
}
public List<Dataset> findPublishedByOwnerId(Long ownerId) {
return findByOwnerId(ownerId, true);
}
private List<Dataset> findByOwnerId(Long ownerId, boolean onlyPublished) {
List<Dataset> retList = new ArrayList<>();
TypedQuery<Dataset> query = em.createNamedQuery("Dataset.findByOwnerId", Dataset.class);
query.setParameter("ownerId", ownerId);
if (!onlyPublished) {
return query.getResultList();
} else {
for (Dataset ds : query.getResultList()) {
if (ds.isReleased() && !ds.isDeaccessioned()) {
retList.add(ds);
}
}
return retList;
}
}
public List<Long> findIdsByOwnerId(Long ownerId) {
return findIdsByOwnerId(ownerId, false);
}
private List<Long> findIdsByOwnerId(Long ownerId, boolean onlyPublished) {
List<Long> retList = new ArrayList<>();
if (!onlyPublished) {
return em.createNamedQuery("Dataset.findIdByOwnerId")
.setParameter("ownerId", ownerId)
.getResultList();
} else {
List<Dataset> results = em.createNamedQuery("Dataset.findByOwnerId")
.setParameter("ownerId", ownerId).getResultList();
for (Dataset ds : results) {
if (ds.isReleased() && !ds.isDeaccessioned()) {
retList.add(ds.getId());
}
}
return retList;
}
}
public List<Dataset> findByCreatorId(Long creatorId) {
return em.createNamedQuery("Dataset.findByCreatorId").setParameter("creatorId", creatorId).getResultList();
}
public List<Dataset> findByReleaseUserId(Long releaseUserId) {
return em.createNamedQuery("Dataset.findByReleaseUserId").setParameter("releaseUserId", releaseUserId).getResultList();
}
public List<Dataset> filterByPidQuery(String filterQuery) {
// finds only exact matches
Dataset ds = findByGlobalId(filterQuery);
List<Dataset> ret = new ArrayList<>();
if (ds != null) ret.add(ds);
/*
List<Dataset> ret = em.createNamedQuery("Dataset.filterByPid", Dataset.class)
.setParameter("affiliation", "%" + filterQuery.toLowerCase() + "%").getResultList();
//logger.info("created native query: select o from Dataverse o where o.alias LIKE '" + filterQuery + "%' order by o.alias");
logger.info("created named query");
*/
if (ret != null) {
logger.info("results list: "+ret.size()+" results.");
}
return ret;
}
public List<Dataset> findAll() {
return em.createQuery("select object(o) from Dataset as o order by o.id", Dataset.class).getResultList();
}
public List<Long> findIdStale() {
return em.createNamedQuery("Dataset.findIdStale").getResultList();
}
public List<Long> findIdStalePermission() {
return em.createNamedQuery("Dataset.findIdStalePermission").getResultList();
}
public List<Long> findAllLocalDatasetIds() {
return em.createQuery("SELECT o.id FROM Dataset o WHERE o.harvestedFrom IS null ORDER BY o.id", Long.class).getResultList();
}
public List<Long> findAllUnindexed() {
return em.createQuery("SELECT o.id FROM Dataset o WHERE o.indexTime IS null ORDER BY o.id DESC", Long.class).getResultList();
}
/**
* For docs, see the equivalent method on the DataverseServiceBean.
* @param numPartitions
* @param partitionId
* @param skipIndexed
* @return a list of datasets
* @see DataverseServiceBean#findAllOrSubset(long, long, boolean)
*/
public List<Long> findAllOrSubset(long numPartitions, long partitionId, boolean skipIndexed) {
if (numPartitions < 1) {
long saneNumPartitions = 1;
numPartitions = saneNumPartitions;
}
String skipClause = skipIndexed ? "AND o.indexTime is null " : "";
TypedQuery<Long> typedQuery = em.createQuery("SELECT o.id FROM Dataset o WHERE MOD( o.id, :numPartitions) = :partitionId " +
skipClause +
"ORDER BY o.id", Long.class);
typedQuery.setParameter("numPartitions", numPartitions);
typedQuery.setParameter("partitionId", partitionId);
return typedQuery.getResultList();
}
/**
* Merges the passed dataset to the persistence context.
* @param ds the dataset whose new state we want to persist.
* @return The managed entity representing {@code ds}.
*/
public Dataset merge( Dataset ds ) {
return em.merge(ds);
}
public Dataset findByGlobalId(String globalId) {
Dataset retVal = (Dataset) dvObjectService.findByGlobalId(globalId, "Dataset");
if (retVal != null){
return retVal;
} else {
//try to find with alternative PID
return (Dataset) dvObjectService.findByGlobalId(globalId, "Dataset", true);
}
}
/**
* Instantiate dataset, and its components (DatasetVersions and FileMetadatas)
* this method is used for object validation; if there are any invalid values
* in the dataset components, a ConstraintViolationException will be thrown,
* which can be further parsed to detect the specific offending values.
* @param id the id of the dataset
* @throws javax.validation.ConstraintViolationException
*/
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void instantiateDatasetInNewTransaction(Long id, boolean includeVariables) {
Dataset dataset = find(id);
for (DatasetVersion version : dataset.getVersions()) {
for (FileMetadata fileMetadata : version.getFileMetadatas()) {
// todo: make this optional!
if (includeVariables) {
if (fileMetadata.getDataFile().isTabularData()) {
DataTable dataTable = fileMetadata.getDataFile().getDataTable();
for (DataVariable dataVariable : dataTable.getDataVariables()) {
}
}
}
}
}
}
public String generateDatasetIdentifier(Dataset dataset, GlobalIdServiceBean idServiceBean) {
String identifierType = settingsService.getValueForKey(SettingsServiceBean.Key.IdentifierGenerationStyle, "randomString");
String shoulder = settingsService.getValueForKey(SettingsServiceBean.Key.Shoulder, "");
switch (identifierType) {
case "randomString":
return generateIdentifierAsRandomString(dataset, idServiceBean, shoulder);
case "sequentialNumber":
return generateIdentifierAsSequentialNumber(dataset, idServiceBean, shoulder);
default:
/* Should we throw an exception instead?? -- L.A. 4.6.2 */
return generateIdentifierAsRandomString(dataset, idServiceBean, shoulder);
}
}
private String generateIdentifierAsRandomString(Dataset dataset, GlobalIdServiceBean idServiceBean, String shoulder) {
String identifier = null;
do {
identifier = shoulder + RandomStringUtils.randomAlphanumeric(6).toUpperCase();
} while (!isIdentifierLocallyUnique(identifier, dataset));
return identifier;
}
private String generateIdentifierAsSequentialNumber(Dataset dataset, GlobalIdServiceBean idServiceBean, String shoulder) {
String identifier;
do {
StoredProcedureQuery query = this.em.createNamedStoredProcedureQuery("Dataset.generateIdentifierAsSequentialNumber");
query.execute();
Integer identifierNumeric = (Integer) query.getOutputParameterValue(1);
// some diagnostics here maybe - is it possible to determine that it's failing
// because the stored procedure hasn't been created in the database?
if (identifierNumeric == null) {
return null;
}
identifier = shoulder + identifierNumeric.toString();
} while (!isIdentifierLocallyUnique(identifier, dataset));
return identifier;
}
/**
* Check that an identifier entered by the user is unique (not currently used
* for any other study in this Dataverse Network); also check for a duplicate
* in EZID if needed
* @param userIdentifier
* @param dataset
* @param persistentIdSvc
* @return {@code true} if the identifier is unique, {@code false} otherwise.
*/
public boolean isIdentifierUnique(String userIdentifier, Dataset dataset, GlobalIdServiceBean persistentIdSvc) {
if ( ! isIdentifierLocallyUnique(userIdentifier, dataset) ) return false; // duplication found in local database
// not in local DB, look in the persistent identifier service
try {
return ! persistentIdSvc.alreadyExists(dataset);
} catch (Exception e){
//we can live with failure - means identifier not found remotely
}
return true;
}
public boolean isIdentifierLocallyUnique(Dataset dataset) {
return isIdentifierLocallyUnique(dataset.getIdentifier(), dataset);
}
public boolean isIdentifierLocallyUnique(String identifier, Dataset dataset) {
return em.createNamedQuery("Dataset.findByIdentifierAuthorityProtocol")
.setParameter("identifier", identifier)
.setParameter("authority", dataset.getAuthority())
.setParameter("protocol", dataset.getProtocol())
.getResultList().isEmpty();
}
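// Returns the highest numeric suffix among the identifiers already assigned to this
// dataset's files, or 0 if the dataset has no files yet (used for sequential,
// dataset-dependent file PIDs).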
public Long getMaximumExistingDatafileIdentifier(Dataset dataset) {
//Cannot rely on the largest table id having the greatest identifier counter
long zeroFiles = new Long(0);
Long retVal = zeroFiles;
Long testVal;
List<Object> idResults;
Long dsId = dataset.getId();
if (dsId != null) {
try {
idResults = em.createNamedQuery("Dataset.findIdentifierByOwnerId")
.setParameter("ownerId", dsId).getResultList();
} catch (NoResultException ex) {
logger.log(Level.FINE, "No files found in dataset id {0}. Returning a count of zero.", dsId);
return zeroFiles;
}
if (idResults != null) {
for (Object raw: idResults){
String identifier = (String) raw;
identifier = identifier.substring(identifier.lastIndexOf("/") + 1);
testVal = new Long(identifier) ;
if (testVal > retVal){
retVal = testVal;
}
}
}
}
return retVal;
}
public DatasetVersion storeVersion( DatasetVersion dsv ) {
em.persist(dsv);
return dsv;
}
public DatasetVersionUser getDatasetVersionUser(DatasetVersion version, User user) {
TypedQuery<DatasetVersionUser> query = em.createNamedQuery("DatasetVersionUser.findByVersionIdAndUserId", DatasetVersionUser.class);
query.setParameter("versionId", version.getId());
String identifier = user.getIdentifier();
identifier = identifier.startsWith("@") ? identifier.substring(1) : identifier;
AuthenticatedUser au = authentication.getAuthenticatedUser(identifier);
query.setParameter("userId", au.getId());
try {
return query.getSingleResult();
} catch (javax.persistence.NoResultException e) {
return null;
}
}
public boolean checkDatasetLock(Long datasetId) {
TypedQuery<DatasetLock> lockCounter = em.createNamedQuery("DatasetLock.getLocksByDatasetId", DatasetLock.class);
lockCounter.setParameter("datasetId", datasetId);
lockCounter.setMaxResults(1);
List<DatasetLock> lock = lockCounter.getResultList();
return lock.size()>0;
}
public List<DatasetLock> getDatasetLocksByUser( AuthenticatedUser user) {
TypedQuery<DatasetLock> query = em.createNamedQuery("DatasetLock.getLocksByAuthenticatedUserId", DatasetLock.class);
query.setParameter("authenticatedUserId", user.getId());
try {
return query.getResultList();
} catch (javax.persistence.NoResultException e) {
return null;
}
}
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public DatasetLock addDatasetLock(Dataset dataset, DatasetLock lock) {
lock.setDataset(dataset);
dataset.addLock(lock);
lock.setStartTime( new Date() );
em.persist(lock);
//em.merge(dataset);
return lock;
}
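// Locks the dataset for the given reason on behalf of the (optional) user, unless a
// lock with the same reason is already present, in which case the existing lock is returned.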
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) /*?*/
public DatasetLock addDatasetLock(Long datasetId, DatasetLock.Reason reason, Long userId, String info) {
Dataset dataset = em.find(Dataset.class, datasetId);
AuthenticatedUser user = null;
if (userId != null) {
user = em.find(AuthenticatedUser.class, userId);
}
// Check if the dataset is already locked for this reason:
// (to prevent multiple, duplicate locks on the dataset!)
DatasetLock lock = dataset.getLockFor(reason);
if (lock != null) {
return lock;
}
// Create new:
lock = new DatasetLock(reason, user);
lock.setDataset(dataset);
lock.setInfo(info);
lock.setStartTime(new Date());
if (userId != null) {
lock.setUser(user);
if (user.getDatasetLocks() == null) {
user.setDatasetLocks(new ArrayList<>());
}
user.getDatasetLocks().add(lock);
}
return addDatasetLock(dataset, lock);
}
/**
* Removes all {@link DatasetLock}s for the dataset whose id is passed and reason
* is {@code aReason}.
* @param dataset the dataset whose locks (for {@code aReason}) will be removed.
* @param aReason The reason of the locks that will be removed.
*/
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void removeDatasetLocks(Dataset dataset, DatasetLock.Reason aReason) {
if ( dataset != null ) {
new HashSet<>(dataset.getLocks()).stream()
.filter( l -> l.getReason() == aReason )
.forEach( lock -> {
lock = em.merge(lock);
dataset.removeLock(lock);
AuthenticatedUser user = lock.getUser();
user.getDatasetLocks().remove(lock);
em.remove(lock);
});
}
}
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void updateDatasetLock(DatasetLock datasetLock) {
em.merge(datasetLock);
}
/*
getTitleFromLatestVersion methods use native query to return a dataset title
There are two versions:
1) The version with datasetId param only will return the title regardless of version state
2) The version with the boolean param 'includeDraft' will return the most recently published title if the param is set to false
If no title is found, return an empty string - this protects against calling with
includeDraft = false when there is no published version
*/
public String getTitleFromLatestVersion(Long datasetId){
return getTitleFromLatestVersion(datasetId, true);
}
public String getTitleFromLatestVersion(Long datasetId, boolean includeDraft){
String whereDraft = "";
//This clause will exclude draft versions from the select
if (!includeDraft) {
whereDraft = " and v.versionstate !='DRAFT' ";
}
try {
return (String) em.createNativeQuery("select dfv.value from dataset d "
+ " join datasetversion v on d.id = v.dataset_id "
+ " join datasetfield df on v.id = df.datasetversion_id "
+ " join datasetfieldvalue dfv on df.id = dfv.datasetfield_id "
+ " join datasetfieldtype dft on df.datasetfieldtype_id = dft.id "
+ " where dft.name = '" + DatasetFieldConstant.title + "' and v.dataset_id =" + datasetId
+ whereDraft
+ " order by v.versionnumber desc, v.minorVersionNumber desc limit 1 "
+ ";").getSingleResult();
} catch (Exception ex) {
logger.log(Level.INFO, "exception trying to get title from latest version: {0}", ex);
return "";
}
}
public Dataset getDatasetByHarvestInfo(Dataverse dataverse, String harvestIdentifier) {
String queryStr = "SELECT d FROM Dataset d, DvObject o WHERE d.id = o.id AND o.owner.id = " + dataverse.getId() + " and d.harvestIdentifier = '" + harvestIdentifier + "'";
Query query = em.createQuery(queryStr);
List resultList = query.getResultList();
Dataset dataset = null;
if (resultList.size() > 1) {
throw new EJBException("More than one dataset found in the dataverse (id= " + dataverse.getId() + "), with harvestIdentifier= " + harvestIdentifier);
}
if (resultList.size() == 1) {
dataset = (Dataset) resultList.get(0);
}
return dataset;
}
public Long getDatasetVersionCardImage(Long versionId, User user) {
if (versionId == null) {
return null;
}
return null;
}
/**
* Used to identify and properly display Harvested objects on the dataverse page.
*
* @param datasetIds
* @return
*/
public Map<Long, String> getArchiveDescriptionsForHarvestedDatasets(Set<Long> datasetIds){
if (datasetIds == null || datasetIds.size() < 1) {
return null;
}
String datasetIdStr = Strings.join(datasetIds, ", ");
String qstr = "SELECT d.id, h.archiveDescription FROM harvestingClient h, dataset d WHERE d.harvestingClient_id = h.id AND d.id IN (" + datasetIdStr + ")";
List<Object[]> searchResults;
try {
searchResults = em.createNativeQuery(qstr).getResultList();
} catch (Exception ex) {
searchResults = null;
}
if (searchResults == null) {
return null;
}
Map<Long, String> ret = new HashMap<>();
for (Object[] result : searchResults) {
Long dsId;
if (result[0] != null) {
try {
dsId = (Long)result[0];
} catch (Exception ex) {
dsId = null;
}
if (dsId == null) {
continue;
}
ret.put(dsId, (String)result[1]);
}
}
return ret;
}
public boolean isDatasetCardImageAvailable(DatasetVersion datasetVersion, User user) {
if (datasetVersion == null) {
return false;
}
// First, check if this dataset has a designated thumbnail image:
if (datasetVersion.getDataset() != null) {
DataFile dataFile = datasetVersion.getDataset().getThumbnailFile();
if (dataFile != null) {
return ImageThumbConverter.isThumbnailAvailable(dataFile, 48);
}
}
// If not, we'll try to use one of the files in this dataset version:
// (the first file with an available thumbnail, really)
List<FileMetadata> fileMetadatas = datasetVersion.getFileMetadatas();
for (FileMetadata fileMetadata : fileMetadatas) {
DataFile dataFile = fileMetadata.getDataFile();
// TODO: use permissionsWrapper here - ?
// (we are looking up these download permissions on individual files,
// true, and those are unique... but the wrapper may be able to save
// us some queries when it determines the download permission on the
// dataset as a whole? -- L.A. 4.2.1
if (fileService.isThumbnailAvailable(dataFile) && permissionService.userOn(user, dataFile).has(Permission.DownloadFile)) { //, user)) {
return true;
}
}
return false;
}
// reExportAll *forces* a reexport on all published datasets; whether they
// have the "last export" time stamp set or not.
@Asynchronous
public void reExportAllAsync() {
exportAllDatasets(true);
}
public void reExportAll() {
exportAllDatasets(true);
}
// exportAll() will try to export the yet unexported datasets (it will honor
// and trust the "last export" time stamp).
@Asynchronous
public void exportAllAsync() {
exportAllDatasets(false);
}
public void exportAll() {
exportAllDatasets(false);
}
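// Iterates over all local datasets and re-exports every metadata format for released,
// non-deaccessioned ones; forceReExport ignores the lastExportTime stamp. Progress is
// written to a dedicated export_<timestamp>.log file.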
public void exportAllDatasets(boolean forceReExport) {
Integer countAll = 0;
Integer countSuccess = 0;
Integer countError = 0;
String logTimestamp = logFormatter.format(new Date());
Logger exportLogger = Logger.getLogger("edu.harvard.iq.dataverse.harvest.client.DatasetServiceBean." + "ExportAll" + logTimestamp);
String logFileName = "../logs" + File.separator + "export_" + logTimestamp + ".log";
FileHandler fileHandler;
boolean fileHandlerSuceeded;
try {
fileHandler = new FileHandler(logFileName);
exportLogger.setUseParentHandlers(false);
fileHandlerSuceeded = true;
} catch (IOException | SecurityException ex) {
Logger.getLogger(DatasetServiceBean.class.getName()).log(Level.SEVERE, null, ex);
return;
}
if (fileHandlerSuceeded) {
exportLogger.addHandler(fileHandler);
} else {
exportLogger = logger;
}
exportLogger.info("Starting an export all job");
for (Long datasetId : findAllLocalDatasetIds()) {
// Potentially, there's a godzillion datasets in this Dataverse.
// This is why we go through the list of ids here, and instantiate
// only one dataset at a time.
Dataset dataset = this.find(datasetId);
if (dataset != null) {
// Accurate "is published?" test - ?
// Answer: Yes, it is! We can't trust dataset.isReleased() alone; because it is a dvobject method
// that returns (publicationDate != null). And "publicationDate" is essentially
// "the first publication date"; that stays the same as versions get
// published and/or deaccessioned. But in combination with !isDeaccessioned()
// it is indeed an accurate test.
if (dataset.isReleased() && dataset.getReleasedVersion() != null && !dataset.isDeaccessioned()) {
// can't trust dataset.getPublicationDate(), no.
Date publicationDate = dataset.getReleasedVersion().getReleaseTime(); // we know this dataset has a non-null released version! Maybe not - SEK 8/19 (We do now! :)
if (forceReExport || (publicationDate != null
&& (dataset.getLastExportTime() == null
|| dataset.getLastExportTime().before(publicationDate)))) {
countAll++;
try {
recordService.exportAllFormatsInNewTransaction(dataset);
exportLogger.info("Success exporting dataset: " + dataset.getDisplayName() + " " + dataset.getGlobalIdString());
countSuccess++;
} catch (Exception ex) {
exportLogger.info("Error exporting dataset: " + dataset.getDisplayName() + " " + dataset.getGlobalIdString() + "; " + ex.getMessage());
countError++;
}
}
}
}
}
exportLogger.info("Datasets processed: " + countAll.toString());
exportLogger.info("Datasets exported successfully: " + countSuccess.toString());
exportLogger.info("Datasets failures: " + countError.toString());
exportLogger.info("Finished export-all job.");
if (fileHandlerSuceeded) {
fileHandler.close();
}
}
//get a string to add to save success message
//depends on dataset state and user privleges
public String getReminderString(Dataset dataset, boolean canPublishDataset) {
if(!dataset.isReleased() ){
//messages for draft state.
if (canPublishDataset){
return BundleUtil.getStringFromBundle("dataset.message.publish.remind.draft");
} else {
return BundleUtil.getStringFromBundle("dataset.message.submit.remind.draft");
}
} else{
//messages for new version - post-publish
if (canPublishDataset){
return BundleUtil.getStringFromBundle("dataset.message.publish.remind.version");
} else {
return BundleUtil.getStringFromBundle("dataset.message.submit.remind.version");
}
}
}
public void updateLastExportTimeStamp(Long datasetId) {
Date now = new Date();
em.createNativeQuery("UPDATE Dataset SET lastExportTime='"+now.toString()+"' WHERE id="+datasetId).executeUpdate();
}
public Dataset setNonDatasetFileAsThumbnail(Dataset dataset, InputStream inputStream) {
if (dataset == null) {
logger.fine("In setNonDatasetFileAsThumbnail but dataset is null! Returning null.");
return null;
}
if (inputStream == null) {
logger.fine("In setNonDatasetFileAsThumbnail but inputStream is null! Returning null.");
return null;
}
dataset = DatasetUtil.persistDatasetLogoToStorageAndCreateThumbnails(dataset, inputStream);
dataset.setThumbnailFile(null);
return merge(dataset);
}
public Dataset setDatasetFileAsThumbnail(Dataset dataset, DataFile datasetFileThumbnailToSwitchTo) {
if (dataset == null) {
logger.fine("In setDatasetFileAsThumbnail but dataset is null! Returning null.");
return null;
}
if (datasetFileThumbnailToSwitchTo == null) {
logger.fine("In setDatasetFileAsThumbnail but dataset is null! Returning null.");
return null;
}
DatasetUtil.deleteDatasetLogo(dataset);
dataset.setThumbnailFile(datasetFileThumbnailToSwitchTo);
dataset.setUseGenericThumbnail(false);
return merge(dataset);
}
public Dataset removeDatasetThumbnail(Dataset dataset) {
if (dataset == null) {
logger.fine("In removeDatasetThumbnail but dataset is null! Returning null.");
return null;
}
DatasetUtil.deleteDatasetLogo(dataset);
dataset.setThumbnailFile(null);
dataset.setUseGenericThumbnail(true);
return merge(dataset);
}
// persist assigned thumbnail in a single one-field-update query:
// (the point is to avoid doing an em.merge() on an entire dataset object...)
public void assignDatasetThumbnailByNativeQuery(Long datasetId, Long dataFileId) {
try {
em.createNativeQuery("UPDATE dataset SET thumbnailfile_id=" + dataFileId + " WHERE id=" + datasetId).executeUpdate();
} catch (Exception ex) {
// it's ok to just ignore...
}
}
public void assignDatasetThumbnailByNativeQuery(Dataset dataset, DataFile dataFile) {
try {
em.createNativeQuery("UPDATE dataset SET thumbnailfile_id=" + dataFile.getId() + " WHERE id=" + dataset.getId()).executeUpdate();
} catch (Exception ex) {
// it's ok to just ignore...
}
}
public WorkflowComment addWorkflowComment(WorkflowComment workflowComment) {
em.persist(workflowComment);
return workflowComment;
}
public void markWorkflowCommentAsRead(WorkflowComment workflowComment) {
workflowComment.setToBeShown(false);
em.merge(workflowComment);
}
/**
* This method used to throw CommandException, which was pretty pointless
* seeing how it's called asynchronously. As of v5.0 any CommanExceptiom
* thrown by the FinalizeDatasetPublicationCommand below will be caught
* and we'll log it as a warning - which is the best we can do at this point.
* Any failure notifications to users should be sent from inside the command.
*/
@Asynchronous
@TransactionAttribute(TransactionAttributeType.SUPPORTS)
public void callFinalizePublishCommandAsynchronously(Long datasetId, CommandContext ctxt, DataverseRequest request, boolean isPidPrePublished) {
// Since we are calling the next command asynchronously anyway - sleep here
// for a few seconds, just in case, to make sure the database update of
// the dataset initiated by the PublishDatasetCommand has finished,
// to avoid any concurrency/optimistic lock issues.
// Aug. 2020/v5.0: It MAY be working consistently without any
// sleep here, after the call the method has been moved to the onSuccess()
// portion of the PublishDatasetCommand. I'm going to leave the 1 second
// sleep below, for just in case reasons: -- L.A.
try {
Thread.sleep(1000);
} catch (Exception ex) {
logger.warning("Failed to sleep for a second.");
}
logger.fine("Running FinalizeDatasetPublicationCommand, asynchronously");
Dataset theDataset = find(datasetId);
try {
commandEngine.submit(new FinalizeDatasetPublicationCommand(theDataset, request, isPidPrePublished));
} catch (CommandException cex) {
logger.warning("CommandException caught when executing the asynchronous portion of the Dataset Publication Command.");
}
}
/*
Experimental asynchronous method for requesting persistent identifiers for
datafiles. We decided not to run this method on upload/create (so files
will not have persistent ids while in draft); when the draft is published,
we will force obtaining persistent ids for all the files in the version.
If we go back to trying to register global ids on create, care will need to
be taken to make sure the asynchronous changes below are not conflicting with
the changes from file ingest (which may be happening in parallel, also
asynchronously). We would also need to lock the dataset (similarly to how
tabular ingest locks the dataset), to prevent the user from publishing the
version before all the identifiers get assigned - otherwise more conflicts
are likely. (It sounds like it would make sense to treat these two tasks -
persistent identifiers for files and ingest - as one post-upload job, so that
they can be run in sequence). -- L.A. Mar. 2018
*/
@Asynchronous
public void obtainPersistentIdentifiersForDatafiles(Dataset dataset) {
GlobalIdServiceBean idServiceBean = GlobalIdServiceBean.getBean(dataset.getProtocol(), commandEngine.getContext());
//If the Id type is sequential and Dependent then write file identifiers outside the command
String datasetIdentifier = dataset.getIdentifier();
Long maxIdentifier = null;
if (systemConfig.isDataFilePIDSequentialDependent()) {
maxIdentifier = getMaximumExistingDatafileIdentifier(dataset);
}
for (DataFile datafile : dataset.getFiles()) {
logger.info("Obtaining persistent id for datafile id=" + datafile.getId());
if (datafile.getIdentifier() == null || datafile.getIdentifier().isEmpty()) {
logger.info("Obtaining persistent id for datafile id=" + datafile.getId());
if (maxIdentifier != null) {
maxIdentifier++;
datafile.setIdentifier(datasetIdentifier + "/" + maxIdentifier.toString());
} else {
datafile.setIdentifier(fileService.generateDataFileIdentifier(datafile, idServiceBean));
}
if (datafile.getProtocol() == null) {
datafile.setProtocol(settingsService.getValueForKey(SettingsServiceBean.Key.Protocol, ""));
}
if (datafile.getAuthority() == null) {
datafile.setAuthority(settingsService.getValueForKey(SettingsServiceBean.Key.Authority, ""));
}
logger.info("identifier: " + datafile.getIdentifier());
String doiRetString;
try {
logger.log(Level.FINE, "creating identifier");
doiRetString = idServiceBean.createIdentifier(datafile);
} catch (Throwable e) {
logger.log(Level.WARNING, "Exception while creating Identifier: " + e.getMessage(), e);
doiRetString = "";
}
// Check return value to make sure registration succeeded
if (!idServiceBean.registerWhenPublished() && doiRetString.contains(datafile.getIdentifier())) {
datafile.setIdentifierRegistered(true);
datafile.setGlobalIdCreateTime(new Date());
}
DataFile merged = em.merge(datafile);
merged = null;
}
}
}
public long findStorageSize(Dataset dataset) throws IOException {
return findStorageSize(dataset, false, GetDatasetStorageSizeCommand.Mode.STORAGE, null);
}
public long findStorageSize(Dataset dataset, boolean countCachedExtras) throws IOException {
return findStorageSize(dataset, countCachedExtras, GetDatasetStorageSizeCommand.Mode.STORAGE, null);
}
/**
* Returns the total byte size of the files in this dataset
*
* @param dataset
* @param countCachedExtras boolean indicating if the cached disposable extras should also be counted
     * @param mode indicates whether we are getting the result for storage (the entire dataset) or for a specific download version
* @param version optional param for dataset version
* @return total size
* @throws IOException if it can't access the objects via StorageIO
* (in practice, this can only happen when called with countCachedExtras=true; when run in the
* default mode, the method doesn't need to access the storage system, as the
* sizes of the main files are recorded in the database)
*/
public long findStorageSize(Dataset dataset, boolean countCachedExtras, GetDatasetStorageSizeCommand.Mode mode, DatasetVersion version) throws IOException {
long total = 0L;
if (dataset.isHarvested()) {
return 0L;
}
List<DataFile> filesToTally = new ArrayList();
if (version == null || (mode != null && mode.equals("storage"))){
filesToTally = dataset.getFiles();
} else {
List <FileMetadata> fmds = version.getFileMetadatas();
for (FileMetadata fmd : fmds){
filesToTally.add(fmd.getDataFile());
}
}
//CACHED EXTRAS FOR DOWNLOAD?
for (DataFile datafile : filesToTally) {
total += datafile.getFilesize();
if (!countCachedExtras) {
if (datafile.isTabularData()) {
// count the size of the stored original, in addition to the main tab-delimited file:
Long originalFileSize = datafile.getDataTable().getOriginalFileSize();
if (originalFileSize != null) {
total += originalFileSize;
}
}
} else {
StorageIO<DataFile> storageIO = datafile.getStorageIO();
for (String cachedFileTag : storageIO.listAuxObjects()) {
total += storageIO.getAuxObjectSize(cachedFileTag);
}
}
}
// and finally,
if (countCachedExtras) {
// count the sizes of the files cached for the dataset itself
// (i.e., the metadata exports):
StorageIO<Dataset> datasetSIO = DataAccess.getStorageIO(dataset);
for (String[] exportProvider : ExportService.getInstance().getExportersLabels()) {
String exportLabel = "export_" + exportProvider[1] + ".cached";
try {
total += datasetSIO.getAuxObjectSize(exportLabel);
} catch (IOException ioex) {
// safe to ignore; object not cached
}
}
}
return total;
}
/**
* An optimized method for deleting a harvested dataset.
*
* @param dataset
* @param request DataverseRequest (for initializing the DestroyDatasetCommand)
* @param hdLogger logger object (in practice, this will be a separate log file created for a specific harvesting job)
*/
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void deleteHarvestedDataset(Dataset dataset, DataverseRequest request, Logger hdLogger) {
// Purge all the SOLR documents associated with this client from the
// index server:
indexService.deleteHarvestedDocuments(dataset);
try {
// files from harvested datasets are removed unceremoniously,
// directly in the database. no need to bother calling the
// DeleteFileCommand on them.
for (DataFile harvestedFile : dataset.getFiles()) {
DataFile merged = em.merge(harvestedFile);
em.remove(merged);
harvestedFile = null;
}
dataset.setFiles(null);
Dataset merged = em.merge(dataset);
commandEngine.submit(new DestroyDatasetCommand(merged, request));
hdLogger.info("Successfully destroyed the dataset");
} catch (Exception ex) {
hdLogger.warning("Failed to destroy the dataset");
}
}
}
| 1 | 44,861 | Hmm, I'm surprised if we don't already have a "find all unpublished datasets" method but I don't really know. Maybe @scolapasta knows. | IQSS-dataverse | java |
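The review comment above wonders whether a "find all unpublished datasets" finder already exists. For illustration only, a minimal sketch of such a helper on the same service bean might look like the following; the JPQL field and enum names (DatasetVersion.versionState, VersionState.RELEASED) are assumptions, not confirmed by the code in this row.

    // Hypothetical helper, not part of the file above: returns datasets that have
    // never had a released version. Field/enum names are assumed for illustration.
    public List<Dataset> findAllUnpublishedDatasets() {
        return em.createQuery(
                "SELECT d FROM Dataset d WHERE NOT EXISTS ("
              + "  SELECT v FROM DatasetVersion v"
              + "  WHERE v.dataset = d AND v.versionState = :released)",
                Dataset.class)
                .setParameter("released", DatasetVersion.VersionState.RELEASED)
                .getResultList();
    }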
@@ -598,6 +598,12 @@ class Python3Checker(checkers.BaseChecker):
"variables will be deleted outside of the "
"comprehension.",
),
+ "C1601": (
+ "Consider using Python 3 style super() without arguments",
+ "old-style-super",
+ "Emitted when calling the super builtin with the current class "
+ "and instance. On Python 3 these arguments are the default.",
+ ),
}
_bad_builtins = frozenset( | 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2014-2019 Claudiu Popa <[email protected]>
# Copyright (c) 2014-2015 Brett Cannon <[email protected]>
# Copyright (c) 2015 Simu Toni <[email protected]>
# Copyright (c) 2015 Pavel Roskin <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2015 Cosmin Poieana <[email protected]>
# Copyright (c) 2015 Viorel Stirbu <[email protected]>
# Copyright (c) 2016, 2018 Jakub Wilk <[email protected]>
# Copyright (c) 2016-2017 Roy Williams <[email protected]>
# Copyright (c) 2016 Roy Williams <[email protected]>
# Copyright (c) 2016 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016 Erik <[email protected]>
# Copyright (c) 2017-2018 Ville Skyttä <[email protected]>
# Copyright (c) 2017 Daniel Miller <[email protected]>
# Copyright (c) 2017 hippo91 <[email protected]>
# Copyright (c) 2017 ahirnish <[email protected]>
# Copyright (c) 2018-2020 Anthony Sottile <[email protected]>
# Copyright (c) 2018 sbagan <[email protected]>
# Copyright (c) 2018 Lucas Cimon <[email protected]>
# Copyright (c) 2018 Aivar Annamaa <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 Ashley Whetter <[email protected]>
# Copyright (c) 2018 gaurikholkar <[email protected]>
# Copyright (c) 2019 Nick Drozd <[email protected]>
# Copyright (c) 2019 Hugues Bruant <[email protected]>
# Copyright (c) 2019 Gabriel R Sezefredo <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2019 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 bluesheeptoken <[email protected]>
# Copyright (c) 2020 Athos Ribeiro <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Check Python 2 code for Python 2/3 source-compatible issues."""
import itertools
import re
import tokenize
from collections import namedtuple
import astroid
from astroid import bases
from pylint import checkers, interfaces
from pylint.checkers import utils
from pylint.checkers.utils import find_try_except_wrapper_node, node_ignores_exception
from pylint.constants import WarningScope
from pylint.interfaces import INFERENCE, INFERENCE_FAILURE
_ZERO = re.compile("^0+$")
def _is_old_octal(literal):
if _ZERO.match(literal):
return False
if re.match(r"0\d+", literal):
try:
int(literal, 8)
except ValueError:
return False
return True
return None
def _inferred_value_is_dict(value):
if isinstance(value, astroid.Dict):
return True
return isinstance(value, astroid.Instance) and "dict" in value.basenames
def _infer_if_relevant_attr(node, whitelist):
return node.expr.infer() if node.attrname in whitelist else []
def _is_builtin(node):
return getattr(node, "name", None) in ("__builtin__", "builtins")
_ACCEPTS_ITERATOR = {
"iter",
"list",
"tuple",
"sorted",
"set",
"sum",
"any",
"all",
"enumerate",
"dict",
"filter",
"reversed",
"max",
"min",
"frozenset",
"OrderedDict",
}
ATTRIBUTES_ACCEPTS_ITERATOR = {"join", "from_iterable"}
_BUILTIN_METHOD_ACCEPTS_ITERATOR = {
"builtins.list.extend",
"builtins.dict.update",
"builtins.set.update",
}
DICT_METHODS = {"items", "keys", "values"}
def _in_iterating_context(node):
"""Check if the node is being used as an iterator.
Definition is taken from lib2to3.fixer_util.in_special_context().
"""
parent = node.parent
# Since a call can't be the loop variant we only need to know if the node's
# parent is a 'for' loop to know it's being used as the iterator for the
# loop.
if isinstance(parent, astroid.For):
return True
# Need to make sure the use of the node is in the iterator part of the
# comprehension.
if isinstance(parent, astroid.Comprehension):
if parent.iter == node:
return True
# Various built-ins can take in an iterable or list and lead to the same
# value.
elif isinstance(parent, astroid.Call):
if isinstance(parent.func, astroid.Name):
if parent.func.name in _ACCEPTS_ITERATOR:
return True
elif isinstance(parent.func, astroid.Attribute):
if parent.func.attrname in ATTRIBUTES_ACCEPTS_ITERATOR:
return True
inferred = utils.safe_infer(parent.func)
if inferred:
if inferred.qname() in _BUILTIN_METHOD_ACCEPTS_ITERATOR:
return True
root = inferred.root()
if root and root.name == "itertools":
return True
# If the call is in an unpacking, there's no need to warn,
# since it can be considered iterating.
elif isinstance(parent, astroid.Assign) and isinstance(
parent.targets[0], (astroid.List, astroid.Tuple)
):
if len(parent.targets[0].elts) > 1:
return True
# If the call is in a containment check, we consider that to
# be an iterating context
elif (
isinstance(parent, astroid.Compare)
and len(parent.ops) == 1
and parent.ops[0][0] in ["in", "not in"]
):
return True
# Also if it's an `yield from`, that's fair
elif isinstance(parent, astroid.YieldFrom):
return True
if isinstance(parent, astroid.Starred):
return True
return False
def _is_conditional_import(node):
"""Checks if an import node is in the context of a conditional.
"""
parent = node.parent
return isinstance(
parent, (astroid.TryExcept, astroid.ExceptHandler, astroid.If, astroid.IfExp)
)
Branch = namedtuple("Branch", ["node", "is_py2_only"])
class Python3Checker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
enabled = False
name = "python3"
msgs = {
# Errors for what will syntactically break in Python 3, warnings for
# everything else.
"E1601": (
"print statement used",
"print-statement",
"Used when a print statement is used "
"(`print` is a function in Python 3)",
),
"E1602": (
"Parameter unpacking specified",
"parameter-unpacking",
"Used when parameter unpacking is specified for a function"
"(Python 3 doesn't allow it)",
),
"E1603": (
"Implicit unpacking of exceptions is not supported in Python 3",
"unpacking-in-except",
"Python3 will not allow implicit unpacking of "
"exceptions in except clauses. "
"See http://www.python.org/dev/peps/pep-3110/",
{"old_names": [("W0712", "old-unpacking-in-except")]},
),
"E1604": (
"Use raise ErrorClass(args) instead of raise ErrorClass, args.",
"old-raise-syntax",
"Used when the alternate raise syntax "
"'raise foo, bar' is used "
"instead of 'raise foo(bar)'.",
{"old_names": [("W0121", "old-old-raise-syntax")]},
),
"E1605": (
"Use of the `` operator",
"backtick",
'Used when the deprecated "``" (backtick) operator is used '
"instead of the str() function.",
{"scope": WarningScope.NODE, "old_names": [("W0333", "old-backtick")]},
),
"E1609": (
"Import * only allowed at module level",
"import-star-module-level",
"Used when the import star syntax is used somewhere "
"else than the module level.",
{"maxversion": (3, 0)},
),
"W1601": (
"apply built-in referenced",
"apply-builtin",
"Used when the apply built-in function is referenced "
"(missing from Python 3)",
),
"W1602": (
"basestring built-in referenced",
"basestring-builtin",
"Used when the basestring built-in function is referenced "
"(missing from Python 3)",
),
"W1603": (
"buffer built-in referenced",
"buffer-builtin",
"Used when the buffer built-in function is referenced "
"(missing from Python 3)",
),
"W1604": (
"cmp built-in referenced",
"cmp-builtin",
"Used when the cmp built-in function is referenced "
"(missing from Python 3)",
),
"W1605": (
"coerce built-in referenced",
"coerce-builtin",
"Used when the coerce built-in function is referenced "
"(missing from Python 3)",
),
"W1606": (
"execfile built-in referenced",
"execfile-builtin",
"Used when the execfile built-in function is referenced "
"(missing from Python 3)",
),
"W1607": (
"file built-in referenced",
"file-builtin",
"Used when the file built-in function is referenced "
"(missing from Python 3)",
),
"W1608": (
"long built-in referenced",
"long-builtin",
"Used when the long built-in function is referenced "
"(missing from Python 3)",
),
"W1609": (
"raw_input built-in referenced",
"raw_input-builtin",
"Used when the raw_input built-in function is referenced "
"(missing from Python 3)",
),
"W1610": (
"reduce built-in referenced",
"reduce-builtin",
"Used when the reduce built-in function is referenced "
"(missing from Python 3)",
),
"W1611": (
"StandardError built-in referenced",
"standarderror-builtin",
"Used when the StandardError built-in function is referenced "
"(missing from Python 3)",
),
"W1612": (
"unicode built-in referenced",
"unicode-builtin",
"Used when the unicode built-in function is referenced "
"(missing from Python 3)",
),
"W1613": (
"xrange built-in referenced",
"xrange-builtin",
"Used when the xrange built-in function is referenced "
"(missing from Python 3)",
),
"W1614": (
"__coerce__ method defined",
"coerce-method",
"Used when a __coerce__ method is defined "
"(method is not used by Python 3)",
),
"W1615": (
"__delslice__ method defined",
"delslice-method",
"Used when a __delslice__ method is defined "
"(method is not used by Python 3)",
),
"W1616": (
"__getslice__ method defined",
"getslice-method",
"Used when a __getslice__ method is defined "
"(method is not used by Python 3)",
),
"W1617": (
"__setslice__ method defined",
"setslice-method",
"Used when a __setslice__ method is defined "
"(method is not used by Python 3)",
),
"W1618": (
"import missing `from __future__ import absolute_import`",
"no-absolute-import",
"Used when an import is not accompanied by "
"``from __future__ import absolute_import`` "
"(default behaviour in Python 3)",
),
"W1619": (
"division w/o __future__ statement",
"old-division",
"Used for non-floor division w/o a float literal or "
"``from __future__ import division`` "
"(Python 3 returns a float for int division unconditionally)",
),
"W1620": (
"Calling a dict.iter*() method",
"dict-iter-method",
"Used for calls to dict.iterkeys(), itervalues() or iteritems() "
"(Python 3 lacks these methods)",
),
"W1621": (
"Calling a dict.view*() method",
"dict-view-method",
"Used for calls to dict.viewkeys(), viewvalues() or viewitems() "
"(Python 3 lacks these methods)",
),
"W1622": (
"Called a next() method on an object",
"next-method-called",
"Used when an object's next() method is called "
"(Python 3 uses the next() built-in function)",
),
"W1623": (
"Assigning to a class's __metaclass__ attribute",
"metaclass-assignment",
"Used when a metaclass is specified by assigning to __metaclass__ "
"(Python 3 specifies the metaclass as a class statement argument)",
),
"W1624": (
"Indexing exceptions will not work on Python 3",
"indexing-exception",
"Indexing exceptions will not work on Python 3. Use "
"`exception.args[index]` instead.",
{"old_names": [("W0713", "old-indexing-exception")]},
),
"W1625": (
"Raising a string exception",
"raising-string",
"Used when a string exception is raised. This will not "
"work on Python 3.",
{"old_names": [("W0701", "old-raising-string")]},
),
"W1626": (
"reload built-in referenced",
"reload-builtin",
"Used when the reload built-in function is referenced "
"(missing from Python 3). You can use instead imp.reload "
"or importlib.reload.",
),
"W1627": (
"__oct__ method defined",
"oct-method",
"Used when an __oct__ method is defined "
"(method is not used by Python 3)",
),
"W1628": (
"__hex__ method defined",
"hex-method",
"Used when a __hex__ method is defined (method is not used by Python 3)",
),
"W1629": (
"__nonzero__ method defined",
"nonzero-method",
"Used when a __nonzero__ method is defined "
"(method is not used by Python 3)",
),
"W1630": (
"__cmp__ method defined",
"cmp-method",
"Used when a __cmp__ method is defined (method is not used by Python 3)",
),
# 'W1631': replaced by W1636
"W1632": (
"input built-in referenced",
"input-builtin",
"Used when the input built-in is referenced "
"(backwards-incompatible semantics in Python 3)",
),
"W1633": (
"round built-in referenced",
"round-builtin",
"Used when the round built-in is referenced "
"(backwards-incompatible semantics in Python 3)",
),
"W1634": (
"intern built-in referenced",
"intern-builtin",
"Used when the intern built-in is referenced "
"(Moved to sys.intern in Python 3)",
),
"W1635": (
"unichr built-in referenced",
"unichr-builtin",
"Used when the unichr built-in is referenced (Use chr in Python 3)",
),
"W1636": (
"map built-in referenced when not iterating",
"map-builtin-not-iterating",
"Used when the map built-in is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
{"old_names": [("W1631", "implicit-map-evaluation")]},
),
"W1637": (
"zip built-in referenced when not iterating",
"zip-builtin-not-iterating",
"Used when the zip built-in is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
),
"W1638": (
"range built-in referenced when not iterating",
"range-builtin-not-iterating",
"Used when the range built-in is referenced in a non-iterating "
"context (returns a range in Python 3)",
),
"W1639": (
"filter built-in referenced when not iterating",
"filter-builtin-not-iterating",
"Used when the filter built-in is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
),
"W1640": (
"Using the cmp argument for list.sort / sorted",
"using-cmp-argument",
"Using the cmp argument for list.sort or the sorted "
"builtin should be avoided, since it was removed in "
"Python 3. Using either `key` or `functools.cmp_to_key` "
"should be preferred.",
),
"W1641": (
"Implementing __eq__ without also implementing __hash__",
"eq-without-hash",
"Used when a class implements __eq__ but not __hash__. In Python 2, objects "
"get object.__hash__ as the default implementation, in Python 3 objects get "
"None as their default __hash__ implementation if they also implement __eq__.",
),
"W1642": (
"__div__ method defined",
"div-method",
"Used when a __div__ method is defined. Using `__truediv__` and setting"
"__div__ = __truediv__ should be preferred."
"(method is not used by Python 3)",
),
"W1643": (
"__idiv__ method defined",
"idiv-method",
"Used when an __idiv__ method is defined. Using `__itruediv__` and setting"
"__idiv__ = __itruediv__ should be preferred."
"(method is not used by Python 3)",
),
"W1644": (
"__rdiv__ method defined",
"rdiv-method",
"Used when a __rdiv__ method is defined. Using `__rtruediv__` and setting"
"__rdiv__ = __rtruediv__ should be preferred."
"(method is not used by Python 3)",
),
"W1645": (
"Exception.message removed in Python 3",
"exception-message-attribute",
"Used when the message attribute is accessed on an Exception. Use "
"str(exception) instead.",
),
"W1646": (
"non-text encoding used in str.decode",
"invalid-str-codec",
"Used when using str.encode or str.decode with a non-text encoding. Use "
"codecs module to handle arbitrary codecs.",
),
"W1647": (
"sys.maxint removed in Python 3",
"sys-max-int",
"Used when accessing sys.maxint. Use sys.maxsize instead.",
),
"W1648": (
"Module moved in Python 3",
"bad-python3-import",
"Used when importing a module that no longer exists in Python 3.",
),
"W1649": (
"Accessing a deprecated function on the string module",
"deprecated-string-function",
"Used when accessing a string function that has been deprecated in Python 3.",
),
"W1650": (
"Using str.translate with deprecated deletechars parameters",
"deprecated-str-translate-call",
"Used when using the deprecated deletechars parameters from str.translate. Use "
"re.sub to remove the desired characters ",
),
"W1651": (
"Accessing a deprecated function on the itertools module",
"deprecated-itertools-function",
"Used when accessing a function on itertools that has been removed in Python 3.",
),
"W1652": (
"Accessing a deprecated fields on the types module",
"deprecated-types-field",
"Used when accessing a field on types that has been removed in Python 3.",
),
"W1653": (
"next method defined",
"next-method-defined",
"Used when a next method is defined that would be an iterator in Python 2 but "
"is treated as a normal function in Python 3.",
),
"W1654": (
"dict.items referenced when not iterating",
"dict-items-not-iterating",
"Used when dict.items is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
),
"W1655": (
"dict.keys referenced when not iterating",
"dict-keys-not-iterating",
"Used when dict.keys is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
),
"W1656": (
"dict.values referenced when not iterating",
"dict-values-not-iterating",
"Used when dict.values is referenced in a non-iterating "
"context (returns an iterator in Python 3)",
),
"W1657": (
"Accessing a removed attribute on the operator module",
"deprecated-operator-function",
"Used when accessing a field on operator module that has been "
"removed in Python 3.",
),
"W1658": (
"Accessing a removed attribute on the urllib module",
"deprecated-urllib-function",
"Used when accessing a field on urllib module that has been "
"removed or moved in Python 3.",
),
"W1659": (
"Accessing a removed xreadlines attribute",
"xreadlines-attribute",
"Used when accessing the xreadlines() function on a file stream, "
"removed in Python 3.",
),
"W1660": (
"Accessing a removed attribute on the sys module",
"deprecated-sys-function",
"Used when accessing a field on sys module that has been "
"removed in Python 3.",
),
"W1661": (
"Using an exception object that was bound by an except handler",
"exception-escape",
"Emitted when using an exception, that was bound in an except "
"handler, outside of the except handler. On Python 3 these "
"exceptions will be deleted once they get out "
"of the except handler.",
),
"W1662": (
"Using a variable that was bound inside a comprehension",
"comprehension-escape",
"Emitted when using a variable, that was bound in a comprehension "
"handler, outside of the comprehension itself. On Python 3 these "
"variables will be deleted outside of the "
"comprehension.",
),
}
_bad_builtins = frozenset(
[
"apply",
"basestring",
"buffer",
"cmp",
"coerce",
"execfile",
"file",
"input", # Not missing, but incompatible semantics
"intern",
"long",
"raw_input",
"reduce",
"round", # Not missing, but incompatible semantics
"StandardError",
"unichr",
"unicode",
"xrange",
"reload",
]
)
_unused_magic_methods = frozenset(
[
"__coerce__",
"__delslice__",
"__getslice__",
"__setslice__",
"__oct__",
"__hex__",
"__nonzero__",
"__cmp__",
"__div__",
"__idiv__",
"__rdiv__",
]
)
_invalid_encodings = frozenset(
[
"base64_codec",
"base64",
"base_64",
"bz2_codec",
"bz2",
"hex_codec",
"hex",
"quopri_codec",
"quopri",
"quotedprintable",
"quoted_printable",
"uu_codec",
"uu",
"zlib_codec",
"zlib",
"zip",
"rot13",
"rot_13",
]
)
_bad_python3_module_map = {
"sys-max-int": {"sys": frozenset(["maxint"])},
"deprecated-itertools-function": {
"itertools": frozenset(
["izip", "ifilter", "imap", "izip_longest", "ifilterfalse"]
)
},
"deprecated-types-field": {
"types": frozenset(
[
"EllipsisType",
"XRangeType",
"ComplexType",
"StringType",
"TypeType",
"LongType",
"UnicodeType",
"ClassType",
"BufferType",
"StringTypes",
"NotImplementedType",
"NoneType",
"InstanceType",
"FloatType",
"SliceType",
"UnboundMethodType",
"ObjectType",
"IntType",
"TupleType",
"ListType",
"DictType",
"FileType",
"DictionaryType",
"BooleanType",
"DictProxyType",
]
)
},
"bad-python3-import": frozenset(
[
"anydbm",
"BaseHTTPServer",
"__builtin__",
"CGIHTTPServer",
"ConfigParser",
"copy_reg",
"cPickle",
"cStringIO",
"Cookie",
"cookielib",
"dbhash",
"dumbdbm",
"dumbdb",
"Dialog",
"DocXMLRPCServer",
"FileDialog",
"FixTk",
"gdbm",
"htmlentitydefs",
"HTMLParser",
"httplib",
"markupbase",
"Queue",
"repr",
"robotparser",
"ScrolledText",
"SimpleDialog",
"SimpleHTTPServer",
"SimpleXMLRPCServer",
"StringIO",
"dummy_thread",
"SocketServer",
"test.test_support",
"Tkinter",
"Tix",
"Tkconstants",
"tkColorChooser",
"tkCommonDialog",
"Tkdnd",
"tkFileDialog",
"tkFont",
"tkMessageBox",
"tkSimpleDialog",
"UserList",
"UserString",
"whichdb",
"_winreg",
"xmlrpclib",
"audiodev",
"Bastion",
"bsddb185",
"bsddb3",
"Canvas",
"cfmfile",
"cl",
"commands",
"compiler",
"dircache",
"dl",
"exception",
"fpformat",
"htmllib",
"ihooks",
"imageop",
"imputil",
"linuxaudiodev",
"md5",
"mhlib",
"mimetools",
"MimeWriter",
"mimify",
"multifile",
"mutex",
"new",
"popen2",
"posixfile",
"pure",
"rexec",
"rfc822",
"sets",
"sha",
"sgmllib",
"sre",
"stringold",
"sunaudio",
"sv",
"test.testall",
"thread",
"timing",
"toaiff",
"user",
"urllib2",
"urlparse",
]
),
"deprecated-string-function": {
"string": frozenset(
[
"maketrans",
"atof",
"atoi",
"atol",
"capitalize",
"expandtabs",
"find",
"rfind",
"index",
"rindex",
"count",
"lower",
"letters",
"split",
"rsplit",
"splitfields",
"join",
"joinfields",
"lstrip",
"rstrip",
"strip",
"swapcase",
"translate",
"upper",
"ljust",
"rjust",
"center",
"zfill",
"replace",
"lowercase",
"letters",
"uppercase",
"atol_error",
"atof_error",
"atoi_error",
"index_error",
]
)
},
"deprecated-operator-function": {"operator": frozenset({"div"})},
"deprecated-urllib-function": {
"urllib": frozenset(
{
"addbase",
"addclosehook",
"addinfo",
"addinfourl",
"always_safe",
"basejoin",
"ftpcache",
"ftperrors",
"ftpwrapper",
"getproxies",
"getproxies_environment",
"getproxies_macosx_sysconf",
"main",
"noheaders",
"pathname2url",
"proxy_bypass",
"proxy_bypass_environment",
"proxy_bypass_macosx_sysconf",
"quote",
"quote_plus",
"reporthook",
"splitattr",
"splithost",
"splitnport",
"splitpasswd",
"splitport",
"splitquery",
"splittag",
"splittype",
"splituser",
"splitvalue",
"unquote",
"unquote_plus",
"unwrap",
"url2pathname",
"urlcleanup",
"urlencode",
"urlopen",
"urlretrieve",
}
)
},
"deprecated-sys-function": {"sys": frozenset({"exc_clear"})},
}
_deprecated_attrs = frozenset(
itertools.chain.from_iterable(
attr
for module_map in _bad_python3_module_map.values()
if isinstance(module_map, dict)
for attr in module_map.values()
)
)
_relevant_call_attrs = (
DICT_METHODS | _deprecated_attrs | {"encode", "decode", "translate"}
)
_python_2_tests = frozenset(
[
astroid.extract_node(x).repr_tree()
for x in [
"sys.version_info[0] == 2",
"sys.version_info[0] < 3",
"sys.version_info == (2, 7)",
"sys.version_info <= (2, 7)",
"sys.version_info < (3, 0)",
]
]
)
def __init__(self, *args, **kwargs):
self._future_division = False
self._future_absolute_import = False
self._modules_warned_about = set()
self._branch_stack = []
super().__init__(*args, **kwargs)
# pylint: disable=keyword-arg-before-vararg, arguments-differ
def add_message(self, msg_id, always_warn=False, *args, **kwargs):
if always_warn or not (
self._branch_stack and self._branch_stack[-1].is_py2_only
):
super().add_message(msg_id, *args, **kwargs)
def _is_py2_test(self, node):
if isinstance(node.test, astroid.Attribute) and isinstance(
node.test.expr, astroid.Name
):
if node.test.expr.name == "six" and node.test.attrname == "PY2":
return True
elif (
isinstance(node.test, astroid.Compare)
and node.test.repr_tree() in self._python_2_tests
):
return True
return False
def visit_if(self, node):
self._branch_stack.append(Branch(node, self._is_py2_test(node)))
def leave_if(self, node):
assert self._branch_stack.pop().node == node
def visit_ifexp(self, node):
self._branch_stack.append(Branch(node, self._is_py2_test(node)))
def leave_ifexp(self, node):
assert self._branch_stack.pop().node == node
def visit_module(self, node): # pylint: disable=unused-argument
"""Clear checker state after previous module."""
self._future_division = False
self._future_absolute_import = False
def visit_functiondef(self, node):
if node.is_method():
if node.name in self._unused_magic_methods:
method_name = node.name
if node.name.startswith("__"):
method_name = node.name[2:-2]
self.add_message(method_name + "-method", node=node)
elif node.name == "next":
# If there is a method named `next` declared, if it is invokable
# with zero arguments then it implements the Iterator protocol.
# This means if the method is an instance method or a
# classmethod 1 argument should cause a failure, if it is a
# staticmethod 0 arguments should cause a failure.
failing_arg_count = 1
if utils.decorated_with(node, [bases.BUILTINS + ".staticmethod"]):
failing_arg_count = 0
if len(node.args.args) == failing_arg_count:
self.add_message("next-method-defined", node=node)
@utils.check_messages("parameter-unpacking")
def visit_arguments(self, node):
for arg in node.args:
if isinstance(arg, astroid.Tuple):
self.add_message("parameter-unpacking", node=arg)
@utils.check_messages("comprehension-escape")
def visit_listcomp(self, node):
names = {
generator.target.name
for generator in node.generators
if isinstance(generator.target, astroid.AssignName)
}
scope = node.parent.scope()
scope_names = scope.nodes_of_class(astroid.Name, skip_klass=astroid.FunctionDef)
has_redefined_assign_name = any(
assign_name
for assign_name in scope.nodes_of_class(
astroid.AssignName, skip_klass=astroid.FunctionDef
)
if assign_name.name in names and assign_name.lineno > node.lineno
)
if has_redefined_assign_name:
return
emitted_for_names = set()
scope_names = list(scope_names)
for scope_name in scope_names:
if (
scope_name.name not in names
or scope_name.lineno <= node.lineno
or scope_name.name in emitted_for_names
or scope_name.scope() == node
):
continue
emitted_for_names.add(scope_name.name)
self.add_message("comprehension-escape", node=scope_name)
def visit_name(self, node):
"""Detect when a "bad" built-in is referenced."""
found_node, _ = node.lookup(node.name)
if not _is_builtin(found_node):
return
if node.name not in self._bad_builtins:
return
if node_ignores_exception(node) or isinstance(
find_try_except_wrapper_node(node), astroid.ExceptHandler
):
return
message = node.name.lower() + "-builtin"
self.add_message(message, node=node)
@utils.check_messages("print-statement")
def visit_print(self, node):
self.add_message("print-statement", node=node, always_warn=True)
def _warn_if_deprecated(self, node, module, attributes, report_on_modules=True):
for message, module_map in self._bad_python3_module_map.items():
if module in module_map and module not in self._modules_warned_about:
if isinstance(module_map, frozenset):
if report_on_modules:
self._modules_warned_about.add(module)
self.add_message(message, node=node)
elif attributes and module_map[module].intersection(attributes):
self.add_message(message, node=node)
def visit_importfrom(self, node):
if node.modname == "__future__":
for name, _ in node.names:
if name == "division":
self._future_division = True
elif name == "absolute_import":
self._future_absolute_import = True
else:
if not self._future_absolute_import:
if self.linter.is_message_enabled("no-absolute-import"):
self.add_message("no-absolute-import", node=node)
self._future_absolute_import = True
if not _is_conditional_import(node) and not node.level:
self._warn_if_deprecated(node, node.modname, {x[0] for x in node.names})
if node.names[0][0] == "*":
if self.linter.is_message_enabled("import-star-module-level"):
if not isinstance(node.scope(), astroid.Module):
self.add_message("import-star-module-level", node=node)
def visit_import(self, node):
if not self._future_absolute_import:
if self.linter.is_message_enabled("no-absolute-import"):
self.add_message("no-absolute-import", node=node)
self._future_absolute_import = True
if not _is_conditional_import(node):
for name, _ in node.names:
self._warn_if_deprecated(node, name, None)
@utils.check_messages("metaclass-assignment")
def visit_classdef(self, node):
if "__metaclass__" in node.locals:
self.add_message("metaclass-assignment", node=node)
locals_and_methods = set(node.locals).union(x.name for x in node.mymethods())
if "__eq__" in locals_and_methods and "__hash__" not in locals_and_methods:
self.add_message("eq-without-hash", node=node)
@utils.check_messages("old-division")
def visit_binop(self, node):
if not self._future_division and node.op == "/":
for arg in (node.left, node.right):
inferred = utils.safe_infer(arg)
# If we can infer the object and that object is not an int, bail out.
if inferred and not (
(
isinstance(inferred, astroid.Const)
and isinstance(inferred.value, int)
)
or (
isinstance(inferred, astroid.Instance)
and inferred.name == "int"
)
):
break
else:
self.add_message("old-division", node=node)
def _check_cmp_argument(self, node):
# Check that the `cmp` argument is used
kwargs = []
if isinstance(node.func, astroid.Attribute) and node.func.attrname == "sort":
inferred = utils.safe_infer(node.func.expr)
if not inferred:
return
builtins_list = "{}.list".format(bases.BUILTINS)
if isinstance(inferred, astroid.List) or inferred.qname() == builtins_list:
kwargs = node.keywords
elif isinstance(node.func, astroid.Name) and node.func.name == "sorted":
inferred = utils.safe_infer(node.func)
if not inferred:
return
builtins_sorted = "{}.sorted".format(bases.BUILTINS)
if inferred.qname() == builtins_sorted:
kwargs = node.keywords
for kwarg in kwargs or []:
if kwarg.arg == "cmp":
self.add_message("using-cmp-argument", node=node)
return
@staticmethod
def _is_constant_string_or_name(node):
if isinstance(node, astroid.Const):
return isinstance(node.value, str)
return isinstance(node, astroid.Name)
@staticmethod
def _is_none(node):
return isinstance(node, astroid.Const) and node.value is None
@staticmethod
def _has_only_n_positional_args(node, number_of_args):
return len(node.args) == number_of_args and all(node.args) and not node.keywords
@staticmethod
def _could_be_string(inferred_types):
confidence = INFERENCE if inferred_types else INFERENCE_FAILURE
for inferred_type in inferred_types:
if inferred_type is astroid.Uninferable:
confidence = INFERENCE_FAILURE
elif not (
isinstance(inferred_type, astroid.Const)
and isinstance(inferred_type.value, str)
):
return None
return confidence
def visit_call(self, node):
self._check_cmp_argument(node)
if isinstance(node.func, astroid.Attribute):
inferred_types = set()
try:
for inferred_receiver in _infer_if_relevant_attr(
node.func, self._relevant_call_attrs
):
if inferred_receiver is astroid.Uninferable:
continue
inferred_types.add(inferred_receiver)
if isinstance(inferred_receiver, astroid.Module):
self._warn_if_deprecated(
node,
inferred_receiver.name,
{node.func.attrname},
report_on_modules=False,
)
if (
_inferred_value_is_dict(inferred_receiver)
and node.func.attrname in DICT_METHODS
):
if not _in_iterating_context(node):
checker = "dict-{}-not-iterating".format(node.func.attrname)
self.add_message(checker, node=node)
except astroid.InferenceError:
pass
if node.args:
is_str_confidence = self._could_be_string(inferred_types)
if is_str_confidence:
if (
node.func.attrname in ("encode", "decode")
and len(node.args) >= 1
and node.args[0]
):
first_arg = node.args[0]
self._validate_encoding(first_arg, node)
if (
node.func.attrname == "translate"
and self._has_only_n_positional_args(node, 2)
and self._is_none(node.args[0])
and self._is_constant_string_or_name(node.args[1])
):
# The above statement looking for calls of the form:
#
# foo.translate(None, 'abc123')
#
# or
#
# foo.translate(None, some_variable)
#
# This check is somewhat broad and _may_ have some false positives, but
# after checking several large codebases it did not have any false
# positives while finding several real issues. This call pattern seems
# rare enough that the trade off is worth it.
self.add_message(
"deprecated-str-translate-call",
node=node,
confidence=is_str_confidence,
)
return
if node.keywords:
return
if node.func.attrname == "next":
self.add_message("next-method-called", node=node)
elif node.func.attrname in ("iterkeys", "itervalues", "iteritems"):
self.add_message("dict-iter-method", node=node)
elif node.func.attrname in ("viewkeys", "viewvalues", "viewitems"):
self.add_message("dict-view-method", node=node)
elif isinstance(node.func, astroid.Name):
found_node = node.func.lookup(node.func.name)[0]
if _is_builtin(found_node):
if node.func.name in ("filter", "map", "range", "zip"):
if not _in_iterating_context(node):
checker = "{}-builtin-not-iterating".format(node.func.name)
self.add_message(checker, node=node)
if node.func.name == "open" and node.keywords:
kwargs = node.keywords
for kwarg in kwargs or []:
if kwarg.arg == "encoding":
self._validate_encoding(kwarg.value, node)
break
def _validate_encoding(self, encoding, node):
if isinstance(encoding, astroid.Const):
value = encoding.value
if value in self._invalid_encodings:
self.add_message("invalid-str-codec", node=node)
@utils.check_messages("indexing-exception")
def visit_subscript(self, node):
""" Look for indexing exceptions. """
try:
for inferred in node.value.infer():
if not isinstance(inferred, astroid.Instance):
continue
if utils.inherit_from_std_ex(inferred):
self.add_message("indexing-exception", node=node)
except astroid.InferenceError:
return
def visit_assignattr(self, node):
if isinstance(node.assign_type(), astroid.AugAssign):
self.visit_attribute(node)
def visit_delattr(self, node):
self.visit_attribute(node)
@utils.check_messages("exception-message-attribute", "xreadlines-attribute")
def visit_attribute(self, node):
"""Look for removed attributes"""
if node.attrname == "xreadlines":
self.add_message("xreadlines-attribute", node=node)
return
exception_message = "message"
try:
for inferred in _infer_if_relevant_attr(
node, self._deprecated_attrs | {exception_message}
):
if isinstance(inferred, astroid.Instance) and utils.inherit_from_std_ex(
inferred
):
if node.attrname == exception_message:
# Exceptions with .message clearly defined are an exception
if exception_message in inferred.instance_attrs:
continue
self.add_message("exception-message-attribute", node=node)
if isinstance(inferred, astroid.Module):
self._warn_if_deprecated(
node, inferred.name, {node.attrname}, report_on_modules=False
)
except astroid.InferenceError:
return
@utils.check_messages("unpacking-in-except", "comprehension-escape")
def visit_excepthandler(self, node):
"""Visit an except handler block and check for exception unpacking."""
def _is_used_in_except_block(node, block):
current = node
while current and current is not block:
current = current.parent
return current is not None
if isinstance(node.name, (astroid.Tuple, astroid.List)):
self.add_message("unpacking-in-except", node=node)
return
if not node.name:
return
# Find any names
scope = node.parent.scope()
scope_names = scope.nodes_of_class(astroid.Name, skip_klass=astroid.FunctionDef)
scope_names = list(scope_names)
potential_leaked_names = [
scope_name
for scope_name in scope_names
if scope_name.name == node.name.name
and scope_name.lineno > node.lineno
and not _is_used_in_except_block(scope_name, node)
]
reassignments_for_same_name = {
assign_name.lineno
for assign_name in scope.nodes_of_class(
astroid.AssignName, skip_klass=astroid.FunctionDef
)
if assign_name.name == node.name.name
}
for leaked_name in potential_leaked_names:
if any(
node.lineno < elem < leaked_name.lineno
for elem in reassignments_for_same_name
):
continue
self.add_message("exception-escape", node=leaked_name)
@utils.check_messages("backtick")
def visit_repr(self, node):
self.add_message("backtick", node=node)
@utils.check_messages("raising-string", "old-raise-syntax")
def visit_raise(self, node):
"""Visit a raise statement and check for raising
strings or old-raise-syntax.
"""
# Ignore empty raise.
if node.exc is None:
return
expr = node.exc
if self._check_raise_value(node, expr):
return
try:
value = next(astroid.unpack_infer(expr))
except astroid.InferenceError:
return
self._check_raise_value(node, value)
def _check_raise_value(self, node, expr):
if isinstance(expr, astroid.Const):
value = expr.value
if isinstance(value, str):
self.add_message("raising-string", node=node)
return True
return None
class Python3TokenChecker(checkers.BaseTokenChecker):
__implements__ = interfaces.ITokenChecker
name = "python3"
enabled = False
msgs = {
"E1606": (
"Use of long suffix",
"long-suffix",
'Used when "l" or "L" is used to mark a long integer. '
"This will not work in Python 3, since `int` and `long` "
"types have merged.",
{"maxversion": (3, 0)},
),
"E1607": (
"Use of the <> operator",
"old-ne-operator",
'Used when the deprecated "<>" operator is used instead '
'of "!=". This is removed in Python 3.',
{"maxversion": (3, 0), "old_names": [("W0331", "old-old-ne-operator")]},
),
"E1608": (
"Use of old octal literal",
"old-octal-literal",
"Used when encountering the old octal syntax, "
"removed in Python 3. To use the new syntax, "
"prepend 0o on the number.",
{"maxversion": (3, 0)},
),
"E1610": (
"Non-ascii bytes literals not supported in 3.x",
"non-ascii-bytes-literal",
"Used when non-ascii bytes literals are found in a program. "
"They are no longer supported in Python 3.",
{"maxversion": (3, 0)},
),
}
def process_tokens(self, tokens):
for idx, (tok_type, token, start, _, _) in enumerate(tokens):
if tok_type == tokenize.NUMBER:
if token.lower().endswith("l"):
# This has a different semantic than lowercase-l-suffix.
self.add_message("long-suffix", line=start[0])
elif _is_old_octal(token):
self.add_message("old-octal-literal", line=start[0])
if tokens[idx][1] == "<>":
self.add_message("old-ne-operator", line=tokens[idx][2][0])
if tok_type == tokenize.STRING and token.startswith("b"):
if any(elem for elem in token if ord(elem) > 127):
self.add_message("non-ascii-bytes-literal", line=start[0])
def register(linter):
linter.register_checker(Python3Checker(linter))
linter.register_checker(Python3TokenChecker(linter))
| 1 | 11,964 | The checks in this file are disabled by default since they are meant for Python 3 porting, and this one does not have to do with Python 3 porting at all. | PyCQA-pylint | py |
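The patch above adds the old-style-super (C1601) message. A minimal, self-contained illustration of the pattern it is meant to flag, next to the Python 3 form it recommends:

    class Base:
        def greet(self):
            return "hello"

    class Child(Base):
        def greet(self):
            # Python 2 style: names the current class and instance explicitly.
            # This is the call the new old-style-super / C1601 check targets.
            old = super(Child, self).greet()
            # Python 3 style: the same arguments are implied.
            new = super().greet()
            return old, new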
@@ -0,0 +1,18 @@
+// Copyright (c) .NET Foundation. All rights reserved.
+// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
+
+namespace Microsoft.AspNetCore.Server.Kestrel.Core.Features
+{
+ /// <summary>
+ /// Feature to set the minimum data rate at which the response should be sent.
+ /// </summary>
+ public interface IHttpMinResponseDataRateFeature
+ {
+ /// <summary>
+ /// The minimum data rate in bytes/second at which the response should be sent.
+ /// Setting this property to null indicates no minimum data rate should be enforced.
+ /// This limit has no effect on upgraded connections which are always unlimited.
+ /// </summary>
+ MinDataRate MinDataRate { get; set; }
+ }
+} | 1 | 1 | 13,638 | I would change "should be sent" to "must be received". | aspnet-KestrelHttpServer | .cs |
|
@@ -24,6 +24,8 @@ import (
"github.com/openebs/maya/pkg/storagepool"
)
+
+// DeleteStoragePool receives StoragePoolClaim object and deletes it.
func DeleteStoragePool(spcGot *v1alpha1.StoragePoolClaim) error {
// Business logic for deletion of storagepool
glog.Infof("Storagepool delete event received for storagepoolclaim %s", spcGot.Name) | 1 | /*
Copyright 2017 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spc
import (
"fmt"
"github.com/golang/glog"
"github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/openebs/maya/pkg/storagepool"
)
func DeleteStoragePool(spcGot *v1alpha1.StoragePoolClaim) error {
// Business logic for deletion of storagepool
glog.Infof("Storagepool delete event received for storagepoolclaim %s", spcGot.Name)
casTemplateName := spcGot.Annotations[string(v1alpha1.SPDeleteCASTemplateCK)]
// Create an empty CasPool object
pool := &v1alpha1.CasPool{}
// Fill the name in CasPool object
// This object contains pool information for performing storagepool deletion
// The information used here is the storagepoolclaim name
pool.StoragePoolClaim = spcGot.Name
// Fill the cas template name that will be used for deletion
pool.CasDeleteTemplate = casTemplateName
storagepoolOps, err := storagepool.NewCasPoolOperation(pool)
if err != nil {
return fmt.Errorf("NewCasPoolOperation failed error '%s'", err.Error())
}
_, err = storagepoolOps.Delete()
if err != nil {
return fmt.Errorf("Failed to delete cas template based storagepool: error '%s'", err.Error())
}
glog.Infof("Cas template based storagepool deleted successfully: name '%s'", spcGot.Name)
return nil
}
| 1 | 10,130 | Please put it in following way : `// DeleteStoragePool receives StoragePoolClaim delete event and calls the required handlers to delete other pool related resources.` | openebs-maya | go |
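Applying the wording suggested in the review above, the doc comment on the function would read roughly as follows (sketch only; the body is elided):

    // DeleteStoragePool receives a StoragePoolClaim delete event and calls the
    // required handlers to delete other pool related resources.
    func DeleteStoragePool(spcGot *v1alpha1.StoragePoolClaim) error {
    	// ... existing deletion logic from the file above ...
    	return nil
    }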
@@ -58,4 +58,16 @@ public final class RestConst {
public static final String CONSUMER_HEADER = "servicecomb-rest-consumer-header";
public static final String READ_STREAM_PART = "servicecomb-readStreamPart";
+
+ public static final String UPLOAD_DIR = "cse.uploads.directory";
+
+ // limit of one upload file, only available for servlet rest transport
+ public static final String UPLOAD_MAX_FILE_SIZE = "cse.uploads.maxFileSize";
+
+ // limit of upload request body
+ public static final String UPLOAD_MAX_SIZE = "cse.uploads.maxSize";
+
+ // the size threshold after which files will be written to disk
+ // only available for servlet rest transport
+ public static final String UPLOAD_FILE_SIZE_THRESHOLD = "cse.uploads.fileSizeThreshold";
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.common.rest;
public final class RestConst {
private RestConst() {
}
public static final String REST_CLIENT_REQUEST_PATH = "rest-client-request-path";
public static final String SWAGGER_REST_OPERATION = "swaggerRestOperation";
public static final String REST = "rest";
public static final String SCHEME = "cse";
public static final String URI_PREFIX = SCHEME + "://";
// in HttpServletRequest attribute
public static final String PATH_PARAMETERS = "servicecomb-paths";
// in HttpServletRequest attribute
public static final String BODY_PARAMETER = "servicecomb-body";
// in HttpServletRequest attribute
public static final String FORM_PARAMETERS = "servicecomb-forms";
//in invocation response
public static final String INVOCATION_HANDLER_RESPONSE = "servicecomb-invocation-hanlder-response";
//in invocation response
public static final String INVOCATION_HANDLER_PROCESSOR = "servicecomb-invocation-hanlder-processor";
//in invocation response
public static final String INVOCATION_HANDLER_REQUESTCLIENT = "servicecomb-invocation-hanlder-requestclient";
public static final String REST_PRODUCER_INVOCATION = "servicecomb-rest-producer-invocation";
public static final String REST_INVOCATION_CONTEXT = "servicecomb-rest-invocation-context";
public static final String REST_REQUEST = "servicecomb-rest-request";
public static final String CONSUMER_HEADER = "servicecomb-rest-consumer-header";
public static final String READ_STREAM_PART = "servicecomb-readStreamPart";
}
| 1 | 9,813 | It's better to use servicecomb now. | apache-servicecomb-java-chassis | java |
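For context, Java Chassis configuration keys like the ones added above are usually read through Archaius dynamic properties. A rough sketch of consuming the new upload settings (the default values here are placeholders chosen for illustration, and per the review comment a servicecomb.* prefix would be preferred over cse.*):

    import com.netflix.config.DynamicPropertyFactory;

    public final class UploadSettings {
        private UploadSettings() {
        }

        // Placeholder default: fall back to the JVM temp directory if the key is unset.
        public static String uploadDirectory() {
            return DynamicPropertyFactory.getInstance()
                    .getStringProperty(RestConst.UPLOAD_DIR, System.getProperty("java.io.tmpdir"))
                    .get();
        }

        // Placeholder default of -1, commonly used to mean "unlimited".
        public static long maxUploadFileSize() {
            return DynamicPropertyFactory.getInstance()
                    .getLongProperty(RestConst.UPLOAD_MAX_FILE_SIZE, -1L)
                    .get();
        }
    }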
@@ -58,12 +58,13 @@ var PolicyChain = policies.Chain{
type controller struct {
// the policies to use to define readiness - named here to make testing simpler
- policyChain policies.Chain
- certificateLister cmlisters.CertificateLister
- certificateRequestLister cmlisters.CertificateRequestLister
- secretLister corelisters.SecretLister
- client cmclient.Interface
- gatherer *policies.Gatherer
+ policyChain policies.Chain
+ certificateLister cmlisters.CertificateLister
+ certificateRequestLister cmlisters.CertificateRequestLister
+ secretLister corelisters.SecretLister
+ client cmclient.Interface
+ gatherer *policies.Gatherer
+ defaultRenewBeforeExpiryDuration time.Duration
}
func NewController( | 1 | /*
Copyright 2020 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package readiness
import (
"context"
"time"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/informers"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
apiutil "github.com/jetstack/cert-manager/pkg/api/util"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
cmclient "github.com/jetstack/cert-manager/pkg/client/clientset/versioned"
cminformers "github.com/jetstack/cert-manager/pkg/client/informers/externalversions"
cmlisters "github.com/jetstack/cert-manager/pkg/client/listers/certmanager/v1"
controllerpkg "github.com/jetstack/cert-manager/pkg/controller"
"github.com/jetstack/cert-manager/pkg/controller/certificates"
"github.com/jetstack/cert-manager/pkg/controller/certificates/trigger/policies"
logf "github.com/jetstack/cert-manager/pkg/logs"
"github.com/jetstack/cert-manager/pkg/util/pki"
"github.com/jetstack/cert-manager/pkg/util/predicate"
)
const (
ControllerName = "CertificateReadiness"
)
var PolicyChain = policies.Chain{
policies.SecretDoesNotExist,
policies.SecretHasData,
policies.SecretPublicKeysMatch,
policies.CurrentCertificateRequestValidForSpec,
policies.CurrentCertificateHasExpired,
}
type controller struct {
// the policies to use to define readiness - named here to make testing simpler
policyChain policies.Chain
certificateLister cmlisters.CertificateLister
certificateRequestLister cmlisters.CertificateRequestLister
secretLister corelisters.SecretLister
client cmclient.Interface
gatherer *policies.Gatherer
}
func NewController(
log logr.Logger,
client cmclient.Interface,
factory informers.SharedInformerFactory,
cmFactory cminformers.SharedInformerFactory,
chain policies.Chain,
) (*controller, workqueue.RateLimitingInterface, []cache.InformerSynced) {
// create a queue used to queue up items to be processed
queue := workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(time.Second*1, time.Second*30), ControllerName)
// obtain references to all the informers used by this controller
certificateInformer := cmFactory.Certmanager().V1().Certificates()
certificateRequestInformer := cmFactory.Certmanager().V1().CertificateRequests()
secretsInformer := factory.Core().V1().Secrets()
certificateInformer.Informer().AddEventHandler(&controllerpkg.QueuingEventHandler{Queue: queue})
// When a CertificateRequest resource changes, enqueue the Certificate resource that owns it.
certificateRequestInformer.Informer().AddEventHandler(&controllerpkg.BlockingEventHandler{
WorkFunc: certificates.EnqueueCertificatesForResourceUsingPredicates(log, queue, certificateInformer.Lister(), labels.Everything(), predicate.ResourceOwnerOf),
})
// When a Secret resource changes, enqueue any Certificate resources that name it as spec.secretName.
secretsInformer.Informer().AddEventHandler(&controllerpkg.BlockingEventHandler{
// Trigger reconciles on changes to the Secret named `spec.secretName`
WorkFunc: certificates.EnqueueCertificatesForResourceUsingPredicates(log, queue, certificateInformer.Lister(), labels.Everything(),
predicate.ExtractResourceName(predicate.CertificateSecretName)),
})
// build a list of InformerSynced functions that will be returned by the Register method.
// the controller will only begin processing items once all of these informers have synced.
mustSync := []cache.InformerSynced{
certificateRequestInformer.Informer().HasSynced,
secretsInformer.Informer().HasSynced,
certificateInformer.Informer().HasSynced,
}
return &controller{
policyChain: chain,
certificateLister: certificateInformer.Lister(),
certificateRequestLister: certificateRequestInformer.Lister(),
secretLister: secretsInformer.Lister(),
client: client,
gatherer: &policies.Gatherer{
CertificateRequestLister: certificateRequestInformer.Lister(),
SecretLister: secretsInformer.Lister(),
},
}, queue, mustSync
}
func (c *controller) ProcessItem(ctx context.Context, key string) error {
log := logf.FromContext(ctx).WithValues("key", key)
ctx = logf.NewContext(ctx, log)
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
log.Error(err, "invalid resource key passed to ProcessItem")
return nil
}
crt, err := c.certificateLister.Certificates(namespace).Get(name)
if apierrors.IsNotFound(err) {
log.Error(err, "certificate not found for key")
return nil
}
if err != nil {
return err
}
input, err := c.gatherer.DataForCertificate(ctx, crt)
if err != nil {
return err
}
condition := readyCondition(c.policyChain, input)
crt = crt.DeepCopy()
apiutil.SetCertificateCondition(crt, condition.Type, condition.Status, condition.Reason, condition.Message)
switch {
case input.Secret != nil && input.Secret.Data != nil:
x509cert, err := pki.DecodeX509CertificateBytes(input.Secret.Data[corev1.TLSCertKey])
if err != nil {
// clear status fields if we cannot decode the certificate bytes
crt.Status.NotAfter = nil
crt.Status.NotBefore = nil
crt.Status.RenewalTime = nil
break
}
notBefore := metav1.NewTime(x509cert.NotBefore)
notAfter := metav1.NewTime(x509cert.NotAfter)
crt.Status.NotBefore = ¬Before
crt.Status.NotAfter = ¬After
// calculate how long before the certificate expiry time the certificate
// should be renewed
renewBefore := certificates.RenewBeforeExpiryDuration(crt.Status.NotBefore.Time, crt.Status.NotAfter.Time, crt.Spec.RenewBefore)
renewalTime := metav1.NewTime(notAfter.Add(-1 * renewBefore))
crt.Status.RenewalTime = &renewalTime
default:
// clear status fields if the secret does not have any data
crt.Status.NotAfter = nil
crt.Status.NotBefore = nil
crt.Status.RenewalTime = nil
}
_, err = c.client.CertmanagerV1().Certificates(crt.Namespace).UpdateStatus(ctx, crt, metav1.UpdateOptions{})
if err != nil {
return err
}
return nil
}
func readyCondition(chain policies.Chain, input policies.Input) cmapi.CertificateCondition {
reason, message, reissue := chain.Evaluate(input)
if !reissue {
return cmapi.CertificateCondition{
Type: cmapi.CertificateConditionReady,
Status: cmmeta.ConditionTrue,
Reason: "Ready",
Message: "Certificate is up to date and has not expired",
}
}
return cmapi.CertificateCondition{
Type: cmapi.CertificateConditionReady,
Status: cmmeta.ConditionFalse,
Reason: reason,
Message: message,
}
}
// controllerWrapper wraps the `controller` structure to make it implement
// the controllerpkg.queueingController interface
type controllerWrapper struct {
*controller
}
func (c *controllerWrapper) Register(ctx *controllerpkg.Context) (workqueue.RateLimitingInterface, []cache.InformerSynced, error) {
// construct a new named logger to be reused throughout the controller
log := logf.FromContext(ctx.RootContext, ControllerName)
ctrl, queue, mustSync := NewController(log,
ctx.CMClient,
ctx.KubeSharedInformerFactory,
ctx.SharedInformerFactory,
PolicyChain,
)
c.controller = ctrl
return queue, mustSync, nil
}
func init() {
controllerpkg.Register(ControllerName, func(ctx *controllerpkg.Context) (controllerpkg.Interface, error) {
return controllerpkg.NewBuilder(ctx, ControllerName).
For(&controllerWrapper{}).
Complete()
})
}
| 1 | 24,222 | Make the flag derived default available in the readiness controller | jetstack-cert-manager | go |
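The patch above gives the readiness controller a flag-derived default renew-before duration; the renewal time recorded in status is simply notAfter minus that window. A small self-contained sketch of the arithmetic, where the fallback to the controller-wide default is an assumption about how the new field could be used rather than the exact cert-manager logic:

    package main

    import (
    	"fmt"
    	"time"
    )

    // renewalTime returns when renewal should happen: renewBefore subtracted from
    // the certificate expiry. When renewBefore is unset (zero), fall back to the
    // controller-level default -- an assumed behaviour for illustration.
    func renewalTime(notAfter time.Time, renewBefore, defaultRenewBefore time.Duration) time.Time {
    	if renewBefore <= 0 {
    		renewBefore = defaultRenewBefore
    	}
    	return notAfter.Add(-renewBefore)
    }

    func main() {
    	notAfter := time.Now().Add(90 * 24 * time.Hour)
    	fmt.Println("renew at:", renewalTime(notAfter, 0, 30*24*time.Hour))
    }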
@@ -278,6 +278,10 @@ func TestReconcileClusterSync_NoWorkToDo(t *testing.T) {
}),
),
},
+ {
+ name: "syncset pause",
+ cd: cdBuilder(scheme).GenericOptions(testgeneric.WithAnnotation(constants.SyncsetPauseAnnotation, "true")).Build(),
+ },
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) { | 1 | package clustersync
import (
"context"
"fmt"
"reflect"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/utils/pointer"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
hiveintv1alpha1 "github.com/openshift/hive/pkg/apis/hiveinternal/v1alpha1"
"github.com/openshift/hive/pkg/constants"
"github.com/openshift/hive/pkg/remoteclient"
remoteclientmock "github.com/openshift/hive/pkg/remoteclient/mock"
"github.com/openshift/hive/pkg/resource"
resourcemock "github.com/openshift/hive/pkg/resource/mock"
hiveassert "github.com/openshift/hive/pkg/test/assert"
testcd "github.com/openshift/hive/pkg/test/clusterdeployment"
testcs "github.com/openshift/hive/pkg/test/clustersync"
testgeneric "github.com/openshift/hive/pkg/test/generic"
testsecret "github.com/openshift/hive/pkg/test/secret"
testselectorsyncset "github.com/openshift/hive/pkg/test/selectorsyncset"
testsyncset "github.com/openshift/hive/pkg/test/syncset"
)
const (
testNamespace = "test-namespace"
testCDName = "test-cluster-deployment"
testCDUID = "test-cluster-deployment-uid"
testClusterSyncName = testCDName
testClusterSyncUID = "test-cluster-sync-uid"
testLeaseName = testCDName
)
var (
timeInThePast = metav1.NewTime(time.Date(2020, 1, 2, 3, 4, 5, 0, time.Local))
)
type reconcileTest struct {
logger log.FieldLogger
c client.Client
r *ReconcileClusterSync
mockCtrl *gomock.Controller
mockResourceHelper *resourcemock.MockHelper
mockRemoteClientBuilder *remoteclientmock.MockBuilder
expectedFailedMessage string
// A zero LastTransitionTime indicates that the time should be set to now.
// A FirstSuccessTime that points to a zero time indicates that the time should be set to now.
expectedSyncSetStatuses []hiveintv1alpha1.SyncStatus
expectedSelectorSyncSetStatuses []hiveintv1alpha1.SyncStatus
expectUnchangedLeaseRenewTime bool
expectRequeue bool
expectNoWorkDone bool
}
func newReconcileTest(t *testing.T, mockCtrl *gomock.Controller, scheme *runtime.Scheme, existing ...runtime.Object) *reconcileTest {
logger := log.New()
logger.SetLevel(log.DebugLevel)
c := &clientWrapper{fake.NewFakeClientWithScheme(scheme, existing...)}
mockResourceHelper := resourcemock.NewMockHelper(mockCtrl)
mockRemoteClientBuilder := remoteclientmock.NewMockBuilder(mockCtrl)
r := &ReconcileClusterSync{
Client: c,
logger: logger,
reapplyInterval: defaultReapplyInterval,
resourceHelperBuilder: func(rc *rest.Config, _ log.FieldLogger) (resource.Helper, error) {
return mockResourceHelper, nil
},
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder {
return mockRemoteClientBuilder
},
}
return &reconcileTest{
logger: logger,
c: c,
r: r,
mockCtrl: mockCtrl,
mockResourceHelper: mockResourceHelper,
mockRemoteClientBuilder: mockRemoteClientBuilder,
}
}
func (rt *reconcileTest) run(t *testing.T) {
if !rt.expectNoWorkDone {
rt.mockRemoteClientBuilder.EXPECT().RESTConfig().Return(&rest.Config{}, nil)
}
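// Record the lease's current renew time before reconciling so the test can later
// assert that Reconcile left it untouched when no reapply was expected.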
var origLeaseRenewTime metav1.MicroTime
if rt.expectUnchangedLeaseRenewTime {
lease := &hiveintv1alpha1.ClusterSyncLease{}
rt.c.Get(context.Background(), client.ObjectKey{Namespace: testNamespace, Name: testLeaseName}, lease)
origLeaseRenewTime = lease.Spec.RenewTime
}
reconcileRequest := reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: testNamespace,
Name: testCDName,
},
}
startTime := time.Now()
timeSinceOrigLeaseRenewTime := time.Since(origLeaseRenewTime.Time)
result, err := rt.r.Reconcile(reconcileRequest)
require.NoError(t, err, "unexpected error from Reconcile")
endTime := time.Now()
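// Widen the observed [startTime, endTime] window to whole-second boundaries so the
// timestamp assertions tolerate the second-precision truncation of recorded status times.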
startTime = startTime.Truncate(time.Second)
endTime = endTime.Add(time.Second).Truncate(time.Second)
if rt.expectNoWorkDone {
assert.False(t, result.Requeue, "expected no requeue")
assert.Zero(t, result.RequeueAfter, "expected no requeue after")
err = rt.c.Get(context.Background(), client.ObjectKey{Namespace: testNamespace, Name: testLeaseName}, &hiveintv1alpha1.ClusterSyncLease{})
assert.True(t, apierrors.IsNotFound(err), "expected no lease")
err = rt.c.Get(context.Background(), client.ObjectKey{Namespace: testNamespace, Name: testClusterSyncName}, &hiveintv1alpha1.ClusterSync{})
assert.True(t, apierrors.IsNotFound(err), "expected no ClusterSync")
return
}
assert.True(t, result.Requeue, "expected requeue to be true")
if rt.expectRequeue {
assert.Zero(t, result.RequeueAfter, "unexpected requeue after")
} else {
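// When a requeue-after is expected, the controller schedules the next reapply after the
// reapply interval plus up to reapplyIntervalJitter of jitter, so bound RequeueAfter by
// the interval minus the time already elapsed (lower) and the interval plus the maximum
// jitter (upper).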
var minRequeueAfter, maxRequeueAfter float64
if rt.expectUnchangedLeaseRenewTime {
minRequeueAfter = (defaultReapplyInterval - timeSinceOrigLeaseRenewTime).Seconds()
maxRequeueAfter = minRequeueAfter + defaultReapplyInterval.Seconds()*reapplyIntervalJitter + endTime.Sub(startTime).Seconds()
} else {
minRequeueAfter = (defaultReapplyInterval - endTime.Sub(startTime)).Seconds()
maxRequeueAfter = defaultReapplyInterval.Seconds() * (1 + reapplyIntervalJitter)
}
assert.GreaterOrEqual(t, result.RequeueAfter.Seconds(), minRequeueAfter, "requeue after too small")
assert.LessOrEqual(t, result.RequeueAfter.Seconds(), maxRequeueAfter, "requeue after too large")
}
lease := &hiveintv1alpha1.ClusterSyncLease{}
err = rt.c.Get(context.Background(), client.ObjectKey{Namespace: testNamespace, Name: testLeaseName}, lease)
require.NoError(t, err, "unexpected error getting lease for ClusterSync")
if rt.expectUnchangedLeaseRenewTime {
assert.Equal(t, origLeaseRenewTime, lease.Spec.RenewTime, "expected lease renew time to be unchanged")
} else {
if renewTime := lease.Spec.RenewTime; assert.NotNil(t, renewTime, "expected renew time to be set") {
hiveassert.BetweenTimes(t, renewTime.Time, startTime, endTime, "unexpected renew time")
}
}
clusterSync := &hiveintv1alpha1.ClusterSync{}
err = rt.c.Get(context.Background(), client.ObjectKey{Namespace: testNamespace, Name: testClusterSyncName}, clusterSync)
require.NoError(t, err, "unexpected error getting ClusterSync")
expectedOwnerReferenceFromClusterSync := metav1.OwnerReference{
APIVersion: hivev1.SchemeGroupVersion.String(),
Kind: "ClusterDeployment",
Name: testCDName,
UID: testCDUID,
BlockOwnerDeletion: pointer.BoolPtr(true),
}
assert.Contains(t, clusterSync.OwnerReferences, expectedOwnerReferenceFromClusterSync, "expected owner reference from ClusterSync to ClusterDeployment")
expectedOwnerReferenceFromLease := metav1.OwnerReference{
APIVersion: hiveintv1alpha1.SchemeGroupVersion.String(),
Kind: "ClusterSync",
Name: testClusterSyncName,
UID: testClusterSyncUID,
BlockOwnerDeletion: pointer.BoolPtr(true),
}
assert.Contains(t, lease.OwnerReferences, expectedOwnerReferenceFromLease, "expected owner reference from ClusterSyncLease to ClusterSync")
var syncFailedCond *hiveintv1alpha1.ClusterSyncCondition
for i, cond := range clusterSync.Status.Conditions {
if cond.Type == hiveintv1alpha1.ClusterSyncFailed {
syncFailedCond = &clusterSync.Status.Conditions[i]
break
}
}
assert.NotNil(t, syncFailedCond, "expected a sync failed condition")
expectedConditionStatus := corev1.ConditionTrue
expectedConditionMessage := rt.expectedFailedMessage
if expectedConditionMessage == "" {
expectedConditionStatus = corev1.ConditionFalse
expectedConditionMessage = "All SyncSets and SelectorSyncSets have been applied to the cluster"
}
assert.Equal(t, string(expectedConditionStatus), string(syncFailedCond.Status), "unexpected sync failed status")
assert.Equal(t, expectedConditionMessage, syncFailedCond.Message, "unexpected sync failed message")
areSyncStatusesEqual(t, "syncset", rt.expectedSyncSetStatuses, clusterSync.Status.SyncSets, startTime, endTime)
areSyncStatusesEqual(t, "selectorsyncset", rt.expectedSelectorSyncSetStatuses, clusterSync.Status.SelectorSyncSets, startTime, endTime)
}
func areSyncStatusesEqual(t *testing.T, syncSetType string, expectedStatuses, actualStatuses []hiveintv1alpha1.SyncStatus, startTime, endTime time.Time) {
if !assert.Equalf(t, len(expectedStatuses), len(actualStatuses), "unexpected number of %s statuses", syncSetType) {
return
}
for i, expectedStatus := range expectedStatuses {
if expectedStatus.LastTransitionTime.IsZero() {
actual := actualStatuses[i].LastTransitionTime
hiveassert.BetweenTimes(t, actual.Time, startTime, endTime, "expected %s status %d to have LastTransitionTime of now", syncSetType, i)
expectedStatuses[i].LastTransitionTime = actual
}
if expectedStatus.FirstSuccessTime != nil && expectedStatus.FirstSuccessTime.IsZero() {
if actualStatuses[i].FirstSuccessTime != nil {
actual := actualStatuses[i].FirstSuccessTime
hiveassert.BetweenTimes(t, actual.Time, startTime, endTime, "expected %s status %d to have FirstSuccessTime of now", syncSetType, i)
*expectedStatuses[i].FirstSuccessTime = *actualStatuses[i].FirstSuccessTime
}
}
}
assert.Equalf(t, expectedStatuses, actualStatuses, "unexpected %s statuses", syncSetType)
}
func TestReconcileClusterSync_NewClusterDeployment(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build())
rt.mockRemoteClientBuilder.EXPECT().RESTConfig().Return(&rest.Config{}, nil)
reconcileRequest := reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: testNamespace,
Name: testCDName,
},
}
result, err := rt.r.Reconcile(reconcileRequest)
require.NoError(t, err, "unexpected error from Reconcile")
assert.Equal(t, result, reconcile.Result{}, "unexpected result from reconcile")
err = rt.c.Get(context.Background(), client.ObjectKey{Namespace: testNamespace, Name: testLeaseName}, &hiveintv1alpha1.ClusterSyncLease{})
assert.True(t, apierrors.IsNotFound(err), "expected no lease")
err = rt.c.Get(context.Background(), client.ObjectKey{Namespace: testNamespace, Name: testClusterSyncName}, &hiveintv1alpha1.ClusterSync{})
assert.Nil(t, err, "expected there to be a ClusterSync")
}
func TestReconcileClusterSync_NoWorkToDo(t *testing.T) {
scheme := newScheme()
cases := []struct {
name string
cd *hivev1.ClusterDeployment
}{
{
name: "no ClusterDeployment",
cd: nil,
},
{
name: "deleted ClusterDeployment",
cd: cdBuilder(scheme).GenericOptions(testgeneric.Deleted()).Build(),
},
{
name: "unreachable",
cd: cdBuilder(scheme).Build(
testcd.WithCondition(hivev1.ClusterDeploymentCondition{
Type: hivev1.UnreachableCondition,
Status: corev1.ConditionTrue,
}),
),
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
var existing []runtime.Object
if tc.cd != nil {
existing = append(existing, tc.cd)
}
rt := newReconcileTest(t, mockCtrl, scheme, existing...)
rt.expectNoWorkDone = true
rt.run(t)
})
}
}
func TestReconcileClusterSync_ApplyResource(t *testing.T) {
cases := []struct {
applyMode hivev1.SyncSetResourceApplyMode
includeResourcesToDelete bool
}{
{
applyMode: hivev1.UpsertResourceApplyMode,
includeResourcesToDelete: false,
},
{
applyMode: hivev1.SyncResourceApplyMode,
includeResourcesToDelete: true,
},
}
for _, tc := range cases {
t.Run(string(tc.applyMode), func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
resourceToApply := testConfigMap("dest-namespace", "dest-name")
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithApplyMode(tc.applyMode),
testsyncset.WithResources(resourceToApply),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(resourceToApply)).Return(resource.CreatedApplyResult, nil)
expectedSyncStatusBuilder := newSyncStatusBuilder("test-syncset")
if tc.includeResourcesToDelete {
expectedSyncStatusBuilder = expectedSyncStatusBuilder.Options(
withResourcesToDelete(testConfigMapRef("dest-namespace", "dest-name")),
)
}
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{expectedSyncStatusBuilder.Build()}
rt.run(t)
})
}
}
func TestReconcileClusterSync_ApplySecret(t *testing.T) {
cases := []struct {
applyMode hivev1.SyncSetResourceApplyMode
includeResourcesToDelete bool
}{
{
applyMode: hivev1.UpsertResourceApplyMode,
includeResourcesToDelete: false,
},
{
applyMode: hivev1.SyncResourceApplyMode,
includeResourcesToDelete: true,
},
}
for _, tc := range cases {
t.Run(string(tc.applyMode), func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithApplyMode(tc.applyMode),
testsyncset.WithSecrets(
testSecretMapping("test-secret", "dest-namespace", "dest-name"),
),
)
srcSecret := testsecret.FullBuilder(testNamespace, "test-secret", scheme).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet, srcSecret)
secretToApply := testsecret.BasicBuilder().GenericOptions(
testgeneric.WithNamespace("dest-namespace"),
testgeneric.WithName("dest-name"),
testgeneric.WithTypeMeta(scheme),
).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(secretToApply)).Return(resource.CreatedApplyResult, nil)
expectedSyncStatusBuilder := newSyncStatusBuilder("test-syncset")
if tc.includeResourcesToDelete {
expectedSyncStatusBuilder = expectedSyncStatusBuilder.Options(
withResourcesToDelete(testSecretRef("dest-namespace", "dest-name")),
)
}
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{expectedSyncStatusBuilder.Build()}
rt.run(t)
})
}
}
func TestReconcileClusterSync_ApplyPatch(t *testing.T) {
cases := []struct {
applyMode hivev1.SyncSetResourceApplyMode
}{
{applyMode: hivev1.UpsertResourceApplyMode},
{applyMode: hivev1.SyncResourceApplyMode},
}
for _, tc := range cases {
t.Run(string(tc.applyMode), func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithApplyMode(tc.applyMode),
testsyncset.WithPatches(hivev1.SyncObjectPatch{
APIVersion: "v1",
Kind: "ConfigMap",
Namespace: "dest-namespace",
Name: "dest-name",
PatchType: "patch-type",
Patch: "test-patch",
}),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet)
rt.mockResourceHelper.EXPECT().Patch(
types.NamespacedName{Namespace: "dest-namespace", Name: "dest-name"},
"ConfigMap",
"v1",
[]byte("test-patch"),
"patch-type",
).Return(nil)
rt.expectedSyncSetStatuses = append(rt.expectedSyncSetStatuses, buildSyncStatus("test-syncset"))
rt.run(t)
})
}
}
func TestReconcileClusterSync_ApplyAllTypes(t *testing.T) {
cases := []struct {
applyMode hivev1.SyncSetResourceApplyMode
includeResourcesToDelete bool
}{
{
applyMode: hivev1.UpsertResourceApplyMode,
includeResourcesToDelete: false,
},
{
applyMode: hivev1.SyncResourceApplyMode,
includeResourcesToDelete: true,
},
}
for _, tc := range cases {
t.Run(string(tc.applyMode), func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
resourceToApply := testConfigMap("resource-namespace", "resource-name")
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithApplyMode(tc.applyMode),
testsyncset.WithResources(resourceToApply),
testsyncset.WithSecrets(
testSecretMapping("test-secret", "secret-namespace", "secret-name"),
),
testsyncset.WithPatches(hivev1.SyncObjectPatch{
APIVersion: "patch-api/v1",
Kind: "PatchKind",
Namespace: "patch-namespace",
Name: "patch-name",
PatchType: "patch-type",
Patch: "test-patch",
}),
)
srcSecret := testsecret.FullBuilder(testNamespace, "test-secret", scheme).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet, srcSecret)
secretToApply := testsecret.BasicBuilder().GenericOptions(
testgeneric.WithNamespace("secret-namespace"),
testgeneric.WithName("secret-name"),
testgeneric.WithTypeMeta(scheme),
).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(resourceToApply)).Return(resource.CreatedApplyResult, nil)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(secretToApply)).Return(resource.CreatedApplyResult, nil)
rt.mockResourceHelper.EXPECT().Patch(
types.NamespacedName{Namespace: "patch-namespace", Name: "patch-name"},
"PatchKind",
"patch-api/v1",
[]byte("test-patch"),
"patch-type",
).Return(nil)
expectedSyncStatusBuilder := newSyncStatusBuilder("test-syncset")
if tc.includeResourcesToDelete {
expectedSyncStatusBuilder = expectedSyncStatusBuilder.Options(
withResourcesToDelete(
testConfigMapRef("resource-namespace", "resource-name"),
testSecretRef("secret-namespace", "secret-name"),
),
)
}
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{expectedSyncStatusBuilder.Build()}
rt.run(t)
})
}
}
func TestReconcileClusterSync_Reapply(t *testing.T) {
cases := []struct {
name string
noSyncLease bool
renewTime time.Time
expectApply bool
}{
{
name: "too soon",
renewTime: time.Now().Add(-time.Hour),
expectApply: false,
},
{
name: "time for reapply",
renewTime: time.Now().Add(-3 * time.Hour),
expectApply: true,
},
{
name: "no sync lease",
noSyncLease: true,
expectApply: true,
},
{
name: "sync lease with no renew time",
expectApply: true,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
resourceToApply := testConfigMap("dest-namespace", "dest-name")
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithResources(resourceToApply),
)
existing := []runtime.Object{
cdBuilder(scheme).Build(),
clusterSyncBuilder(scheme).Build(
testcs.WithSyncSetStatus(buildSyncStatus("test-syncset",
withTransitionInThePast(),
withFirstSuccessTimeInThePast(),
),
)),
syncSet,
}
if !tc.noSyncLease {
existing = append(existing, buildSyncLease(tc.renewTime))
}
rt := newReconcileTest(t, mockCtrl, scheme, existing...)
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{
buildSyncStatus("test-syncset", withTransitionInThePast(), withFirstSuccessTimeInThePast()),
}
if tc.expectApply {
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(resourceToApply)).Return(resource.CreatedApplyResult, nil)
} else {
rt.expectUnchangedLeaseRenewTime = true
}
rt.run(t)
})
}
}
func TestReconcileClusterSync_NewSyncSetApplied(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
existingResource := testConfigMap("dest-namespace", "dest-name")
existingSyncSet := testsyncset.FullBuilder(testNamespace, "existing-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithResources(existingResource),
)
newResource := testConfigMap("other-namespace", "other-name")
newSyncSet := testsyncset.FullBuilder(testNamespace, "new-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithResources(newResource),
)
clusterSync := clusterSyncBuilder(scheme).Build(testcs.WithSyncSetStatus(
buildSyncStatus("existing-syncset", withTransitionInThePast(), withFirstSuccessTimeInThePast()),
))
lease := buildSyncLease(time.Now().Add(-1 * time.Hour))
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), existingSyncSet, newSyncSet, clusterSync, lease)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(newResource)).Return(resource.CreatedApplyResult, nil)
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{
buildSyncStatus("existing-syncset", withTransitionInThePast(), withFirstSuccessTimeInThePast()),
buildSyncStatus("new-syncset"),
}
rt.expectUnchangedLeaseRenewTime = true
rt.run(t)
}
func TestReconcileClusterSync_SyncSetDeleted(t *testing.T) {
cases := []struct {
name string
includeResourcesToDelete bool
expectDelete bool
}{
{
name: "upsert",
includeResourcesToDelete: false,
expectDelete: false,
},
{
name: "sync",
includeResourcesToDelete: true,
expectDelete: true,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
existingSyncStatusBuilder := newSyncStatusBuilder("test-syncset").Options(
withTransitionInThePast(),
withFirstSuccessTimeInThePast(),
)
if tc.includeResourcesToDelete {
existingSyncStatusBuilder = existingSyncStatusBuilder.Options(
withResourcesToDelete(testConfigMapRef("dest-namespace", "dest-name")),
)
}
clusterSync := clusterSyncBuilder(scheme).Build(testcs.WithSyncSetStatus(existingSyncStatusBuilder.Build()))
lease := buildSyncLease(time.Now().Add(-1 * time.Hour))
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSync, lease)
if tc.expectDelete {
rt.mockResourceHelper.EXPECT().
Delete("v1", "ConfigMap", "dest-namespace", "dest-name").
Return(nil)
}
rt.expectUnchangedLeaseRenewTime = true
rt.run(t)
})
}
}
func TestReconcileClusterSync_ResourceRemovedFromSyncSet(t *testing.T) {
cases := []struct {
name string
includeResourcesToDelete bool
expectDelete bool
}{
{
name: "upsert",
includeResourcesToDelete: false,
expectDelete: false,
},
{
name: "sync",
includeResourcesToDelete: true,
expectDelete: true,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
resourceToApply := testConfigMap("dest-namespace", "retained-resource")
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(2),
testsyncset.WithResources(resourceToApply),
)
existingSyncStatusBuilder := newSyncStatusBuilder("test-syncset").Options(
withTransitionInThePast(),
withFirstSuccessTimeInThePast(),
)
if tc.includeResourcesToDelete {
existingSyncStatusBuilder = existingSyncStatusBuilder.Options(withResourcesToDelete(
testConfigMapRef("dest-namespace", "deleted-resource"),
testConfigMapRef("dest-namespace", "retained-resource"),
))
}
clusterSync := clusterSyncBuilder(scheme).Build(testcs.WithSyncSetStatus(existingSyncStatusBuilder.Build()))
lease := buildSyncLease(time.Now().Add(-1 * time.Hour))
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), syncSet, clusterSync, lease)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(resourceToApply)).Return(resource.CreatedApplyResult, nil)
if tc.expectDelete {
rt.mockResourceHelper.EXPECT().
Delete("v1", "ConfigMap", "dest-namespace", "deleted-resource").
Return(nil)
}
expectedSyncStatusBuilder := newSyncStatusBuilder("test-syncset").Options(
withObservedGeneration(2),
withFirstSuccessTimeInThePast(),
)
if tc.includeResourcesToDelete {
expectedSyncStatusBuilder = expectedSyncStatusBuilder.Options(withResourcesToDelete(
testConfigMapRef("dest-namespace", "retained-resource"),
))
}
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{expectedSyncStatusBuilder.Build()}
rt.expectUnchangedLeaseRenewTime = true
rt.run(t)
})
}
}
func TestReconcileClusterSync_ErrorApplyingResource(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
resourceToApply := testConfigMap("dest-namespace", "dest-name")
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithResources(resourceToApply),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(resourceToApply)).
Return(resource.ApplyResult(""), errors.New("test apply error"))
rt.expectedFailedMessage = "SyncSet test-syncset is failing"
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-syncset",
withFailureResult("failed to apply resource 0: test apply error"),
withNoFirstSuccessTime(),
)}
rt.expectRequeue = true
rt.run(t)
}
func TestReconcileClusterSync_ErrorDecodingResource(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
)
syncSet.Spec.Resources = []runtime.RawExtension{{Raw: []byte("{}")}}
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet)
rt.expectedFailedMessage = "SyncSet test-syncset is failing"
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-syncset",
withFailureResult("failed to decode resource 0: error unmarshaling JSON: while decoding JSON: Object 'Kind' is missing in '{}'"),
withNoFirstSuccessTime(),
)}
rt.run(t)
}
func TestReconcileClusterSync_ErrorApplyingSecret(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithSecrets(
testSecretMapping("test-secret", "dest-namespace", "dest-name"),
),
)
srcSecret := testsecret.FullBuilder(testNamespace, "test-secret", scheme).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet, srcSecret)
secretToApply := testsecret.BasicBuilder().GenericOptions(
testgeneric.WithNamespace("dest-namespace"),
testgeneric.WithName("dest-name"),
testgeneric.WithTypeMeta(scheme),
).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(secretToApply)).
Return(resource.ApplyResult(""), errors.New("test apply error"))
rt.expectedFailedMessage = "SyncSet test-syncset is failing"
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-syncset",
withFailureResult("failed to apply secret 0: test apply error"),
withNoFirstSuccessTime(),
)}
rt.expectRequeue = true
rt.run(t)
}
func TestReconcileClusterSync_ErrorApplyingPatch(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithPatches(hivev1.SyncObjectPatch{
APIVersion: "v1",
Kind: "ConfigMap",
Namespace: "dest-namespace",
Name: "dest-name",
PatchType: "patch-type",
Patch: "test-patch",
}),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet)
rt.mockResourceHelper.EXPECT().Patch(
types.NamespacedName{Namespace: "dest-namespace", Name: "dest-name"},
"ConfigMap",
"v1",
[]byte("test-patch"),
"patch-type",
).Return(errors.New("test patch error"))
rt.expectedFailedMessage = "SyncSet test-syncset is failing"
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-syncset",
withFailureResult("failed to apply patch 0: test patch error"),
withNoFirstSuccessTime(),
)}
rt.expectRequeue = true
rt.run(t)
}
func TestReconcileClusterSync_SkipAfterFailingResource(t *testing.T) {
cases := []struct {
name string
successfulResources int
successfulSecrets int
successfulPatches int
failureMessage string
}{
{
name: "resource 0 fails",
successfulResources: 0,
failureMessage: "failed to apply resource 0: test apply error",
},
{
name: "resource 1 fails",
successfulResources: 1,
failureMessage: "failed to apply resource 1: test apply error",
},
{
name: "resource 2 fails",
successfulResources: 2,
failureMessage: "failed to apply resource 2: test apply error",
},
{
name: "secret 0 fails",
successfulResources: 3,
successfulSecrets: 0,
failureMessage: "failed to apply secret 0: test apply error",
},
{
name: "secret 1 fails",
successfulResources: 3,
successfulSecrets: 1,
failureMessage: "failed to apply secret 1: test apply error",
},
{
name: "secret 2 fails",
successfulResources: 3,
successfulSecrets: 2,
failureMessage: "failed to apply secret 2: test apply error",
},
{
name: "patch 0 fails",
successfulResources: 3,
successfulSecrets: 3,
successfulPatches: 0,
failureMessage: "failed to apply patch 0: test patch error",
},
{
name: "patch 1 fails",
successfulResources: 3,
successfulSecrets: 3,
successfulPatches: 1,
failureMessage: "failed to apply patch 1: test patch error",
},
{
name: "patch 2 fails",
successfulResources: 3,
successfulSecrets: 3,
successfulPatches: 2,
failureMessage: "failed to apply patch 2: test patch error",
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
resourcesToApply := make([]hivev1.MetaRuntimeObject, 3)
for i := range resourcesToApply {
resourcesToApply[i] = testConfigMap(
fmt.Sprintf("resource-namespace-%d", i),
fmt.Sprintf("resource-name-%d", i),
)
}
srcSecrets := make([]*corev1.Secret, 3)
for i := range srcSecrets {
srcSecrets[i] = testsecret.FullBuilder(testNamespace, fmt.Sprintf("test-secret-%d", i), scheme).Build(
testsecret.WithDataKeyValue(
fmt.Sprintf("test-key-%d", i),
[]byte(fmt.Sprintf("test-data-%d", i)),
),
)
}
secretsToApply := make([]*corev1.Secret, len(srcSecrets))
for i := range secretsToApply {
secretsToApply[i] = testsecret.BasicBuilder().GenericOptions(
testgeneric.WithNamespace(fmt.Sprintf("secret-namespace-%d", i)),
testgeneric.WithName(fmt.Sprintf("secret-name-%d", i)),
testgeneric.WithTypeMeta(scheme),
).Build(
testsecret.WithDataKeyValue(
fmt.Sprintf("test-key-%d", i),
[]byte(fmt.Sprintf("test-data-%d", i)),
),
)
}
patchesToApply := make([]hivev1.SyncObjectPatch, 3)
for i := range patchesToApply {
patchesToApply[i] = hivev1.SyncObjectPatch{
APIVersion: "v1",
Kind: "ConfigMap",
Namespace: fmt.Sprintf("patch-namespace-%d", i),
Name: fmt.Sprintf("patch-name-%d", i),
PatchType: fmt.Sprintf("patch-type-%d", i),
Patch: fmt.Sprintf("test-patch-%d", i),
}
}
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithResources(resourcesToApply...),
testsyncset.WithSecrets(
testSecretMapping("test-secret-0", "secret-namespace-0", "secret-name-0"),
testSecretMapping("test-secret-1", "secret-namespace-1", "secret-name-1"),
testSecretMapping("test-secret-2", "secret-namespace-2", "secret-name-2"),
),
testsyncset.WithPatches(patchesToApply...),
)
existing := []runtime.Object{cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet}
for _, s := range srcSecrets {
existing = append(existing, s)
}
rt := newReconcileTest(t, mockCtrl, scheme, existing...)
var resourceHelperCalls []*gomock.Call
for i := 0; i < tc.successfulResources; i++ {
resourceHelperCalls = append(resourceHelperCalls,
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(resourcesToApply[i])).
Return(resource.CreatedApplyResult, nil))
}
if tc.successfulResources < len(resourcesToApply) {
resourceHelperCalls = append(resourceHelperCalls,
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(resourcesToApply[tc.successfulResources])).
Return(resource.ApplyResult(""), errors.New("test apply error")))
}
for i := 0; i < tc.successfulSecrets; i++ {
resourceHelperCalls = append(resourceHelperCalls,
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(secretsToApply[i])).
Return(resource.CreatedApplyResult, nil))
}
if tc.successfulResources == len(resourcesToApply) && tc.successfulSecrets < len(srcSecrets) {
resourceHelperCalls = append(resourceHelperCalls,
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(secretsToApply[tc.successfulSecrets])).
Return(resource.ApplyResult(""), errors.New("test apply error")))
}
for i := 0; i < tc.successfulPatches; i++ {
patch := patchesToApply[i]
resourceHelperCalls = append(resourceHelperCalls,
rt.mockResourceHelper.EXPECT().Patch(
types.NamespacedName{Namespace: patch.Namespace, Name: patch.Name},
patch.Kind,
patch.APIVersion,
[]byte(patch.Patch),
patch.PatchType,
).Return(nil))
}
if tc.successfulResources == len(resourcesToApply) && tc.successfulSecrets == len(secretsToApply) && tc.successfulPatches < len(patchesToApply) {
patch := patchesToApply[tc.successfulPatches]
resourceHelperCalls = append(resourceHelperCalls,
rt.mockResourceHelper.EXPECT().Patch(
types.NamespacedName{Namespace: patch.Namespace, Name: patch.Name},
patch.Kind,
patch.APIVersion,
[]byte(patch.Patch),
patch.PatchType,
).Return(errors.New("test patch error")))
}
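// The expectations above cover only the items up to and including the first failure;
// enforcing their order means any apply or patch attempted after the failure would be
// an unexpected call and fail the test.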
gomock.InOrder(resourceHelperCalls...)
rt.expectedFailedMessage = "SyncSet test-syncset is failing"
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-syncset",
withFailureResult(tc.failureMessage),
withNoFirstSuccessTime(),
)}
rt.expectRequeue = true
rt.run(t)
})
}
}
func TestReconcileClusterSync_ResourcesToDeleteAreOrdered(t *testing.T) {
scheme := newScheme()
resourcesToApply := []hivev1.MetaRuntimeObject{
testConfigMap("namespace-A", "name-A"),
testConfigMap("namespace-A", "name-B"),
testConfigMap("namespace-B", "name-A"),
&corev1.Service{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: "namespace-A",
Name: "name-A",
},
},
&hivev1.ClusterDeployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "hive.openshift.io/v1",
Kind: "ClusterDeployment",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: "namespace-A",
Name: "name-A",
},
},
}
srcSecret := testsecret.FullBuilder(testNamespace, "test-secret", scheme).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
secretMappings := []hivev1.SecretMapping{
testSecretMapping("test-secret", "namespace-A", "name-A"),
testSecretMapping("test-secret", "namespace-A", "name-B"),
testSecretMapping("test-secret", "namespace-B", "name-A"),
}
permutation := 0
roa := make([]interface{}, len(resourcesToApply))
for i, r := range resourcesToApply {
roa[i] = r
}
sm := make([]interface{}, len(secretMappings))
for i, m := range secretMappings {
sm[i] = m
}
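// Exercise every permutation of the resources and secret mappings to verify that the
// resourcesToDelete recorded in the ClusterSync status comes out in the same
// deterministic order regardless of the order in which the SyncSet lists them.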
permute(roa, func(roa []interface{}) {
permute(sm, func(sm []interface{}) {
resourcesToApply = make([]hivev1.MetaRuntimeObject, len(roa))
for i, r := range roa {
resourcesToApply[i] = r.(hivev1.MetaRuntimeObject)
}
secretMappings = make([]hivev1.SecretMapping, len(sm))
for i, m := range sm {
secretMappings[i] = m.(hivev1.SecretMapping)
}
t.Run(fmt.Sprintf("permutation %03d", permutation), func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(2),
testsyncset.WithApplyMode(hivev1.SyncResourceApplyMode),
testsyncset.WithResources(resourcesToApply...),
testsyncset.WithSecrets(secretMappings...),
)
clusterSync := clusterSyncBuilder(scheme).Build(
testcs.WithSyncSetStatus(buildSyncStatus("test-syncset",
withResourcesToDelete(
testConfigMapRef("namespace-A", "resource-failing-to-delete-A"),
testConfigMapRef("namespace-A", "resource-failing-to-delete-B"),
testConfigMapRef("namespace-B", "resource-failing-to-delete-A"),
),
withTransitionInThePast(),
withFirstSuccessTimeInThePast(),
)),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSync, syncSet, srcSecret)
var resourceHelperCalls []*gomock.Call
for _, r := range resourcesToApply {
resourceHelperCalls = append(resourceHelperCalls,
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(r)).
Return(resource.CreatedApplyResult, nil))
}
for _, s := range secretMappings {
secretToApply := testsecret.BasicBuilder().GenericOptions(
testgeneric.WithNamespace(s.TargetRef.Namespace),
testgeneric.WithName(s.TargetRef.Name),
testgeneric.WithTypeMeta(scheme),
).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
resourceHelperCalls = append(resourceHelperCalls,
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(secretToApply)).
Return(resource.CreatedApplyResult, nil))
}
resourceHelperCalls = append(resourceHelperCalls,
rt.mockResourceHelper.EXPECT().Delete("v1", "ConfigMap", "namespace-A", "resource-failing-to-delete-A").
Return(errors.New("error deleting resource")),
rt.mockResourceHelper.EXPECT().Delete("v1", "ConfigMap", "namespace-A", "resource-failing-to-delete-B").
Return(errors.New("error deleting resource")),
rt.mockResourceHelper.EXPECT().Delete("v1", "ConfigMap", "namespace-B", "resource-failing-to-delete-A").
Return(errors.New("error deleting resource")),
)
gomock.InOrder(resourceHelperCalls...)
rt.expectedFailedMessage = "SyncSet test-syncset is failing"
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-syncset",
withObservedGeneration(2),
withFailureResult("[Failed to delete v1, Kind=ConfigMap namespace-A/resource-failing-to-delete-A: error deleting resource, Failed to delete v1, Kind=ConfigMap namespace-A/resource-failing-to-delete-B: error deleting resource, Failed to delete v1, Kind=ConfigMap namespace-B/resource-failing-to-delete-A: error deleting resource]"),
withFirstSuccessTimeInThePast(),
withResourcesToDelete(
hiveintv1alpha1.SyncResourceReference{
APIVersion: "hive.openshift.io/v1",
Kind: "ClusterDeployment",
Namespace: "namespace-A",
Name: "name-A",
},
testConfigMapRef("namespace-A", "name-A"),
testConfigMapRef("namespace-A", "name-B"),
testConfigMapRef("namespace-A", "resource-failing-to-delete-A"),
testConfigMapRef("namespace-A", "resource-failing-to-delete-B"),
testConfigMapRef("namespace-B", "name-A"),
testConfigMapRef("namespace-B", "resource-failing-to-delete-A"),
testSecretRef("namespace-A", "name-A"),
testSecretRef("namespace-A", "name-B"),
testSecretRef("namespace-B", "name-A"),
hiveintv1alpha1.SyncResourceReference{
APIVersion: "v1",
Kind: "Service",
Namespace: "namespace-A",
Name: "name-A",
},
),
)}
rt.expectRequeue = true
rt.run(t)
})
permutation++
})
})
}
func TestReconcileClusterSync_FailingSyncSetDoesNotBlockOtherSyncSets(t *testing.T) {
cases := []struct {
name string
failingSyncSet int
}{
{
name: "resource 0 fails",
failingSyncSet: 0,
},
{
name: "resource 1 fails",
failingSyncSet: 1,
},
{
name: "resource 2 fails",
failingSyncSet: 2,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
resourcesToApply := make([]hivev1.MetaRuntimeObject, 3)
for i := range resourcesToApply {
resourcesToApply[i] = testConfigMap(
fmt.Sprintf("resource-namespace-%d", i),
fmt.Sprintf("resource-name-%d", i),
)
}
syncSets := make([]*hivev1.SyncSet, len(resourcesToApply))
for i := range resourcesToApply {
syncSets[i] = testsyncset.FullBuilder(testNamespace, fmt.Sprintf("test-syncset-%d", i), scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithResources(resourcesToApply[i]),
)
}
existing := []runtime.Object{cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build()}
for _, r := range resourcesToApply {
existing = append(existing, r)
}
for _, s := range syncSets {
existing = append(existing, s)
}
rt := newReconcileTest(t, mockCtrl, scheme, existing...)
for i, r := range resourcesToApply {
if i == tc.failingSyncSet {
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(r)).
Return(resource.ApplyResult(""), errors.New("test apply error"))
} else {
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(r)).
Return(resource.CreatedApplyResult, nil)
}
}
rt.expectedFailedMessage = fmt.Sprintf("SyncSet test-syncset-%d is failing", tc.failingSyncSet)
for i, s := range syncSets {
expectedSyncSetStatusBuilder := newSyncStatusBuilder(s.Name)
if i == tc.failingSyncSet {
expectedSyncSetStatusBuilder = expectedSyncSetStatusBuilder.Options(
withFailureResult("failed to apply resource 0: test apply error"),
withNoFirstSuccessTime(),
)
}
rt.expectedSyncSetStatuses = append(rt.expectedSyncSetStatuses, expectedSyncSetStatusBuilder.Build())
}
rt.expectRequeue = true
rt.run(t)
})
}
}
func TestReconcileClusterSync_FailureMessage(t *testing.T) {
cases := []struct {
name string
failingSyncSets int
failingSelectorSyncSets int
expectedFailedMessage string
}{
{
name: "multiple failing syncsets",
failingSyncSets: 3,
expectedFailedMessage: "SyncSets test-syncset-0, test-syncset-1, test-syncset-2 are failing",
},
{
name: "multiple failing selectorsyncsets",
failingSelectorSyncSets: 3,
expectedFailedMessage: "SelectorSyncSets test-selectorsyncset-0, test-selectorsyncset-1, test-selectorsyncset-2 are failing",
},
{
name: "one failing syncset and selectorsyncset",
failingSyncSets: 1,
failingSelectorSyncSets: 1,
expectedFailedMessage: "SyncSet test-syncset-0 and SelectorSyncSet test-selectorsyncset-0 are failing",
},
{
name: "multiple failing syncsets and selectorsyncsets",
failingSyncSets: 3,
failingSelectorSyncSets: 3,
expectedFailedMessage: "SyncSets test-syncset-0, test-syncset-1, test-syncset-2 and SelectorSyncSets test-selectorsyncset-0, test-selectorsyncset-1, test-selectorsyncset-2 are failing",
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
syncSets := make([]runtime.Object, tc.failingSyncSets)
for i := range syncSets {
syncSets[i] = testsyncset.FullBuilder(testNamespace, fmt.Sprintf("test-syncset-%d", i), scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithResources(
testConfigMap(fmt.Sprintf("syncset-namespace-%d", i), fmt.Sprintf("syncset-name-%d", i)),
),
)
}
selectorSyncSets := make([]runtime.Object, tc.failingSelectorSyncSets)
for i := range selectorSyncSets {
selectorSyncSets[i] = testselectorsyncset.FullBuilder(fmt.Sprintf("test-selectorsyncset-%d", i), scheme).Build(
testselectorsyncset.WithLabelSelector("test-label-key", "test-label-value"),
testselectorsyncset.WithGeneration(1),
testselectorsyncset.WithResources(
testConfigMap(fmt.Sprintf("selectorsyncset-namespace-%d", i), fmt.Sprintf("selectorsyncset-name-%d", i)),
),
)
}
existing := []runtime.Object{
cdBuilder(scheme).Build(testcd.WithLabel("test-label-key", "test-label-value")),
clusterSyncBuilder(scheme).Build(),
}
existing = append(existing, syncSets...)
existing = append(existing, selectorSyncSets...)
rt := newReconcileTest(t, mockCtrl, scheme, existing...)
rt.mockResourceHelper.EXPECT().Apply(gomock.Any()).
Return(resource.ApplyResult(""), errors.New("test apply error")).
Times(tc.failingSyncSets + tc.failingSelectorSyncSets)
rt.expectedFailedMessage = tc.expectedFailedMessage
if tc.failingSyncSets > 0 {
rt.expectedSyncSetStatuses = make([]hiveintv1alpha1.SyncStatus, tc.failingSyncSets)
for i := range rt.expectedSyncSetStatuses {
rt.expectedSyncSetStatuses[i] = buildSyncStatus(fmt.Sprintf("test-syncset-%d", i),
withFailureResult("failed to apply resource 0: test apply error"),
withNoFirstSuccessTime(),
)
}
}
if tc.failingSelectorSyncSets > 0 {
rt.expectedSelectorSyncSetStatuses = make([]hiveintv1alpha1.SyncStatus, tc.failingSelectorSyncSets)
for i := range rt.expectedSelectorSyncSetStatuses {
rt.expectedSelectorSyncSetStatuses[i] = buildSyncStatus(fmt.Sprintf("test-selectorsyncset-%d", i),
withFailureResult("failed to apply resource 0: test apply error"),
withNoFirstSuccessTime(),
)
}
}
rt.expectRequeue = true
rt.run(t)
})
}
}
func TestReconcileClusterSync_PartialApply(t *testing.T) {
cases := []struct {
name string
existingSyncStatus hiveintv1alpha1.SyncStatus
expectedSyncStatus hiveintv1alpha1.SyncStatus
}{
{
name: "last apply failed",
existingSyncStatus: buildSyncStatus("test-syncset",
withObservedGeneration(2),
withFailureResult("existing failure"),
withTransitionInThePast(),
),
expectedSyncStatus: buildSyncStatus("test-syncset",
withObservedGeneration(2),
),
},
{
name: "syncset generation changed",
existingSyncStatus: buildSyncStatus("test-syncset",
withObservedGeneration(1),
withTransitionInThePast(),
withFirstSuccessTimeInThePast(),
),
expectedSyncStatus: buildSyncStatus("test-syncset",
withObservedGeneration(2),
withFirstSuccessTimeInThePast(),
),
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
resourceToApply := testConfigMap("dest-namespace", "dest-name")
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(2),
testsyncset.WithResources(resourceToApply),
)
clusterSync := clusterSyncBuilder(scheme).Build(testcs.WithSyncSetStatus(tc.existingSyncStatus))
syncLease := buildSyncLease(time.Now())
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), syncSet, clusterSync, syncLease)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(resourceToApply)).Return(resource.CreatedApplyResult, nil)
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{tc.expectedSyncStatus}
rt.expectUnchangedLeaseRenewTime = true
rt.run(t)
})
}
}
func TestReconcileClusterSync_ErrorDeleting(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
existingSyncStatus := buildSyncStatus("test-syncset",
withResourcesToDelete(testConfigMapRef("dest-namespace", "dest-name")),
withTransitionInThePast(),
withFirstSuccessTimeInThePast(),
)
clusterSync := clusterSyncBuilder(scheme).Build(testcs.WithSyncSetStatus(existingSyncStatus))
lease := buildSyncLease(time.Now().Add(-1 * time.Hour))
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSync, lease)
rt.mockResourceHelper.EXPECT().
Delete("v1", "ConfigMap", "dest-namespace", "dest-name").
Return(errors.New("error deleting resource"))
rt.expectedFailedMessage = "SyncSet test-syncset is failing"
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-syncset",
withObservedGeneration(0),
withFailureResult("Failed to delete v1, Kind=ConfigMap dest-namespace/dest-name: error deleting resource"),
withResourcesToDelete(testConfigMapRef("dest-namespace", "dest-name")),
withFirstSuccessTimeInThePast(),
)}
rt.expectUnchangedLeaseRenewTime = true
rt.expectRequeue = true
rt.run(t)
}
func TestReconcileClusterSync_DeleteErrorDoesNotBlockOtherDeletes(t *testing.T) {
cases := []struct {
name string
syncSetRemoved bool
}{
{
name: "removed syncset",
syncSetRemoved: true,
},
{
name: "removed resource",
syncSetRemoved: false,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
existingSyncStatus := buildSyncStatus("test-syncset",
withResourcesToDelete(
testConfigMapRef("dest-namespace", "failing-resource"),
testConfigMapRef("dest-namespace", "successful-resource"),
),
withTransitionInThePast(),
withFirstSuccessTimeInThePast(),
)
clusterSync := clusterSyncBuilder(scheme).Build(testcs.WithSyncSetStatus(existingSyncStatus))
lease := buildSyncLease(time.Now().Add(-1 * time.Hour))
existing := []runtime.Object{cdBuilder(scheme).Build(), clusterSync, lease}
if !tc.syncSetRemoved {
existing = append(existing,
testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(2),
),
)
}
rt := newReconcileTest(t, mockCtrl, scheme, existing...)
rt.mockResourceHelper.EXPECT().
Delete("v1", "ConfigMap", "dest-namespace", "failing-resource").
Return(errors.New("error deleting resource"))
rt.mockResourceHelper.EXPECT().
Delete("v1", "ConfigMap", "dest-namespace", "successful-resource").
Return(nil)
rt.expectedFailedMessage = "SyncSet test-syncset is failing"
expectedSyncSetStatusBuilder := newSyncStatusBuilder("test-syncset").Options(
withFailureResult("Failed to delete v1, Kind=ConfigMap dest-namespace/failing-resource: error deleting resource"),
withResourcesToDelete(
testConfigMapRef("dest-namespace", "failing-resource"),
),
withFirstSuccessTimeInThePast(),
)
if tc.syncSetRemoved {
expectedSyncSetStatusBuilder = expectedSyncSetStatusBuilder.Options(withObservedGeneration(0))
} else {
expectedSyncSetStatusBuilder = expectedSyncSetStatusBuilder.Options(withObservedGeneration(2))
}
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{expectedSyncSetStatusBuilder.Build()}
rt.expectUnchangedLeaseRenewTime = true
rt.expectRequeue = true
rt.run(t)
})
}
}
func TestReconcileClusterSync_ApplyBehavior(t *testing.T) {
cases := []struct {
applyBehavior hivev1.SyncSetApplyBehavior
}{
{
applyBehavior: hivev1.ApplySyncSetApplyBehavior,
},
{
applyBehavior: hivev1.CreateOnlySyncSetApplyBehavior,
},
{
applyBehavior: hivev1.CreateOrUpdateSyncSetApplyBehavior,
},
}
for _, tc := range cases {
t.Run(string(tc.applyBehavior), func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
resourceToApply := testConfigMap("resource-namespace", "resource-name")
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithApplyBehavior(tc.applyBehavior),
testsyncset.WithResources(resourceToApply),
testsyncset.WithSecrets(
testSecretMapping("test-secret", "secret-namespace", "secret-name"),
),
testsyncset.WithPatches(hivev1.SyncObjectPatch{
APIVersion: "patch-api/v1",
Kind: "PatchKind",
Namespace: "patch-namespace",
Name: "patch-name",
PatchType: "patch-type",
Patch: "test-patch",
}),
)
srcSecret := testsecret.FullBuilder(testNamespace, "test-secret", scheme).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet, srcSecret)
secretToApply := testsecret.BasicBuilder().GenericOptions(
testgeneric.WithNamespace("secret-namespace"),
testgeneric.WithName("secret-name"),
testgeneric.WithTypeMeta(scheme),
).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
switch tc.applyBehavior {
case hivev1.ApplySyncSetApplyBehavior:
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(resourceToApply)).Return(resource.CreatedApplyResult, nil)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(secretToApply)).Return(resource.CreatedApplyResult, nil)
case hivev1.CreateOnlySyncSetApplyBehavior:
rt.mockResourceHelper.EXPECT().Create(newApplyMatcher(resourceToApply)).Return(resource.CreatedApplyResult, nil)
rt.mockResourceHelper.EXPECT().Create(newApplyMatcher(secretToApply)).Return(resource.CreatedApplyResult, nil)
case hivev1.CreateOrUpdateSyncSetApplyBehavior:
rt.mockResourceHelper.EXPECT().CreateOrUpdate(newApplyMatcher(resourceToApply)).Return(resource.CreatedApplyResult, nil)
rt.mockResourceHelper.EXPECT().CreateOrUpdate(newApplyMatcher(secretToApply)).Return(resource.CreatedApplyResult, nil)
}
rt.mockResourceHelper.EXPECT().Patch(
types.NamespacedName{Namespace: "patch-namespace", Name: "patch-name"},
"PatchKind",
"patch-api/v1",
[]byte("test-patch"),
"patch-type",
).Return(nil)
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-syncset")}
rt.run(t)
})
}
}
func TestReconcileClusterSync_IgnoreNotApplicableSyncSets(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
syncSetResourceToApply := testConfigMap("dest-namespace", "resource-from-applicable-syncset")
applicableSyncSet := testsyncset.FullBuilder(testNamespace, "applicable-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithResources(syncSetResourceToApply),
)
nonApplicableSyncSet := testsyncset.FullBuilder(testNamespace, "non-applicable-syncset", scheme).Build(
testsyncset.ForClusterDeployments("other-cd"),
testsyncset.WithGeneration(1),
testsyncset.WithResources(
testConfigMap("dest-namespace", "resource-from-non-applicable-syncset"),
),
)
selectorSyncSetResourceToApply := testConfigMap("dest-namespace", "resource-from-applicable-selectorsyncset")
applicableSelectorSyncSet := testselectorsyncset.FullBuilder("applicable-selectorsyncset", scheme).Build(
testselectorsyncset.WithLabelSelector("test-label-key", "test-label-value"),
testselectorsyncset.WithGeneration(1),
testselectorsyncset.WithResources(selectorSyncSetResourceToApply),
)
nonApplicableSelectorSyncSet := testselectorsyncset.FullBuilder("non-applicable-selectorsyncset", scheme).Build(
testselectorsyncset.WithLabelSelector("test-label-key", "other-label-value"),
testselectorsyncset.WithGeneration(1),
testselectorsyncset.WithResources(
testConfigMap("dest-namespace", "resource-from-non-applicable-selectorsyncset"),
),
)
rt := newReconcileTest(t, mockCtrl, scheme,
cdBuilder(scheme).Build(testcd.WithLabel("test-label-key", "test-label-value")),
clusterSyncBuilder(scheme).Build(),
applicableSyncSet,
nonApplicableSyncSet,
applicableSelectorSyncSet,
nonApplicableSelectorSyncSet,
)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(syncSetResourceToApply)).Return(resource.CreatedApplyResult, nil)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(selectorSyncSetResourceToApply)).Return(resource.CreatedApplyResult, nil)
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("applicable-syncset")}
rt.expectedSelectorSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("applicable-selectorsyncset")}
rt.run(t)
}
func TestReconcileClusterSync_ApplySecretForSelectorSyncSet(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
cd := cdBuilder(scheme).Build(testcd.WithLabel("test-label-key", "test-label-value"))
selectorSyncSet := testselectorsyncset.FullBuilder("test-selectorsyncset", scheme).Build(
testselectorsyncset.WithLabelSelector("test-label-key", "test-label-value"),
testselectorsyncset.WithGeneration(1),
testselectorsyncset.WithSecrets(
hivev1.SecretMapping{
SourceRef: hivev1.SecretReference{Namespace: "src-namespace", Name: "src-name"},
TargetRef: hivev1.SecretReference{Namespace: "dest-namespace", Name: "dest-name"},
},
),
)
srcSecret := testsecret.FullBuilder("src-namespace", "src-name", scheme).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt := newReconcileTest(t, mockCtrl, scheme, cd, clusterSyncBuilder(scheme).Build(), selectorSyncSet, srcSecret)
secretToApply := testsecret.BasicBuilder().GenericOptions(
testgeneric.WithNamespace("dest-namespace"),
testgeneric.WithName("dest-name"),
testgeneric.WithTypeMeta(scheme),
).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(secretToApply)).Return(resource.CreatedApplyResult, nil)
rt.expectedSelectorSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-selectorsyncset")}
rt.run(t)
}
func TestReconcileClusterSync_MissingSecretNamespaceForSelectorSyncSet(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
cd := cdBuilder(scheme).Build(testcd.WithLabel("test-label-key", "test-label-value"))
selectorSyncSet := testselectorsyncset.FullBuilder("test-selectorsyncset", scheme).Build(
testselectorsyncset.WithLabelSelector("test-label-key", "test-label-value"),
testselectorsyncset.WithGeneration(1),
testselectorsyncset.WithSecrets(
testSecretMapping("test-secret", "dest-namespace", "dest-name"),
),
)
srcSecret := testsecret.FullBuilder(testNamespace, "test-secret", scheme).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt := newReconcileTest(t, mockCtrl, scheme, cd, clusterSyncBuilder(scheme).Build(), selectorSyncSet, srcSecret)
rt.expectedFailedMessage = "SelectorSyncSet test-selectorsyncset is failing"
rt.expectedSelectorSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-selectorsyncset",
withFailureResult("source namespace missing for secret 0"),
withNoFirstSuccessTime(),
)}
rt.run(t)
}
func TestReconcileClusterSync_ValidSecretNamespaceForSyncSet(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithSecrets(
hivev1.SecretMapping{
SourceRef: hivev1.SecretReference{Namespace: testNamespace, Name: "test-secret"},
TargetRef: hivev1.SecretReference{Namespace: "dest-namespace", Name: "dest-name"},
},
),
)
srcSecret := testsecret.FullBuilder(testNamespace, "test-secret", scheme).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet, srcSecret)
secretToApply := testsecret.BasicBuilder().GenericOptions(
testgeneric.WithNamespace("dest-namespace"),
testgeneric.WithName("dest-name"),
testgeneric.WithTypeMeta(scheme),
).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(secretToApply)).Return(resource.CreatedApplyResult, nil)
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-syncset")}
rt.run(t)
}
func TestReconcileClusterSync_InvalidSecretNamespaceForSyncSet(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithSecrets(
hivev1.SecretMapping{
SourceRef: hivev1.SecretReference{Namespace: "src-namespace", Name: "src-name"},
TargetRef: hivev1.SecretReference{Namespace: "dest-namespace", Name: "dest-name"},
},
),
)
srcSecret := testsecret.FullBuilder("src-namespace", "src-name", scheme).Build(
testsecret.WithDataKeyValue("test-key", []byte("test-data")),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet, srcSecret)
rt.expectedFailedMessage = "SyncSet test-syncset is failing"
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-syncset",
withFailureResult("source in wrong namespace for secret 0"),
withNoFirstSuccessTime(),
)}
rt.run(t)
}
func TestReconcileClusterSync_MissingSourceSecret(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithSecrets(
testSecretMapping("test-secret", "dest-namespace", "dest-name"),
),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), clusterSyncBuilder(scheme).Build(), syncSet)
rt.expectedFailedMessage = "SyncSet test-syncset is failing"
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-syncset",
withFailureResult(`failed to read secret 0: secrets "test-secret" not found`),
withNoFirstSuccessTime(),
)}
rt.expectRequeue = true
rt.run(t)
}
func TestReconcileClusterSync_ConditionNotMutatedWhenMessageNotChanged(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
scheme := newScheme()
resourceToApply := testConfigMap("dest-namespace", "dest-name")
syncSet := testsyncset.FullBuilder(testNamespace, "test-syncset", scheme).Build(
testsyncset.ForClusterDeployments(testCDName),
testsyncset.WithGeneration(1),
testsyncset.WithResources(resourceToApply),
)
existingClusterSync := clusterSyncBuilder(scheme).Build(
testcs.WithSyncSetStatus(buildSyncStatus("test-syncset",
withFailureResult("failed to apply"),
withTransitionInThePast(),
)),
testcs.WithCondition(hiveintv1alpha1.ClusterSyncCondition{
Type: hiveintv1alpha1.ClusterSyncFailed,
Status: corev1.ConditionTrue,
Reason: "Failure",
Message: "SyncSet test-syncset is failing",
LastTransitionTime: timeInThePast,
LastProbeTime: timeInThePast,
}),
)
rt := newReconcileTest(t, mockCtrl, scheme, cdBuilder(scheme).Build(), existingClusterSync, syncSet)
rt.mockResourceHelper.EXPECT().Apply(newApplyMatcher(resourceToApply)).
Return(resource.ApplyResult(""), errors.New("test apply error"))
rt.expectedFailedMessage = "SyncSet test-syncset is failing"
rt.expectedSyncSetStatuses = []hiveintv1alpha1.SyncStatus{buildSyncStatus("test-syncset",
withFailureResult("failed to apply resource 0: test apply error"),
withNoFirstSuccessTime(),
)}
rt.expectRequeue = true
rt.run(t)
actualClusterSync := &hiveintv1alpha1.ClusterSync{}
err := rt.c.Get(context.Background(), types.NamespacedName{Namespace: testNamespace, Name: testClusterSyncName}, actualClusterSync)
require.NoError(t, err, "unexpected error getting ClusterSync")
require.Len(t, actualClusterSync.Status.Conditions, 1, "expected exactly 1 condition")
cond := actualClusterSync.Status.Conditions[0]
require.Equal(t, hiveintv1alpha1.ClusterSyncFailed, cond.Type, "expected Failed condition")
require.Equal(t, string(corev1.ConditionTrue), string(cond.Status), "expected Failed condition to be true")
assert.Equal(t, timeInThePast, cond.LastTransitionTime, "expected no change in last transition time")
assert.Equal(t, timeInThePast, cond.LastProbeTime, "expected no change in last probe time")
}
func newScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
hivev1.AddToScheme(scheme)
hiveintv1alpha1.AddToScheme(scheme)
corev1.AddToScheme(scheme)
return scheme
}
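// cdBuilder returns the baseline ClusterDeployment fixture used by these tests: installed, with a fixed
// UID, and with the Unreachable condition set to False.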
func cdBuilder(scheme *runtime.Scheme) testcd.Builder {
return testcd.FullBuilder(testNamespace, testCDName, scheme).
GenericOptions(
testgeneric.WithUID(testCDUID),
).
Options(
testcd.Installed(),
testcd.WithCondition(hivev1.ClusterDeploymentCondition{
Type: hivev1.UnreachableCondition,
Status: corev1.ConditionFalse,
}),
)
}
func clusterSyncBuilder(scheme *runtime.Scheme) testcs.Builder {
return testcs.FullBuilder(testNamespace, testClusterSyncName, scheme).GenericOptions(
testgeneric.WithUID(testClusterSyncUID),
testgeneric.WithOwnerReference(cdBuilder(scheme).Build()),
)
}
func buildSyncLease(t time.Time) *hiveintv1alpha1.ClusterSyncLease {
return &hiveintv1alpha1.ClusterSyncLease{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
Name: testLeaseName,
OwnerReferences: []metav1.OwnerReference{{
APIVersion: "hiveinternal.openshift.io/v1alpha1",
Kind: "ClusterSync",
Name: testClusterSyncName,
UID: testClusterSyncUID,
BlockOwnerDeletion: pointer.BoolPtr(true),
}},
},
Spec: hiveintv1alpha1.ClusterSyncLeaseSpec{
RenewTime: metav1.NewMicroTime(t),
},
}
}
func testConfigMap(namespace, name string) *corev1.ConfigMap {
return &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
}
}
func testConfigMapRef(namespace, name string) hiveintv1alpha1.SyncResourceReference {
return hiveintv1alpha1.SyncResourceReference{
APIVersion: "v1",
Kind: "ConfigMap",
Namespace: namespace,
Name: name,
}
}
func testSecretMapping(srcName, destNamespace, destName string) hivev1.SecretMapping {
return hivev1.SecretMapping{
SourceRef: hivev1.SecretReference{
Name: srcName,
},
TargetRef: hivev1.SecretReference{
Namespace: destNamespace,
Name: destName,
},
}
}
func testSecretRef(namespace, name string) hiveintv1alpha1.SyncResourceReference {
return hiveintv1alpha1.SyncResourceReference{
APIVersion: "v1",
Kind: "Secret",
Namespace: namespace,
Name: name,
}
}
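// applyMatcher matches the serialized bytes passed to Apply against an expected resource: the expected
// object is converted to unstructured form and given the constants.HiveManagedLabel label, and Matches
// deep-compares that with the unmarshalled argument.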
type applyMatcher struct {
resource *unstructured.Unstructured
}
func newApplyMatcher(resource hivev1.MetaRuntimeObject) gomock.Matcher {
resourceAsJSON, err := json.Marshal(resource)
if err != nil {
panic(errors.Wrap(err, "could not marshal resource to JSON"))
}
u := &unstructured.Unstructured{}
if err := json.Unmarshal(resourceAsJSON, u); err != nil {
panic(errors.Wrap(err, "could not unmarshal as unstructured"))
}
labels := u.GetLabels()
if labels == nil {
labels = map[string]string{}
}
labels[constants.HiveManagedLabel] = "true"
u.SetLabels(labels)
return &applyMatcher{resource: u}
}
func (m *applyMatcher) Matches(x interface{}) bool {
rawData, ok := x.([]byte)
if !ok {
return false
}
u := &unstructured.Unstructured{}
if err := json.Unmarshal(rawData, u); err != nil {
return false
}
return reflect.DeepEqual(u, m.resource)
}
func (m *applyMatcher) String() string {
return fmt.Sprintf(
"is %s %s in %s",
m.resource.GetObjectKind().GroupVersionKind(),
m.resource.GetName(),
m.resource.GetNamespace(),
)
}
func (m *applyMatcher) Got(got interface{}) string {
switch t := got.(type) {
case []byte:
return string(t)
default:
return fmt.Sprintf("%v", t)
}
}
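// permute invokes foo once for every permutation of x, swapping elements in place and restoring the
// original order after each recursive branch.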
func permute(x []interface{}, foo func([]interface{})) {
switch l := len(x); l {
case 0:
case 1:
foo(x)
default:
for i := 0; i < l; i++ {
x[0], x[i] = x[i], x[0]
permute(x[1:], func(y []interface{}) {
foo(append(x[0:1], y...))
})
x[0], x[i] = x[i], x[0]
}
}
}
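// syncStatusOption mutates a SyncStatus fixture; buildSyncStatus starts from a successful status with
// ObservedGeneration 1 and a zero-valued FirstSuccessTime and then applies the given options.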
type syncStatusOption func(syncStatus *hiveintv1alpha1.SyncStatus)
type syncStatusBuilder struct {
name string
options []syncStatusOption
}
func newSyncStatusBuilder(name string) *syncStatusBuilder {
return &syncStatusBuilder{name: name}
}
func (b *syncStatusBuilder) Build(opts ...syncStatusOption) hiveintv1alpha1.SyncStatus {
return buildSyncStatus(b.name, append(b.options, opts...)...)
}
func (b *syncStatusBuilder) Options(opts ...syncStatusOption) *syncStatusBuilder {
return &syncStatusBuilder{
name: b.name,
options: append(b.options, opts...),
}
}
func buildSyncStatus(name string, opts ...syncStatusOption) hiveintv1alpha1.SyncStatus {
syncStatus := &hiveintv1alpha1.SyncStatus{
Name: name,
ObservedGeneration: 1,
Result: hiveintv1alpha1.SuccessSyncSetResult,
FirstSuccessTime: &metav1.Time{},
}
for _, opt := range opts {
opt(syncStatus)
}
return *syncStatus
}
func withObservedGeneration(observedGeneration int64) syncStatusOption {
return func(syncStatus *hiveintv1alpha1.SyncStatus) {
syncStatus.ObservedGeneration = observedGeneration
}
}
func withFailureResult(message string) syncStatusOption {
return func(syncStatus *hiveintv1alpha1.SyncStatus) {
syncStatus.Result = hiveintv1alpha1.FailureSyncSetResult
syncStatus.FailureMessage = message
}
}
func withResourcesToDelete(resourcesToDelete ...hiveintv1alpha1.SyncResourceReference) syncStatusOption {
return func(syncStatus *hiveintv1alpha1.SyncStatus) {
syncStatus.ResourcesToDelete = resourcesToDelete
}
}
func withTransitionInThePast() syncStatusOption {
return func(syncStatus *hiveintv1alpha1.SyncStatus) {
syncStatus.LastTransitionTime = timeInThePast
}
}
func withNoFirstSuccessTime() syncStatusOption {
return func(syncStatus *hiveintv1alpha1.SyncStatus) {
syncStatus.FirstSuccessTime = nil
}
}
func withFirstSuccessTimeInThePast() syncStatusOption {
return func(syncStatus *hiveintv1alpha1.SyncStatus) {
syncStatus.FirstSuccessTime = &timeInThePast
}
}
| 1 | 14,535 | Confirmed this test fails when the fix is reverted | openshift-hive | go |
@@ -38,6 +38,9 @@ func NewCollector(options *LogOptions) *Collector {
// Archive creates ZIP archive containing all node log files.
func (c *Collector) Archive() (outputFilepath string, err error) {
+ if c.options.Filepath == "" {
+ return "", errors.New("file logging is disabled, can't retrieve logs")
+ }
filepaths, err := c.logFilepaths()
if err != nil {
return "", err | 1 | /*
* Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logconfig
import (
"io/ioutil"
"path"
"strings"
"github.com/mholt/archiver"
"github.com/pkg/errors"
)
// Collector collects node logs.
type Collector struct {
options *LogOptions
}
// NewCollector creates a Collector instance.
func NewCollector(options *LogOptions) *Collector {
return &Collector{options: options}
}
// Archive creates ZIP archive containing all node log files.
func (c *Collector) Archive() (outputFilepath string, err error) {
filepaths, err := c.logFilepaths()
if err != nil {
return "", err
}
zip := archiver.NewZip()
zip.OverwriteExisting = true
zipFilepath := c.options.Filepath + ".zip"
err = zip.Archive(filepaths, zipFilepath)
if err != nil {
return "", errors.Wrap(err, "could not create log archive")
}
return zipFilepath, nil
}
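// logFilepaths returns the paths of all files in the log directory whose names contain the configured
// log file name.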
func (c *Collector) logFilepaths() (result []string, err error) {
filename := path.Base(c.options.Filepath)
dir := path.Dir(c.options.Filepath)
files, err := ioutil.ReadDir(dir)
if err != nil {
return nil, errors.Wrap(err, "failed to read directory: "+dir)
}
for _, f := range files {
if strings.Contains(f.Name(), filename) {
result = append(result, path.Join(dir, f.Name()))
}
}
return result, nil
}
| 1 | 15,293 | This method is still possibly insecure, as it may archive not only log files. I suggest filtering file names to ensure that they start with `.mysterium-log*`. | mysteriumnetwork-node | go |
@@ -14,6 +14,15 @@ class BaseDenseHead(BaseModule, metaclass=ABCMeta):
def __init__(self, init_cfg=None):
super(BaseDenseHead, self).__init__(init_cfg)
+ def init_weights(self):
+ super(BaseDenseHead, self).init_weights()
+ # avoid init_cfg overwrite the initialization of `conv_offset`
+ for m in self.modules():
+ # DeformConv2dPack, ModulatedDeformConv2dPack
+ if hasattr(m, 'conv_offset'):
+ m.conv_offset.weight.data.zero_()
+ m.conv_offset.bias.data.zero_()
+
@abstractmethod
def loss(self, **kwargs):
"""Compute losses of the head.""" | 1 | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch
from mmcv.ops import batched_nms
from mmcv.runner import BaseModule, force_fp32
from mmdet.core.utils import filter_scores_and_topk, select_single_mlvl
class BaseDenseHead(BaseModule, metaclass=ABCMeta):
"""Base class for DenseHeads."""
def __init__(self, init_cfg=None):
super(BaseDenseHead, self).__init__(init_cfg)
@abstractmethod
def loss(self, **kwargs):
"""Compute losses of the head."""
pass
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
score_factors=None,
img_metas=None,
cfg=None,
rescale=False,
with_nms=True,
**kwargs):
"""Transform network outputs of a batch into bbox results.
Note: When score_factors is not None, the cls_scores are
usually multiplied by it then obtain the real score used in NMS,
such as CenterNess in FCOS, IoU branch in ATSS.
Args:
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
score_factors (list[Tensor], Optional): Score factor for
all scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 1, H, W). Default None.
img_metas (list[dict], Optional): Image meta info. Default None.
cfg (mmcv.Config, Optional): Test / postprocessing configuration,
if None, test_cfg would be used. Default None.
rescale (bool): If True, return boxes in original image space.
Default False.
with_nms (bool): If True, do nms before return boxes.
Default True.
Returns:
list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1. The second item is a
(n,) tensor where each item is the predicted class label of
the corresponding box.
"""
assert len(cls_scores) == len(bbox_preds)
if score_factors is None:
# e.g. Retina, FreeAnchor, Foveabox, etc.
with_score_factors = False
else:
# e.g. FCOS, PAA, ATSS, AutoAssign, etc.
with_score_factors = True
assert len(cls_scores) == len(score_factors)
num_levels = len(cls_scores)
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes,
dtype=cls_scores[0].dtype,
device=cls_scores[0].device)
result_list = []
for img_id in range(len(img_metas)):
img_meta = img_metas[img_id]
cls_score_list = select_single_mlvl(cls_scores, img_id)
bbox_pred_list = select_single_mlvl(bbox_preds, img_id)
if with_score_factors:
score_factor_list = select_single_mlvl(score_factors, img_id)
else:
score_factor_list = [None for _ in range(num_levels)]
results = self._get_bboxes_single(cls_score_list, bbox_pred_list,
score_factor_list, mlvl_priors,
img_meta, cfg, rescale, with_nms,
**kwargs)
result_list.append(results)
return result_list
def _get_bboxes_single(self,
cls_score_list,
bbox_pred_list,
score_factor_list,
mlvl_priors,
img_meta,
cfg,
rescale=False,
with_nms=True,
**kwargs):
"""Transform outputs of a single image into bbox predictions.
Args:
cls_score_list (list[Tensor]): Box scores from all scale
levels of a single image, each item has shape
(num_priors * num_classes, H, W).
bbox_pred_list (list[Tensor]): Box energies / deltas from
all scale levels of a single image, each item has shape
(num_priors * 4, H, W).
score_factor_list (list[Tensor]): Score factor from all scale
levels of a single image, each item has shape
(num_priors * 1, H, W).
mlvl_priors (list[Tensor]): Each element in the list is
the priors of a single level in feature pyramid. In all
anchor-based methods, it has shape (num_priors, 4). In
all anchor-free methods, it has shape (num_priors, 2)
when `with_stride=True`, otherwise it still has shape
(num_priors, 4).
img_meta (dict): Image meta info.
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
tuple[Tensor]: Results of detected bboxes and labels. If with_nms
is False and mlvl_score_factor is None, return mlvl_bboxes and
mlvl_scores, else return mlvl_bboxes, mlvl_scores and
mlvl_score_factor. with_nms is usually False for aug test.
If with_nms is True, then return the following format
- det_bboxes (Tensor): Predicted bboxes with shape \
[num_bboxes, 5], where the first 4 columns are bounding \
box positions (tl_x, tl_y, br_x, br_y) and the 5-th \
column are scores between 0 and 1.
- det_labels (Tensor): Predicted labels of the corresponding \
box with shape [num_bboxes].
"""
if score_factor_list[0] is None:
# e.g. Retina, FreeAnchor, etc.
with_score_factors = False
else:
# e.g. FCOS, PAA, ATSS, etc.
with_score_factors = True
cfg = self.test_cfg if cfg is None else cfg
img_shape = img_meta['img_shape']
nms_pre = cfg.get('nms_pre', -1)
mlvl_bboxes = []
mlvl_scores = []
mlvl_labels = []
if with_score_factors:
mlvl_score_factors = []
else:
mlvl_score_factors = None
for level_idx, (cls_score, bbox_pred, score_factor, priors) in \
enumerate(zip(cls_score_list, bbox_pred_list,
score_factor_list, mlvl_priors)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
if with_score_factors:
score_factor = score_factor.permute(1, 2,
0).reshape(-1).sigmoid()
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
# remind that we set FG labels to [0, num_class-1]
# since mmdet v2.0
# BG cat_id: num_class
scores = cls_score.softmax(-1)[:, :-1]
# After https://github.com/open-mmlab/mmdetection/pull/6268/,
# this operation keeps fewer bboxes under the same `nms_pre`.
# There is no difference in performance for most models. If you
# find a slight drop in performance, you can set a larger
# `nms_pre` than before.
results = filter_scores_and_topk(
scores, cfg.score_thr, nms_pre,
dict(bbox_pred=bbox_pred, priors=priors))
scores, labels, keep_idxs, filtered_results = results
bbox_pred = filtered_results['bbox_pred']
priors = filtered_results['priors']
if with_score_factors:
score_factor = score_factor[keep_idxs]
bboxes = self.bbox_coder.decode(
priors, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_labels.append(labels)
if with_score_factors:
mlvl_score_factors.append(score_factor)
return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes,
img_meta['scale_factor'], cfg, rescale,
with_nms, mlvl_score_factors, **kwargs)
def _bbox_post_process(self,
mlvl_scores,
mlvl_labels,
mlvl_bboxes,
scale_factor,
cfg,
rescale=False,
with_nms=True,
mlvl_score_factors=None,
**kwargs):
"""bbox post-processing method.
The boxes are rescaled to the original image scale and the nms
operation is applied. with_nms is usually False for aug test.
Args:
mlvl_scores (list[Tensor]): Box scores from all scale
levels of a single image, each item has shape
(num_bboxes, ).
mlvl_labels (list[Tensor]): Box class labels from all scale
levels of a single image, each item has shape
(num_bboxes, ).
mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale
levels of a single image, each item has shape (num_bboxes, 4).
scale_factor (ndarray, optional): Scale factor of the image arranged
as (w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
mlvl_score_factors (list[Tensor], optional): Score factor from
all scale levels of a single image, each item has shape
(num_bboxes, ). Default: None.
Returns:
tuple[Tensor]: Results of detected bboxes and labels. If with_nms
is False and mlvl_score_factor is None, return mlvl_bboxes and
mlvl_scores, else return mlvl_bboxes, mlvl_scores and
mlvl_score_factor. with_nms is usually False for aug test.
If with_nms is True, then return the following format
- det_bboxes (Tensor): Predicted bboxes with shape \
[num_bboxes, 5], where the first 4 columns are bounding \
box positions (tl_x, tl_y, br_x, br_y) and the 5-th \
column are scores between 0 and 1.
- det_labels (Tensor): Predicted labels of the corresponding \
box with shape [num_bboxes].
"""
assert len(mlvl_scores) == len(mlvl_bboxes) == len(mlvl_labels)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
mlvl_labels = torch.cat(mlvl_labels)
if mlvl_score_factors is not None:
# TODO: Add sqrt operation in order to be consistent with
# the paper.
mlvl_score_factors = torch.cat(mlvl_score_factors)
mlvl_scores = mlvl_scores * mlvl_score_factors
if with_nms:
if mlvl_bboxes.numel() == 0:
det_bboxes = torch.cat([mlvl_bboxes, mlvl_scores[:, None]], -1)
return det_bboxes, mlvl_labels
det_bboxes, keep_idxs = batched_nms(mlvl_bboxes, mlvl_scores,
mlvl_labels, cfg.nms)
det_bboxes = det_bboxes[:cfg.max_per_img]
det_labels = mlvl_labels[keep_idxs][:cfg.max_per_img]
return det_bboxes, det_labels
else:
return mlvl_bboxes, mlvl_scores, mlvl_labels
def forward_train(self,
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_bboxes_ignore=None,
proposal_cfg=None,
**kwargs):
"""
Args:
x (list[Tensor]): Features from FPN.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
proposal_cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used
Returns:
tuple:
losses: (dict[str, Tensor]): A dictionary of loss components.
proposal_list (list[Tensor]): Proposals of each image.
"""
outs = self(x)
if gt_labels is None:
loss_inputs = outs + (gt_bboxes, img_metas)
else:
loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
if proposal_cfg is None:
return losses
else:
proposal_list = self.get_bboxes(
*outs, img_metas=img_metas, cfg=proposal_cfg)
return losses, proposal_list
def simple_test(self, feats, img_metas, rescale=False):
"""Test function without test-time augmentation.
Args:
feats (tuple[torch.Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
The first item is ``bboxes`` with shape (n, 5),
where 5 represent (tl_x, tl_y, br_x, br_y, score).
The shape of the second tensor in the tuple is ``labels``
with shape (n, ).
"""
return self.simple_test_bboxes(feats, img_metas, rescale=rescale)
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def onnx_export(self,
cls_scores,
bbox_preds,
score_factors=None,
img_metas=None,
with_nms=True):
"""Transform network output for a batch into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
with shape (N, num_points * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_points * 4, H, W).
score_factors (list[Tensor]): score_factors for each scale
level with shape (N, num_points * 1, H, W).
Default: None.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc. Default: None.
with_nms (bool): Whether apply nms to the bboxes. Default: True.
Returns:
tuple[Tensor, Tensor] | list[tuple]: When `with_nms` is True,
it is tuple[Tensor, Tensor]; the first tensor is bboxes with shape
[N, num_det, 5], the 5 arranged as (x1, y1, x2, y2, score),
and the second tensor is class labels of shape [N, num_det].
When `with_nms` is False, the first tensor is bboxes with
shape [N, num_det, 4] and the second tensor is the raw scores with
shape [N, num_det, num_classes].
"""
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes,
dtype=bbox_preds[0].dtype,
device=bbox_preds[0].device)
mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
assert len(
img_metas
) == 1, 'Only support one input image when exporting to ONNX'
img_shape = img_metas[0]['img_shape_for_onnx']
cfg = self.test_cfg
assert len(cls_scores) == len(bbox_preds) == len(mlvl_priors)
device = cls_scores[0].device
batch_size = cls_scores[0].shape[0]
# convert to tensor to keep tracing
nms_pre_tensor = torch.tensor(
cfg.get('nms_pre', -1), device=device, dtype=torch.long)
# e.g. Retina, FreeAnchor, etc.
if score_factors is None:
with_score_factors = False
mlvl_score_factor = [None for _ in range(num_levels)]
else:
# e.g. FCOS, PAA, ATSS, etc.
with_score_factors = True
mlvl_score_factor = [
score_factors[i].detach() for i in range(num_levels)
]
mlvl_score_factors = []
mlvl_batch_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, score_factors, priors in zip(
mlvl_cls_scores, mlvl_bbox_preds, mlvl_score_factor,
mlvl_priors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(0, 2, 3,
1).reshape(batch_size, -1,
self.cls_out_channels)
if self.use_sigmoid_cls:
scores = scores.sigmoid()
nms_pre_score = scores
else:
scores = scores.softmax(-1)
nms_pre_score = scores
if with_score_factors:
score_factors = score_factors.permute(0, 2, 3, 1).reshape(
batch_size, -1).sigmoid()
bbox_pred = bbox_pred.permute(0, 2, 3,
1).reshape(batch_size, -1, 4)
priors = priors.expand(batch_size, -1, priors.size(-1))
# Get top-k predictions
from mmdet.core.export import get_k_for_topk
nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1])
if nms_pre > 0:
if with_score_factors:
nms_pre_score = (nms_pre_score * score_factors[..., None])
else:
nms_pre_score = nms_pre_score
# Get maximum scores for foreground classes.
if self.use_sigmoid_cls:
max_scores, _ = nms_pre_score.max(-1)
else:
# remind that we set FG labels to [0, num_class-1]
# since mmdet v2.0
# BG cat_id: num_class
max_scores, _ = nms_pre_score[..., :-1].max(-1)
_, topk_inds = max_scores.topk(nms_pre)
batch_inds = torch.arange(
batch_size, device=bbox_pred.device).view(
-1, 1).expand_as(topk_inds).long()
# Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501
transformed_inds = bbox_pred.shape[1] * batch_inds + topk_inds
priors = priors.reshape(
-1, priors.size(-1))[transformed_inds, :].reshape(
batch_size, -1, priors.size(-1))
bbox_pred = bbox_pred.reshape(-1,
4)[transformed_inds, :].reshape(
batch_size, -1, 4)
scores = scores.reshape(
-1, self.cls_out_channels)[transformed_inds, :].reshape(
batch_size, -1, self.cls_out_channels)
if with_score_factors:
score_factors = score_factors.reshape(
-1, 1)[transformed_inds].reshape(batch_size, -1)
bboxes = self.bbox_coder.decode(
priors, bbox_pred, max_shape=img_shape)
mlvl_batch_bboxes.append(bboxes)
mlvl_scores.append(scores)
if with_score_factors:
mlvl_score_factors.append(score_factors)
batch_bboxes = torch.cat(mlvl_batch_bboxes, dim=1)
batch_scores = torch.cat(mlvl_scores, dim=1)
if with_score_factors:
batch_score_factors = torch.cat(mlvl_score_factors, dim=1)
# Replace multiclass_nms with ONNX::NonMaxSuppression in deployment
from mmdet.core.export import add_dummy_nms_for_onnx
if not self.use_sigmoid_cls:
batch_scores = batch_scores[..., :self.num_classes]
if with_score_factors:
batch_scores = batch_scores * (batch_score_factors.unsqueeze(2))
if with_nms:
max_output_boxes_per_class = cfg.nms.get(
'max_output_boxes_per_class', 200)
iou_threshold = cfg.nms.get('iou_threshold', 0.5)
score_threshold = cfg.score_thr
nms_pre = cfg.get('deploy_nms_pre', -1)
return add_dummy_nms_for_onnx(batch_bboxes, batch_scores,
max_output_boxes_per_class,
iou_threshold, score_threshold,
nms_pre, cfg.max_per_img)
else:
return batch_bboxes, batch_scores
| 1 | 26,694 | DCN should not be able to use `init_cfg`. | open-mmlab-mmdetection | py |
@@ -28,7 +28,7 @@ namespace OpenTelemetry.Shims.OpenTracing
{
if (!spanContext.IsValid)
{
- throw new ArgumentException(nameof(spanContext));
+ throw new ArgumentException($"{nameof(spanContext)} must be valid.");
}
this.SpanContext = spanContext; | 1 | // <copyright file="SpanContextShim.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using global::OpenTracing;
namespace OpenTelemetry.Shims.OpenTracing
{
public sealed class SpanContextShim : ISpanContext
{
private readonly IEnumerable<KeyValuePair<string, string>> baggage;
public SpanContextShim(in Trace.SpanContext spanContext, IEnumerable<KeyValuePair<string, string>> baggage = null)
{
if (!spanContext.IsValid)
{
throw new ArgumentException(nameof(spanContext));
}
this.SpanContext = spanContext;
this.baggage = baggage;
}
public Trace.SpanContext SpanContext { get; private set; }
/// <inheritdoc/>
public string TraceId => this.SpanContext.TraceId.ToString();
/// <inheritdoc/>
public string SpanId => this.SpanContext.SpanId.ToString();
/// <inheritdoc/>
public IEnumerable<KeyValuePair<string, string>> GetBaggageItems()
=> this.baggage;
}
}
| 1 | 16,575 | It is a bit weird to see a message like "something must be valid". | open-telemetry-opentelemetry-dotnet | .cs |
@@ -119,10 +119,10 @@ public class Standalone implements CliCommand {
LoggingOptions loggingOptions = new LoggingOptions(config);
loggingOptions.configureLogging();
- LOG.info("Logging configured.");
+ LOG.finest("Logging configured.");
DistributedTracer tracer = loggingOptions.getTracer();
- LOG.info("Using tracer: " + tracer);
+ LOG.finest("Using tracer: " + tracer);
GlobalDistributedTracer.setInstance(tracer);
EventBusConfig events = new EventBusConfig(config); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.commands;
import com.google.auto.service.AutoService;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.ParameterException;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.cli.CliCommand;
import org.openqa.selenium.events.EventBus;
import org.openqa.selenium.grid.config.AnnotatedConfig;
import org.openqa.selenium.grid.config.CompoundConfig;
import org.openqa.selenium.grid.config.ConcatenatingConfig;
import org.openqa.selenium.grid.config.Config;
import org.openqa.selenium.grid.config.EnvConfig;
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.grid.distributor.local.LocalDistributor;
import org.openqa.selenium.grid.docker.DockerFlags;
import org.openqa.selenium.grid.docker.DockerOptions;
import org.openqa.selenium.grid.log.LoggingOptions;
import org.openqa.selenium.grid.node.Node;
import org.openqa.selenium.grid.node.config.NodeOptions;
import org.openqa.selenium.grid.node.local.LocalNode;
import org.openqa.selenium.grid.router.Router;
import org.openqa.selenium.grid.server.BaseServer;
import org.openqa.selenium.grid.server.BaseServerFlags;
import org.openqa.selenium.grid.server.BaseServerOptions;
import org.openqa.selenium.grid.server.EventBusConfig;
import org.openqa.selenium.grid.server.EventBusFlags;
import org.openqa.selenium.grid.server.HelpFlags;
import org.openqa.selenium.grid.server.Server;
import org.openqa.selenium.grid.server.W3CCommandHandler;
import org.openqa.selenium.grid.sessionmap.SessionMap;
import org.openqa.selenium.grid.sessionmap.local.LocalSessionMap;
import org.openqa.selenium.grid.web.CombinedHandler;
import org.openqa.selenium.grid.web.RoutableHttpClientFactory;
import org.openqa.selenium.grid.web.Routes;
import org.openqa.selenium.net.NetworkUtils;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.tracing.DistributedTracer;
import org.openqa.selenium.remote.tracing.GlobalDistributedTracer;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.logging.Logger;
@AutoService(CliCommand.class)
public class Standalone implements CliCommand {
public static final Logger LOG = Logger.getLogger("selenium");
@Override
public String getName() {
return "standalone";
}
@Override
public String getDescription() {
return "The selenium server, running everything in-process.";
}
@Override
public Executable configure(String... args) {
HelpFlags help = new HelpFlags();
BaseServerFlags baseFlags = new BaseServerFlags(4444);
EventBusFlags eventFlags = new EventBusFlags();
DockerFlags dockerFlags = new DockerFlags();
StandaloneFlags standaloneFlags = new StandaloneFlags();
JCommander commander = JCommander.newBuilder()
.programName("standalone")
.addObject(baseFlags)
.addObject(help)
.addObject(eventFlags)
.addObject(dockerFlags)
.addObject(standaloneFlags)
.build();
return () -> {
try {
commander.parse(args);
} catch (ParameterException e) {
System.err.println(e.getMessage());
commander.usage();
return;
}
if (help.displayHelp(commander, System.out)) {
return;
}
Config config = new CompoundConfig(
new EnvConfig(),
new ConcatenatingConfig("selenium", '.', System.getProperties()),
new AnnotatedConfig(help),
new AnnotatedConfig(baseFlags),
new AnnotatedConfig(dockerFlags),
new AnnotatedConfig(standaloneFlags),
new AnnotatedConfig(eventFlags),
new DefaultStandaloneConfig());
LoggingOptions loggingOptions = new LoggingOptions(config);
loggingOptions.configureLogging();
LOG.info("Logging configured.");
DistributedTracer tracer = loggingOptions.getTracer();
LOG.info("Using tracer: " + tracer);
GlobalDistributedTracer.setInstance(tracer);
EventBusConfig events = new EventBusConfig(config);
EventBus bus = events.getEventBus();
String hostName;
try {
hostName = new NetworkUtils().getNonLoopbackAddressOfThisMachine();
} catch (WebDriverException e) {
hostName = "localhost";
}
int port = config.getInt("server", "port")
.orElseThrow(() -> new IllegalArgumentException("No port to use configured"));
URI localhost = null;
try {
localhost = new URI("http", null, hostName, port, null, null, null);
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
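// Wire the grid components together in-process: the routable client factory lets requests addressed to
// this server be handled directly by combinedHandler instead of going over the wire.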
CombinedHandler combinedHandler = new CombinedHandler();
HttpClient.Factory clientFactory = new RoutableHttpClientFactory(
localhost.toURL(),
combinedHandler,
HttpClient.Factory.createDefault());
SessionMap sessions = new LocalSessionMap(tracer, bus);
combinedHandler.addHandler(sessions);
Distributor distributor = new LocalDistributor(tracer, bus, clientFactory, sessions);
combinedHandler.addHandler(distributor);
Router router = new Router(tracer, clientFactory, sessions, distributor);
LocalNode.Builder nodeBuilder = LocalNode.builder(
tracer,
bus,
clientFactory,
localhost)
.maximumConcurrentSessions(Runtime.getRuntime().availableProcessors() * 3);
new NodeOptions(config).configure(clientFactory, nodeBuilder);
new DockerOptions(config).configure(clientFactory, nodeBuilder);
Node node = nodeBuilder.build();
combinedHandler.addHandler(node);
distributor.add(node);
Server<?> server = new BaseServer<>(new BaseServerOptions(config));
server.addRoute(Routes.matching(router).using(router).decorateWith(W3CCommandHandler::new));
server.start();
};
}
}
| 1 | 16,466 | Again. This is part of the start up of the server, and the diagnostics are helpful. | SeleniumHQ-selenium | py |
@@ -139,6 +139,12 @@ describe Travis::Build::Script::R, :sexp do
echo: true, timing: true]
end
+ it 'skips PDF manual when LaTeX is disabled' do
+ data[:config][:latex] = false
+ should include_sexp [:cmd, /.*R CMD check.* --no-manual.*/,
+ echo: true, timing: true]
+ end
+
describe 'bioc configuration is optional' do
it 'does not install bioc if not required' do
should_not include_sexp [:cmd, /.*biocLite.*/, | 1 | require 'spec_helper'
describe Travis::Build::Script::R, :sexp do
let (:data) { payload_for(:push, :r) }
let (:script) { described_class.new(data) }
subject { script.sexp }
it_behaves_like 'a build script sexp'
it 'normalizes bioc-devel correctly' do
data[:config][:r] = 'bioc-devel'
should include_sexp [:export, ['TRAVIS_R_VERSION', '3.3.1']]
should include_sexp [:cmd, %r{source\(\"https://bioconductor.org/biocLite.R\"\)},
assert: true, echo: true, timing: true, retry: true]
should include_sexp [:cmd, %r{useDevel\(TRUE\)},
assert: true, echo: true, timing: true, retry: true]
end
it 'normalizes bioc-release correctly' do
data[:config][:r] = 'bioc-release'
should include_sexp [:cmd, %r{source\(\"https://bioconductor.org/biocLite.R\"\)},
assert: true, echo: true, timing: true, retry: true]
should include_sexp [:export, ['TRAVIS_R_VERSION', '3.3.1']]
end
it 'r_packages works with a single package set' do
data[:config][:r_packages] = 'test'
should include_sexp [:cmd, %r{install\.packages\(c\(\"test\"\)\)},
assert: true, echo: true, timing: true]
end
it 'r_packages works with multiple packages set' do
data[:config][:r_packages] = ['test', 'test2']
should include_sexp [:cmd, %r{install\.packages\(c\(\"test\", \"test2\"\)\)},
assert: true, echo: true, timing: true]
end
it 'exports TRAVIS_R_VERSION' do
data[:config][:r] = '3.3.0'
should include_sexp [:export, ['TRAVIS_R_VERSION', '3.3.0']]
end
it 'downloads and installs latest R' do
should include_sexp [:cmd, %r{^curl.*https://s3.amazonaws.com/rstudio-travis/R-3.3.1.xz},
assert: true, echo: true, retry: true, timing: true]
end
it 'downloads and installs latest R on OS X' do
data[:config][:os] = 'osx'
should include_sexp [:cmd, %r{^curl.*bin/macosx/R-latest.pkg},
assert: true, echo: true, retry: true, timing: true]
end
it 'downloads and installs aliased R 3.2.5 on OS X' do
data[:config][:os] = 'osx'
data[:config][:r] = '3.2.5'
should include_sexp [:cmd, %r{^curl.*bin/macosx/old/R-3.2.4-revised.pkg},
assert: true, echo: true, retry: true, timing: true]
end
it 'downloads and installs other R versions on OS X' do
data[:config][:os] = 'osx'
data[:config][:r] = '3.1.3'
should include_sexp [:cmd, %r{^curl.*bin/macosx/old/R-3.1.3.pkg},
assert: true, echo: true, retry: true, timing: true]
end
it 'downloads and installs R 3.1' do
data[:config][:r] = '3.1'
should include_sexp [:cmd, %r{^curl.*https://s3.amazonaws.com/rstudio-travis/R-3.1.3.xz},
assert: true, echo: true, retry: true, timing: true]
end
it 'downloads and installs R 3.2' do
data[:config][:r] = '3.2'
should include_sexp [:cmd, %r{^curl.*https://s3.amazonaws.com/rstudio-travis/R-3.2.5.xz},
assert: true, echo: true, retry: true, timing: true]
end
it 'downloads and installs R devel' do
data[:config][:r] = 'devel'
should include_sexp [:cmd, %r{^curl.*https://s3.amazonaws.com/rstudio-travis/R-devel.xz},
assert: true, echo: true, retry: true, timing: true]
end
it 'downloads pandoc and installs into /usr/bin/pandoc' do
data[:config][:pandoc_version] = '1.15.2'
should include_sexp [:cmd, %r{curl -Lo /tmp/pandoc-1\.15\.2-1-amd64.deb https://github\.com/jgm/pandoc/releases/download/1.15.2/pandoc-1\.15\.2-1-amd64.deb},
assert: true, echo: true, timing: true]
should include_sexp [:cmd, %r{sudo dpkg -i /tmp/pandoc-},
assert: true, echo: true, timing: true]
end
it 'sets repos in ~/.Rprofile.site with defaults' do
data[:config][:cran] = 'https://cloud.r-project.org'
should include_sexp [:cmd, "echo 'options(repos = c(CRAN = \"https://cloud.r-project.org\"))' > ~/.Rprofile.site",
assert: true, echo: true, timing: true]
end
it 'sets repos in ~/.Rprofile.site with user specified repos' do
data[:config][:cran] = 'https://cran.rstudio.org'
should include_sexp [:cmd, "echo 'options(repos = c(CRAN = \"https://cran.rstudio.org\"))' > ~/.Rprofile.site",
assert: true, echo: true, timing: true]
end
it 'sets repos in ~/.Rprofile.site with additional user specified repos' do
data[:config][:repos] = {CRAN: 'https://cran.rstudio.org', ropensci: 'http://packages.ropensci.org'}
should include_sexp [:cmd, "echo 'options(repos = c(CRAN = \"https://cran.rstudio.org\", ropensci = \"http://packages.ropensci.org\"))' > ~/.Rprofile.site",
assert: true, echo: true, timing: true]
end
it 'installs binary devtools if sudo: required' do
data[:config][:sudo] = 'required'
should include_sexp [:cmd, /sudo apt-get install.*r-cran-devtools/,
assert: true, echo: true, timing: true, retry: true]
end
it 'installs source devtools if sudo: is missing' do
should include_sexp [:cmd, /Rscript -e 'install\.packages\(c\(\"devtools\"\)/,
assert: true, echo: true, timing: true]
should_not include_sexp [:cmd, /sudo apt-get install.*r-cran-devtools/,
assert: true, echo: true, timing: true, retry: true]
end
it 'installs source devtools if sudo: false' do
data[:config][:sudo] = false
should include_sexp [:cmd, /Rscript -e 'install\.packages\(c\(\"devtools\"\)/,
assert: true, echo: true, timing: true]
should_not include_sexp [:cmd, /sudo apt-get install.*r-cran-devtools/,
assert: true, echo: true, timing: true, retry: true]
end
it 'fails on package build and test failures' do
should include_sexp [:cmd, /.*R CMD build.*/,
assert: true, echo: true, timing: true]
should include_sexp [:cmd, /.*R CMD check.*/,
echo: true, timing: true]
end
describe 'bioc configuration is optional' do
it 'does not install bioc if not required' do
should_not include_sexp [:cmd, /.*biocLite.*/,
assert: true, echo: true, retry: true, timing: true]
end
it 'does install bioc if requested' do
data[:config][:bioc_required] = true
should include_sexp [:cmd, /.*biocLite.*/,
assert: true, echo: true, retry: true, timing: true]
end
it 'does install bioc with bioc_packages' do
data[:config][:bioc_packages] = ['GenomicFeatures']
should include_sexp [:cmd, /.*biocLite.*/,
assert: true, echo: true, retry: true, timing: true]
end
end
describe '#cache_slug' do
subject { described_class.new(data).cache_slug }
it {
data[:config][:r] = '3.3.0'
should eq("cache-#{CACHE_SLUG_EXTRAS}--R-3.3.0")
}
it {
data[:config][:r] = '3.2'
should eq("cache-#{CACHE_SLUG_EXTRAS}--R-3.2.5")
}
it {
data[:config][:r] = 'release'
should eq("cache-#{CACHE_SLUG_EXTRAS}--R-3.3.1")
}
it {
data[:config][:r] = 'oldrel'
should eq("cache-#{CACHE_SLUG_EXTRAS}--R-3.2.5")
}
it {
data[:config][:r] = '3.1'
should eq("cache-#{CACHE_SLUG_EXTRAS}--R-3.1.3")
}
it {
data[:config][:r] = 'devel'
should eq("cache-#{CACHE_SLUG_EXTRAS}--R-devel")
}
end
end
| 1 | 14,201 | I think you need a `,` at the end of the line? | travis-ci-travis-build | rb |
@@ -54,10 +54,7 @@ import org.camunda.bpm.engine.impl.db.entitymanager.operation.DbEntityOperation;
import org.camunda.bpm.engine.impl.db.entitymanager.operation.DbOperation;
import org.camunda.bpm.engine.impl.db.entitymanager.operation.DbOperation.State;
import org.camunda.bpm.engine.impl.db.entitymanager.operation.DbOperationType;
-import org.camunda.bpm.engine.impl.util.DatabaseUtil;
-import org.camunda.bpm.engine.impl.util.ExceptionUtil;
-import org.camunda.bpm.engine.impl.util.IoUtil;
-import org.camunda.bpm.engine.impl.util.ReflectUtil;
+import org.camunda.bpm.engine.impl.util.*;
/**
* | 1 | /*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.impl.db.sql;
import static org.camunda.bpm.engine.impl.util.EnsureUtil.ensureNotNull;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringReader;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.ibatis.executor.BatchResult;
import org.apache.ibatis.mapping.BoundSql;
import org.apache.ibatis.mapping.MappedStatement;
import org.apache.ibatis.session.Configuration;
import org.apache.ibatis.session.SqlSession;
import org.apache.ibatis.session.SqlSessionFactory;
import org.camunda.bpm.engine.ProcessEngine;
import org.camunda.bpm.engine.impl.ProcessEngineLogger;
import org.camunda.bpm.engine.impl.context.Context;
import org.camunda.bpm.engine.impl.db.AbstractPersistenceSession;
import org.camunda.bpm.engine.impl.db.DbEntity;
import org.camunda.bpm.engine.impl.db.EnginePersistenceLogger;
import org.camunda.bpm.engine.impl.db.HasDbReferences;
import org.camunda.bpm.engine.impl.db.HasDbRevision;
import org.camunda.bpm.engine.impl.db.entitymanager.operation.DbBulkOperation;
import org.camunda.bpm.engine.impl.db.entitymanager.operation.DbEntityOperation;
import org.camunda.bpm.engine.impl.db.entitymanager.operation.DbOperation;
import org.camunda.bpm.engine.impl.db.entitymanager.operation.DbOperation.State;
import org.camunda.bpm.engine.impl.db.entitymanager.operation.DbOperationType;
import org.camunda.bpm.engine.impl.util.DatabaseUtil;
import org.camunda.bpm.engine.impl.util.ExceptionUtil;
import org.camunda.bpm.engine.impl.util.IoUtil;
import org.camunda.bpm.engine.impl.util.ReflectUtil;
/**
*
* @author Tom Baeyens
* @author Joram Barrez
* @author Daniel Meyer
* @author Sebastian Menski
* @author Roman Smirnov
*
*/
public abstract class DbSqlSession extends AbstractPersistenceSession {
protected static final EnginePersistenceLogger LOG = ProcessEngineLogger.PERSISTENCE_LOGGER;
protected SqlSession sqlSession;
protected DbSqlSessionFactory dbSqlSessionFactory;
protected String connectionMetadataDefaultCatalog = null;
protected String connectionMetadataDefaultSchema = null;
public DbSqlSession(DbSqlSessionFactory dbSqlSessionFactory) {
this.dbSqlSessionFactory = dbSqlSessionFactory;
SqlSessionFactory sqlSessionFactory = dbSqlSessionFactory.getSqlSessionFactory();
this.sqlSession = ExceptionUtil.doWithExceptionWrapper(sqlSessionFactory::openSession);
}
public DbSqlSession(DbSqlSessionFactory dbSqlSessionFactory, Connection connection, String catalog, String schema) {
this.dbSqlSessionFactory = dbSqlSessionFactory;
SqlSessionFactory sqlSessionFactory = dbSqlSessionFactory.getSqlSessionFactory();
this.sqlSession = ExceptionUtil.doWithExceptionWrapper(() -> sqlSessionFactory.openSession(connection));
this.connectionMetadataDefaultCatalog = catalog;
this.connectionMetadataDefaultSchema = schema;
}
// select ////////////////////////////////////////////
public List<?> selectList(String statement, Object parameter) {
statement = dbSqlSessionFactory.mapStatement(statement);
List<Object> resultList = executeSelectList(statement, parameter);
for (Object object : resultList) {
fireEntityLoaded(object);
}
return resultList;
}
public List<Object> executeSelectList(String statement, Object parameter) {
return ExceptionUtil.doWithExceptionWrapper(() -> sqlSession.selectList(statement, parameter));
}
@SuppressWarnings("unchecked")
public <T extends DbEntity> T selectById(Class<T> type, String id) {
String selectStatement = dbSqlSessionFactory.getSelectStatement(type);
String mappedSelectStatement = dbSqlSessionFactory.mapStatement(selectStatement);
ensureNotNull("no select statement for " + type + " in the ibatis mapping files", "selectStatement", selectStatement);
Object result = ExceptionUtil.doWithExceptionWrapper(() -> sqlSession.selectOne(mappedSelectStatement, id));
fireEntityLoaded(result);
return (T) result;
}
public Object selectOne(String statement, Object parameter) {
String mappedStatement = dbSqlSessionFactory.mapStatement(statement);
Object result = ExceptionUtil.doWithExceptionWrapper(() -> sqlSession.selectOne(mappedStatement, parameter));
fireEntityLoaded(result);
return result;
}
// lock ////////////////////////////////////////////
public void lock(String statement, Object parameter) {
// do not perform locking if H2 database is used. H2 uses table level locks
// by default which may cause deadlocks if the deploy command needs to get a new
// Id using the DbIdGenerator while performing a deployment.
//
// On CockroachDB, pessimistic locks are disabled since this database uses
// a stricter, SERIALIZABLE transaction isolation which ensures a serialized
// manner of transaction execution, making our use-case of pessimistic locks
// redundant.
if (!DatabaseUtil.checkDatabaseType(DbSqlSessionFactory.CRDB, DbSqlSessionFactory.H2)) {
String mappedStatement = dbSqlSessionFactory.mapStatement(statement);
executeSelectForUpdate(mappedStatement, parameter);
} else {
LOG.debugDisabledPessimisticLocks();
}
}
protected abstract void executeSelectForUpdate(String statement, Object parameter);
protected void entityUpdatePerformed(DbEntityOperation operation,
int rowsAffected,
Exception failure) {
if (failure != null) {
configureFailedDbEntityOperation(operation, failure);
} else {
DbEntity dbEntity = operation.getEntity();
if (dbEntity instanceof HasDbRevision) {
if (rowsAffected != 1) {
// failed with optimistic locking
operation.setState(State.FAILED_CONCURRENT_MODIFICATION);
} else {
// increment revision of our copy
HasDbRevision versionedObject = (HasDbRevision) dbEntity;
versionedObject.setRevision(versionedObject.getRevisionNext());
operation.setState(State.APPLIED);
}
} else {
operation.setState(State.APPLIED);
}
}
}
protected void bulkUpdatePerformed(DbBulkOperation operation,
int rowsAffected,
Exception failure) {
bulkOperationPerformed(operation, rowsAffected, failure);
}
protected void bulkDeletePerformed(DbBulkOperation operation,
int rowsAffected,
Exception failure) {
bulkOperationPerformed(operation, rowsAffected, failure);
}
protected void bulkOperationPerformed(DbBulkOperation operation,
int rowsAffected,
Exception failure) {
if (failure != null) {
operation.setFailure(failure);
State failedState = State.FAILED_ERROR;
if (isCrdbConcurrencyConflict(failure)) {
failedState = State.FAILED_CONCURRENT_MODIFICATION_CRDB;
}
operation.setState(failedState);
} else {
operation.setRowsAffected(rowsAffected);
operation.setState(State.APPLIED);
}
}
protected void entityDeletePerformed(DbEntityOperation operation,
int rowsAffected,
Exception failure) {
if (failure != null) {
configureFailedDbEntityOperation(operation, failure);
} else {
operation.setRowsAffected(rowsAffected);
DbEntity dbEntity = operation.getEntity();
// It only makes sense to check for optimistic locking exceptions for objects that actually have a revision
if (dbEntity instanceof HasDbRevision && rowsAffected == 0) {
operation.setState(State.FAILED_CONCURRENT_MODIFICATION);
} else {
operation.setState(State.APPLIED);
}
}
}
protected void configureFailedDbEntityOperation(DbEntityOperation operation, Exception failure) {
operation.setRowsAffected(0);
operation.setFailure(failure);
DbOperationType operationType = operation.getOperationType();
DbOperation dependencyOperation = operation.getDependentOperation();
State failedState;
if (isCrdbConcurrencyConflict(failure)) {
failedState = State.FAILED_CONCURRENT_MODIFICATION_CRDB;
} else if (isConcurrentModificationException(operation, failure)) {
failedState = State.FAILED_CONCURRENT_MODIFICATION;
} else if (DbOperationType.DELETE.equals(operationType)
&& dependencyOperation != null
&& dependencyOperation.getState() != null
&& dependencyOperation.getState() != State.APPLIED) {
// the owning operation was not successful, so the prerequisite for this operation was not given
LOG.ignoreFailureDuePreconditionNotMet(operation, "Parent database operation failed", dependencyOperation);
failedState = State.NOT_APPLIED;
} else {
failedState = State.FAILED_ERROR;
}
operation.setState(failedState);
}
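// Treats a failure as a concurrent modification when it is a variable integrity violation, or when a
// foreign key constraint violation on INSERT/UPDATE points at a referenced entity that no longer exists
// (i.e. it was deleted by a concurrent transaction).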
protected boolean isConcurrentModificationException(DbOperation failedOperation,
Exception cause) {
boolean isConstraintViolation = ExceptionUtil.checkForeignKeyConstraintViolation(cause);
boolean isVariableIntegrityViolation = ExceptionUtil.checkVariableIntegrityViolation(cause);
if (isVariableIntegrityViolation) {
return true;
} else if (
isConstraintViolation
&& failedOperation instanceof DbEntityOperation
&& ((DbEntityOperation) failedOperation).getEntity() instanceof HasDbReferences
&& (failedOperation.getOperationType().equals(DbOperationType.INSERT)
|| failedOperation.getOperationType().equals(DbOperationType.UPDATE))
) {
DbEntity entity = ((DbEntityOperation) failedOperation).getEntity();
for (Map.Entry<String, Class> reference : ((HasDbReferences)entity).getReferencedEntitiesIdAndClass().entrySet()) {
DbEntity referencedEntity = selectById(reference.getValue(), reference.getKey());
if (referencedEntity == null) {
return true;
}
}
}
return false;
}
/**
* In cases where CockroachDB is used, and a failed operation is detected,
* the method checks if the exception was caused by a CockroachDB
* <code>TransactionRetryException</code>.
*
* @param cause for which an operation failed
* @return true if the failure was due to a CRDB <code>TransactionRetryException</code>.
* Otherwise, it's false.
*/
public static boolean isCrdbConcurrencyConflict(Throwable cause) {
// only check when CRDB is used
if (DatabaseUtil.checkDatabaseType(DbSqlSessionFactory.CRDB)) {
boolean isCrdbTxRetryException = ExceptionUtil.checkCrdbTransactionRetryException(cause);
if (isCrdbTxRetryException) {
return true;
}
}
return false;
}
// insert //////////////////////////////////////////
@Override
protected void insertEntity(DbEntityOperation operation) {
final DbEntity dbEntity = operation.getEntity();
// get statement
String insertStatement = dbSqlSessionFactory.getInsertStatement(dbEntity);
insertStatement = dbSqlSessionFactory.mapStatement(insertStatement);
ensureNotNull("no insert statement for " + dbEntity.getClass() + " in the ibatis mapping files", "insertStatement", insertStatement);
// execute the insert
executeInsertEntity(insertStatement, dbEntity);
}
protected void executeInsertEntity(String insertStatement, Object parameter) {
LOG.executeDatabaseOperation("INSERT", parameter);
try {
sqlSession.insert(insertStatement, parameter);
} catch (Exception e) {
// exception is wrapped later
throw e;
}
}
protected void entityInsertPerformed(DbEntityOperation operation,
int rowsAffected,
Exception failure) {
DbEntity entity = operation.getEntity();
if (failure != null) {
configureFailedDbEntityOperation(operation, failure);
} else {
// set revision of our copy to 1
if (entity instanceof HasDbRevision) {
HasDbRevision versionedObject = (HasDbRevision) entity;
versionedObject.setRevision(1);
}
operation.setState(State.APPLIED);
}
}
// delete ///////////////////////////////////////////
protected int executeDelete(String deleteStatement, Object parameter) {
// map the statement
String mappedDeleteStatement = dbSqlSessionFactory.mapStatement(deleteStatement);
try {
return sqlSession.delete(mappedDeleteStatement, parameter);
} catch (Exception e) {
// Exception is wrapped later
throw e;
}
}
// update ////////////////////////////////////////
public int executeUpdate(String updateStatement, Object parameter) {
String mappedUpdateStatement = dbSqlSessionFactory.mapStatement(updateStatement);
try {
return sqlSession.update(mappedUpdateStatement, parameter);
} catch (Exception e) {
// Exception is wrapped later
throw e;
}
}
public int update(String updateStatement, Object parameter) {
return ExceptionUtil.doWithExceptionWrapper(() -> sqlSession.update(updateStatement, parameter));
}
@Override
public int executeNonEmptyUpdateStmt(String updateStmt, Object parameter) {
String mappedUpdateStmt = dbSqlSessionFactory.mapStatement(updateStmt);
    // if the mapped statement is empty, which can happen for some databases, there is no need to execute it
boolean isMappedStmtEmpty = ExceptionUtil.doWithExceptionWrapper(() -> {
Configuration configuration = sqlSession.getConfiguration();
MappedStatement mappedStatement = configuration.getMappedStatement(mappedUpdateStmt);
BoundSql boundSql = mappedStatement.getBoundSql(parameter);
String sql = boundSql.getSql();
return sql.isEmpty();
});
if (isMappedStmtEmpty) {
return 0;
}
return update(mappedUpdateStmt, parameter);
}
// flush ////////////////////////////////////////////////////////////////////
public void flush() {
}
public void flushOperations() {
ExceptionUtil.doWithExceptionWrapper(this::flushBatchOperations);
}
public List<BatchResult> flushBatchOperations() {
try {
return sqlSession.flushStatements();
} catch (RuntimeException ex) {
// exception is wrapped later
throw ex;
}
}
public void close() {
ExceptionUtil.doWithExceptionWrapper(() -> {
sqlSession.close();
return null;
});
}
public void commit() {
ExceptionUtil.doWithExceptionWrapper(() -> {
sqlSession.commit();
return null;
});
}
public void rollback() {
ExceptionUtil.doWithExceptionWrapper(() -> {
sqlSession.rollback();
return null;
});
}
// schema operations ////////////////////////////////////////////////////////
public void dbSchemaCheckVersion() {
try {
String dbVersion = getDbVersion();
if (!ProcessEngine.VERSION.equals(dbVersion)) {
throw LOG.wrongDbVersionException(ProcessEngine.VERSION, dbVersion);
}
List<String> missingComponents = new ArrayList<>();
if (!isEngineTablePresent()) {
missingComponents.add("engine");
}
if (dbSqlSessionFactory.isDbHistoryUsed() && !isHistoryTablePresent()) {
missingComponents.add("history");
}
if (dbSqlSessionFactory.isDbIdentityUsed() && !isIdentityTablePresent()) {
missingComponents.add("identity");
}
if (dbSqlSessionFactory.isCmmnEnabled() && !isCmmnTablePresent()) {
missingComponents.add("case.engine");
}
if (dbSqlSessionFactory.isDmnEnabled() && !isDmnTablePresent()) {
missingComponents.add("decision.engine");
}
if (!missingComponents.isEmpty()) {
throw LOG.missingTableException(missingComponents);
}
} catch (Exception e) {
if (isMissingTablesException(e)) {
throw LOG.missingActivitiTablesException();
} else {
if (e instanceof RuntimeException) {
throw (RuntimeException) e;
} else {
throw LOG.unableToFetchDbSchemaVersion(e);
}
}
}
}
@Override
protected String getDbVersion() {
String selectSchemaVersionStatement = dbSqlSessionFactory.mapStatement("selectDbSchemaVersion");
return ExceptionUtil.doWithExceptionWrapper(() -> sqlSession.selectOne(selectSchemaVersionStatement));
}
@Override
protected void dbSchemaCreateIdentity() {
executeMandatorySchemaResource("create", "identity");
}
@Override
protected void dbSchemaCreateHistory() {
executeMandatorySchemaResource("create", "history");
}
@Override
protected void dbSchemaCreateEngine() {
executeMandatorySchemaResource("create", "engine");
}
@Override
protected void dbSchemaCreateCmmn() {
executeMandatorySchemaResource("create", "case.engine");
}
@Override
protected void dbSchemaCreateCmmnHistory() {
executeMandatorySchemaResource("create", "case.history");
}
@Override
protected void dbSchemaCreateDmn() {
executeMandatorySchemaResource("create", "decision.engine");
}
@Override
protected void dbSchemaCreateDmnHistory() {
executeMandatorySchemaResource("create", "decision.history");
}
@Override
protected void dbSchemaDropIdentity() {
executeMandatorySchemaResource("drop", "identity");
}
@Override
protected void dbSchemaDropHistory() {
executeMandatorySchemaResource("drop", "history");
}
@Override
protected void dbSchemaDropEngine() {
executeMandatorySchemaResource("drop", "engine");
}
@Override
protected void dbSchemaDropCmmn() {
executeMandatorySchemaResource("drop", "case.engine");
}
@Override
protected void dbSchemaDropCmmnHistory() {
executeMandatorySchemaResource("drop", "case.history");
}
@Override
protected void dbSchemaDropDmn() {
executeMandatorySchemaResource("drop", "decision.engine");
}
@Override
protected void dbSchemaDropDmnHistory() {
executeMandatorySchemaResource("drop", "decision.history");
}
public void executeMandatorySchemaResource(String operation, String component) {
executeSchemaResource(operation, component, getResourceForDbOperation(operation, operation, component), false);
}
public static String[] JDBC_METADATA_TABLE_TYPES = {"TABLE"};
@Override
public boolean isEngineTablePresent(){
return isTablePresent("ACT_RU_EXECUTION");
}
@Override
public boolean isHistoryTablePresent(){
return isTablePresent("ACT_HI_PROCINST");
}
@Override
public boolean isIdentityTablePresent(){
return isTablePresent("ACT_ID_USER");
}
@Override
public boolean isCmmnTablePresent() {
return isTablePresent("ACT_RE_CASE_DEF");
}
@Override
public boolean isCmmnHistoryTablePresent() {
return isTablePresent("ACT_HI_CASEINST");
}
@Override
public boolean isDmnTablePresent() {
return isTablePresent("ACT_RE_DECISION_DEF");
}
@Override
public boolean isDmnHistoryTablePresent() {
return isTablePresent("ACT_HI_DECINST");
}
public boolean isTablePresent(String tableName) {
tableName = prependDatabaseTablePrefix(tableName);
Connection connection = null;
try {
connection = ExceptionUtil.doWithExceptionWrapper(() -> sqlSession.getConnection());
DatabaseMetaData databaseMetaData = connection.getMetaData();
ResultSet tables = null;
String schema = this.connectionMetadataDefaultSchema;
if (dbSqlSessionFactory.getDatabaseSchema()!=null) {
schema = dbSqlSessionFactory.getDatabaseSchema();
}
if (DatabaseUtil.checkDatabaseType(DbSqlSessionFactory.POSTGRES, DbSqlSessionFactory.CRDB)) {
tableName = tableName.toLowerCase();
}
try {
tables = databaseMetaData.getTables(this.connectionMetadataDefaultCatalog, schema, tableName, JDBC_METADATA_TABLE_TYPES);
return tables.next();
} finally {
if (tables != null) {
tables.close();
}
}
} catch (Exception e) {
throw LOG.checkDatabaseTableException(e);
}
}
@Override
public List<String> getTableNamesPresent() {
List<String> tableNames = new ArrayList<>();
try {
ResultSet tablesRs = null;
try {
if (DbSqlSessionFactory.ORACLE.equals(getDbSqlSessionFactory().getDatabaseType())) {
tableNames = getTablesPresentInOracleDatabase();
} else {
Connection connection = getSqlSession().getConnection();
String databaseTablePrefix = getDbSqlSessionFactory().getDatabaseTablePrefix();
String schema = getDbSqlSessionFactory().getDatabaseSchema();
String tableNameFilter = prependDatabaseTablePrefix("ACT_%");
// for postgres or cockroachdb, we have to use lower case
if (DatabaseUtil.checkDatabaseType(DbSqlSessionFactory.POSTGRES, DbSqlSessionFactory.CRDB)) {
schema = schema == null ? schema : schema.toLowerCase();
tableNameFilter = tableNameFilter.toLowerCase();
}
DatabaseMetaData databaseMetaData = connection.getMetaData();
tablesRs = databaseMetaData.getTables(null, schema, tableNameFilter, DbSqlSession.JDBC_METADATA_TABLE_TYPES);
while (tablesRs.next()) {
String tableName = tablesRs.getString("TABLE_NAME");
if (!databaseTablePrefix.isEmpty()) {
tableName = databaseTablePrefix + tableName;
}
tableName = tableName.toUpperCase();
tableNames.add(tableName);
}
LOG.fetchDatabaseTables("jdbc metadata", tableNames);
}
} catch (SQLException se) {
throw se;
} finally {
if (tablesRs != null) {
tablesRs.close();
}
}
} catch (Exception e) {
throw LOG.getDatabaseTableNameException(e);
}
return tableNames;
}
protected List<String> getTablesPresentInOracleDatabase() throws SQLException {
List<String> tableNames = new ArrayList<>();
Connection connection = null;
PreparedStatement prepStat = null;
ResultSet tablesRs = null;
String selectTableNamesFromOracle = "SELECT table_name FROM all_tables WHERE table_name LIKE ?";
String databaseTablePrefix = getDbSqlSessionFactory().getDatabaseTablePrefix();
try {
connection = Context.getProcessEngineConfiguration().getDataSource().getConnection();
prepStat = connection.prepareStatement(selectTableNamesFromOracle);
prepStat.setString(1, databaseTablePrefix + "ACT_%");
tablesRs = prepStat.executeQuery();
while (tablesRs.next()) {
String tableName = tablesRs.getString("TABLE_NAME");
tableName = tableName.toUpperCase();
tableNames.add(tableName);
}
LOG.fetchDatabaseTables("oracle all_tables", tableNames);
} finally {
if (tablesRs != null) {
tablesRs.close();
}
if (prepStat != null) {
prepStat.close();
}
if (connection != null) {
connection.close();
}
}
return tableNames;
}
public String prependDatabaseTablePrefix(String tableName) {
String prefixWithoutSchema = dbSqlSessionFactory.getDatabaseTablePrefix();
String schema = dbSqlSessionFactory.getDatabaseSchema();
if (prefixWithoutSchema == null) {
return tableName;
}
if (schema == null) {
return prefixWithoutSchema + tableName;
}
if (prefixWithoutSchema.startsWith(schema + ".")) {
prefixWithoutSchema = prefixWithoutSchema.substring(schema.length() + 1);
}
return prefixWithoutSchema + tableName;
}
public String getResourceForDbOperation(String directory, String operation, String component) {
String databaseType = dbSqlSessionFactory.getDatabaseType();
return "org/camunda/bpm/engine/db/" + directory + "/activiti." + databaseType + "." + operation + "."+component+".sql";
}
public void executeSchemaResource(String operation, String component, String resourceName, boolean isOptional) {
InputStream inputStream = null;
try {
inputStream = ReflectUtil.getResourceAsStream(resourceName);
if (inputStream == null) {
if (isOptional) {
LOG.missingSchemaResource(resourceName, operation);
} else {
throw LOG.missingSchemaResourceException(resourceName, operation);
}
} else {
executeSchemaResource(operation, component, resourceName, inputStream);
}
} finally {
IoUtil.closeSilently(inputStream);
}
}
public void executeSchemaResource(String schemaFileResourceName) {
FileInputStream inputStream = null;
try {
inputStream = new FileInputStream(new File(schemaFileResourceName));
executeSchemaResource("schema operation", "process engine", schemaFileResourceName, inputStream);
} catch (FileNotFoundException e) {
throw LOG.missingSchemaResourceFileException(schemaFileResourceName, e);
} finally {
IoUtil.closeSilently(inputStream);
}
}
private void executeSchemaResource(String operation, String component, String resourceName, InputStream inputStream) {
String sqlStatement = null;
String exceptionSqlStatement = null;
try {
Connection connection = ExceptionUtil.doWithExceptionWrapper(() -> sqlSession.getConnection());
Exception exception = null;
byte[] bytes = IoUtil.readInputStream(inputStream, resourceName);
String ddlStatements = new String(bytes);
BufferedReader reader = new BufferedReader(new StringReader(ddlStatements));
String line = readNextTrimmedLine(reader);
List<String> logLines = new ArrayList<>();
while (line != null) {
if (line.startsWith("# ")) {
logLines.add(line.substring(2));
} else if (line.startsWith("-- ")) {
logLines.add(line.substring(3));
} else if (line.length()>0) {
if (line.endsWith(";")) {
sqlStatement = addSqlStatementPiece(sqlStatement, line.substring(0, line.length()-1));
try {
Statement jdbcStatement = connection.createStatement();
// no logging needed as the connection will log it
logLines.add(sqlStatement);
jdbcStatement.execute(sqlStatement);
jdbcStatement.close();
} catch (Exception e) {
if (exception == null) {
exception = e;
exceptionSqlStatement = sqlStatement;
}
LOG.failedDatabaseOperation(operation, sqlStatement, e);
} finally {
sqlStatement = null;
}
} else {
sqlStatement = addSqlStatementPiece(sqlStatement, line);
}
}
line = readNextTrimmedLine(reader);
}
LOG.performingDatabaseOperation(operation, component, resourceName);
LOG.executingDDL(logLines);
if (exception != null) {
throw exception;
}
LOG.successfulDatabaseOperation(operation, component);
} catch (Exception e) {
throw LOG.performDatabaseOperationException(operation, exceptionSqlStatement, e);
}
}
protected String addSqlStatementPiece(String sqlStatement, String line) {
if (sqlStatement==null) {
return line;
}
return sqlStatement + " \n" + line;
}
protected String readNextTrimmedLine(BufferedReader reader) throws IOException {
String line = reader.readLine();
if (line!=null) {
line = line.trim();
}
return line;
}
protected boolean isMissingTablesException(Exception e) {
Throwable cause = e.getCause();
if (cause != null) {
String exceptionMessage = cause.getMessage();
if(cause.getMessage() != null) {
// Matches message returned from H2
if ((exceptionMessage.contains("Table")) && (exceptionMessage.contains("not found"))) {
return true;
}
// Message returned from MySQL and Oracle
if ((exceptionMessage.contains("Table") || exceptionMessage.contains("table")) && (exceptionMessage.contains("doesn't exist"))) {
return true;
}
// Message returned from Postgres
return (exceptionMessage.contains("relation") || exceptionMessage.contains("table")) && (exceptionMessage.contains("does not exist"));
}
}
return false;
}
// getters and setters //////////////////////////////////////////////////////
public SqlSession getSqlSession() {
return sqlSession;
}
public DbSqlSessionFactory getDbSqlSessionFactory() {
return dbSqlSessionFactory;
}
}
| 1 | 11,001 | Please avoid wildcard imports. | camunda-camunda-bpm-platform | java |
@@ -84,6 +84,9 @@ namespace Microsoft.DotNet.Build.CloudTestTasks
using (HttpClient client = new HttpClient())
{
client.DefaultRequestHeaders.Clear();
+
+                        // On rare occasions the request fails if the network is slow and it takes more than 100 seconds to upload 4MB.
+ client.Timeout = TimeSpan.FromMinutes(5);
Func<HttpRequestMessage> createRequest = () =>
{
DateTime dt = DateTime.UtcNow; | 1 | // Licensed to the .NET Foundation under one or more agreements.
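
For context on the hunk above: HttpClient.Timeout defaults to 100 seconds in .NET, so a 4 MB block has to be uploaded at roughly 41 KB/s or better to finish inside the default window, while a five-minute limit lowers the required sustained rate to about 14 KB/s. Below is a minimal sketch of the same change with the value pulled out into a property so it could later be surfaced as a build-task parameter; the UploadTimeoutMinutes name is hypothetical and not part of the actual task.

using System;
using System.Net.Http;

public class UploadClientTimeoutSketch
{
    // Hypothetical knob; the change above hard-codes 5 minutes.
    public int UploadTimeoutMinutes { get; set; } = 5;

    public HttpClient CreateClient()
    {
        var client = new HttpClient();
        client.DefaultRequestHeaders.Clear();
        // The default HttpClient.Timeout of 100 seconds requires roughly
        // 41 KB/s sustained throughput for a 4 MB block; slow networks
        // cannot always guarantee that, so the window is widened here.
        client.Timeout = TimeSpan.FromMinutes(UploadTimeoutMinutes);
        return client;
    }
}
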
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using Microsoft.Build.Framework;
using Microsoft.Build.Utilities;
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Net;
using System.Net.Http;
using System.Text;
using System.Threading;
using Task = System.Threading.Tasks.Task;
namespace Microsoft.DotNet.Build.CloudTestTasks
{
public class UploadClient
{
private TaskLoggingHelper log;
public UploadClient(TaskLoggingHelper loggingHelper)
{
log = loggingHelper;
}
public string EncodeBlockIds(int numberOfBlocks, int lengthOfId)
{
string numberOfBlocksString = numberOfBlocks.ToString("D" + lengthOfId);
if (Encoding.UTF8.GetByteCount(numberOfBlocksString) <= 64)
{
byte[] bytes = Encoding.UTF8.GetBytes(numberOfBlocksString);
return Convert.ToBase64String(bytes);
}
else
{
throw new Exception("Task failed - Could not encode block id.");
}
}
public async Task UploadBlockBlobAsync(
CancellationToken ct,
string AccountName,
string AccountKey,
string ContainerName,
string filePath,
string destinationBlob,
string leaseId = "")
{
string resourceUrl = AzureHelper.GetContainerRestUrl(AccountName, ContainerName);
string fileName = destinationBlob;
fileName = fileName.Replace("\\", "/");
string blobUploadUrl = resourceUrl + "/" + fileName;
int size = (int)new FileInfo(filePath).Length;
            int blockSize = 4 * 1024 * 1024; // 4MB max size of a block blob
int bytesLeft = size;
List<string> blockIds = new List<string>();
int numberOfBlocks = (size / blockSize) + 1;
int countForId = 0;
using (FileStream fileStreamTofilePath = new FileStream(filePath, FileMode.Open))
{
int offset = 0;
while (bytesLeft > 0)
{
int nextBytesToRead = (bytesLeft < blockSize) ? bytesLeft : blockSize;
byte[] fileBytes = new byte[blockSize];
int read = fileStreamTofilePath.Read(fileBytes, 0, nextBytesToRead);
if (nextBytesToRead != read)
{
                    throw new Exception(string.Format(
                        "Number of bytes read ({0}) from file {1} isn't equal to the number of bytes expected ({2}).",
read, fileName, nextBytesToRead));
}
string blockId = EncodeBlockIds(countForId, numberOfBlocks.ToString().Length);
blockIds.Add(blockId);
string blockUploadUrl = blobUploadUrl + "?comp=block&blockid=" + WebUtility.UrlEncode(blockId);
using (HttpClient client = new HttpClient())
{
client.DefaultRequestHeaders.Clear();
Func<HttpRequestMessage> createRequest = () =>
{
DateTime dt = DateTime.UtcNow;
var req = new HttpRequestMessage(HttpMethod.Put, blockUploadUrl);
req.Headers.Add(
AzureHelper.DateHeaderString,
dt.ToString("R", CultureInfo.InvariantCulture));
req.Headers.Add(AzureHelper.VersionHeaderString, AzureHelper.StorageApiVersion);
if (!string.IsNullOrWhiteSpace(leaseId))
{
log.LogMessage($"Sending request: {leaseId} {blockUploadUrl}");
req.Headers.Add("x-ms-lease-id", leaseId);
}
req.Headers.Add(
AzureHelper.AuthorizationHeaderString,
AzureHelper.AuthorizationHeader(
AccountName,
AccountKey,
"PUT",
dt,
req,
string.Empty,
string.Empty,
nextBytesToRead.ToString(),
string.Empty));
Stream postStream = new MemoryStream();
postStream.Write(fileBytes, 0, nextBytesToRead);
postStream.Seek(0, SeekOrigin.Begin);
req.Content = new StreamContent(postStream);
return req;
};
log.LogMessage(MessageImportance.Low, "Sending request to upload part {0} of file {1}", countForId, fileName);
using (HttpResponseMessage response = await AzureHelper.RequestWithRetry(log, client, createRequest))
{
log.LogMessage(
MessageImportance.Low,
"Received response to upload part {0} of file {1}: Status Code:{2} Status Desc: {3}",
countForId,
fileName,
response.StatusCode,
await response.Content.ReadAsStringAsync());
}
}
offset += read;
bytesLeft -= nextBytesToRead;
countForId += 1;
}
}
string blockListUploadUrl = blobUploadUrl + "?comp=blocklist";
using (HttpClient client = new HttpClient())
{
Func<HttpRequestMessage> createRequest = () =>
{
DateTime dt1 = DateTime.UtcNow;
var req = new HttpRequestMessage(HttpMethod.Put, blockListUploadUrl);
req.Headers.Add(AzureHelper.DateHeaderString, dt1.ToString("R", CultureInfo.InvariantCulture));
req.Headers.Add(AzureHelper.VersionHeaderString, AzureHelper.StorageApiVersion);
string contentType = DetermineContentTypeBasedOnFileExtension(filePath);
if (!string.IsNullOrEmpty(contentType))
{
req.Headers.Add(AzureHelper.ContentTypeString, contentType);
}
string cacheControl = DetermineCacheControlBasedOnFileExtension(filePath);
if (!string.IsNullOrEmpty(cacheControl))
{
req.Headers.Add(AzureHelper.CacheControlString, cacheControl);
}
var body = new StringBuilder("<?xml version=\"1.0\" encoding=\"UTF-8\"?><BlockList>");
foreach (object item in blockIds)
body.AppendFormat("<Latest>{0}</Latest>", item);
body.Append("</BlockList>");
byte[] bodyData = Encoding.UTF8.GetBytes(body.ToString());
if (!string.IsNullOrWhiteSpace(leaseId))
{
log.LogMessage($"Sending list request: {leaseId} {blockListUploadUrl}");
req.Headers.Add("x-ms-lease-id", leaseId);
}
req.Headers.Add(
AzureHelper.AuthorizationHeaderString,
AzureHelper.AuthorizationHeader(
AccountName,
AccountKey,
"PUT",
dt1,
req,
string.Empty,
string.Empty,
bodyData.Length.ToString(),
string.Empty));
Stream postStream = new MemoryStream();
postStream.Write(bodyData, 0, bodyData.Length);
postStream.Seek(0, SeekOrigin.Begin);
req.Content = new StreamContent(postStream);
return req;
};
using (HttpResponseMessage response = await AzureHelper.RequestWithRetry(log, client, createRequest))
{
log.LogMessage(
MessageImportance.Low,
"Received response to combine block list for file {0}: Status Code:{1} Status Desc: {2}",
fileName,
response.StatusCode,
await response.Content.ReadAsStringAsync());
}
}
}
private string DetermineContentTypeBasedOnFileExtension(string filename)
{
if (Path.GetExtension(filename) == ".svg")
{
return "image/svg+xml";
}
else if (Path.GetExtension(filename) == ".version")
{
return "text/plain";
}
return string.Empty;
}
private string DetermineCacheControlBasedOnFileExtension(string filename)
{
if (Path.GetExtension(filename) == ".svg")
{
return "No-Cache";
}
return string.Empty;
}
}
}
| 1 | 14,122 | Did we consider making the timeout an argument to the build task? Would amke it easer to configure in the future: change a build property vs. rebuilding the DLL. | dotnet-buildtools | .cs |
@@ -238,9 +238,17 @@ func run(o *Options) error {
}
var egressController *egress.EgressController
+ var nodeIP net.IP
+ if nodeConfig.NodeIPv4Addr != nil {
+ nodeIP = nodeConfig.NodeIPv4Addr.IP
+ } else if nodeConfig.NodeIPv6Addr != nil {
+ nodeIP = nodeConfig.NodeIPv6Addr.IP
+ } else {
+ return fmt.Errorf("invalid NodeIPAddr in Node config: %v", nodeConfig)
+ }
if features.DefaultFeatureGate.Enabled(features.Egress) {
egressController, err = egress.NewEgressController(
- ofClient, antreaClientProvider, crdClient, ifaceStore, routeClient, nodeConfig.Name, nodeConfig.NodeIPv4Addr.IP,
+ ofClient, antreaClientProvider, crdClient, ifaceStore, routeClient, nodeConfig.Name, nodeIP,
o.config.ClusterMembershipPort, egressInformer, nodeInformer, externalIPPoolInformer,
)
if err != nil { | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"net"
"time"
"k8s.io/client-go/informers"
"k8s.io/klog/v2"
"antrea.io/antrea/pkg/agent"
"antrea.io/antrea/pkg/agent/apiserver"
"antrea.io/antrea/pkg/agent/cniserver"
_ "antrea.io/antrea/pkg/agent/cniserver/ipam"
"antrea.io/antrea/pkg/agent/config"
"antrea.io/antrea/pkg/agent/controller/egress"
"antrea.io/antrea/pkg/agent/controller/networkpolicy"
"antrea.io/antrea/pkg/agent/controller/noderoute"
"antrea.io/antrea/pkg/agent/controller/traceflow"
"antrea.io/antrea/pkg/agent/flowexporter/connections"
"antrea.io/antrea/pkg/agent/flowexporter/exporter"
"antrea.io/antrea/pkg/agent/flowexporter/flowrecords"
"antrea.io/antrea/pkg/agent/interfacestore"
"antrea.io/antrea/pkg/agent/metrics"
npl "antrea.io/antrea/pkg/agent/nodeportlocal"
"antrea.io/antrea/pkg/agent/openflow"
"antrea.io/antrea/pkg/agent/proxy"
"antrea.io/antrea/pkg/agent/querier"
"antrea.io/antrea/pkg/agent/route"
"antrea.io/antrea/pkg/agent/stats"
"antrea.io/antrea/pkg/agent/types"
crdinformers "antrea.io/antrea/pkg/client/informers/externalversions"
"antrea.io/antrea/pkg/features"
"antrea.io/antrea/pkg/log"
"antrea.io/antrea/pkg/monitor"
ofconfig "antrea.io/antrea/pkg/ovs/openflow"
"antrea.io/antrea/pkg/ovs/ovsconfig"
"antrea.io/antrea/pkg/signals"
"antrea.io/antrea/pkg/util/cipher"
"antrea.io/antrea/pkg/util/k8s"
"antrea.io/antrea/pkg/version"
)
// informerDefaultResync is the default resync period if a handler doesn't specify one.
// Use the same default value as kube-controller-manager:
// https://github.com/kubernetes/kubernetes/blob/release-1.17/pkg/controller/apis/config/v1alpha1/defaults.go#L120
const informerDefaultResync = 12 * time.Hour
// run starts Antrea agent with the given options and waits for termination signal.
func run(o *Options) error {
klog.Infof("Starting Antrea agent (version %s)", version.GetFullVersion())
// Create K8s Clientset, CRD Clientset and SharedInformerFactory for the given config.
k8sClient, _, crdClient, _, err := k8s.CreateClients(o.config.ClientConnection, o.config.KubeAPIServerOverride)
if err != nil {
return fmt.Errorf("error creating K8s clients: %v", err)
}
legacyCRDClient, err := k8s.CreateLegacyCRDClient(o.config.ClientConnection, o.config.KubeAPIServerOverride)
if err != nil {
return fmt.Errorf("error creating legacy CRD client: %v", err)
}
informerFactory := informers.NewSharedInformerFactory(k8sClient, informerDefaultResync)
crdInformerFactory := crdinformers.NewSharedInformerFactory(crdClient, informerDefaultResync)
traceflowInformer := crdInformerFactory.Crd().V1alpha1().Traceflows()
egressInformer := crdInformerFactory.Crd().V1alpha2().Egresses()
nodeInformer := informerFactory.Core().V1().Nodes()
externalIPPoolInformer := crdInformerFactory.Crd().V1alpha2().ExternalIPPools()
// Create Antrea Clientset for the given config.
antreaClientProvider := agent.NewAntreaClientProvider(o.config.AntreaClientConnection, k8sClient)
// Register Antrea Agent metrics if EnablePrometheusMetrics is set
if o.config.EnablePrometheusMetrics {
metrics.InitializePrometheusMetrics()
}
// Create ovsdb and openflow clients.
ovsdbAddress := ovsconfig.GetConnAddress(o.config.OVSRunDir)
ovsdbConnection, err := ovsconfig.NewOVSDBConnectionUDS(ovsdbAddress)
if err != nil {
// TODO: ovsconfig.NewOVSDBConnectionUDS might return timeout in the future, need to add retry
return fmt.Errorf("error connecting OVSDB: %v", err)
}
defer ovsdbConnection.Close()
ovsDatapathType := ovsconfig.OVSDatapathType(o.config.OVSDatapathType)
ovsBridgeClient := ovsconfig.NewOVSBridge(o.config.OVSBridge, ovsDatapathType, ovsdbConnection)
ovsBridgeMgmtAddr := ofconfig.GetMgmtAddress(o.config.OVSRunDir, o.config.OVSBridge)
ofClient := openflow.NewClient(o.config.OVSBridge, ovsBridgeMgmtAddr, ovsDatapathType,
features.DefaultFeatureGate.Enabled(features.AntreaProxy),
features.DefaultFeatureGate.Enabled(features.AntreaPolicy),
features.DefaultFeatureGate.Enabled(features.Egress),
features.DefaultFeatureGate.Enabled(features.FlowExporter))
_, serviceCIDRNet, _ := net.ParseCIDR(o.config.ServiceCIDR)
var serviceCIDRNetv6 *net.IPNet
// Todo: use FeatureGate to check if IPv6 is enabled and then read configuration item "ServiceCIDRv6".
if o.config.ServiceCIDRv6 != "" {
_, serviceCIDRNetv6, _ = net.ParseCIDR(o.config.ServiceCIDRv6)
}
_, encapMode := config.GetTrafficEncapModeFromStr(o.config.TrafficEncapMode)
_, encryptionMode := config.GetTrafficEncryptionModeFromStr(o.config.TrafficEncryptionMode)
if o.config.EnableIPSecTunnel {
klog.Warning("enableIPSecTunnel is deprecated, use trafficEncryptionMode instead.")
encryptionMode = config.TrafficEncryptionModeIPSec
}
networkConfig := &config.NetworkConfig{
TunnelType: ovsconfig.TunnelType(o.config.TunnelType),
TrafficEncapMode: encapMode,
TrafficEncryptionMode: encryptionMode,
TransportIface: o.config.TransportInterface,
}
wireguardConfig := &config.WireGuardConfig{
Port: o.config.WireGuard.Port,
}
routeClient, err := route.NewClient(serviceCIDRNet, networkConfig, o.config.NoSNAT)
if err != nil {
return fmt.Errorf("error creating route client: %v", err)
}
// Create an ifaceStore that caches network interfaces managed by this node.
ifaceStore := interfacestore.NewInterfaceStore()
// networkReadyCh is used to notify that the Node's network is ready.
// Functions that rely on the Node's network should wait for the channel to close.
networkReadyCh := make(chan struct{})
// set up signal capture: the first SIGTERM / SIGINT signal is handled gracefully and will
// cause the stopCh channel to be closed; if another signal is received before the program
// exits, we will force exit.
stopCh := signals.RegisterSignalHandlers()
// Initialize agent and node network.
agentInitializer := agent.NewInitializer(
k8sClient,
ovsBridgeClient,
ofClient,
routeClient,
ifaceStore,
o.config.OVSBridge,
o.config.HostGateway,
o.config.DefaultMTU,
serviceCIDRNet,
serviceCIDRNetv6,
networkConfig,
wireguardConfig,
networkReadyCh,
stopCh,
features.DefaultFeatureGate.Enabled(features.AntreaProxy))
err = agentInitializer.Initialize()
if err != nil {
return fmt.Errorf("error initializing agent: %v", err)
}
nodeConfig := agentInitializer.GetNodeConfig()
nodeRouteController := noderoute.NewNodeRouteController(
k8sClient,
informerFactory,
ofClient,
ovsBridgeClient,
routeClient,
ifaceStore,
networkConfig,
nodeConfig,
agentInitializer.GetWireGuardClient())
var proxier proxy.Proxier
if features.DefaultFeatureGate.Enabled(features.AntreaProxy) {
v4Enabled := config.IsIPv4Enabled(nodeConfig, networkConfig.TrafficEncapMode)
v6Enabled := config.IsIPv6Enabled(nodeConfig, networkConfig.TrafficEncapMode)
switch {
case v4Enabled && v6Enabled:
proxier = proxy.NewDualStackProxier(nodeConfig.Name, informerFactory, ofClient)
case v4Enabled:
proxier = proxy.NewProxier(nodeConfig.Name, informerFactory, ofClient, false)
case v6Enabled:
proxier = proxy.NewProxier(nodeConfig.Name, informerFactory, ofClient, true)
default:
return fmt.Errorf("at least one of IPv4 or IPv6 should be enabled")
}
}
// entityUpdates is a channel for receiving entity updates from CNIServer and
// notifying NetworkPolicyController to reconcile rules related to the
// updated entities.
entityUpdates := make(chan types.EntityReference, 100)
// We set flow poll interval as the time interval for rule deletion in the async
// rule cache, which is implemented as part of the idAllocator. This is to preserve
// the rule info for populating NetworkPolicy fields in the Flow Exporter even
// after rule deletion.
asyncRuleDeleteInterval := o.pollInterval
antreaPolicyEnabled := features.DefaultFeatureGate.Enabled(features.AntreaPolicy)
// In Antrea agent, status manager and audit logging will automatically be enabled
// if AntreaPolicy feature is enabled.
statusManagerEnabled := antreaPolicyEnabled
loggingEnabled := antreaPolicyEnabled
var denyConnStore *connections.DenyConnectionStore
if features.DefaultFeatureGate.Enabled(features.FlowExporter) {
denyConnStore = connections.NewDenyConnectionStore(ifaceStore, proxier, o.staleConnectionTimeout)
go denyConnStore.RunPeriodicDeletion(stopCh)
}
networkPolicyController, err := networkpolicy.NewNetworkPolicyController(
antreaClientProvider,
ofClient,
ifaceStore,
nodeConfig.Name,
entityUpdates,
antreaPolicyEnabled,
statusManagerEnabled,
loggingEnabled,
denyConnStore,
asyncRuleDeleteInterval,
o.config.DNSServerOverride)
if err != nil {
return fmt.Errorf("error creating new NetworkPolicy controller: %v", err)
}
// statsCollector collects stats and reports to the antrea-controller periodically. For now it's only used for
// NetworkPolicy stats.
var statsCollector *stats.Collector
if features.DefaultFeatureGate.Enabled(features.NetworkPolicyStats) {
statsCollector = stats.NewCollector(antreaClientProvider, ofClient, networkPolicyController)
}
var egressController *egress.EgressController
if features.DefaultFeatureGate.Enabled(features.Egress) {
egressController, err = egress.NewEgressController(
ofClient, antreaClientProvider, crdClient, ifaceStore, routeClient, nodeConfig.Name, nodeConfig.NodeIPv4Addr.IP,
o.config.ClusterMembershipPort, egressInformer, nodeInformer, externalIPPoolInformer,
)
if err != nil {
return fmt.Errorf("error creating new Egress controller: %v", err)
}
}
isChaining := false
if networkConfig.TrafficEncapMode.IsNetworkPolicyOnly() {
isChaining = true
}
cniServer := cniserver.New(
o.config.CNISocket,
o.config.HostProcPathPrefix,
nodeConfig,
k8sClient,
isChaining,
routeClient,
networkReadyCh)
err = cniServer.Initialize(ovsBridgeClient, ofClient, ifaceStore, entityUpdates)
if err != nil {
return fmt.Errorf("error initializing CNI server: %v", err)
}
var traceflowController *traceflow.Controller
if features.DefaultFeatureGate.Enabled(features.Traceflow) {
traceflowController = traceflow.NewTraceflowController(
k8sClient,
informerFactory,
crdClient,
traceflowInformer,
ofClient,
networkPolicyController,
ovsBridgeClient,
ifaceStore,
networkConfig,
nodeConfig,
serviceCIDRNet)
}
// TODO: we should call this after installing flows for initial node routes
// and initial NetworkPolicies so that no packets will be mishandled.
if err := agentInitializer.FlowRestoreComplete(); err != nil {
return err
}
if err := antreaClientProvider.RunOnce(); err != nil {
return err
}
// Start the NPL agent.
if features.DefaultFeatureGate.Enabled(features.NodePortLocal) {
nplController, err := npl.InitializeNPLAgent(
k8sClient,
informerFactory,
o.config.NPLPortRange,
nodeConfig.Name)
if err != nil {
return fmt.Errorf("failed to start NPL agent: %v", err)
}
go nplController.Run(stopCh)
}
log.StartLogFileNumberMonitor(stopCh)
go routeClient.Run(stopCh)
go cniServer.Run(stopCh)
informerFactory.Start(stopCh)
crdInformerFactory.Start(stopCh)
go antreaClientProvider.Run(stopCh)
go nodeRouteController.Run(stopCh)
go networkPolicyController.Run(stopCh)
if features.DefaultFeatureGate.Enabled(features.Egress) {
go egressController.Run(stopCh)
}
if features.DefaultFeatureGate.Enabled(features.NetworkPolicyStats) {
go statsCollector.Run(stopCh)
}
if features.DefaultFeatureGate.Enabled(features.Traceflow) {
go traceflowController.Run(stopCh)
}
if features.DefaultFeatureGate.Enabled(features.AntreaProxy) {
go proxier.GetProxyProvider().Run(stopCh)
}
agentQuerier := querier.NewAgentQuerier(
nodeConfig,
networkConfig,
ifaceStore,
k8sClient,
ofClient,
ovsBridgeClient,
proxier,
networkPolicyController,
o.config.APIPort)
agentMonitor := monitor.NewAgentMonitor(crdClient, legacyCRDClient, agentQuerier)
go agentMonitor.Run(stopCh)
cipherSuites, err := cipher.GenerateCipherSuitesList(o.config.TLSCipherSuites)
if err != nil {
return fmt.Errorf("error generating Cipher Suite list: %v", err)
}
apiServer, err := apiserver.New(
agentQuerier,
networkPolicyController,
o.config.APIPort,
o.config.EnablePrometheusMetrics,
o.config.ClientConnection.Kubeconfig,
cipherSuites,
cipher.TLSVersionMap[o.config.TLSMinVersion])
if err != nil {
return fmt.Errorf("error when creating agent API server: %v", err)
}
go apiServer.Run(stopCh)
// Start PacketIn for features and specify their own reason.
var packetInReasons []uint8
if features.DefaultFeatureGate.Enabled(features.Traceflow) {
packetInReasons = append(packetInReasons, uint8(openflow.PacketInReasonTF))
}
if features.DefaultFeatureGate.Enabled(features.AntreaPolicy) {
packetInReasons = append(packetInReasons, uint8(openflow.PacketInReasonNP))
}
if len(packetInReasons) > 0 {
go ofClient.StartPacketInHandler(packetInReasons, stopCh)
}
	// Initialize flow exporter to start goroutines to poll conntrack flows and export IPFIX flow records
if features.DefaultFeatureGate.Enabled(features.FlowExporter) {
v4Enabled := config.IsIPv4Enabled(nodeConfig, networkConfig.TrafficEncapMode)
v6Enabled := config.IsIPv6Enabled(nodeConfig, networkConfig.TrafficEncapMode)
isNetworkPolicyOnly := networkConfig.TrafficEncapMode.IsNetworkPolicyOnly()
flowRecords := flowrecords.NewFlowRecords()
conntrackConnStore := connections.NewConntrackConnectionStore(
connections.InitializeConnTrackDumper(nodeConfig, serviceCIDRNet, serviceCIDRNetv6, ovsDatapathType, features.DefaultFeatureGate.Enabled(features.AntreaProxy)),
flowRecords,
ifaceStore,
v4Enabled,
v6Enabled,
proxier,
networkPolicyController,
o.pollInterval,
o.staleConnectionTimeout)
go conntrackConnStore.Run(stopCh)
flowExporter, err := exporter.NewFlowExporter(
conntrackConnStore,
flowRecords,
denyConnStore,
o.flowCollectorAddr,
o.flowCollectorProto,
o.activeFlowTimeout,
o.idleFlowTimeout,
v4Enabled,
v6Enabled,
k8sClient,
nodeRouteController,
isNetworkPolicyOnly)
if err != nil {
return fmt.Errorf("error when creating IPFIX flow exporter: %v", err)
}
go flowExporter.Run(stopCh)
}
<-stopCh
klog.Info("Stopping Antrea agent")
return nil
}
| 1 | 44,082 | dumb question: the commit message talks about dual-stack support but it seems that in a dual-stack cluster, Egress will only support the IPv4 address family? | antrea-io-antrea | go |
@@ -230,7 +230,7 @@ class ServerTLSLayer(_TLSLayer):
server.sni = self.context.client.sni.encode("idna")
else:
server.sni = server.address[0].encode("idna")
- self.tls[server].set_tlsext_host_name(server.sni)
+ self.tls[server].set_tlsext_host_name(server.sni)
self.tls[server].set_connect_state()
yield from self.process(events.DataReceived(server, b"")) | 1 | import os
import struct
from typing import MutableMapping, Optional, Iterator, Generator, Any
from OpenSSL import SSL
from mitmproxy.certs import CertStore
from mitmproxy.net.tls import ClientHello
from mitmproxy.proxy.protocol import tls
from mitmproxy.proxy2 import context
from mitmproxy.proxy2 import layer, commands, events
from mitmproxy.proxy2.utils import expect
def is_tls_handshake_record(d: bytes) -> bool:
"""
Returns:
True, if the passed bytes start with the TLS record magic bytes
False, otherwise.
"""
# TLS ClientHello magic, works for SSLv3, TLSv1.0, TLSv1.1, TLSv1.2.
# TLS 1.3 mandates legacy_record_version to be 0x0301.
# http://www.moserware.com/2009/06/first-few-milliseconds-of-https.html#client-hello
return (
len(d) >= 3 and
d[0] == 0x16 and
d[1] == 0x03 and
0x0 <= d[2] <= 0x03
)
def handshake_record_contents(data: bytes) -> Iterator[bytes]:
"""
Returns a generator that yields the bytes contained in each handshake record.
This will raise an error on the first non-handshake record, so fully exhausting this
generator is a bad idea.
"""
offset = 0
while True:
if len(data) < offset + 5:
return
record_header = data[offset:offset + 5]
if not is_tls_handshake_record(record_header):
raise ValueError(f"Expected TLS record, got {record_header} instead.")
record_size = struct.unpack("!H", record_header[3:])[0]
if record_size == 0:
raise ValueError("Record must not be empty.")
offset += 5
if len(data) < offset + record_size:
return
record_body = data[offset:offset + record_size]
yield record_body
offset += record_size
def get_client_hello(data: bytes) -> Optional[bytes]:
"""
Read all TLS records that contain the initial ClientHello.
Returns the raw handshake packet bytes, without TLS record headers.
"""
client_hello = b""
for d in handshake_record_contents(data):
client_hello += d
if len(client_hello) >= 4:
client_hello_size = struct.unpack("!I", b'\x00' + client_hello[1:4])[0] + 4
if len(client_hello) >= client_hello_size:
return client_hello[:client_hello_size]
return None
def parse_client_hello(data: bytes) -> Optional[ClientHello]:
"""
Check if the supplied bytes contain a full ClientHello message,
and if so, parse it.
Returns:
- A ClientHello object on success
- None, if the TLS record is not complete
Raises:
- A ValueError, if the passed ClientHello is invalid
"""
# Check if ClientHello is complete
client_hello = get_client_hello(data)
if client_hello:
return ClientHello(client_hello[4:])
return None
class _TLSLayer(layer.Layer):
send_buffer: MutableMapping[SSL.Connection, bytearray]
tls: MutableMapping[context.Connection, SSL.Connection]
child_layer: Optional[layer.Layer] = None
def __init__(self, context):
super().__init__(context)
self.send_buffer = {}
self.tls = {}
def tls_interact(self, conn: context.Connection):
while True:
try:
data = self.tls[conn].bio_read(65535)
except SSL.WantReadError:
# Okay, nothing more waiting to be sent.
return
else:
yield commands.SendData(conn, data)
def send(
self,
send_command: commands.SendData,
) -> commands.TCommandGenerator:
tls_conn = self.tls[send_command.connection]
if send_command.connection.tls_established:
tls_conn.sendall(send_command.data)
yield from self.tls_interact(send_command.connection)
else:
buf = self.send_buffer.setdefault(tls_conn, bytearray())
buf.extend(send_command.data)
def negotiate(self, event: events.DataReceived) -> Generator[commands.Command, Any, bool]:
"""
Make sure to trigger processing if done!
"""
# bio_write errors for b"", so we need to check first if we actually received something.
tls_conn = self.tls[event.connection]
if event.data:
tls_conn.bio_write(event.data)
try:
tls_conn.do_handshake()
except SSL.WantReadError:
yield from self.tls_interact(event.connection)
return False
else:
event.connection.tls_established = True
event.connection.alpn = tls_conn.get_alpn_proto_negotiated()
print(f"TLS established: {event.connection}")
# TODO: Set all other connection attributes here
# there might already be data in the OpenSSL BIO, so we need to trigger its processing.
yield from self.relay(events.DataReceived(event.connection, b""))
if tls_conn in self.send_buffer:
data_to_send = bytes(self.send_buffer.pop(tls_conn))
yield from self.send(commands.SendData(event.connection, data_to_send))
return True
def relay(self, event: events.DataReceived):
tls_conn = self.tls[event.connection]
if event.data:
tls_conn.bio_write(event.data)
yield from self.tls_interact(event.connection)
plaintext = bytearray()
while True:
try:
plaintext.extend(tls_conn.recv(65535))
except (SSL.WantReadError, SSL.ZeroReturnError):
break
if plaintext:
evt = events.DataReceived(event.connection, bytes(plaintext))
# yield commands.Log(f"Plain{evt}")
yield from self.event_to_child(evt)
def event_to_child(self, event: events.Event) -> commands.TCommandGenerator:
for command in self.child_layer.handle_event(event):
if isinstance(command, commands.SendData) and command.connection in self.tls:
yield from self.send(command)
else:
yield command
class ServerTLSLayer(_TLSLayer):
"""
This layer manages TLS on potentially multiple server connections.
"""
lazy_init: bool = False
def __init__(self, context: context.Context):
super().__init__(context)
self.child_layer = layer.NextLayer(context)
@expect(events.Start)
def start(self, event: events.Start) -> commands.TCommandGenerator:
yield from self.child_layer.handle_event(event)
server = self.context.server
if server.tls:
if server.connected:
yield from self._start_tls(server)
else:
self.lazy_init = True
self._handle_event = self.process
_handle_event = start
def process(self, event: events.Event) -> None:
if isinstance(event, events.DataReceived) and event.connection in self.tls:
if not event.connection.tls_established:
yield from self.negotiate(event)
else:
yield from self.relay(event)
elif isinstance(event, events.OpenConnectionReply):
err = event.reply
conn = event.command.connection
if self.lazy_init and not err and conn == self.context.server:
yield from self._start_tls(conn)
yield from self.event_to_child(event)
elif isinstance(event, events.ConnectionClosed):
yield from self.event_to_child(event)
self.send_buffer.pop(
self.tls.pop(event.connection, None),
None
)
else:
yield from self.event_to_child(event)
def _start_tls(self, server: context.Server):
ssl_context = SSL.Context(SSL.SSLv23_METHOD)
if server.alpn_offers:
ssl_context.set_alpn_protos(server.alpn_offers)
self.tls[server] = SSL.Connection(ssl_context)
if server.sni:
if server.sni is True:
if self.context.client.sni:
server.sni = self.context.client.sni.encode("idna")
else:
server.sni = server.address[0].encode("idna")
self.tls[server].set_tlsext_host_name(server.sni)
self.tls[server].set_connect_state()
yield from self.process(events.DataReceived(server, b""))
class ClientTLSLayer(_TLSLayer):
"""
This layer establishes TLS on a single client connection.
┌─────┐
│Start│
└┬────┘
↓
┌────────────────────┐
│Wait for ClientHello│
└┬───────────────────┘
│ Do we need server TLS info
│ to establish TLS with client?
│ ┌───────────────────┐
├─────→│Wait for Server TLS│
│ yes └┬──────────────────┘
│no │
↓ ↓
┌────────────────┐
│Process messages│
└────────────────┘
"""
recv_buffer: bytearray
def __init__(self, context: context.Context):
super().__init__(context)
self.recv_buffer = bytearray()
self.child_layer = ServerTLSLayer(self.context)
@expect(events.Start)
def state_start(self, _) -> commands.TCommandGenerator:
self.context.client.tls = True
self._handle_event = self.state_wait_for_clienthello
yield from ()
_handle_event = state_start
@expect(events.DataReceived, events.ConnectionClosed)
def state_wait_for_clienthello(self, event: events.Event):
client = self.context.client
server = self.context.server
if isinstance(event, events.DataReceived) and event.connection == client:
self.recv_buffer.extend(event.data)
try:
client_hello = parse_client_hello(self.recv_buffer)
except ValueError as e:
raise NotImplementedError() from e # TODO
if client_hello:
yield commands.Log(f"Client Hello: {client_hello}")
client.sni = client_hello.sni
client.alpn_offers = client_hello.alpn_protocols
client_tls_requires_server_connection = (
self.context.server.tls and
self.context.options.upstream_cert and
(
self.context.options.add_upstream_certs_to_client_chain or
client.alpn_offers or
not client.sni
)
)
# What do we do with the client connection now?
if client_tls_requires_server_connection and not server.tls_established:
yield from self.start_server_tls()
self._handle_event = self.state_wait_for_server_tls
else:
yield from self.start_negotiate()
self._handle_event = self.state_process
# In any case, we now have enough information to start server TLS if needed.
yield from self.child_layer.handle_event(events.Start())
else:
raise NotImplementedError(event) # TODO
def state_wait_for_server_tls(self, event: events.Event):
yield from self.event_to_child(event)
# TODO: Handle case where TLS establishment fails.
# We still need a good way to signal this - one possibility would be by closing
# the connection?
if self.context.server.tls_established:
yield from self.start_negotiate()
self._handle_event = self.state_process
def state_process(self, event: events.Event):
if isinstance(event, events.DataReceived) and event.connection == self.context.client:
if not self.context.client.tls_established:
yield from self.negotiate(event)
else:
yield from self.relay(event)
else:
yield from self.event_to_child(event)
def start_server_tls(self):
"""
We often need information from the upstream connection to establish TLS with the client.
For example, we need to check if the client does ALPN or not.
"""
if not self.context.server.connected:
self.context.server.alpn_offers = [
x for x in self.context.client.alpn_offers
if not (x.startswith(b"h2-") or x.startswith(b"spdy"))
]
err = yield commands.OpenConnection(self.context.server)
if err:
yield commands.Log(
"Cannot establish server connection, which is required to establish TLS with the client."
)
def start_negotiate(self):
# FIXME: Do this properly
client = self.context.client
server = self.context.server
context = SSL.Context(SSL.SSLv23_METHOD)
cert, privkey, cert_chain = CertStore.from_store(
os.path.expanduser("~/.mitmproxy"), "mitmproxy"
).get_cert(b"example.com", (b"example.com",))
context.use_privatekey(privkey)
context.use_certificate(cert.x509)
context.set_cipher_list(tls.DEFAULT_CLIENT_CIPHERS)
if server.alpn:
def alpn_select_callback(conn_, options):
if server.alpn in options:
return server.alpn
context.set_alpn_select_callback(alpn_select_callback)
self.tls[self.context.client] = SSL.Connection(context)
self.tls[self.context.client].set_accept_state()
yield from self.state_process(events.DataReceived(
client, bytes(self.recv_buffer)
))
self.recv_buffer = bytearray()
| 1 | 14,241 | Do you want to cherry-pick these separately and get them merged already or keep it in here? Both is fine with me! | mitmproxy-mitmproxy | py |
@@ -57,12 +57,12 @@ public class TestPerformance extends LuceneTestCase {
@Test
public void en() throws Exception {
- checkAnalysisPerformance("en", 1_000_000);
+ checkAnalysisPerformance("en", 1_200_000);
}
@Test
public void en_suggest() throws Exception {
- checkSuggestionPerformance("en", 1_200);
+ checkSuggestionPerformance("en", 3_000);
}
@Test | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis.hunspell;
import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Assume;
import org.junit.AssumptionViolatedException;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* A test that runs various Hunspell APIs on real dictionaries and relatively large corpora for
* specific languages and prints the execution times. The dictionaries should be set up as in {@link
 * TestAllDictionaries}; the corpora should be in files named {@code langCode.txt} (e.g. {@code
* en.txt}) in a directory specified in {@code -Dhunspell.corpora=...}
*/
@TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class)
public class TestPerformance extends LuceneTestCase {
private static Path corporaDir;
@BeforeClass
public static void resolveCorpora() {
String dir = System.getProperty("hunspell.corpora");
Assume.assumeFalse("Requires test word corpora at -Dhunspell.corpora=...", dir == null);
corporaDir = Paths.get(dir);
}
@Test
public void en() throws Exception {
checkAnalysisPerformance("en", 1_000_000);
}
@Test
public void en_suggest() throws Exception {
checkSuggestionPerformance("en", 1_200);
}
@Test
public void de() throws Exception {
checkAnalysisPerformance("de", 300_000);
}
@Test
public void de_suggest() throws Exception {
checkSuggestionPerformance("de", 55);
}
@Test
public void fr() throws Exception {
checkAnalysisPerformance("fr", 100_000);
}
@Test
public void fr_suggest() throws Exception {
checkSuggestionPerformance("fr", 120);
}
private Dictionary loadDictionary(String code) throws IOException, ParseException {
Path aff = findAffFile(code);
Dictionary dictionary = TestAllDictionaries.loadDictionary(aff);
System.out.println("Loaded " + aff);
return dictionary;
}
private void checkAnalysisPerformance(String code, int wordCount) throws Exception {
Dictionary dictionary = loadDictionary(code);
List<String> words = loadWords(code, wordCount, dictionary);
Stemmer stemmer = new Stemmer(dictionary);
Hunspell speller = new Hunspell(dictionary, TimeoutPolicy.NO_TIMEOUT, () -> {});
measure(
"Stemming " + code,
blackHole -> {
for (String word : words) {
blackHole.accept(stemmer.stem(word));
}
});
measure(
"Spellchecking " + code,
blackHole -> {
for (String word : words) {
blackHole.accept(speller.spell(word));
}
});
System.out.println();
}
private void checkSuggestionPerformance(String code, int wordCount) throws Exception {
Dictionary dictionary = loadDictionary(code);
Hunspell speller = new Hunspell(dictionary, TimeoutPolicy.THROW_EXCEPTION, () -> {});
List<String> words =
loadWords(code, wordCount, dictionary).stream()
.filter(w -> hasQuickSuggestions(speller, w))
.collect(Collectors.toList());
System.out.println("Checking " + words.size() + " misspelled words");
measure(
"Suggestions for " + code,
blackHole -> {
for (String word : words) {
blackHole.accept(speller.suggest(word));
}
});
System.out.println();
}
private boolean hasQuickSuggestions(Hunspell speller, String word) {
if (speller.spell(word)) {
return false;
}
long start = System.nanoTime();
try {
speller.suggest(word);
} catch (SuggestionTimeoutException e) {
System.out.println("Timeout happened for " + word + ", skipping");
return false;
}
long elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    if (elapsed > Hunspell.SUGGEST_TIME_LIMIT * 4 / 5) {
      System.out.println(elapsed + "ms for " + word + ", too close to time limit, skipping");
      return false;
    }
return true;
}
private Path findAffFile(String code) throws IOException {
return TestAllDictionaries.findAllAffixFiles()
.filter(
path -> {
String parentName = path.getParent().getFileName().toString();
return code.equals(Dictionary.extractLanguageCode(parentName));
})
.findFirst()
.orElseThrow(
() -> new AssumptionViolatedException("Ignored, cannot find aff/dic for: " + code));
}
private List<String> loadWords(String code, int wordCount, Dictionary dictionary)
throws IOException {
Path dataPath = corporaDir.resolve(code + ".txt");
if (!Files.isReadable(dataPath)) {
throw new AssumptionViolatedException("Missing text corpora at: " + dataPath);
}
List<String> words = new ArrayList<>();
try (InputStream stream = Files.newInputStream(dataPath)) {
BufferedReader reader =
new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8));
while (true) {
String line = reader.readLine();
if (line == null) break;
for (String token : line.split("[^a-zA-Z" + Pattern.quote(dictionary.wordChars) + "]+")) {
String word = stripPunctuation(token);
if (word != null) {
words.add(word);
if (words.size() == wordCount) {
return words;
}
}
}
}
}
return words;
}
private void measure(String what, Iteration iteration) {
Consumer<Object> consumer =
o -> {
if (o == null) {
throw new AssertionError();
}
};
// warmup
for (int i = 0; i < 2; i++) {
iteration.run(consumer);
}
List<Long> times = new ArrayList<>();
for (int i = 0; i < 7; i++) {
long start = System.currentTimeMillis();
iteration.run(consumer);
times.add(System.currentTimeMillis() - start);
}
System.out.println(
what
+ ": average "
+ times.stream().mapToLong(Long::longValue).average().orElseThrow()
+ ", all times = "
+ times);
}
private interface Iteration {
void run(Consumer<Object> blackHole);
}
static String stripPunctuation(String token) {
int start = 0;
int end = token.length();
while (start < end && isPunctuation(token.charAt(start))) start++;
while (start < end - 1 && isPunctuation(token.charAt(end - 1))) end--;
return start < end ? token.substring(start, end) : null;
}
private static boolean isPunctuation(char c) {
return ".!?,\"'’‘".indexOf(c) >= 0;
}
}
| 1 | 40,790 | Hmm what are these magical constant numbers? And why does this change mean they should increase? | apache-lucene-solr | java |
@@ -36,7 +36,7 @@ import logging.config
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.8.0-dev'
+__version__ = '2.8.0'
Version = __version__ # for backward compatibility
UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform) | 1 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# Copyright (c) 2010, Google, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.pyami.config import Config, BotoConfigLocations
from boto.storage_uri import BucketStorageUri, FileStorageUri
import boto.plugin
import os
import platform
import re
import sys
import logging
import logging.config
import urlparse
from boto.exception import InvalidUriError
__version__ = '2.8.0-dev'
Version = __version__ # for backward compatibility
UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
config = Config()
# Regex to disallow buckets violating charset or not [3..255] chars total.
BUCKET_NAME_RE = re.compile(r'^[a-z0-9][a-z0-9\._-]{1,253}[a-z0-9]$')
# Regex to disallow buckets with individual DNS labels longer than 63.
TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}')
GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)'
r'#(?P<generation>[0-9]+)$')
VERSION_RE = re.compile('(?P<versionless_uri_str>.+)#(?P<version_id>.+)$')
def init_logging():
for file in BotoConfigLocations:
try:
logging.config.fileConfig(os.path.expanduser(file))
except:
pass
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('boto')
perflog = logging.getLogger('boto.perf')
log.addHandler(NullHandler())
perflog.addHandler(NullHandler())
init_logging()
# convenience function to set logging to a particular file
def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.FileHandler(filepath)
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
def set_stream_logger(name, level=logging.DEBUG, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.StreamHandler()
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sqs.connection.SQSConnection`
:return: A connection to Amazon's SQS
"""
from boto.sqs.connection import SQSConnection
return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to Amazon's S3
"""
from boto.s3.connection import S3Connection
return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
"""
@type gs_access_key_id: string
@param gs_access_key_id: Your Google Cloud Storage Access Key ID
@type gs_secret_access_key: string
@param gs_secret_access_key: Your Google Cloud Storage Secret Access Key
@rtype: L{GSConnection<boto.gs.connection.GSConnection>}
@return: A connection to Google's Storage service
"""
from boto.gs.connection import GSConnection
return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs)
def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Amazon's EC2
"""
from boto.ec2.connection import EC2Connection
return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.elb.ELBConnection`
:return: A connection to Amazon's Load Balancing Service
"""
from boto.ec2.elb import ELBConnection
return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
:return: A connection to Amazon's Auto Scaling Service
"""
from boto.ec2.autoscale import AutoScaleConnection
return AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection`
:return: A connection to Amazon's EC2 Monitoring service
"""
from boto.ec2.cloudwatch import CloudWatchConnection
return CloudWatchConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sdb.connection.SDBConnection`
:return: A connection to Amazon's SDB
"""
from boto.sdb.connection import SDBConnection
return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.fps.connection.FPSConnection`
:return: A connection to FPS
"""
from boto.fps.connection import FPSConnection
return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.mturk.connection.MTurkConnection`
:return: A connection to MTurk
"""
from boto.mturk.connection import MTurkConnection
return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.cloudfront.CloudFrontConnection`
    :return: A connection to Amazon's CloudFront service
"""
from boto.cloudfront import CloudFrontConnection
return CloudFrontConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.vpc.VPCConnection`
:return: A connection to VPC
"""
from boto.vpc import VPCConnection
return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.rds.RDSConnection`
:return: A connection to RDS
"""
from boto.rds import RDSConnection
return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.emr.EmrConnection`
:return: A connection to Elastic mapreduce
"""
from boto.emr import EmrConnection
return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sns.SNSConnection`
:return: A connection to Amazon's SNS
"""
from boto.sns import SNSConnection
return SNSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.iam.IAMConnection`
:return: A connection to Amazon's IAM
"""
from boto.iam import IAMConnection
return IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_route53(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.dns.Route53Connection`
:return: A connection to Amazon's Route53 DNS Service
"""
from boto.route53 import Route53Connection
return Route53Connection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudformation.CloudFormationConnection`
:return: A connection to Amazon's CloudFormation Service
"""
from boto.cloudformation import CloudFormationConnection
return CloudFormationConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None,
port=8773, path='/services/Eucalyptus', is_secure=False,
**kwargs):
"""
Connect to a Eucalyptus service.
:type host: string
:param host: the host name or ip address of the Eucalyptus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Eucalyptus server
"""
from boto.ec2 import EC2Connection
from boto.ec2.regioninfo import RegionInfo
# Check for values in boto config, if not supplied as args
if not aws_access_key_id:
aws_access_key_id = config.get('Credentials',
'euca_access_key_id',
None)
if not aws_secret_access_key:
aws_secret_access_key = config.get('Credentials',
'euca_secret_access_key',
None)
if not host:
host = config.get('Boto', 'eucalyptus_host', None)
reg = RegionInfo(name='eucalyptus', endpoint=host)
return EC2Connection(aws_access_key_id, aws_secret_access_key,
region=reg, port=port, path=path,
is_secure=is_secure, **kwargs)
def connect_glacier(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.glacier.layer2.Layer2`
:return: A connection to Amazon's Glacier Service
"""
from boto.glacier.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_ec2_endpoint(url, aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
    Connect to an EC2 API endpoint. Additional arguments are passed
through to connect_ec2.
:type url: string
:param url: A url for the ec2 api endpoint to connect to
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
    :return: A connection to the EC2 API endpoint
"""
from boto.ec2.regioninfo import RegionInfo
purl = urlparse.urlparse(url)
kwargs['port'] = purl.port
kwargs['host'] = purl.hostname
kwargs['path'] = purl.path
if not 'is_secure' in kwargs:
kwargs['is_secure'] = (purl.scheme == "https")
kwargs['region'] = RegionInfo(name=purl.hostname,
endpoint=purl.hostname)
kwargs['aws_access_key_id'] = aws_access_key_id
kwargs['aws_secret_access_key'] = aws_secret_access_key
return(connect_ec2(**kwargs))
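# Illustrative usage sketch, not part of the original module: connect_ec2_endpoint() splits the
# URL into host/port/path and infers is_secure from the scheme before delegating to connect_ec2.
# The endpoint and credentials below are placeholders, not real values.
#
#   conn = connect_ec2_endpoint('https://ec2.internal.example.com:8773/services/Eucalyptus',
#                               aws_access_key_id='<access key>',
#                               aws_secret_access_key='<secret key>')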
def connect_walrus(host=None, aws_access_key_id=None,
aws_secret_access_key=None,
port=8773, path='/services/Walrus', is_secure=False,
**kwargs):
"""
Connect to a Walrus service.
:type host: string
:param host: the host name or ip address of the Walrus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to Walrus
"""
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
# Check for values in boto config, if not supplied as args
if not aws_access_key_id:
aws_access_key_id = config.get('Credentials',
'euca_access_key_id',
None)
if not aws_secret_access_key:
aws_secret_access_key = config.get('Credentials',
'euca_secret_access_key',
None)
if not host:
host = config.get('Boto', 'walrus_host', None)
return S3Connection(aws_access_key_id, aws_secret_access_key,
host=host, port=port, path=path,
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ses.SESConnection`
:return: A connection to Amazon's SES
"""
from boto.ses import SESConnection
return SESConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sts.STSConnection`
:return: A connection to Amazon's STS
"""
from boto.sts import STSConnection
return STSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_ia(ia_access_key_id=None, ia_secret_access_key=None,
is_secure=False, **kwargs):
"""
Connect to the Internet Archive via their S3-like API.
:type ia_access_key_id: string
:param ia_access_key_id: Your IA Access Key ID. This will also look
in your boto config file for an entry in the Credentials
section called "ia_access_key_id"
:type ia_secret_access_key: string
:param ia_secret_access_key: Your IA Secret Access Key. This will also
look in your boto config file for an entry in the Credentials
section called "ia_secret_access_key"
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to the Internet Archive
"""
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
access_key = config.get('Credentials', 'ia_access_key_id',
ia_access_key_id)
secret_key = config.get('Credentials', 'ia_secret_access_key',
ia_secret_access_key)
return S3Connection(access_key, secret_key,
host='s3.us.archive.org',
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
def connect_dynamodb(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.dynamodb.layer2.Layer2`
:return: A connection to the Layer2 interface for DynamoDB.
"""
from boto.dynamodb.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_swf(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.swf.layer1.Layer1`
:return: A connection to the Layer1 interface for SWF.
"""
from boto.swf.layer1 import Layer1
return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudsearch(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.cloudsearch.layer2.Layer2`
:return: A connection to Amazon's CloudSearch service
"""
from boto.cloudsearch.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_beanstalk(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.beanstalk.layer1.Layer1`
:return: A connection to Amazon's Elastic Beanstalk service
"""
from boto.beanstalk.layer1 import Layer1
return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_elastictranscoder(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ets.layer1.ElasticTranscoderConnection`
:return: A connection to Amazon's Elastic Transcoder service
"""
from boto.elastictranscoder.layer1 import ElasticTranscoderConnection
return ElasticTranscoderConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs)
def connect_opsworks(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
from boto.opsworks.layer1 import OpsWorksConnection
return OpsWorksConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs)
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
bucket_storage_uri_class=BucketStorageUri,
suppress_consec_slashes=True, is_latest=False):
"""
Instantiate a StorageUri from a URI string.
:type uri_str: string
:param uri_str: URI naming bucket + optional object.
:type default_scheme: string
:param default_scheme: default scheme for scheme-less URIs.
:type debug: int
:param debug: debug level to pass in to boto connection (range 0..2).
:type validate: bool
:param validate: whether to check for bucket name validity.
:type bucket_storage_uri_class: BucketStorageUri interface.
:param bucket_storage_uri_class: Allows mocking for unit tests.
:param suppress_consec_slashes: If provided, controls whether
consecutive slashes will be suppressed in key paths.
:type is_latest: bool
:param is_latest: whether this versioned object represents the
current version.
    We allow validate to be disabled to allow the caller
to implement bucket-level wildcarding (outside the boto library;
see gsutil).
:rtype: :class:`boto.StorageUri` subclass
:return: StorageUri subclass for given URI.
``uri_str`` must be one of the following formats:
* gs://bucket/name
* gs://bucket/name#ver
* s3://bucket/name
* gs://bucket
* s3://bucket
* filename (which could be a Unix path like /a/b/c or a Windows path like
C:\a\b\c)
The last example uses the default scheme ('file', unless overridden).
"""
version_id = None
generation = None
# Manually parse URI components instead of using urlparse.urlparse because
# what we're calling URIs don't really fit the standard syntax for URIs
# (the latter includes an optional host/net location part).
end_scheme_idx = uri_str.find('://')
if end_scheme_idx == -1:
# Check for common error: user specifies gs:bucket instead
# of gs://bucket. Some URI parsers allow this, but it can cause
# confusion for callers, so we don't.
colon_pos = uri_str.find(':')
if colon_pos != -1:
# Allow Windows path names including drive letter (C: etc.)
drive_char = uri_str[0].lower()
if not (platform.system().lower().startswith('windows')
and colon_pos == 1
and drive_char >= 'a' and drive_char <= 'z'):
raise InvalidUriError('"%s" contains ":" instead of "://"' %
uri_str)
scheme = default_scheme.lower()
path = uri_str
else:
scheme = uri_str[0:end_scheme_idx].lower()
path = uri_str[end_scheme_idx + 3:]
if scheme not in ['file', 's3', 'gs']:
raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
if scheme == 'file':
# For file URIs we have no bucket name, and use the complete path
# (minus 'file://') as the object name.
is_stream = False
if path == '-':
is_stream = True
return FileStorageUri(path, debug, is_stream)
else:
path_parts = path.split('/', 1)
bucket_name = path_parts[0]
object_name = ''
        # If validate is enabled, ensure the bucket name is valid, to avoid
# possibly confusing other parts of the code. (For example if we didn't
# catch bucket names containing ':', when a user tried to connect to
# the server with that name they might get a confusing error about
# non-integer port numbers.)
if (validate and bucket_name and
(not BUCKET_NAME_RE.match(bucket_name)
or TOO_LONG_DNS_NAME_COMP.search(bucket_name))):
raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str)
if scheme == 'gs':
match = GENERATION_RE.search(path)
if match:
md = match.groupdict()
versionless_uri_str = md['versionless_uri_str']
path_parts = versionless_uri_str.split('/', 1)
generation = int(md['generation'])
elif scheme == 's3':
match = VERSION_RE.search(path)
if match:
md = match.groupdict()
versionless_uri_str = md['versionless_uri_str']
path_parts = versionless_uri_str.split('/', 1)
version_id = md['version_id']
else:
raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
if len(path_parts) > 1:
object_name = path_parts[1]
return bucket_storage_uri_class(
scheme, bucket_name, object_name, debug,
suppress_consec_slashes=suppress_consec_slashes,
version_id=version_id, generation=generation, is_latest=is_latest)
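# Illustrative usage sketch, not part of the original module: how the URI formats listed in the
# docstring map onto StorageUri objects. Bucket and object names below are placeholders.
#
#   storage_uri('gs://my-bucket/path/to/obj')            # BucketStorageUri with scheme 'gs'
#   storage_uri('s3://my-bucket')                        # bucket-only URI, object_name == ''
#   storage_uri('gs://my-bucket/obj#1360887697105000')   # generation-qualified GCS object
#   storage_uri('/tmp/local-file')                       # FileStorageUri via the default 'file' scheme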
def storage_uri_for_key(key):
"""Returns a StorageUri for the given key.
:type key: :class:`boto.s3.key.Key` or subclass
:param key: URI naming bucket + optional object.
"""
if not isinstance(key, boto.s3.key.Key):
raise InvalidUriError('Requested key (%s) is not a subclass of '
'boto.s3.key.Key' % str(type(key)))
prov_name = key.bucket.connection.provider.get_provider_name()
uri_str = '%s://%s/%s' % (prov_name, key.bucket.name, key.name)
return storage_uri(uri_str)
boto.plugin.load_plugins(config)
| 1 | 8,881 | This change shouldn't be included with this push. Shouldn't push version changes with a pull-request. | boto-boto | py |
@@ -97,6 +97,9 @@ func (h handler) handle(ctx context.Context, call inboundCall) {
err := h.callHandler(ctx, call, responseWriter)
+ // echo accepted rpc-service in response header in any status
+ responseWriter.addHeader(RespondServiceHeaderKey, call.ServiceName())
+
// black-hole requests on resource exhausted errors
if yarpcerrors.FromError(err).Code() == yarpcerrors.CodeResourceExhausted {
// all TChannel clients will time out instead of receiving an error | 1 | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package tchannel
import (
"context"
"fmt"
"time"
"github.com/opentracing/opentracing-go"
"github.com/uber/tchannel-go"
"go.uber.org/multierr"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/internal/bufferpool"
"go.uber.org/yarpc/pkg/errors"
"go.uber.org/yarpc/yarpcerrors"
ncontext "golang.org/x/net/context"
)
// inboundCall provides an interface similar to tchannel.InboundCall.
//
// We use it instead of *tchannel.InboundCall because tchannel.InboundCall is
// not an interface, so we have little control over its behavior in tests.
type inboundCall interface {
ServiceName() string
CallerName() string
MethodString() string
ShardKey() string
RoutingKey() string
RoutingDelegate() string
Format() tchannel.Format
Arg2Reader() (tchannel.ArgReader, error)
Arg3Reader() (tchannel.ArgReader, error)
Response() inboundCallResponse
}
// inboundCallResponse provides an interface similar to
// tchannel.InboundCallResponse.
//
// Its purpose is the same as inboundCall: Make it easier to test functions
// that consume InboundCallResponse without having control of
// InboundCallResponse's behavior.
type inboundCallResponse interface {
Arg2Writer() (tchannel.ArgWriter, error)
Arg3Writer() (tchannel.ArgWriter, error)
Blackhole()
SendSystemError(err error) error
SetApplicationError() error
}
// tchannelCall wraps a TChannel InboundCall into an inboundCall.
//
// We need to do this so that we can change the return type of call.Response()
// to match inboundCall's Response().
type tchannelCall struct{ *tchannel.InboundCall }
func (c tchannelCall) Response() inboundCallResponse {
return c.InboundCall.Response()
}
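// Illustrative sketch, not part of the original file: because inboundCall is an interface,
// tests can substitute a hand-rolled fake for a real *tchannel.InboundCall. The struct below
// is hypothetical; the actual tests define their own fakes.
//
//	type fakeInboundCall struct {
//		service, caller, method string
//		resp                    inboundCallResponse
//	}
//
//	func (f *fakeInboundCall) ServiceName() string                     { return f.service }
//	func (f *fakeInboundCall) CallerName() string                      { return f.caller }
//	func (f *fakeInboundCall) MethodString() string                    { return f.method }
//	func (f *fakeInboundCall) ShardKey() string                        { return "" }
//	func (f *fakeInboundCall) RoutingKey() string                      { return "" }
//	func (f *fakeInboundCall) RoutingDelegate() string                 { return "" }
//	func (f *fakeInboundCall) Format() tchannel.Format                 { return tchannel.Raw }
//	func (f *fakeInboundCall) Arg2Reader() (tchannel.ArgReader, error) { return nil, nil }
//	func (f *fakeInboundCall) Arg3Reader() (tchannel.ArgReader, error) { return nil, nil }
//	func (f *fakeInboundCall) Response() inboundCallResponse           { return f.resp }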
// handler wraps a transport.UnaryHandler into a TChannel Handler.
type handler struct {
existing map[string]tchannel.Handler
router transport.Router
tracer opentracing.Tracer
headerCase headerCase
}
func (h handler) Handle(ctx ncontext.Context, call *tchannel.InboundCall) {
h.handle(ctx, tchannelCall{call})
}
func (h handler) handle(ctx context.Context, call inboundCall) {
// you MUST close the responseWriter no matter what unless you have a tchannel.SystemError
responseWriter := newResponseWriter(call.Response(), call.Format(), h.headerCase)
err := h.callHandler(ctx, call, responseWriter)
// black-hole requests on resource exhausted errors
if yarpcerrors.FromError(err).Code() == yarpcerrors.CodeResourceExhausted {
// all TChannel clients will time out instead of receiving an error
call.Response().Blackhole()
return
}
if err != nil && !responseWriter.isApplicationError {
// TODO: log error
_ = call.Response().SendSystemError(getSystemError(err))
return
}
if err != nil && responseWriter.isApplicationError {
// we have an error, so we're going to propagate it as a yarpc error,
// regardless of whether or not it is a system error.
status := yarpcerrors.FromError(errors.WrapHandlerError(err, call.ServiceName(), call.MethodString()))
// TODO: what to do with error? we could have a whole complicated scheme to
// return a SystemError here, might want to do that
text, _ := status.Code().MarshalText()
responseWriter.addHeader(ErrorCodeHeaderKey, string(text))
if status.Name() != "" {
responseWriter.addHeader(ErrorNameHeaderKey, status.Name())
}
if status.Message() != "" {
responseWriter.addHeader(ErrorMessageHeaderKey, status.Message())
}
}
if err := responseWriter.Close(); err != nil {
// TODO: log error
_ = call.Response().SendSystemError(getSystemError(err))
}
}
func (h handler) callHandler(ctx context.Context, call inboundCall, responseWriter *responseWriter) error {
start := time.Now()
_, ok := ctx.Deadline()
if !ok {
return tchannel.ErrTimeoutRequired
}
treq := &transport.Request{
Caller: call.CallerName(),
Service: call.ServiceName(),
Encoding: transport.Encoding(call.Format()),
Transport: transportName,
Procedure: call.MethodString(),
ShardKey: call.ShardKey(),
RoutingKey: call.RoutingKey(),
RoutingDelegate: call.RoutingDelegate(),
}
ctx, headers, err := readRequestHeaders(ctx, call.Format(), call.Arg2Reader)
if err != nil {
return errors.RequestHeadersDecodeError(treq, err)
}
treq.Headers = headers
if tcall, ok := call.(tchannelCall); ok {
tracer := h.tracer
ctx = tchannel.ExtractInboundSpan(ctx, tcall.InboundCall, headers.Items(), tracer)
}
body, err := call.Arg3Reader()
if err != nil {
return err
}
defer body.Close()
treq.Body = body
if err := transport.ValidateRequest(treq); err != nil {
return err
}
spec, err := h.router.Choose(ctx, treq)
if err != nil {
if yarpcerrors.FromError(err).Code() != yarpcerrors.CodeUnimplemented {
return err
}
if tcall, ok := call.(tchannelCall); !ok {
if m, ok := h.existing[call.MethodString()]; ok {
m.Handle(ctx, tcall.InboundCall)
return nil
}
}
return err
}
if err := transport.ValidateRequestContext(ctx); err != nil {
return err
}
switch spec.Type() {
case transport.Unary:
return transport.DispatchUnaryHandler(ctx, spec.Unary(), start, treq, responseWriter)
default:
return yarpcerrors.Newf(yarpcerrors.CodeUnimplemented, "transport tchannel does not handle %s handlers", spec.Type().String())
}
}
type responseWriter struct {
failedWith error
format tchannel.Format
headers transport.Headers
buffer *bufferpool.Buffer
response inboundCallResponse
isApplicationError bool
headerCase headerCase
}
func newResponseWriter(response inboundCallResponse, format tchannel.Format, headerCase headerCase) *responseWriter {
return &responseWriter{
response: response,
format: format,
headerCase: headerCase,
}
}
func (rw *responseWriter) AddHeaders(h transport.Headers) {
for k, v := range h.OriginalItems() {
// TODO: is this considered a breaking change?
if isReservedHeaderKey(k) {
rw.failedWith = appendError(rw.failedWith, fmt.Errorf("cannot use reserved header key: %s", k))
return
}
rw.addHeader(k, v)
}
}
func (rw *responseWriter) addHeader(key string, value string) {
rw.headers = rw.headers.With(key, value)
}
func (rw *responseWriter) SetApplicationError() {
rw.isApplicationError = true
}
func (rw *responseWriter) Write(s []byte) (int, error) {
if rw.failedWith != nil {
return 0, rw.failedWith
}
if rw.buffer == nil {
rw.buffer = bufferpool.Get()
}
n, err := rw.buffer.Write(s)
if err != nil {
rw.failedWith = appendError(rw.failedWith, err)
}
return n, err
}
func (rw *responseWriter) Close() error {
retErr := rw.failedWith
if rw.isApplicationError {
if err := rw.response.SetApplicationError(); err != nil {
retErr = appendError(retErr, fmt.Errorf("SetApplicationError() failed: %v", err))
}
}
headers := headerMap(rw.headers, rw.headerCase)
retErr = appendError(retErr, writeHeaders(rw.format, headers, nil, rw.response.Arg2Writer))
	// Arg3Writer must be opened and closed regardless of whether there is data
// However, if there is a system error, we do not want to do this
bodyWriter, err := rw.response.Arg3Writer()
if err != nil {
return appendError(retErr, err)
}
defer func() { retErr = appendError(retErr, bodyWriter.Close()) }()
if rw.buffer != nil {
defer bufferpool.Put(rw.buffer)
if _, err := rw.buffer.WriteTo(bodyWriter); err != nil {
return appendError(retErr, err)
}
}
return retErr
}
func getSystemError(err error) error {
if _, ok := err.(tchannel.SystemError); ok {
return err
}
if !yarpcerrors.IsStatus(err) {
return tchannel.NewSystemError(tchannel.ErrCodeUnexpected, err.Error())
}
status := yarpcerrors.FromError(err)
tchannelCode, ok := _codeToTChannelCode[status.Code()]
if !ok {
tchannelCode = tchannel.ErrCodeUnexpected
}
return tchannel.NewSystemError(tchannelCode, status.Message())
}
func appendError(left error, right error) error {
if _, ok := left.(tchannel.SystemError); ok {
return left
}
if _, ok := right.(tchannel.SystemError); ok {
return right
}
return multierr.Append(left, right)
}
| 1 | 16,753 | nit: let's move this right under where we create the `responseWriter`, before `h.callHandler` | yarpc-yarpc-go | go |
@@ -361,7 +361,7 @@ void nano::mdb_store::upgrade_v14_to_v15 (nano::write_transaction & transaction_
nano::mdb_val value{ data.size (), (void *)data.data () };
auto s = mdb_put (env.tx (transaction_a), state_blocks_new, nano::mdb_val (hash), value, MDB_APPEND);
- release_assert_success (s);
+ release_assert_success<MDB_val, mdb_store> ((*this), s);
// Every so often output to the log to indicate progress
constexpr auto output_cutoff = 1000000; | 1 | #include <nano/crypto_lib/random_pool.hpp>
#include <nano/lib/utility.hpp>
#include <nano/node/common.hpp>
#include <nano/node/lmdb/lmdb.hpp>
#include <nano/node/lmdb/lmdb_iterator.hpp>
#include <nano/node/lmdb/wallet_value.hpp>
#include <nano/secure/buffer.hpp>
#include <nano/secure/versioning.hpp>
#include <boost/filesystem.hpp>
#include <boost/format.hpp>
#include <boost/polymorphic_cast.hpp>
#include <queue>
namespace nano
{
template <>
void * mdb_val::data () const
{
return value.mv_data;
}
template <>
size_t mdb_val::size () const
{
return value.mv_size;
}
template <>
mdb_val::db_val (size_t size_a, void * data_a) :
value ({ size_a, data_a })
{
}
template <>
void mdb_val::convert_buffer_to_value ()
{
value = { buffer->size (), const_cast<uint8_t *> (buffer->data ()) };
}
}
nano::mdb_store::mdb_store (nano::logger_mt & logger_a, boost::filesystem::path const & path_a, nano::txn_tracking_config const & txn_tracking_config_a, std::chrono::milliseconds block_processor_batch_max_time_a, nano::lmdb_config const & lmdb_config_a, bool backup_before_upgrade_a) :
logger (logger_a),
env (error, path_a, nano::mdb_env::options::make ().set_config (lmdb_config_a).set_use_no_mem_init (true)),
mdb_txn_tracker (logger_a, txn_tracking_config_a, block_processor_batch_max_time_a),
txn_tracking_enabled (txn_tracking_config_a.enable)
{
if (!error)
{
auto is_fully_upgraded (false);
auto is_fresh_db (false);
{
auto transaction (tx_begin_read ());
auto err = mdb_dbi_open (env.tx (transaction), "meta", 0, &meta);
is_fresh_db = err != MDB_SUCCESS;
if (err == MDB_SUCCESS)
{
is_fully_upgraded = (version_get (transaction) == version);
mdb_dbi_close (env, meta);
}
}
// Only open a write lock when upgrades are needed. This is because CLI commands
// open inactive nodes which can otherwise be locked here if there is a long write
// (can be a few minutes with the --fast_bootstrap flag for instance)
if (!is_fully_upgraded)
{
nano::network_constants network_constants;
if (!is_fresh_db)
{
if (!network_constants.is_dev_network ())
{
std::cout << "Upgrade in progress..." << std::endl;
}
if (backup_before_upgrade_a)
{
create_backup_file (env, path_a, logger_a);
}
}
auto needs_vacuuming = false;
{
auto transaction (tx_begin_write ());
open_databases (error, transaction, MDB_CREATE);
if (!error)
{
error |= do_upgrades (transaction, needs_vacuuming);
}
}
if (needs_vacuuming && !network_constants.is_dev_network ())
{
logger.always_log ("Preparing vacuum...");
auto vacuum_success = vacuum_after_upgrade (path_a, lmdb_config_a);
logger.always_log (vacuum_success ? "Vacuum succeeded." : "Failed to vacuum. (Optional) Ensure enough disk space is available for a copy of the database and try to vacuum after shutting down the node");
}
}
else
{
auto transaction (tx_begin_read ());
open_databases (error, transaction, 0);
}
}
}
bool nano::mdb_store::vacuum_after_upgrade (boost::filesystem::path const & path_a, nano::lmdb_config const & lmdb_config_a)
{
// Vacuum the database. This is not a required step and may actually fail if there isn't enough storage space.
auto vacuum_path = path_a.parent_path () / "vacuumed.ldb";
auto vacuum_success = copy_db (vacuum_path);
if (vacuum_success)
{
// Need to close the database to release the file handle
mdb_env_sync (env.environment, true);
mdb_env_close (env.environment);
env.environment = nullptr;
// Replace the ledger file with the vacuumed one
boost::filesystem::rename (vacuum_path, path_a);
// Set up the environment again
auto options = nano::mdb_env::options::make ()
.set_config (lmdb_config_a)
.set_use_no_mem_init (true);
env.init (error, path_a, options);
if (!error)
{
auto transaction (tx_begin_read ());
open_databases (error, transaction, 0);
}
}
else
{
// The vacuum file can be in an inconsistent state if there wasn't enough space to create it
boost::filesystem::remove (vacuum_path);
}
return vacuum_success;
}
void nano::mdb_store::serialize_mdb_tracker (boost::property_tree::ptree & json, std::chrono::milliseconds min_read_time, std::chrono::milliseconds min_write_time)
{
mdb_txn_tracker.serialize_json (json, min_read_time, min_write_time);
}
void nano::mdb_store::serialize_memory_stats (boost::property_tree::ptree & json)
{
MDB_stat stats;
auto status (mdb_env_stat (env.environment, &stats));
release_assert (status == 0);
json.put ("branch_pages", stats.ms_branch_pages);
json.put ("depth", stats.ms_depth);
json.put ("entries", stats.ms_entries);
json.put ("leaf_pages", stats.ms_leaf_pages);
json.put ("overflow_pages", stats.ms_overflow_pages);
json.put ("page_size", stats.ms_psize);
}
nano::write_transaction nano::mdb_store::tx_begin_write (std::vector<nano::tables> const &, std::vector<nano::tables> const &)
{
return env.tx_begin_write (create_txn_callbacks ());
}
nano::read_transaction nano::mdb_store::tx_begin_read () const
{
return env.tx_begin_read (create_txn_callbacks ());
}
std::string nano::mdb_store::vendor_get () const
{
return boost::str (boost::format ("LMDB %1%.%2%.%3%") % MDB_VERSION_MAJOR % MDB_VERSION_MINOR % MDB_VERSION_PATCH);
}
nano::mdb_txn_callbacks nano::mdb_store::create_txn_callbacks () const
{
nano::mdb_txn_callbacks mdb_txn_callbacks;
if (txn_tracking_enabled)
{
mdb_txn_callbacks.txn_start = ([&mdb_txn_tracker = mdb_txn_tracker] (const nano::transaction_impl * transaction_impl) {
mdb_txn_tracker.add (transaction_impl);
});
mdb_txn_callbacks.txn_end = ([&mdb_txn_tracker = mdb_txn_tracker] (const nano::transaction_impl * transaction_impl) {
mdb_txn_tracker.erase (transaction_impl);
});
}
return mdb_txn_callbacks;
}
void nano::mdb_store::open_databases (bool & error_a, nano::transaction const & transaction_a, unsigned flags)
{
error_a |= mdb_dbi_open (env.tx (transaction_a), "frontiers", flags, &frontiers) != 0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "unchecked", flags, &unchecked) != 0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "online_weight", flags, &online_weight) != 0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "meta", flags, &meta) != 0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "peers", flags, &peers) != 0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "pruned", flags, &pruned) != 0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "confirmation_height", flags, &confirmation_height) != 0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "accounts", flags, &accounts_v0) != 0;
accounts = accounts_v0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "pending", flags, &pending_v0) != 0;
pending = pending_v0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "final_votes", flags, &final_votes) != 0;
auto version_l = version_get (transaction_a);
if (version_l < 19)
{
// These legacy (and state) block databases are no longer used, but need opening so they can be deleted during an upgrade
error_a |= mdb_dbi_open (env.tx (transaction_a), "send", flags, &send_blocks) != 0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "receive", flags, &receive_blocks) != 0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "open", flags, &open_blocks) != 0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "change", flags, &change_blocks) != 0;
if (version_l >= 15)
{
error_a |= mdb_dbi_open (env.tx (transaction_a), "state_blocks", flags, &state_blocks) != 0;
state_blocks_v0 = state_blocks;
}
}
else
{
error_a |= mdb_dbi_open (env.tx (transaction_a), "blocks", MDB_CREATE, &blocks) != 0;
}
if (version_l < 16)
{
// The representation database is no longer used, but needs opening so that it can be deleted during an upgrade
error_a |= mdb_dbi_open (env.tx (transaction_a), "representation", flags, &representation) != 0;
}
if (version_l < 15)
{
// These databases are no longer used, but need opening so they can be deleted during an upgrade
error_a |= mdb_dbi_open (env.tx (transaction_a), "state", flags, &state_blocks_v0) != 0;
state_blocks = state_blocks_v0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "accounts_v1", flags, &accounts_v1) != 0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "pending_v1", flags, &pending_v1) != 0;
error_a |= mdb_dbi_open (env.tx (transaction_a), "state_v1", flags, &state_blocks_v1) != 0;
}
}
bool nano::mdb_store::do_upgrades (nano::write_transaction & transaction_a, bool & needs_vacuuming)
{
auto error (false);
auto version_l = version_get (transaction_a);
switch (version_l)
{
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
case 8:
case 9:
case 10:
case 11:
case 12:
case 13:
logger.always_log (boost::str (boost::format ("The version of the ledger (%1%) is lower than the minimum (%2%) which is supported for upgrades. Either upgrade to a v19, v20 or v21 node first or delete the ledger.") % version_l % minimum_version));
error = true;
break;
case 14:
upgrade_v14_to_v15 (transaction_a);
[[fallthrough]];
// Upgrades to version 16, 17 & 18 are all part of the v21 node release
case 15:
upgrade_v15_to_v16 (transaction_a);
[[fallthrough]];
case 16:
upgrade_v16_to_v17 (transaction_a);
[[fallthrough]];
case 17:
upgrade_v17_to_v18 (transaction_a);
[[fallthrough]];
// Upgrades to version 19 & 20 are both part of the v22 node release
case 18:
upgrade_v18_to_v19 (transaction_a);
needs_vacuuming = true;
[[fallthrough]];
case 19:
upgrade_v19_to_v20 (transaction_a);
[[fallthrough]];
case 20:
upgrade_v20_to_v21 (transaction_a);
[[fallthrough]];
case 21:
break;
default:
logger.always_log (boost::str (boost::format ("The version of the ledger (%1%) is too high for this node") % version_l));
error = true;
break;
}
return error;
}
void nano::mdb_store::upgrade_v14_to_v15 (nano::write_transaction & transaction_a)
{
logger.always_log ("Preparing v14 to v15 database upgrade...");
std::vector<std::pair<nano::account, nano::account_info>> account_infos;
upgrade_counters account_counters (count (transaction_a, accounts_v0), count (transaction_a, accounts_v1));
account_infos.reserve (account_counters.before_v0 + account_counters.before_v1);
nano::mdb_merge_iterator<nano::account, nano::account_info_v14> i_account (transaction_a, accounts_v0, accounts_v1);
nano::mdb_merge_iterator<nano::account, nano::account_info_v14> n_account{};
for (; i_account != n_account; ++i_account)
{
nano::account account (i_account->first);
nano::account_info_v14 account_info_v14 (i_account->second);
// Upgrade rep block to representative account
auto rep_block = block_get_v14 (transaction_a, account_info_v14.rep_block);
release_assert (rep_block != nullptr);
account_infos.emplace_back (account, nano::account_info{ account_info_v14.head, rep_block->representative (), account_info_v14.open_block, account_info_v14.balance, account_info_v14.modified, account_info_v14.block_count, i_account.from_first_database ? nano::epoch::epoch_0 : nano::epoch::epoch_1 });
// Move confirmation height from account_info database to its own table
mdb_put (env.tx (transaction_a), confirmation_height, nano::mdb_val (account), nano::mdb_val (account_info_v14.confirmation_height), MDB_APPEND);
i_account.from_first_database ? ++account_counters.after_v0 : ++account_counters.after_v1;
}
logger.always_log ("Finished extracting confirmation height to its own database");
debug_assert (account_counters.are_equal ());
// No longer need accounts_v1, keep v0 but clear it
mdb_drop (env.tx (transaction_a), accounts_v1, 1);
mdb_drop (env.tx (transaction_a), accounts_v0, 0);
for (auto const & account_account_info_pair : account_infos)
{
auto const & account_info (account_account_info_pair.second);
mdb_put (env.tx (transaction_a), accounts, nano::mdb_val (account_account_info_pair.first), nano::mdb_val (account_info), MDB_APPEND);
}
logger.always_log ("Epoch merge upgrade: Finished accounts, now doing state blocks");
account_infos.clear ();
// Have to create a new database as we are iterating over the existing ones and want to use MDB_APPEND for quick insertion
MDB_dbi state_blocks_new;
mdb_dbi_open (env.tx (transaction_a), "state_blocks", MDB_CREATE, &state_blocks_new);
upgrade_counters state_counters (count (transaction_a, state_blocks_v0), count (transaction_a, state_blocks_v1));
nano::mdb_merge_iterator<nano::block_hash, nano::state_block_w_sideband_v14> i_state (transaction_a, state_blocks_v0, state_blocks_v1);
nano::mdb_merge_iterator<nano::block_hash, nano::state_block_w_sideband_v14> n_state{};
auto num = 0u;
for (; i_state != n_state; ++i_state, ++num)
{
nano::block_hash hash (i_state->first);
nano::state_block_w_sideband_v14 state_block_w_sideband_v14 (i_state->second);
auto & sideband_v14 = state_block_w_sideband_v14.sideband;
nano::block_sideband_v18 sideband (sideband_v14.account, sideband_v14.successor, sideband_v14.balance, sideband_v14.height, sideband_v14.timestamp, i_state.from_first_database ? nano::epoch::epoch_0 : nano::epoch::epoch_1, false, false, false);
// Write these out
std::vector<uint8_t> data;
{
nano::vectorstream stream (data);
state_block_w_sideband_v14.state_block->serialize (stream);
sideband.serialize (stream, sideband_v14.type);
}
nano::mdb_val value{ data.size (), (void *)data.data () };
auto s = mdb_put (env.tx (transaction_a), state_blocks_new, nano::mdb_val (hash), value, MDB_APPEND);
release_assert_success (s);
// Every so often output to the log to indicate progress
constexpr auto output_cutoff = 1000000;
if (num % output_cutoff == 0 && num != 0)
{
logger.always_log (boost::str (boost::format ("Database epoch merge upgrade %1% million state blocks upgraded") % (num / output_cutoff)));
}
i_state.from_first_database ? ++state_counters.after_v0 : ++state_counters.after_v1;
}
debug_assert (state_counters.are_equal ());
logger.always_log ("Epoch merge upgrade: Finished state blocks, now doing pending blocks");
state_blocks = state_blocks_new;
// No longer need states v0/v1 databases
mdb_drop (env.tx (transaction_a), state_blocks_v1, 1);
mdb_drop (env.tx (transaction_a), state_blocks_v0, 1);
state_blocks_v0 = state_blocks;
upgrade_counters pending_counters (count (transaction_a, pending_v0), count (transaction_a, pending_v1));
std::vector<std::pair<nano::pending_key, nano::pending_info>> pending_infos;
pending_infos.reserve (pending_counters.before_v0 + pending_counters.before_v1);
nano::mdb_merge_iterator<nano::pending_key, nano::pending_info_v14> i_pending (transaction_a, pending_v0, pending_v1);
nano::mdb_merge_iterator<nano::pending_key, nano::pending_info_v14> n_pending{};
for (; i_pending != n_pending; ++i_pending)
{
nano::pending_info_v14 info (i_pending->second);
pending_infos.emplace_back (nano::pending_key (i_pending->first), nano::pending_info{ info.source, info.amount, i_pending.from_first_database ? nano::epoch::epoch_0 : nano::epoch::epoch_1 });
i_pending.from_first_database ? ++pending_counters.after_v0 : ++pending_counters.after_v1;
}
debug_assert (pending_counters.are_equal ());
// No longer need the pending v1 table
mdb_drop (env.tx (transaction_a), pending_v1, 1);
mdb_drop (env.tx (transaction_a), pending_v0, 0);
for (auto const & pending_key_pending_info_pair : pending_infos)
{
mdb_put (env.tx (transaction_a), pending, nano::mdb_val (pending_key_pending_info_pair.first), nano::mdb_val (pending_key_pending_info_pair.second), MDB_APPEND);
}
version_put (transaction_a, 15);
logger.always_log ("Finished epoch merge upgrade");
}
void nano::mdb_store::upgrade_v15_to_v16 (nano::write_transaction const & transaction_a)
{
// Representation table is no longer used
debug_assert (representation != 0);
if (representation != 0)
{
auto status (mdb_drop (env.tx (transaction_a), representation, 1));
release_assert (status == MDB_SUCCESS);
representation = 0;
}
version_put (transaction_a, 16);
}
void nano::mdb_store::upgrade_v16_to_v17 (nano::write_transaction const & transaction_a)
{
logger.always_log ("Preparing v16 to v17 database upgrade...");
auto account_info_i = accounts_begin (transaction_a);
auto account_info_n = accounts_end ();
// Set the confirmed frontier for each account in the confirmation height table
std::vector<std::pair<nano::account, nano::confirmation_height_info>> confirmation_height_infos;
auto num = 0u;
for (nano::mdb_iterator<nano::account, uint64_t> i (transaction_a, confirmation_height), n (nano::mdb_iterator<nano::account, uint64_t>{}); i != n; ++i, ++account_info_i, ++num)
{
nano::account account (i->first);
uint64_t confirmation_height (i->second);
		// Check the account matches both the accounts table and confirmation height table
debug_assert (account == account_info_i->first);
auto const & account_info = account_info_i->second;
if (confirmation_height == 0)
{
confirmation_height_infos.emplace_back (account, confirmation_height_info{ 0, nano::block_hash (0) });
}
else
{
if (account_info_i->second.block_count / 2 >= confirmation_height)
{
// The confirmation height of the account is closer to the bottom of the chain, so start there and work up
auto block = block_get_v18 (transaction_a, account_info.open_block);
debug_assert (block);
auto height = 1;
while (height != confirmation_height)
{
block = block_get_v18 (transaction_a, block->sideband ().successor);
debug_assert (block);
++height;
}
debug_assert (block->sideband ().height == confirmation_height);
confirmation_height_infos.emplace_back (account, confirmation_height_info{ confirmation_height, block->hash () });
}
else
{
// The confirmation height of the account is closer to the top of the chain so start there and work down
auto block = block_get_v18 (transaction_a, account_info.head);
auto height = block->sideband ().height;
while (height != confirmation_height)
{
block = block_get_v18 (transaction_a, block->previous ());
debug_assert (block);
--height;
}
confirmation_height_infos.emplace_back (account, confirmation_height_info{ confirmation_height, block->hash () });
}
}
// Every so often output to the log to indicate progress (every 200k accounts)
constexpr auto output_cutoff = 200000;
if (num % output_cutoff == 0 && num != 0)
{
logger.always_log (boost::str (boost::format ("Confirmation height frontier set for %1%00k accounts") % ((num / output_cutoff) * 2)));
}
}
// Clear it then append
auto status (mdb_drop (env.tx (transaction_a), confirmation_height, 0));
release_assert_success (status);
for (auto const & confirmation_height_info_pair : confirmation_height_infos)
{
mdb_put (env.tx (transaction_a), confirmation_height, nano::mdb_val (confirmation_height_info_pair.first), nano::mdb_val (confirmation_height_info_pair.second), MDB_APPEND);
}
version_put (transaction_a, 17);
logger.always_log ("Finished upgrading confirmation height frontiers");
}
void nano::mdb_store::upgrade_v17_to_v18 (nano::write_transaction const & transaction_a)
{
logger.always_log ("Preparing v17 to v18 database upgrade...");
auto count_pre (count (transaction_a, state_blocks));
auto num = 0u;
for (nano::mdb_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::state_block>> state_i (transaction_a, state_blocks), state_n{}; state_i != state_n; ++state_i, ++num)
{
nano::block_w_sideband_v18<nano::state_block> block_w_sideband (state_i->second);
auto & block (block_w_sideband.block);
auto & sideband (block_w_sideband.sideband);
bool is_send{ false };
bool is_receive{ false };
bool is_epoch{ false };
nano::amount prev_balance (0);
if (!block->hashables.previous.is_zero ())
{
prev_balance = block_balance_v18 (transaction_a, block->hashables.previous);
}
if (block->hashables.balance == prev_balance && network_params.ledger.epochs.is_epoch_link (block->hashables.link))
{
is_epoch = true;
}
else if (block->hashables.balance < prev_balance)
{
is_send = true;
}
else if (!block->hashables.link.is_zero ())
{
is_receive = true;
}
nano::block_sideband_v18 new_sideband (sideband.account, sideband.successor, sideband.balance, sideband.height, sideband.timestamp, sideband.details.epoch, is_send, is_receive, is_epoch);
// Write these out
std::vector<uint8_t> data;
{
nano::vectorstream stream (data);
block->serialize (stream);
new_sideband.serialize (stream, block->type ());
}
nano::mdb_val value{ data.size (), (void *)data.data () };
auto s = mdb_cursor_put (state_i.cursor, state_i->first, value, MDB_CURRENT);
release_assert_success (s);
// Every so often output to the log to indicate progress
constexpr auto output_cutoff = 1000000;
if (num > 0 && num % output_cutoff == 0)
{
logger.always_log (boost::str (boost::format ("Database sideband upgrade %1% million state blocks upgraded (out of %2%)") % (num / output_cutoff) % count_pre));
}
}
auto count_post (count (transaction_a, state_blocks));
release_assert (count_pre == count_post);
version_put (transaction_a, 18);
logger.always_log ("Finished upgrading the sideband");
}
void nano::mdb_store::upgrade_v18_to_v19 (nano::write_transaction const & transaction_a)
{
logger.always_log ("Preparing v18 to v19 database upgrade...");
auto count_pre (count (transaction_a, state_blocks) + count (transaction_a, send_blocks) + count (transaction_a, receive_blocks) + count (transaction_a, change_blocks) + count (transaction_a, open_blocks));
// Combine in order of likeliness of counts
std::map<nano::block_hash, nano::block_w_sideband> legacy_open_receive_change_blocks;
for (auto i (nano::store_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::change_block>> (std::make_unique<nano::mdb_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::change_block>>> (transaction_a, change_blocks))), n (nano::store_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::change_block>> (nullptr)); i != n; ++i)
{
nano::block_sideband_v18 const & old_sideband (i->second.sideband);
nano::block_sideband new_sideband (old_sideband.account, old_sideband.successor, old_sideband.balance, old_sideband.height, old_sideband.timestamp, nano::epoch::epoch_0, false, false, false, nano::epoch::epoch_0);
legacy_open_receive_change_blocks[i->first] = { nano::block_w_sideband{ i->second.block, new_sideband } };
}
for (auto i (nano::store_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::open_block>> (std::make_unique<nano::mdb_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::open_block>>> (transaction_a, open_blocks))), n (nano::store_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::open_block>> (nullptr)); i != n; ++i)
{
nano::block_sideband_v18 const & old_sideband (i->second.sideband);
nano::block_sideband new_sideband (old_sideband.account, old_sideband.successor, old_sideband.balance, old_sideband.height, old_sideband.timestamp, nano::epoch::epoch_0, false, false, false, nano::epoch::epoch_0);
legacy_open_receive_change_blocks[i->first] = { nano::block_w_sideband{ i->second.block, new_sideband } };
}
for (auto i (nano::store_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::receive_block>> (std::make_unique<nano::mdb_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::receive_block>>> (transaction_a, receive_blocks))), n (nano::store_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::receive_block>> (nullptr)); i != n; ++i)
{
nano::block_sideband_v18 const & old_sideband (i->second.sideband);
nano::block_sideband new_sideband (old_sideband.account, old_sideband.successor, old_sideband.balance, old_sideband.height, old_sideband.timestamp, nano::epoch::epoch_0, false, false, false, nano::epoch::epoch_0);
legacy_open_receive_change_blocks[i->first] = { nano::block_w_sideband{ i->second.block, new_sideband } };
}
release_assert (!mdb_drop (env.tx (transaction_a), receive_blocks, 1));
receive_blocks = 0;
release_assert (!mdb_drop (env.tx (transaction_a), open_blocks, 1));
open_blocks = 0;
release_assert (!mdb_drop (env.tx (transaction_a), change_blocks, 1));
change_blocks = 0;
logger.always_log ("Write legacy open/receive/change to new format");
MDB_dbi temp_legacy_open_receive_change_blocks;
{
mdb_dbi_open (env.tx (transaction_a), "temp_legacy_open_receive_change_blocks", MDB_CREATE, &temp_legacy_open_receive_change_blocks);
for (auto const & legacy_block : legacy_open_receive_change_blocks)
{
std::vector<uint8_t> data;
{
nano::vectorstream stream (data);
nano::serialize_block (stream, *legacy_block.second.block);
legacy_block.second.sideband.serialize (stream, legacy_block.second.block->type ());
}
nano::mdb_val value{ data.size (), (void *)data.data () };
auto s = mdb_put (env.tx (transaction_a), temp_legacy_open_receive_change_blocks, nano::mdb_val (legacy_block.first), value, MDB_APPEND);
release_assert_success (s);
}
}
logger.always_log ("Write legacy send to new format");
// Write send blocks to a new table (this was not done in memory as it would push us above memory requirements)
MDB_dbi temp_legacy_send_blocks;
{
mdb_dbi_open (env.tx (transaction_a), "temp_legacy_send_blocks", MDB_CREATE, &temp_legacy_send_blocks);
for (auto i (nano::store_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::send_block>> (std::make_unique<nano::mdb_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::send_block>>> (transaction_a, send_blocks))), n (nano::store_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::send_block>> (nullptr)); i != n; ++i)
{
auto const & block_w_sideband_v18 (i->second);
std::vector<uint8_t> data;
{
nano::vectorstream stream (data);
nano::serialize_block (stream, *block_w_sideband_v18.block);
block_w_sideband_v18.sideband.serialize (stream, nano::block_type::send); // Equal to new version for legacy blocks
}
nano::mdb_val value{ data.size (), (void *)data.data () };
auto s = mdb_put (env.tx (transaction_a), temp_legacy_send_blocks, nano::mdb_val (i->first), value, MDB_APPEND);
release_assert_success (s);
}
}
release_assert (!mdb_drop (env.tx (transaction_a), send_blocks, 1));
send_blocks = 0;
logger.always_log ("Merge legacy open/receive/change with legacy send blocks");
MDB_dbi temp_legacy_send_open_receive_change_blocks;
{
mdb_dbi_open (env.tx (transaction_a), "temp_legacy_send_open_receive_change_blocks", MDB_CREATE, &temp_legacy_send_open_receive_change_blocks);
nano::mdb_merge_iterator<nano::block_hash, nano::block_w_sideband> i (transaction_a, temp_legacy_open_receive_change_blocks, temp_legacy_send_blocks);
nano::mdb_merge_iterator<nano::block_hash, nano::block_w_sideband> n{};
for (; i != n; ++i)
{
auto s = mdb_put (env.tx (transaction_a), temp_legacy_send_open_receive_change_blocks, nano::mdb_val (i->first), nano::mdb_val (i->second), MDB_APPEND);
release_assert_success (s);
}
// Delete tables
mdb_drop (env.tx (transaction_a), temp_legacy_send_blocks, 1);
mdb_drop (env.tx (transaction_a), temp_legacy_open_receive_change_blocks, 1);
}
logger.always_log ("Write state blocks to new format");
// Write state blocks to a new table (this was not done in memory as it would push us above memory requirements)
MDB_dbi temp_state_blocks;
{
auto type_state (nano::block_type::state);
mdb_dbi_open (env.tx (transaction_a), "temp_state_blocks", MDB_CREATE, &temp_state_blocks);
for (auto i (nano::store_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::state_block>> (std::make_unique<nano::mdb_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::state_block>>> (transaction_a, state_blocks))), n (nano::store_iterator<nano::block_hash, nano::block_w_sideband_v18<nano::state_block>> (nullptr)); i != n; ++i)
{
auto const & block_w_sideband_v18 (i->second);
nano::block_sideband_v18 const & old_sideband (block_w_sideband_v18.sideband);
nano::epoch source_epoch (nano::epoch::epoch_0);
// Source block v18 epoch
if (old_sideband.details.is_receive)
{
auto db_val (block_raw_get_by_type_v18 (transaction_a, block_w_sideband_v18.block->link ().as_block_hash (), type_state));
if (db_val.is_initialized ())
{
nano::bufferstream stream (reinterpret_cast<uint8_t const *> (db_val.get ().data ()), db_val.get ().size ());
auto source_block (nano::deserialize_block (stream, type_state));
release_assert (source_block != nullptr);
nano::block_sideband_v18 source_sideband;
auto error (source_sideband.deserialize (stream, type_state));
release_assert (!error);
source_epoch = source_sideband.details.epoch;
}
}
nano::block_sideband new_sideband (old_sideband.account, old_sideband.successor, old_sideband.balance, old_sideband.height, old_sideband.timestamp, old_sideband.details.epoch, old_sideband.details.is_send, old_sideband.details.is_receive, old_sideband.details.is_epoch, source_epoch);
std::vector<uint8_t> data;
{
nano::vectorstream stream (data);
nano::serialize_block (stream, *block_w_sideband_v18.block);
new_sideband.serialize (stream, nano::block_type::state);
}
nano::mdb_val value{ data.size (), (void *)data.data () };
auto s = mdb_put (env.tx (transaction_a), temp_state_blocks, nano::mdb_val (i->first), value, MDB_APPEND);
release_assert_success (s);
}
}
release_assert (!mdb_drop (env.tx (transaction_a), state_blocks, 1));
state_blocks = 0;
logger.always_log ("Merging all legacy blocks with state blocks");
// Merge all legacy blocks with state blocks into the final table
nano::mdb_merge_iterator<nano::block_hash, nano::block_w_sideband> i (transaction_a, temp_legacy_send_open_receive_change_blocks, temp_state_blocks);
nano::mdb_merge_iterator<nano::block_hash, nano::block_w_sideband> n{};
mdb_dbi_open (env.tx (transaction_a), "blocks", MDB_CREATE, &blocks);
for (; i != n; ++i)
{
auto s = mdb_put (env.tx (transaction_a), blocks, nano::mdb_val (i->first), nano::mdb_val (i->second), MDB_APPEND);
release_assert_success (s);
}
// Delete tables
mdb_drop (env.tx (transaction_a), temp_legacy_send_open_receive_change_blocks, 1);
mdb_drop (env.tx (transaction_a), temp_state_blocks, 1);
auto count_post (count (transaction_a, blocks));
release_assert (count_pre == count_post);
MDB_dbi vote{ 0 };
release_assert (!mdb_dbi_open (env.tx (transaction_a), "vote", MDB_CREATE, &vote));
release_assert (!mdb_drop (env.tx (transaction_a), vote, 1));
version_put (transaction_a, 19);
logger.always_log ("Finished upgrading all blocks to new blocks database");
}
void nano::mdb_store::upgrade_v19_to_v20 (nano::write_transaction const & transaction_a)
{
logger.always_log ("Preparing v19 to v20 database upgrade...");
mdb_dbi_open (env.tx (transaction_a), "pruned", MDB_CREATE, &pruned);
version_put (transaction_a, 20);
logger.always_log ("Finished creating new pruned table");
}
void nano::mdb_store::upgrade_v20_to_v21 (nano::write_transaction const & transaction_a)
{
logger.always_log ("Preparing v20 to v21 database upgrade...");
mdb_dbi_open (env.tx (transaction_a), "final_votes", MDB_CREATE, &final_votes);
version_put (transaction_a, 21);
logger.always_log ("Finished creating new final_vote table");
}
/** Takes a filepath, appends '_backup_<timestamp>' to the end (but before any extension) and saves that file in the same directory */
void nano::mdb_store::create_backup_file (nano::mdb_env & env_a, boost::filesystem::path const & filepath_a, nano::logger_mt & logger_a)
{
auto extension = filepath_a.extension ();
auto filename_without_extension = filepath_a.filename ().replace_extension ("");
auto orig_filepath = filepath_a;
auto & backup_path = orig_filepath.remove_filename ();
auto backup_filename = filename_without_extension;
backup_filename += "_backup_";
backup_filename += std::to_string (std::chrono::system_clock::now ().time_since_epoch ().count ());
backup_filename += extension;
auto backup_filepath = backup_path / backup_filename;
auto start_message (boost::str (boost::format ("Performing %1% backup before database upgrade...") % filepath_a.filename ()));
logger_a.always_log (start_message);
std::cout << start_message << std::endl;
auto error (mdb_env_copy (env_a, backup_filepath.string ().c_str ()));
if (error)
{
auto error_message (boost::str (boost::format ("%1% backup failed") % filepath_a.filename ()));
logger_a.always_log (error_message);
std::cerr << error_message << std::endl;
std::exit (1);
}
else
{
auto success_message (boost::str (boost::format ("Backup created: %1%") % backup_filename));
logger_a.always_log (success_message);
std::cout << success_message << std::endl;
}
}
std::vector<nano::unchecked_info> nano::mdb_store::unchecked_get (nano::transaction const & transaction_a, nano::block_hash const & hash_a)
{
std::vector<nano::unchecked_info> result;
for (auto i (unchecked_begin (transaction_a, nano::unchecked_key (hash_a, 0))), n (unchecked_end ()); i != n && i->first.key () == hash_a; ++i)
{
nano::unchecked_info const & unchecked_info (i->second);
result.push_back (unchecked_info);
}
return result;
}
void nano::mdb_store::version_put (nano::write_transaction const & transaction_a, int version_a)
{
nano::uint256_union version_key (1);
nano::uint256_union version_value (version_a);
auto status (mdb_put (env.tx (transaction_a), meta, nano::mdb_val (version_key), nano::mdb_val (version_value), 0));
release_assert_success (status);
}
bool nano::mdb_store::exists (nano::transaction const & transaction_a, tables table_a, nano::mdb_val const & key_a) const
{
nano::mdb_val junk;
auto status = get (transaction_a, table_a, key_a, junk);
release_assert (status == MDB_SUCCESS || status == MDB_NOTFOUND);
return (status == MDB_SUCCESS);
}
int nano::mdb_store::get (nano::transaction const & transaction_a, tables table_a, nano::mdb_val const & key_a, nano::mdb_val & value_a) const
{
return mdb_get (env.tx (transaction_a), table_to_dbi (table_a), key_a, value_a);
}
int nano::mdb_store::put (nano::write_transaction const & transaction_a, tables table_a, nano::mdb_val const & key_a, const nano::mdb_val & value_a) const
{
return (mdb_put (env.tx (transaction_a), table_to_dbi (table_a), key_a, value_a, 0));
}
int nano::mdb_store::del (nano::write_transaction const & transaction_a, tables table_a, nano::mdb_val const & key_a) const
{
return (mdb_del (env.tx (transaction_a), table_to_dbi (table_a), key_a, nullptr));
}
int nano::mdb_store::drop (nano::write_transaction const & transaction_a, tables table_a)
{
return clear (transaction_a, table_to_dbi (table_a));
}
int nano::mdb_store::clear (nano::write_transaction const & transaction_a, MDB_dbi handle_a)
{
return mdb_drop (env.tx (transaction_a), handle_a, 0);
}
uint64_t nano::mdb_store::count (nano::transaction const & transaction_a, tables table_a) const
{
return count (transaction_a, table_to_dbi (table_a));
}
uint64_t nano::mdb_store::count (nano::transaction const & transaction_a, MDB_dbi db_a) const
{
MDB_stat stats;
auto status (mdb_stat (env.tx (transaction_a), db_a, &stats));
release_assert_success (status);
return (stats.ms_entries);
}
MDB_dbi nano::mdb_store::table_to_dbi (tables table_a) const
{
switch (table_a)
{
case tables::frontiers:
return frontiers;
case tables::accounts:
return accounts;
case tables::blocks:
return blocks;
case tables::pending:
return pending;
case tables::unchecked:
return unchecked;
case tables::online_weight:
return online_weight;
case tables::meta:
return meta;
case tables::peers:
return peers;
case tables::pruned:
return pruned;
case tables::confirmation_height:
return confirmation_height;
case tables::final_votes:
return final_votes;
default:
release_assert (false);
return peers;
}
}
bool nano::mdb_store::not_found (int status) const
{
return (status_code_not_found () == status);
}
bool nano::mdb_store::success (int status) const
{
return (MDB_SUCCESS == status);
}
int nano::mdb_store::status_code_not_found () const
{
return MDB_NOTFOUND;
}
std::string nano::mdb_store::error_string (int status) const
{
return mdb_strerror (status);
}
bool nano::mdb_store::copy_db (boost::filesystem::path const & destination_file)
{
return !mdb_env_copy2 (env.environment, destination_file.string ().c_str (), MDB_CP_COMPACT);
}
void nano::mdb_store::rebuild_db (nano::write_transaction const & transaction_a)
{
// Tables with uint256_union key
std::vector<MDB_dbi> tables = { accounts, blocks, pruned, confirmation_height };
for (auto const & table : tables)
{
MDB_dbi temp;
mdb_dbi_open (env.tx (transaction_a), "temp_table", MDB_CREATE, &temp);
// Copy all values to temporary table
for (auto i (nano::store_iterator<nano::uint256_union, nano::mdb_val> (std::make_unique<nano::mdb_iterator<nano::uint256_union, nano::mdb_val>> (transaction_a, table))), n (nano::store_iterator<nano::uint256_union, nano::mdb_val> (nullptr)); i != n; ++i)
{
auto s = mdb_put (env.tx (transaction_a), temp, nano::mdb_val (i->first), i->second, MDB_APPEND);
release_assert_success (s);
}
release_assert (count (transaction_a, table) == count (transaction_a, temp));
// Clear existing table
mdb_drop (env.tx (transaction_a), table, 0);
// Put values from copy
for (auto i (nano::store_iterator<nano::uint256_union, nano::mdb_val> (std::make_unique<nano::mdb_iterator<nano::uint256_union, nano::mdb_val>> (transaction_a, temp))), n (nano::store_iterator<nano::uint256_union, nano::mdb_val> (nullptr)); i != n; ++i)
{
auto s = mdb_put (env.tx (transaction_a), table, nano::mdb_val (i->first), i->second, MDB_APPEND);
release_assert_success (s);
}
release_assert (count (transaction_a, table) == count (transaction_a, temp));
// Remove temporary table
mdb_drop (env.tx (transaction_a), temp, 1);
}
// Pending table
{
MDB_dbi temp;
mdb_dbi_open (env.tx (transaction_a), "temp_table", MDB_CREATE, &temp);
// Copy all values to temporary table
for (auto i (nano::store_iterator<nano::pending_key, nano::pending_info> (std::make_unique<nano::mdb_iterator<nano::pending_key, nano::pending_info>> (transaction_a, pending))), n (nano::store_iterator<nano::pending_key, nano::pending_info> (nullptr)); i != n; ++i)
{
auto s = mdb_put (env.tx (transaction_a), temp, nano::mdb_val (i->first), nano::mdb_val (i->second), MDB_APPEND);
release_assert_success (s);
}
release_assert (count (transaction_a, pending) == count (transaction_a, temp));
mdb_drop (env.tx (transaction_a), pending, 0);
// Put values from copy
for (auto i (nano::store_iterator<nano::pending_key, nano::pending_info> (std::make_unique<nano::mdb_iterator<nano::pending_key, nano::pending_info>> (transaction_a, temp))), n (nano::store_iterator<nano::pending_key, nano::pending_info> (nullptr)); i != n; ++i)
{
auto s = mdb_put (env.tx (transaction_a), pending, nano::mdb_val (i->first), nano::mdb_val (i->second), MDB_APPEND);
release_assert_success (s);
}
release_assert (count (transaction_a, pending) == count (transaction_a, temp));
mdb_drop (env.tx (transaction_a), temp, 1);
}
}
bool nano::mdb_store::init_error () const
{
return error;
}
std::shared_ptr<nano::block> nano::mdb_store::block_get_v18 (nano::transaction const & transaction_a, nano::block_hash const & hash_a) const
{
nano::block_type type;
auto value (block_raw_get_v18 (transaction_a, hash_a, type));
std::shared_ptr<nano::block> result;
if (value.size () != 0)
{
nano::bufferstream stream (reinterpret_cast<uint8_t const *> (value.data ()), value.size ());
result = nano::deserialize_block (stream, type);
release_assert (result != nullptr);
nano::block_sideband_v18 sideband;
auto error = (sideband.deserialize (stream, type));
release_assert (!error);
result->sideband_set (nano::block_sideband (sideband.account, sideband.successor, sideband.balance, sideband.height, sideband.timestamp, sideband.details.epoch, sideband.details.is_send, sideband.details.is_receive, sideband.details.is_epoch, nano::epoch::epoch_0));
}
return result;
}
nano::mdb_val nano::mdb_store::block_raw_get_v18 (nano::transaction const & transaction_a, nano::block_hash const & hash_a, nano::block_type & type_a) const
{
nano::mdb_val result;
// Table lookups are ordered by match probability
nano::block_type block_types[]{ nano::block_type::state, nano::block_type::send, nano::block_type::receive, nano::block_type::open, nano::block_type::change };
for (auto current_type : block_types)
{
auto db_val (block_raw_get_by_type_v18 (transaction_a, hash_a, current_type));
if (db_val.is_initialized ())
{
type_a = current_type;
result = db_val.get ();
break;
}
}
return result;
}
boost::optional<nano::mdb_val> nano::mdb_store::block_raw_get_by_type_v18 (nano::transaction const & transaction_a, nano::block_hash const & hash_a, nano::block_type & type_a) const
{
nano::mdb_val value;
nano::mdb_val hash (hash_a);
int status = status_code_not_found ();
switch (type_a)
{
case nano::block_type::send:
{
status = mdb_get (env.tx (transaction_a), send_blocks, hash, value);
break;
}
case nano::block_type::receive:
{
status = mdb_get (env.tx (transaction_a), receive_blocks, hash, value);
break;
}
case nano::block_type::open:
{
status = mdb_get (env.tx (transaction_a), open_blocks, hash, value);
break;
}
case nano::block_type::change:
{
status = mdb_get (env.tx (transaction_a), change_blocks, hash, value);
break;
}
case nano::block_type::state:
{
status = mdb_get (env.tx (transaction_a), state_blocks, hash, value);
break;
}
case nano::block_type::invalid:
case nano::block_type::not_a_block:
{
break;
}
}
release_assert (success (status) || not_found (status));
boost::optional<nano::mdb_val> result;
if (success (status))
{
result = value;
}
return result;
}
nano::uint128_t nano::mdb_store::block_balance_v18 (nano::transaction const & transaction_a, nano::block_hash const & hash_a) const
{
auto block (block_get_v18 (transaction_a, hash_a));
release_assert (block);
nano::uint128_t result (block_balance_calculated (block));
return result;
}
// All the v14 functions below are only needed during upgrades
size_t nano::mdb_store::block_successor_offset_v14 (nano::transaction const & transaction_a, size_t entry_size_a, nano::block_type type_a) const
{
return entry_size_a - nano::block_sideband_v14::size (type_a);
}
nano::block_hash nano::mdb_store::block_successor_v14 (nano::transaction const & transaction_a, nano::block_hash const & hash_a) const
{
nano::block_type type;
auto value (block_raw_get_v14 (transaction_a, hash_a, type));
nano::block_hash result;
if (value.size () != 0)
{
debug_assert (value.size () >= result.bytes.size ());
nano::bufferstream stream (reinterpret_cast<uint8_t const *> (value.data ()) + block_successor_offset_v14 (transaction_a, value.size (), type), result.bytes.size ());
auto error (nano::try_read (stream, result.bytes));
(void)error;
debug_assert (!error);
}
else
{
result.clear ();
}
return result;
}
nano::mdb_val nano::mdb_store::block_raw_get_v14 (nano::transaction const & transaction_a, nano::block_hash const & hash_a, nano::block_type & type_a, bool * is_state_v1) const
{
nano::mdb_val result;
// Table lookups are ordered by match probability
nano::block_type block_types[]{ nano::block_type::state, nano::block_type::send, nano::block_type::receive, nano::block_type::open, nano::block_type::change };
for (auto current_type : block_types)
{
auto db_val (block_raw_get_by_type_v14 (transaction_a, hash_a, current_type, is_state_v1));
if (db_val.is_initialized ())
{
type_a = current_type;
result = db_val.get ();
break;
}
}
return result;
}
boost::optional<nano::mdb_val> nano::mdb_store::block_raw_get_by_type_v14 (nano::transaction const & transaction_a, nano::block_hash const & hash_a, nano::block_type & type_a, bool * is_state_v1) const
{
nano::mdb_val value;
nano::mdb_val hash (hash_a);
int status = status_code_not_found ();
switch (type_a)
{
case nano::block_type::send:
{
status = mdb_get (env.tx (transaction_a), send_blocks, hash, value);
break;
}
case nano::block_type::receive:
{
status = mdb_get (env.tx (transaction_a), receive_blocks, hash, value);
break;
}
case nano::block_type::open:
{
status = mdb_get (env.tx (transaction_a), open_blocks, hash, value);
break;
}
case nano::block_type::change:
{
status = mdb_get (env.tx (transaction_a), change_blocks, hash, value);
break;
}
case nano::block_type::state:
{
status = mdb_get (env.tx (transaction_a), state_blocks_v1, hash, value);
if (is_state_v1 != nullptr)
{
*is_state_v1 = success (status);
}
if (not_found (status))
{
status = mdb_get (env.tx (transaction_a), state_blocks_v0, hash, value);
}
break;
}
case nano::block_type::invalid:
case nano::block_type::not_a_block:
{
break;
}
}
release_assert (success (status) || not_found (status));
boost::optional<nano::mdb_val> result;
if (success (status))
{
result = value;
}
return result;
}
std::shared_ptr<nano::block> nano::mdb_store::block_get_v14 (nano::transaction const & transaction_a, nano::block_hash const & hash_a, nano::block_sideband_v14 * sideband_a, bool * is_state_v1) const
{
nano::block_type type;
auto value (block_raw_get_v14 (transaction_a, hash_a, type, is_state_v1));
std::shared_ptr<nano::block> result;
if (value.size () != 0)
{
nano::bufferstream stream (reinterpret_cast<uint8_t const *> (value.data ()), value.size ());
result = nano::deserialize_block (stream, type);
debug_assert (result != nullptr);
if (sideband_a)
{
sideband_a->type = type;
bool error = sideband_a->deserialize (stream);
(void)error;
debug_assert (!error);
}
}
return result;
}
nano::mdb_store::upgrade_counters::upgrade_counters (uint64_t count_before_v0, uint64_t count_before_v1) :
before_v0 (count_before_v0),
before_v1 (count_before_v1)
{
}
bool nano::mdb_store::upgrade_counters::are_equal () const
{
return (before_v0 == after_v0) && (before_v1 == after_v1);
}
unsigned nano::mdb_store::max_block_write_batch_num () const
{
return std::numeric_limits<unsigned>::max ();
}
// Explicitly instantiate
template class nano::block_store_partial<MDB_val, nano::mdb_store>;
| 1 | 16,715 | The way it was implemented, it needed the template values to be passed explicitly. Please check whether we want this. | nanocurrency-nano-node | cpp |
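The comment above refers to the explicit instantiation at the bottom of the file (`template class nano::block_store_partial<MDB_val, nano::mdb_store>;`), where the template arguments have to be spelled out at the point of instantiation. A minimal standalone C++ sketch of that pattern, using hypothetical stand-in types rather than the real nano classes:

#include <cstddef>
#include <iostream>

// Class template whose member definitions would normally live in one translation unit.
template <typename Val, typename Store>
class block_store_partial
{
public:
	std::size_t value_size () const
	{
		return sizeof (Val);
	}
};

// Hypothetical stand-ins for MDB_val / nano::mdb_store, for illustration only.
struct fake_mdb_val
{
	void * data{ nullptr };
	std::size_t size{ 0 };
};
struct fake_mdb_store
{
};

// Explicit instantiation: the template values must be passed here so the compiler
// emits the template's members in this translation unit.
template class block_store_partial<fake_mdb_val, fake_mdb_store>;

int main ()
{
	block_store_partial<fake_mdb_val, fake_mdb_store> store;
	std::cout << store.value_size () << std::endl;
	return 0;
}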
@@ -36,7 +36,8 @@ var daemonCmd = &cmds.Command{
cmdkit.BoolOption(OfflineMode, "start the node without networking"),
cmdkit.BoolOption(ELStdout),
cmdkit.BoolOption(IsRelay, "advertise and allow filecoin network traffic to be relayed through this node"),
- cmdkit.StringOption(BlockTime, "time a node waits before trying to mine the next block").WithDefault(clock.DefaultEpochDuration.String()),
+ cmdkit.StringOption(BlockTime, "period a node waits between mining successive blocks").WithDefault(clock.DefaultEpochDuration.String()),
+ cmdkit.StringOption(PropagationDelay, "time a node waits after the start of an epoch for blocks to arrive").WithDefault(clock.DefaultPropagationDelay.String()),
},
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
return daemonRun(req, re) | 1 | package commands
import (
"context"
"fmt"
"net/http"
_ "net/http/pprof" // nolint: golint
"os"
"os/signal"
"syscall"
"time"
cmdkit "github.com/ipfs/go-ipfs-cmdkit"
cmds "github.com/ipfs/go-ipfs-cmds"
cmdhttp "github.com/ipfs/go-ipfs-cmds/http"
writer "github.com/ipfs/go-log/writer"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr-net"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/node"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paths"
"github.com/filecoin-project/go-filecoin/internal/pkg/clock"
"github.com/filecoin-project/go-filecoin/internal/pkg/config"
"github.com/filecoin-project/go-filecoin/internal/pkg/journal"
"github.com/filecoin-project/go-filecoin/internal/pkg/repo"
)
var daemonCmd = &cmds.Command{
Helptext: cmdkit.HelpText{
Tagline: "Start a long-running daemon process",
},
Options: []cmdkit.Option{
cmdkit.StringOption(SwarmAddress, "multiaddress to listen on for filecoin network connections"),
		cmdkit.StringOption(SwarmPublicRelayAddress, "public multiaddress for routing circuit relay traffic. Necessary for relay nodes to provide this if they are not publicly dialable"),
cmdkit.BoolOption(OfflineMode, "start the node without networking"),
cmdkit.BoolOption(ELStdout),
cmdkit.BoolOption(IsRelay, "advertise and allow filecoin network traffic to be relayed through this node"),
cmdkit.StringOption(BlockTime, "time a node waits before trying to mine the next block").WithDefault(clock.DefaultEpochDuration.String()),
},
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
return daemonRun(req, re)
},
}
func daemonRun(req *cmds.Request, re cmds.ResponseEmitter) error {
// third precedence is config file.
rep, err := getRepo(req)
if err != nil {
return err
}
config := rep.Config()
// second highest precedence is env vars.
if envAPI := os.Getenv("FIL_API"); envAPI != "" {
config.API.Address = envAPI
}
// highest precedence is cmd line flag.
if flagAPI, ok := req.Options[OptionAPI].(string); ok && flagAPI != "" {
config.API.Address = flagAPI
}
if swarmAddress, ok := req.Options[SwarmAddress].(string); ok && swarmAddress != "" {
config.Swarm.Address = swarmAddress
}
if publicRelayAddress, ok := req.Options[SwarmPublicRelayAddress].(string); ok && publicRelayAddress != "" {
config.Swarm.PublicRelayAddress = publicRelayAddress
}
opts, err := node.OptionsFromRepo(rep)
if err != nil {
return err
}
if offlineMode, ok := req.Options[OfflineMode].(bool); ok {
opts = append(opts, node.OfflineMode(offlineMode))
}
if isRelay, ok := req.Options[IsRelay].(bool); ok && isRelay {
opts = append(opts, node.IsRelay())
}
durStr, ok := req.Options[BlockTime].(string)
if !ok {
return errors.New("Bad block time passed")
}
blockTime, err := time.ParseDuration(durStr)
if err != nil {
return errors.Wrap(err, "Bad block time passed")
}
opts = append(opts, node.BlockTime(blockTime))
journal, err := journal.NewZapJournal(rep.JournalPath())
if err != nil {
return err
}
opts = append(opts, node.JournalConfigOption(journal))
// Monkey-patch network parameters option will set package variables during node build
opts = append(opts, node.MonkeyPatchNetworkParamsOption(config.NetworkParams))
// Instantiate the node.
fcn, err := node.New(req.Context, opts...)
if err != nil {
return err
}
if fcn.OfflineMode {
_ = re.Emit("Filecoin node running in offline mode (libp2p is disabled)\n")
} else {
_ = re.Emit(fmt.Sprintf("My peer ID is %s\n", fcn.Host().ID().Pretty()))
for _, a := range fcn.Host().Addrs() {
_ = re.Emit(fmt.Sprintf("Swarm listening on: %s\n", a))
}
}
if _, ok := req.Options[ELStdout].(bool); ok {
writer.WriterGroup.AddWriter(os.Stdout)
}
// Start the node.
if err := fcn.Start(req.Context); err != nil {
return err
}
defer fcn.Stop(req.Context)
// Run API server around the node.
ready := make(chan interface{}, 1)
go func() {
<-ready
_ = re.Emit(fmt.Sprintf("API server listening on %s\n", config.API.Address))
}()
var terminate = make(chan os.Signal, 1)
signal.Notify(terminate, os.Interrupt, syscall.SIGTERM)
defer signal.Stop(terminate)
// The request is expected to remain open so the daemon uses the request context.
// Pass a new context here if the flow changes such that the command should exit while leaving
	// a forked daemon running.
return RunAPIAndWait(req.Context, fcn, config.API, ready, terminate)
}
func getRepo(req *cmds.Request) (repo.Repo, error) {
repoDir, _ := req.Options[OptionRepoDir].(string)
repoDir, err := paths.GetRepoPath(repoDir)
if err != nil {
return nil, err
}
return repo.OpenFSRepo(repoDir, repo.Version)
}
// RunAPIAndWait starts an API server and waits for it to finish.
// The `ready` channel is closed when the server is running and its API address has been
// saved to the node's repo.
// A message sent to or closure of the `terminate` channel signals the server to stop.
func RunAPIAndWait(ctx context.Context, nd *node.Node, config *config.APIConfig, ready chan interface{}, terminate chan os.Signal) error {
servenv := CreateServerEnv(ctx, nd)
cfg := cmdhttp.NewServerConfig()
cfg.APIPath = APIPrefix
cfg.SetAllowedOrigins(config.AccessControlAllowOrigin...)
cfg.SetAllowedMethods(config.AccessControlAllowMethods...)
cfg.SetAllowCredentials(config.AccessControlAllowCredentials)
maddr, err := ma.NewMultiaddr(config.Address)
if err != nil {
return err
}
// Listen on the configured address in order to bind the port number in case it has
// been configured as zero (i.e. OS-provided)
apiListener, err := manet.Listen(maddr)
if err != nil {
return err
}
handler := http.NewServeMux()
handler.Handle("/debug/pprof/", http.DefaultServeMux)
handler.Handle(APIPrefix+"/", cmdhttp.NewHandler(servenv, rootCmdDaemon, cfg))
apiserv := http.Server{
Handler: handler,
}
go func() {
err := apiserv.Serve(manet.NetListener(apiListener))
if err != nil && err != http.ErrServerClosed {
panic(err)
}
}()
// Write the resolved API address to the repo
config.Address = apiListener.Multiaddr().String()
if err := nd.Repo.SetAPIAddr(config.Address); err != nil {
return errors.Wrap(err, "Could not save API address to repo")
}
	// Signal that the server has started and then wait for a signal to stop.
close(ready)
received := <-terminate
if received != nil {
fmt.Println("Received signal", received)
}
fmt.Println("Shutting down...")
// Allow a grace period for clean shutdown.
ctx, cancel := context.WithTimeout(ctx, time.Second*5)
defer cancel()
if err := apiserv.Shutdown(ctx); err != nil {
fmt.Println("Error shutting down API server:", err)
}
return nil
}
func CreateServerEnv(ctx context.Context, nd *node.Node) *Env {
return &Env{
blockMiningAPI: nd.BlockMining.BlockMiningAPI,
drandAPI: nd.DrandAPI,
ctx: ctx,
inspectorAPI: NewInspectorAPI(nd.Repo),
porcelainAPI: nd.PorcelainAPI,
retrievalAPI: nd.RetrievalProtocol,
storageAPI: nd.StorageAPI,
}
}
| 1 | 23,810 | Just FYI we need this to align with `builtin.EpochDurationSeconds` from specs-actors or the state machine computations will be off. We'll need to turn this into a config option and follow the same monkey patching paths as the min miner size, seal proof types etc. The EpochDurationSeconds is currently a `const` so can't do it yet tho. | filecoin-project-venus | go |
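The comment above describes making the epoch duration configurable by following the same monkey-patching path used for other network parameters. A rough sketch of that pattern, under stated assumptions (standalone, hypothetical names — not the actual go-filecoin option):

package main

import (
	"fmt"
	"time"
)

// Package-level parameter that a build option may override. A true const (such as
// builtin.EpochDurationSeconds in specs-actors) could not be patched this way,
// which is the limitation the comment points out.
var EpochDuration = 30 * time.Second

// NetworkParamsConfig is a hypothetical config section carrying the override.
type NetworkParamsConfig struct {
	BlockTime time.Duration
}

// BuildOpt mirrors the shape of a node build option.
type BuildOpt func() error

// MonkeyPatchNetworkParamsOption copies config values onto package variables at node build time.
func MonkeyPatchNetworkParamsOption(cfg NetworkParamsConfig) BuildOpt {
	return func() error {
		if cfg.BlockTime > 0 {
			EpochDuration = cfg.BlockTime
		}
		return nil
	}
}

func main() {
	opt := MonkeyPatchNetworkParamsOption(NetworkParamsConfig{BlockTime: 25 * time.Second})
	if err := opt(); err != nil {
		panic(err)
	}
	fmt.Println("effective epoch duration:", EpochDuration)
}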
@@ -18,6 +18,7 @@
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+from __future__ import print_function
import errno
import httplib | 1 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import errno
import httplib
import os
import random
import re
import socket
import time
import urlparse
from boto import config, UserAgent
from boto.connection import AWSAuthConnection
from boto.exception import InvalidUriError
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableUploadException
from boto.s3.keyfile import KeyFile
try:
from hashlib import md5
except ImportError:
from md5 import md5
"""
Handler for Google Cloud Storage resumable uploads. See
http://code.google.com/apis/storage/docs/developer-guide.html#resumable
for details.
Resumable uploads will retry failed uploads, resuming at the byte
count completed by the last upload attempt. If too many retries happen with
no progress (per configurable num_retries param), the upload will be
aborted in the current process.
The caller can optionally specify a tracker_file_name param in the
ResumableUploadHandler constructor. If you do this, that file will
save the state needed to allow retrying later, in a separate process
(e.g., in a later run of gsutil).
"""
class ResumableUploadHandler(object):
BUFFER_SIZE = 8192
RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
socket.gaierror)
# (start, end) response indicating server has nothing (upload protocol uses
# inclusive numbering).
SERVER_HAS_NOTHING = (0, -1)
def __init__(self, tracker_file_name=None, num_retries=None):
"""
Constructor. Instantiate once for each uploaded file.
:type tracker_file_name: string
:param tracker_file_name: optional file name to save tracker URI.
If supplied and the current process fails the upload, it can be
retried in a new process. If called with an existing file containing
a valid tracker URI, we'll resume the upload from this URI; else
we'll start a new resumable upload (and write the URI to this
tracker file).
:type num_retries: int
:param num_retries: the number of times we'll re-try a resumable upload
making no progress. (Count resets every time we get progress, so
upload can span many more than this number of retries.)
"""
self.tracker_file_name = tracker_file_name
self.num_retries = num_retries
self.server_has_bytes = 0 # Byte count at last server check.
self.tracker_uri = None
if tracker_file_name:
self._load_tracker_uri_from_file()
# Save upload_start_point in instance state so caller can find how
# much was transferred by this ResumableUploadHandler (across retries).
self.upload_start_point = None
def _load_tracker_uri_from_file(self):
f = None
try:
f = open(self.tracker_file_name, 'r')
uri = f.readline().strip()
self._set_tracker_uri(uri)
except IOError, e:
# Ignore non-existent file (happens first time an upload
# is attempted on a file), but warn user for other errors.
if e.errno != errno.ENOENT:
# Will restart because self.tracker_uri is None.
print('Couldn\'t read URI tracker file (%s): %s. Restarting '
'upload from scratch.' %
(self.tracker_file_name, e.strerror))
except InvalidUriError, e:
# Warn user, but proceed (will restart because
# self.tracker_uri is None).
print('Invalid tracker URI (%s) found in URI tracker file '
'(%s). Restarting upload from scratch.' %
(uri, self.tracker_file_name))
finally:
if f:
f.close()
def _save_tracker_uri_to_file(self):
"""
Saves URI to tracker file if one was passed to constructor.
"""
if not self.tracker_file_name:
return
f = None
try:
with os.fdopen(os.open(self.tracker_file_name,
os.O_WRONLY | os.O_CREAT, 0600), 'w') as f:
f.write(self.tracker_uri)
except IOError, e:
raise ResumableUploadException(
                'Couldn\'t write URI tracker file (%s): %s.\nThis can happen '
'if you\'re using an incorrectly configured upload tool\n'
'(e.g., gsutil configured to save tracker files to an '
'unwritable directory)' %
(self.tracker_file_name, e.strerror),
ResumableTransferDisposition.ABORT)
def _set_tracker_uri(self, uri):
"""
Called when we start a new resumable upload or get a new tracker
URI for the upload. Saves URI and resets upload state.
Raises InvalidUriError if URI is syntactically invalid.
"""
parse_result = urlparse.urlparse(uri)
if (parse_result.scheme.lower() not in ['http', 'https'] or
not parse_result.netloc):
raise InvalidUriError('Invalid tracker URI (%s)' % uri)
self.tracker_uri = uri
self.tracker_uri_host = parse_result.netloc
self.tracker_uri_path = '%s?%s' % (
parse_result.path, parse_result.query)
self.server_has_bytes = 0
def get_tracker_uri(self):
"""
Returns upload tracker URI, or None if the upload has not yet started.
"""
return self.tracker_uri
def get_upload_id(self):
"""
Returns the upload ID for the resumable upload, or None if the upload
has not yet started.
"""
# We extract the upload_id from the tracker uri. We could retrieve the
# upload_id from the headers in the response but this only works for
# the case where we get the tracker uri from the service. In the case
# where we get the tracker from the tracking file we need to do this
# logic anyway.
delim = '?upload_id='
if self.tracker_uri and delim in self.tracker_uri:
return self.tracker_uri[self.tracker_uri.index(delim) + len(delim):]
else:
return None
def _remove_tracker_file(self):
if (self.tracker_file_name and
os.path.exists(self.tracker_file_name)):
os.unlink(self.tracker_file_name)
def _build_content_range_header(self, range_spec='*', length_spec='*'):
return 'bytes %s/%s' % (range_spec, length_spec)
def _query_server_state(self, conn, file_length):
"""
Queries server to find out state of given upload.
Note that this method really just makes special case use of the
fact that the upload server always returns the current start/end
state whenever a PUT doesn't complete.
Returns HTTP response from sending request.
Raises ResumableUploadException if problem querying server.
"""
# Send an empty PUT so that server replies with this resumable
# transfer's state.
put_headers = {}
put_headers['Content-Range'] = (
self._build_content_range_header('*', file_length))
put_headers['Content-Length'] = '0'
return AWSAuthConnection.make_request(conn, 'PUT',
path=self.tracker_uri_path,
auth_path=self.tracker_uri_path,
headers=put_headers,
host=self.tracker_uri_host)
def _query_server_pos(self, conn, file_length):
"""
Queries server to find out what bytes it currently has.
Returns (server_start, server_end), where the values are inclusive.
For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2.
Raises ResumableUploadException if problem querying server.
"""
resp = self._query_server_state(conn, file_length)
if resp.status == 200:
# To handle the boundary condition where the server has the complete
# file, we return (server_start, file_length-1). That way the
# calling code can always simply read up through server_end. (If we
# didn't handle this boundary condition here, the caller would have
# to check whether server_end == file_length and read one fewer byte
# in that case.)
return (0, file_length - 1) # Completed upload.
if resp.status != 308:
# This means the server didn't have any state for the given
# upload ID, which can happen (for example) if the caller saved
# the tracker URI to a file and then tried to restart the transfer
# after that upload ID has gone stale. In that case we need to
# start a new transfer (and the caller will then save the new
# tracker URI to the tracker file).
raise ResumableUploadException(
'Got non-308 response (%s) from server state query' %
resp.status, ResumableTransferDisposition.START_OVER)
got_valid_response = False
range_spec = resp.getheader('range')
if range_spec:
# Parse 'bytes=<from>-<to>' range_spec.
m = re.search('bytes=(\d+)-(\d+)', range_spec)
if m:
server_start = long(m.group(1))
server_end = long(m.group(2))
got_valid_response = True
else:
# No Range header, which means the server does not yet have
# any bytes. Note that the Range header uses inclusive 'from'
# and 'to' values. Since Range 0-0 would mean that the server
# has byte 0, omitting the Range header is used to indicate that
# the server doesn't have any bytes.
return self.SERVER_HAS_NOTHING
if not got_valid_response:
raise ResumableUploadException(
'Couldn\'t parse upload server state query response (%s)' %
str(resp.getheaders()), ResumableTransferDisposition.START_OVER)
if conn.debug >= 1:
print 'Server has: Range: %d - %d.' % (server_start, server_end)
return (server_start, server_end)
def _start_new_resumable_upload(self, key, headers=None):
"""
Starts a new resumable upload.
Raises ResumableUploadException if any errors occur.
"""
conn = key.bucket.connection
if conn.debug >= 1:
print 'Starting new resumable upload.'
self.server_has_bytes = 0
# Start a new resumable upload by sending a POST request with an
# empty body and the "X-Goog-Resumable: start" header. Include any
# caller-provided headers (e.g., Content-Type) EXCEPT Content-Length
# (and raise an exception if they tried to pass one, since it's
# a semantic error to specify it at this point, and if we were to
# include one now it would cause the server to expect that many
        # bytes; the POST doesn't include the actual file bytes. We set
# the Content-Length in the subsequent PUT, based on the uploaded
# file size.
post_headers = {}
for k in headers:
if k.lower() == 'content-length':
raise ResumableUploadException(
'Attempt to specify Content-Length header (disallowed)',
ResumableTransferDisposition.ABORT)
post_headers[k] = headers[k]
post_headers[conn.provider.resumable_upload_header] = 'start'
resp = conn.make_request(
'POST', key.bucket.name, key.name, post_headers)
# Get tracker URI from response 'Location' header.
body = resp.read()
# Check for various status conditions.
if resp.status in [500, 503]:
# Retry status 500 and 503 errors after a delay.
raise ResumableUploadException(
'Got status %d from attempt to start resumable upload. '
'Will wait/retry' % resp.status,
ResumableTransferDisposition.WAIT_BEFORE_RETRY)
elif resp.status != 200 and resp.status != 201:
raise ResumableUploadException(
'Got status %d from attempt to start resumable upload. '
'Aborting' % resp.status,
ResumableTransferDisposition.ABORT)
# Else we got 200 or 201 response code, indicating the resumable
# upload was created.
tracker_uri = resp.getheader('Location')
if not tracker_uri:
raise ResumableUploadException(
'No resumable tracker URI found in resumable initiation '
'POST response (%s)' % body,
ResumableTransferDisposition.WAIT_BEFORE_RETRY)
self._set_tracker_uri(tracker_uri)
self._save_tracker_uri_to_file()
def _upload_file_bytes(self, conn, http_conn, fp, file_length,
total_bytes_uploaded, cb, num_cb, headers):
"""
Makes one attempt to upload file bytes, using an existing resumable
upload connection.
Returns (etag, generation, metageneration) from server upon success.
Raises ResumableUploadException if any problems occur.
"""
buf = fp.read(self.BUFFER_SIZE)
if cb:
# The cb_count represents the number of full buffers to send between
# cb executions.
if num_cb > 2:
cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(total_bytes_uploaded, file_length)
# Build resumable upload headers for the transfer. Don't send a
# Content-Range header if the file is 0 bytes long, because the
# resumable upload protocol uses an *inclusive* end-range (so, sending
# 'bytes 0-0/1' would actually mean you're sending a 1-byte file).
if not headers:
put_headers = {}
else:
put_headers = headers.copy()
if file_length:
if total_bytes_uploaded == file_length:
range_header = self._build_content_range_header(
'*', file_length)
else:
range_header = self._build_content_range_header(
'%d-%d' % (total_bytes_uploaded, file_length - 1),
file_length)
put_headers['Content-Range'] = range_header
# Set Content-Length to the total bytes we'll send with this PUT.
put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)
http_request = AWSAuthConnection.build_base_http_request(
conn, 'PUT', path=self.tracker_uri_path, auth_path=None,
headers=put_headers, host=self.tracker_uri_host)
http_conn.putrequest('PUT', http_request.path)
for k in put_headers:
http_conn.putheader(k, put_headers[k])
http_conn.endheaders()
# Turn off debug on http connection so upload content isn't included
# in debug stream.
http_conn.set_debuglevel(0)
while buf:
http_conn.send(buf)
for alg in self.digesters:
self.digesters[alg].update(buf)
total_bytes_uploaded += len(buf)
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(total_bytes_uploaded, file_length)
i = 0
buf = fp.read(self.BUFFER_SIZE)
http_conn.set_debuglevel(conn.debug)
if cb:
cb(total_bytes_uploaded, file_length)
if total_bytes_uploaded != file_length:
# Abort (and delete the tracker file) so if the user retries
# they'll start a new resumable upload rather than potentially
# attempting to pick back up later where we left off.
raise ResumableUploadException(
'File changed during upload: EOF at %d bytes of %d byte file.' %
(total_bytes_uploaded, file_length),
ResumableTransferDisposition.ABORT)
resp = http_conn.getresponse()
# Restore http connection debug level.
http_conn.set_debuglevel(conn.debug)
if resp.status == 200:
# Success.
return (resp.getheader('etag'),
resp.getheader('x-goog-generation'),
resp.getheader('x-goog-metageneration'))
# Retry timeout (408) and status 500 and 503 errors after a delay.
elif resp.status in [408, 500, 503]:
disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY
else:
# Catch all for any other error codes.
disposition = ResumableTransferDisposition.ABORT
raise ResumableUploadException('Got response code %d while attempting '
'upload (%s)' %
(resp.status, resp.reason), disposition)
def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,
num_cb):
"""
Attempts a resumable upload.
Returns (etag, generation, metageneration) from server upon success.
Raises ResumableUploadException if any problems occur.
"""
(server_start, server_end) = self.SERVER_HAS_NOTHING
conn = key.bucket.connection
if self.tracker_uri:
# Try to resume existing resumable upload.
try:
(server_start, server_end) = (
self._query_server_pos(conn, file_length))
self.server_has_bytes = server_start
if server_end:
# If the server already has some of the content, we need to
# update the digesters with the bytes that have already been
# uploaded to ensure we get a complete hash in the end.
print 'Catching up hash digest(s) for resumed upload'
fp.seek(0)
# Read local file's bytes through position server has. For
# example, if server has (0, 3) we want to read 3-0+1=4 bytes.
bytes_to_go = server_end + 1
while bytes_to_go:
chunk = fp.read(min(key.BufferSize, bytes_to_go))
if not chunk:
raise ResumableUploadException(
'Hit end of file during resumable upload hash '
'catchup. This should not happen under\n'
'normal circumstances, as it indicates the '
'server has more bytes of this transfer\nthan'
' the current file size. Restarting upload.',
ResumableTransferDisposition.START_OVER)
for alg in self.digesters:
self.digesters[alg].update(chunk)
bytes_to_go -= len(chunk)
if conn.debug >= 1:
print 'Resuming transfer.'
except ResumableUploadException, e:
if conn.debug >= 1:
print 'Unable to resume transfer (%s).' % e.message
self._start_new_resumable_upload(key, headers)
else:
self._start_new_resumable_upload(key, headers)
# upload_start_point allows the code that instantiated the
# ResumableUploadHandler to find out the point from which it started
# uploading (e.g., so it can correctly compute throughput).
if self.upload_start_point is None:
self.upload_start_point = server_end
total_bytes_uploaded = server_end + 1
# Corner case: Don't attempt to seek if we've already uploaded the
# entire file, because if the file is a stream (e.g., the KeyFile
# wrapper around input key when copying between providers), attempting
# to seek to the end of file would result in an InvalidRange error.
if file_length < total_bytes_uploaded:
fp.seek(total_bytes_uploaded)
conn = key.bucket.connection
# Get a new HTTP connection (vs conn.get_http_connection(), which reuses
# pool connections) because httplib requires a new HTTP connection per
# transaction. (Without this, calling http_conn.getresponse() would get
# "ResponseNotReady".)
http_conn = conn.new_http_connection(self.tracker_uri_host, conn.port,
conn.is_secure)
http_conn.set_debuglevel(conn.debug)
# Make sure to close http_conn at end so if a local file read
# failure occurs partway through server will terminate current upload
# and can report that progress on next attempt.
try:
return self._upload_file_bytes(conn, http_conn, fp, file_length,
total_bytes_uploaded, cb, num_cb,
headers)
except (ResumableUploadException, socket.error):
resp = self._query_server_state(conn, file_length)
if resp.status == 400:
raise ResumableUploadException('Got 400 response from server '
'state query after failed resumable upload attempt. This '
'can happen for various reasons, including specifying an '
'invalid request (e.g., an invalid canned ACL) or if the '
'file size changed between upload attempts',
ResumableTransferDisposition.ABORT)
else:
raise
finally:
http_conn.close()
def _check_final_md5(self, key, etag):
"""
Checks that etag from server agrees with md5 computed before upload.
This is important, since the upload could have spanned a number of
hours and multiple processes (e.g., gsutil runs), and the user could
change some of the file and not realize they have inconsistent data.
"""
if key.bucket.connection.debug >= 1:
print 'Checking md5 against etag.'
if key.md5 != etag.strip('"\''):
# Call key.open_read() before attempting to delete the
# (incorrect-content) key, so we perform that request on a
            # different HTTP connection. This is needed because httplib
# will return a "Response not ready" error if you try to perform
# a second transaction on the connection.
key.open_read()
key.close()
key.delete()
raise ResumableUploadException(
'File changed during upload: md5 signature doesn\'t match etag '
'(incorrect uploaded object deleted)',
ResumableTransferDisposition.ABORT)
def handle_resumable_upload_exception(self, e, debug):
if (e.disposition == ResumableTransferDisposition.ABORT_CUR_PROCESS):
if debug >= 1:
print('Caught non-retryable ResumableUploadException (%s); '
'aborting but retaining tracker file' % e.message)
raise
elif (e.disposition == ResumableTransferDisposition.ABORT):
if debug >= 1:
print('Caught non-retryable ResumableUploadException (%s); '
'aborting and removing tracker file' % e.message)
self._remove_tracker_file()
raise
else:
if debug >= 1:
print('Caught ResumableUploadException (%s) - will retry' %
e.message)
def track_progress_less_iterations(self, server_had_bytes_before_attempt,
roll_back_md5=True, debug=0):
# At this point we had a re-tryable failure; see if made progress.
if self.server_has_bytes > server_had_bytes_before_attempt:
self.progress_less_iterations = 0 # If progress, reset counter.
else:
self.progress_less_iterations += 1
if roll_back_md5:
# Rollback any potential hash updates, as we did not
# make any progress in this iteration.
self.digesters = self.digesters_before_attempt
if self.progress_less_iterations > self.num_retries:
# Don't retry any longer in the current process.
raise ResumableUploadException(
'Too many resumable upload attempts failed without '
'progress. You might try this upload again later',
ResumableTransferDisposition.ABORT_CUR_PROCESS)
# Use binary exponential backoff to desynchronize client requests.
sleep_time_secs = random.random() * (2**self.progress_less_iterations)
if debug >= 1:
print ('Got retryable failure (%d progress-less in a row).\n'
'Sleeping %3.1f seconds before re-trying' %
(self.progress_less_iterations, sleep_time_secs))
time.sleep(sleep_time_secs)
def send_file(self, key, fp, headers, cb=None, num_cb=10, hash_algs=None):
"""
Upload a file to a key into a bucket on GS, using GS resumable upload
protocol.
:type key: :class:`boto.s3.key.Key` or subclass
:param key: The Key object to which data is to be uploaded
:type fp: file-like object
:param fp: The file pointer to upload
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type cb: function
:param cb: a callback function that will be called to report progress on
the upload. The callback should accept two integer parameters, the
first representing the number of bytes that have been successfully
transmitted to GS, and the second representing the total number of
bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer. Providing a negative integer will cause
your callback to be called with each buffer read.
:type hash_algs: dictionary
:param hash_algs: (optional) Dictionary mapping hash algorithm
descriptions to corresponding state-ful hashing objects that
implement update(), digest(), and copy() (e.g. hashlib.md5()).
Defaults to {'md5': md5()}.
Raises ResumableUploadException if a problem occurs during the transfer.
"""
if not headers:
headers = {}
# If Content-Type header is present and set to None, remove it.
# This is gsutil's way of asking boto to refrain from auto-generating
# that header.
CT = 'Content-Type'
if CT in headers and headers[CT] is None:
del headers[CT]
headers['User-Agent'] = UserAgent
# Determine file size different ways for case where fp is actually a
# wrapper around a Key vs an actual file.
if isinstance(fp, KeyFile):
file_length = fp.getkey().size
else:
fp.seek(0, os.SEEK_END)
file_length = fp.tell()
fp.seek(0)
debug = key.bucket.connection.debug
# Compute the MD5 checksum on the fly.
if hash_algs is None:
hash_algs = {'md5': md5}
self.digesters = dict(
(alg, hash_algs[alg]()) for alg in hash_algs or {})
# Use num-retries from constructor if one was provided; else check
# for a value specified in the boto config file; else default to 5.
if self.num_retries is None:
self.num_retries = config.getint('Boto', 'num_retries', 6)
self.progress_less_iterations = 0
while True: # Retry as long as we're making progress.
server_had_bytes_before_attempt = self.server_has_bytes
self.digesters_before_attempt = dict(
(alg, self.digesters[alg].copy())
for alg in self.digesters)
try:
# Save generation and metageneration in class state so caller
# can find these values, for use in preconditions of future
# operations on the uploaded object.
(etag, self.generation, self.metageneration) = (
self._attempt_resumable_upload(key, fp, file_length,
headers, cb, num_cb))
# Get the final digests for the uploaded content.
for alg in self.digesters:
key.local_hashes[alg] = self.digesters[alg].digest()
                # Upload succeeded, so remove the tracker file (if we have one).
self._remove_tracker_file()
self._check_final_md5(key, etag)
key.generation = self.generation
if debug >= 1:
print 'Resumable upload complete.'
return
except self.RETRYABLE_EXCEPTIONS, e:
if debug >= 1:
print('Caught exception (%s)' % e.__repr__())
if isinstance(e, IOError) and e.errno == errno.EPIPE:
# Broken pipe error causes httplib to immediately
# close the socket (http://bugs.python.org/issue5542),
# so we need to close the connection before we resume
# the upload (which will cause a new connection to be
# opened the next time an HTTP request is sent).
key.bucket.connection.connection.close()
except ResumableUploadException, e:
self.handle_resumable_upload_exception(e, debug)
self.track_progress_less_iterations(server_had_bytes_before_attempt,
True, debug)
| 1 | 10,155 | Is this import needed? | boto-boto | py |
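The question above is about the `from __future__ import print_function` line the patch adds. A small standalone sketch (not boto code) of what that import changes under Python 2:

from __future__ import print_function

# With the import, print is a function with Python 3 semantics on Python 2 as well.
progress_less_iterations = 3
sleep_time_secs = 1.5

# Behaves identically on Python 2 (with the import) and Python 3.
print('Sleeping %3.1f seconds before re-trying' % sleep_time_secs)

# With the import this prints the two values separated by a space; on Python 2
# without the import, the same line would print the tuple (3, 1.5).
print(progress_less_iterations, sleep_time_secs)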
@@ -68,6 +68,8 @@ storiesOf( 'Dashboard', module )
<PostSearcher />
</WithTestRegistry>
);
+ }, {
+ padding: 0,
} )
.add( 'URL Search Widget', () => {
const setupRegistry = ( registry ) => provideSiteInfo( registry ); | 1 | /**
* Dashboard Page Stories.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import { storiesOf } from '@storybook/react';
/**
* WordPress dependencies
*/
import { addFilter, doAction, removeAllFilters } from '@wordpress/hooks';
import { __ } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import DashboardModuleHeader from '../assets/js/components/dashboard/DashboardModuleHeader';
import CTA from '../assets/js/components/legacy-notifications/cta';
import { createAddToFilter } from '../assets/js/util/helpers';
import Layout from '../assets/js/components/layout/Layout';
import LegacyDashboardSearchFunnelInner from '../assets/js/modules/search-console/components/dashboard/LegacyDashboardSearchFunnelInner';
import LegacyAnalyticsDashboardWidgetTopLevel from '../assets/js/modules/analytics/components/dashboard/LegacyAnalyticsDashboardWidgetTopLevel';
import LegacySearchConsoleDashboardWidgetTopLevel from '../assets/js/modules/search-console/components/dashboard/LegacySearchConsoleDashboardWidgetTopLevel';
import PostSearcher from '../assets/js/components/PostSearcher';
import URLSearchWidget from '../assets/js/googlesitekit/widgets/components/URLSearchWidget';
import { googlesitekit as analyticsDashboardData } from '../.storybook/data/wp-admin-admin.php-page=googlesitekit-module-analytics-googlesitekit';
import { CORE_SITE } from '../assets/js/googlesitekit/datastore/site/constants';
import { MODULES_ANALYTICS } from '../assets/js/modules/analytics/datastore/constants';
import { MODULES_SEARCH_CONSOLE } from '../assets/js/modules/search-console/datastore/constants';
import { provideSiteInfo, WithTestRegistry } from '../tests/js/utils';
import { getWidgetComponentProps } from '../assets/js/googlesitekit/widgets/util';
storiesOf( 'Dashboard', module )
.add( 'Module Header', () => (
<DashboardModuleHeader
title={ __( 'Module Header', 'google-site-kit' ) }
description={ __( 'Description of Module', 'google-site-kit' ) }
/>
) )
.add( 'Post Searcher', () => {
const setupRegistry = ( { dispatch } ) => {
dispatch( CORE_SITE ).receiveSiteInfo( {
usingProxy: true,
referenceSiteURL: 'http://example.com',
adminURL: 'http://example.com/wp-admin',
timezone: 'America/Detroit',
siteName: 'My Site Name',
} );
};
return (
<WithTestRegistry callback={ setupRegistry } >
<PostSearcher />
</WithTestRegistry>
);
} )
.add( 'URL Search Widget', () => {
const setupRegistry = ( registry ) => provideSiteInfo( registry );
const widgetComponentProps = getWidgetComponentProps( 'urlSearch' );
return (
<WithTestRegistry callback={ setupRegistry } >
<URLSearchWidget { ...widgetComponentProps } />
</WithTestRegistry>
);
} )
.add( 'Search Funnel Analytics Inactive', () => {
global._googlesitekitLegacyData = analyticsDashboardData;
const setupRegistry = ( { dispatch } ) => {
dispatch( MODULES_ANALYTICS ).receiveGetSettings( {
accountID: '123456789',
propertyID: '123456789',
internalWebPropertyID: '123456789',
profileID: '123456789',
} );
dispatch( MODULES_SEARCH_CONSOLE ).receiveGetSettings( {
propertyID: 'https://example.com/',
} );
};
const addLegacySearchConsoleDashboardWidgetTopLevel = createAddToFilter( <LegacySearchConsoleDashboardWidgetTopLevel /> );
removeAllFilters( 'googlesitekit.DashboardSearchFunnel' );
addFilter( 'googlesitekit.DashboardSearchFunnel',
'googlesitekit.SearchConsoleSearchFunnel',
addLegacySearchConsoleDashboardWidgetTopLevel );
// Load the datacache with data.
setTimeout( () => {
doAction(
'googlesitekit.moduleLoaded',
'Dashboard'
);
}, 250 );
return (
<WithTestRegistry callback={ setupRegistry } >
<Layout className="googlesitekit-analytics-search-funnel">
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<LegacyDashboardSearchFunnelInner />
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-4-phone
mdc-layout-grid__cell--span-4-tablet
mdc-layout-grid__cell--span-6-desktop
">
<CTA
title={ __( 'Learn more about what visitors do on your site.', 'google-site-kit' ) }
description={ __( 'Connecting with Google Analytics to see unique visitors, goal completions, top pages and more.', 'google-site-kit' ) }
ctaLink="#"
ctaLabel={ __( 'Set up analytics', 'google-site-kit' ) }
/>
</div>
</div>
</div>
</Layout>
</WithTestRegistry>
);
},
{ options: { readySelector: '.googlesitekit-chart .googlesitekit-chart__inner' } } )
.add( 'Search Funnel', () => {
global._googlesitekitLegacyData = analyticsDashboardData;
const addLegacyAnalyticsDashboardWidgetTopLevel = createAddToFilter( <LegacyAnalyticsDashboardWidgetTopLevel /> );
const addLegacySearchConsoleDashboardWidgetTopLevel = createAddToFilter( <LegacySearchConsoleDashboardWidgetTopLevel /> );
removeAllFilters( 'googlesitekit.DashboardSearchFunnel' );
addFilter( 'googlesitekit.DashboardSearchFunnel',
'googlesitekit.Analytics',
addLegacyAnalyticsDashboardWidgetTopLevel, 11 );
addFilter( 'googlesitekit.DashboardSearchFunnel',
'googlesitekit.SearchConsoleSearchFunnel',
addLegacySearchConsoleDashboardWidgetTopLevel );
// Manually set some missing goals data.
const datacacheIsString = 'string' === typeof global._googlesitekitLegacyData.admin.datacache;
if ( datacacheIsString ) {
global._googlesitekitLegacyData.admin.datacache = JSON.parse( global._googlesitekitLegacyData.admin.datacache );
}
global._googlesitekitLegacyData.admin.datacache[ 'modules::analytics::goals::ed2bfff92ddeb68e5946584315c67b28' ] = JSON.parse( '{"itemsPerPage":1000,"kind":"analytics#goals","nextLink":null,"previousLink":null,"startIndex":1,"totalResults":5,"username":"[email protected]","items":[{"accountID":"XXXXXX","active":true,"created":"2016-12-06T15:36:07.002Z","id":"1","internalWebPropertyID":"XXXXXXX","kind":"analytics#goal","name":"Basic","profileID":"XXXXXXXX","selfLink":"https://www.googleapis.com/analytics/v3/management/accounts/XXXXXX/webproperties/UA-XXXXXX-3/profiles/XXXXXXXX/goals/1","type":"URL_DESTINATION","updated":"2016-12-06T21:40:31.531Z","value":299,"webPropertyID":"UA-XXXXXX-3","parentLink":{"href":"https://www.googleapis.com/analytics/v3/management/accounts/XXXXXX/webproperties/UA-XXXXXX-3/profiles/XXXXXXXX","type":"analytics#profile"},"urlDestinationDetails":{"caseSensitive":false,"firstStepRequired":true,"matchType":"EXACT","url":"/thankyou","steps":[{"name":"Basic Button","number":1,"url":"/pricing-basic"}]}},{"accountID":"XXXXXX","active":true,"created":"2016-12-06T16:30:57.626Z","id":"2","internalWebPropertyID":"XXXXXXX","kind":"analytics#goal","name":"Professional","profileID":"XXXXXXXX","selfLink":"https://www.googleapis.com/analytics/v3/management/accounts/XXXXXX/webproperties/UA-XXXXXX-3/profiles/XXXXXXXX/goals/2","type":"URL_DESTINATION","updated":"2016-12-06T21:40:43.894Z","value":699,"webPropertyID":"UA-XXXXXX-3","parentLink":{"href":"https://www.googleapis.com/analytics/v3/management/accounts/XXXXXX/webproperties/UA-XXXXXX-3/profiles/XXXXXXXX","type":"analytics#profile"},"urlDestinationDetails":{"caseSensitive":false,"firstStepRequired":true,"matchType":"EXACT","url":"/thankyou","steps":[{"name":"Professional Button","number":1,"url":"/pricing-professional"}]}},{"accountID":"XXXXXX","active":true,"created":"2016-12-06T16:31:32.429Z","id":"3","internalWebPropertyID":"XXXXXXX","kind":"analytics#goal","name":"Enterprise","profileID":"XXXXXXXX","selfLink":"https://www.googleapis.com/analytics/v3/management/accounts/XXXXXX/webproperties/UA-XXXXXX-3/profiles/XXXXXXXX/goals/3","type":"URL_DESTINATION","updated":"2016-12-06T21:40:55.366Z","value":999,"webPropertyID":"UA-XXXXXX-3","parentLink":{"href":"https://www.googleapis.com/analytics/v3/management/accounts/XXXXXX/webproperties/UA-XXXXXX-3/profiles/XXXXXXXX","type":"analytics#profile"},"urlDestinationDetails":{"caseSensitive":false,"firstStepRequired":true,"matchType":"EXACT","url":"/thankyou","steps":[{"name":"Enterprise Button","number":1,"url":"/pricing-enterprise"}]}},{"accountID":"XXXXXX","active":true,"created":"2016-12-06T16:32:17.667Z","id":"4","internalWebPropertyID":"XXXXXXX","kind":"analytics#goal","name":"Form Success (non-funnel)","profileID":"XXXXXXXX","selfLink":"https://www.googleapis.com/analytics/v3/management/accounts/XXXXXX/webproperties/UA-XXXXXX-3/profiles/XXXXXXXX/goals/4","type":"URL_DESTINATION","updated":"2016-12-06T16:53:22.277Z","value":0,"webPropertyID":"UA-XXXXXX-3","parentLink":{"href":"https://www.googleapis.com/analytics/v3/management/accounts/XXXXXX/webproperties/UA-XXXXXX-3/profiles/XXXXXXXX","type":"analytics#profile"},"urlDestinationDetails":{"caseSensitive":false,"firstStepRequired":false,"matchType":"EXACT","url":"/thankyou"}},{"accountID":"XXXXXX","active":true,"created":"2016-12-06T16:41:10.580Z","id":"5","internalWebPropertyID":"XXXXXXX","kind":"analytics#goal","name":"Get 
Started","profileID":"XXXXXXXX","selfLink":"https://www.googleapis.com/analytics/v3/management/accounts/XXXXXX/webproperties/UA-XXXXXX-3/profiles/XXXXXXXX/goals/5","type":"URL_DESTINATION","updated":"2016-12-06T16:53:14.486Z","value":0,"webPropertyID":"UA-XXXXXX-3","parentLink":{"href":"https://www.googleapis.com/analytics/v3/management/accounts/XXXXXX/webproperties/UA-XXXXXX-3/profiles/XXXXXXXX","type":"analytics#profile"},"urlDestinationDetails":{"caseSensitive":false,"firstStepRequired":true,"matchType":"EXACT","url":"/thankyou","steps":[{"name":"Get Started Header Button","number":1,"url":"/get-started"}]}}]}' );
if ( datacacheIsString ) {
global._googlesitekitLegacyData.admin.datacache = JSON.stringify( global._googlesitekitLegacyData.admin.datacache );
}
// Load the datacache with data.
setTimeout( () => {
doAction(
'googlesitekit.moduleLoaded',
'Dashboard'
);
}, 250 );
const setupRegistry = ( { dispatch } ) => {
dispatch( MODULES_ANALYTICS ).receiveGetSettings( {
accountID: '123456789',
propertyID: '123456789',
internalWebPropertyID: '123456789',
profileID: '123456789',
} );
dispatch( MODULES_SEARCH_CONSOLE ).receiveGetSettings( {
propertyID: 'https://example.com/',
} );
};
return (
<WithTestRegistry callback={ setupRegistry } >
<Layout className="googlesitekit-analytics-search-funnel">
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<LegacyDashboardSearchFunnelInner />
</div>
</div>
</Layout>
</WithTestRegistry>
);
}, {
options: {
readySelector: '.googlesitekit-chart .googlesitekit-chart__inner',
},
} );
| 1 | 38,254 | Dashboard stories also need to have the default padding. | google-site-kit-wp | js |
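The `{ padding: 0 }` object in the diff above is Storybook's optional per-story parameters argument to `.add()`. As a rough illustration only, assuming (as the patch seems to) that a project-level decorator consumes a `padding` parameter, a story can keep or override the default padding like this:

    import { storiesOf } from '@storybook/react';

    storiesOf( 'Dashboard', module )
        .add( 'Example Story', () => <div>Example</div>, {
            // Assumed to be read by a project decorator, mirroring the
            // `{ padding: 0 }` object added in the diff above; omit the
            // parameter to keep the default padding.
            padding: 0,
        } );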
@@ -77,6 +77,17 @@ public interface RewriteDataFiles extends SnapshotUpdate<RewriteDataFiles, Rewri
*/
String TARGET_FILE_SIZE_BYTES = "target-file-size-bytes";
+ /**
+ * If the compaction should commit rewritten data files using the sequence number at compaction start time instead
+ * of optimistically incrementing the latest sequence number.
+ * <p>
+ * This avoids commit conflicts with updates that add newer equality deletes at a higher sequence number.
+ * <p>
+ * Defaults to false.
+ */
+ String USE_STARTING_SEQUENCE_NUMBER = "use-starting-sequence-number";
+ boolean USE_STARTING_SEQUENCE_NUMBER_DEFAULT = false;
+
/**
* Choose BINPACK as a strategy for this rewrite operation
* @return this for method chaining | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.actions;
import java.util.List;
import org.apache.iceberg.SortOrder;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.expressions.Expression;
/**
* An action for rewriting data files according to a rewrite strategy.
* Generally used for optimizing the sizing and layout of data files within a table.
*/
public interface RewriteDataFiles extends SnapshotUpdate<RewriteDataFiles, RewriteDataFiles.Result> {
/**
* Enable committing groups of files (see max-file-group-size-bytes) prior to the entire rewrite completing.
* This will produce additional commits but allow for progress even if some groups fail to commit. This setting
* will not change the correctness of the rewrite operation as file groups can be compacted independently.
* <p>
* The default is false, which produces a single commit when the entire job has completed.
*/
String PARTIAL_PROGRESS_ENABLED = "partial-progress.enabled";
boolean PARTIAL_PROGRESS_ENABLED_DEFAULT = false;
/**
* The maximum amount of Iceberg commits that this rewrite is allowed to produce if partial progress is enabled. This
* setting has no effect if partial progress is disabled.
*/
String PARTIAL_PROGRESS_MAX_COMMITS = "partial-progress.max-commits";
int PARTIAL_PROGRESS_MAX_COMMITS_DEFAULT = 10;
/**
* The entire rewrite operation is broken down into pieces based on partitioning and within partitions based
* on size into groups. These sub-units of the rewrite are referred to as file groups. The largest amount of data that
* should be compacted in a single group is controlled by {@link #MAX_FILE_GROUP_SIZE_BYTES}. This helps with
* breaking down the rewriting of very large partitions which may not be rewritable otherwise due to the resource
* constraints of the cluster. For example a sort based rewrite may not scale to terabyte sized partitions, those
* partitions need to be worked on in small subsections to avoid exhaustion of resources.
* <p>
* When grouping files, the underlying rewrite strategy will use this value as to limit the files which
* will be included in a single file group. A group will be processed by a single framework "action". For example,
* in Spark this means that each group would be rewritten in its own Spark action. A group will never contain files
* for multiple output partitions.
*/
String MAX_FILE_GROUP_SIZE_BYTES = "max-file-group-size-bytes";
long MAX_FILE_GROUP_SIZE_BYTES_DEFAULT = 1024L * 1024L * 1024L * 100L; // 100 Gigabytes
/**
* The max number of file groups to be simultaneously rewritten by the rewrite strategy. The structure and
* contents of the group is determined by the rewrite strategy. Each file group will be rewritten
* independently and asynchronously.
**/
String MAX_CONCURRENT_FILE_GROUP_REWRITES = "max-concurrent-file-group-rewrites";
int MAX_CONCURRENT_FILE_GROUP_REWRITES_DEFAULT = 1;
/**
* The output file size that this rewrite strategy will attempt to generate when rewriting files. By default this
* will use the "write.target-file-size-bytes value" in the table properties of the table being updated.
*/
String TARGET_FILE_SIZE_BYTES = "target-file-size-bytes";
/**
* Choose BINPACK as a strategy for this rewrite operation
* @return this for method chaining
*/
default RewriteDataFiles binPack() {
return this;
}
/**
* Choose SORT as a strategy for this rewrite operation using the table's sortOrder
* @return this for method chaining
*/
default RewriteDataFiles sort() {
throw new UnsupportedOperationException("SORT Rewrite Strategy not implemented for this framework");
}
/**
* Choose SORT as a strategy for this rewrite operation and manually specify the sortOrder to use
* @param sortOrder user defined sortOrder
* @return this for method chaining
*/
default RewriteDataFiles sort(SortOrder sortOrder) {
throw new UnsupportedOperationException("SORT Rewrite Strategy not implemented for this framework");
}
/**
* A user provided filter for determining which files will be considered by the rewrite strategy. This will be used
* in addition to whatever rules the rewrite strategy generates. For example this would be used for providing a
* restriction to only run rewrite on a specific partition.
*
* @param expression An iceberg expression used to determine which files will be considered for rewriting
* @return this for chaining
*/
RewriteDataFiles filter(Expression expression);
/**
* A map of file group information to the results of rewriting that file group. If the results are null then
* that particular file group failed. We should only have failed groups if partial progress is enabled otherwise we
* will report a total failure for the job.
*/
interface Result {
List<FileGroupRewriteResult> rewriteResults();
default int addedDataFilesCount() {
return rewriteResults().stream().mapToInt(FileGroupRewriteResult::addedDataFilesCount).sum();
}
default int rewrittenDataFilesCount() {
return rewriteResults().stream().mapToInt(FileGroupRewriteResult::rewrittenDataFilesCount).sum();
}
}
/**
* For a particular file group, the number of files which are newly created and the number of files
* which were formerly part of the table but have been rewritten.
*/
interface FileGroupRewriteResult {
FileGroupInfo info();
int addedDataFilesCount();
int rewrittenDataFilesCount();
}
/**
* A description of a file group, when it was processed, and within which partition. For use
* tracking rewrite operations and for returning results.
*/
interface FileGroupInfo {
/**
* returns which file group this is out of the total set of file groups for this rewrite
*/
int globalIndex();
/**
* returns which file group this is out of the set of file groups for this partition
*/
int partitionIndex();
/**
* returns which partition this file group contains files from
*/
StructLike partition();
}
}
| 1 | 45,768 | Is there a reason why we wouldn't use this as the default? | apache-iceberg | java |
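As a hedged usage sketch only (the `SparkActions` entry point and the generic `option(name, value)` hook are assumptions about the surrounding actions API and do not appear in this interface), a caller opting into the new behavior might look like:

    import org.apache.iceberg.Table;
    import org.apache.iceberg.spark.actions.SparkActions;

    class CompactionExample {
      // Hypothetical helper: asks the rewrite to commit using the sequence
      // number captured at compaction start, per the constant documented above.
      static void compact(Table table) {
        SparkActions.get()
            .rewriteDataFiles(table)
            .option("use-starting-sequence-number", "true")
            .execute();
      }
    }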
@@ -15,7 +15,7 @@ void CreateEdgeProcessor::process(const cpp2::CreateEdgeReq& req) {
{
// if there is an edge of the same name
// TODO: there exists race condition, we should address it in the future
- folly::SharedMutex::ReadHolder rHolder(LockUtils::edgeLock());
+ folly::SharedMutex::ReadHolder rHolder(LockUtils::tagLock());
auto conflictRet = getTagId(req.get_space_id(), edgeName);
if (conflictRet.ok()) {
LOG(ERROR) << "Failed to create edge `" << edgeName | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "meta/processors/schemaMan/CreateEdgeProcessor.h"
namespace nebula {
namespace meta {
void CreateEdgeProcessor::process(const cpp2::CreateEdgeReq& req) {
CHECK_SPACE_ID_AND_RETURN(req.get_space_id());
auto edgeName = req.get_edge_name();
{
// if there is an edge of the same name
// TODO: there exists race condition, we should address it in the future
folly::SharedMutex::ReadHolder rHolder(LockUtils::edgeLock());
auto conflictRet = getTagId(req.get_space_id(), edgeName);
if (conflictRet.ok()) {
LOG(ERROR) << "Failed to create edge `" << edgeName
<< "': some edge with the same name already exists.";
resp_.set_id(to(conflictRet.value(), EntryType::EDGE));
handleErrorCode(cpp2::ErrorCode::E_CONFLICT);
onFinished();
return;
}
}
folly::SharedMutex::WriteHolder wHolder(LockUtils::edgeLock());
auto ret = getEdgeType(req.get_space_id(), edgeName);
if (ret.ok()) {
if (req.get_if_not_exists()) {
handleErrorCode(cpp2::ErrorCode::SUCCEEDED);
} else {
handleErrorCode(cpp2::ErrorCode::E_EXISTED);
}
resp_.set_id(to(ret.value(), EntryType::EDGE));
onFinished();
return;
}
auto edgeTypeRet = autoIncrementId();
if (!nebula::ok(edgeTypeRet)) {
LOG(ERROR) << "Create edge failed : Get edge type id failed";
handleErrorCode(nebula::error(edgeTypeRet));
onFinished();
return;
}
auto edgeType = nebula::value(edgeTypeRet);
std::vector<kvstore::KV> data;
data.emplace_back(MetaServiceUtils::indexEdgeKey(req.get_space_id(), edgeName),
std::string(reinterpret_cast<const char*>(&edgeType), sizeof(EdgeType)));
data.emplace_back(MetaServiceUtils::schemaEdgeKey(req.get_space_id(), edgeType, 0),
MetaServiceUtils::schemaEdgeVal(edgeName, req.get_schema()));
LOG(INFO) << "Create Edge " << edgeName << ", edgeType " << edgeType;
auto columns = req.get_schema().get_columns();
for (auto& column : columns) {
if (column.__isset.default_value) {
auto name = column.get_name();
auto value = column.get_default_value();
std::string defaultValue;
switch (column.get_type().get_type()) {
case nebula::cpp2::SupportedType::BOOL:
if (value->getType() != nebula::cpp2::Value::Type::bool_value) {
LOG(ERROR) << "Create Edge Failed: " << name
<< " type mismatch";
handleErrorCode(cpp2::ErrorCode::E_CONFLICT);
onFinished();
return;
}
defaultValue = folly::to<std::string>(value->get_bool_value());
break;
case nebula::cpp2::SupportedType::INT:
if (value->getType() != nebula::cpp2::Value::Type::int_value) {
LOG(ERROR) << "Create Edge Failed: " << name
<< " type mismatch";
handleErrorCode(cpp2::ErrorCode::E_CONFLICT);
onFinished();
return;
}
defaultValue = folly::to<std::string>(value->get_int_value());
break;
case nebula::cpp2::SupportedType::DOUBLE:
if (value->getType() != nebula::cpp2::Value::Type::double_value) {
LOG(ERROR) << "Create Edge Failed: " << name
<< " type mismatch";
handleErrorCode(cpp2::ErrorCode::E_CONFLICT);
onFinished();
return;
}
defaultValue = folly::to<std::string>(value->get_double_value());
break;
case nebula::cpp2::SupportedType::STRING:
if (value->getType() != nebula::cpp2::Value::Type::string_value) {
LOG(ERROR) << "Create Edge Failed: " << name
<< " type mismatch";
handleErrorCode(cpp2::ErrorCode::E_CONFLICT);
onFinished();
return;
}
defaultValue = value->get_string_value();
break;
case nebula::cpp2::SupportedType::TIMESTAMP:
if (value->getType() != nebula::cpp2::Value::Type::timestamp) {
LOG(ERROR) << "Create Edge Failed: " << name
<< " type mismatch";
handleErrorCode(cpp2::ErrorCode::E_CONFLICT);
onFinished();
return;
}
defaultValue = folly::to<std::string>(value->get_timestamp());
break;
default:
LOG(ERROR) << "Unknown type " << static_cast<int>(column.get_type().get_type());
handleErrorCode(cpp2::ErrorCode::E_INVALID_PARM);
onFinished();
return;
}
VLOG(3) << "Get Edge Default value: Property Name " << name
<< ", Value " << defaultValue;
auto defaultKey = MetaServiceUtils::defaultKey(req.get_space_id(),
edgeType,
name);
data.emplace_back(std::move(defaultKey), std::move(defaultValue));
}
}
LOG(INFO) << "Create Edge " << edgeName << ", edgeType " << edgeType;
handleErrorCode(cpp2::ErrorCode::SUCCEEDED);
resp_.set_id(to(edgeType, EntryType::EDGE));
doSyncPutAndUpdate(std::move(data));
}
} // namespace meta
} // namespace nebula
| 1 | 27,753 | if there is a tag of the same name? | vesoft-inc-nebula | cpp |
@@ -1,10 +1,8 @@
package net
import (
- "bytes"
"errors"
"fmt"
- "os/exec"
"path/filepath"
"runtime"
| 1 | package net
import (
"bytes"
"errors"
"fmt"
"os/exec"
"path/filepath"
"runtime"
"github.com/vishvananda/netlink"
"github.com/vishvananda/netns"
)
var ErrLinkNotFound = errors.New("Link not found")
// NB: The following function is unsafe, because:
// - It changes a network namespace (netns) of an OS thread which runs
// the function. During execution, the Go runtime might clone a new OS thread
// for scheduling other go-routines, thus they might end up running in
// a "wrong" netns.
// - runtime.LockOSThread does not guarantee that a spawned go-routine on
// the locked thread will be run by it. Thus, the work function is
// not allowed to spawn any go-routine which is dependent on the given netns.
// Please see https://github.com/weaveworks/weave/issues/2388#issuecomment-228365069
// for more details and make sure that you understand the implications before
// using the function!
func WithNetNSUnsafe(ns netns.NsHandle, work func() error) error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
oldNs, err := netns.Get()
if err == nil {
defer oldNs.Close()
err = netns.Set(ns)
if err == nil {
defer netns.Set(oldNs)
err = work()
}
}
return err
}
func WithNetNSLinkUnsafe(ns netns.NsHandle, ifName string, work func(link netlink.Link) error) error {
return WithNetNSUnsafe(ns, func() error {
link, err := netlink.LinkByName(ifName)
if err != nil {
if err.Error() == errors.New("Link not found").Error() {
return ErrLinkNotFound
}
return err
}
return work(link)
})
}
var WeaveUtilCmd = "weaveutil"
// A safe version of WithNetNS* which creates a process executing
// "nsenter --net=<ns-path> weaveutil <cmd> [args]".
func WithNetNS(nsPath string, cmd string, args ...string) ([]byte, error) {
var stdout, stderr bytes.Buffer
args = append([]string{"--net=" + nsPath, WeaveUtilCmd, cmd}, args...)
c := exec.Command("nsenter", args...)
c.Stdout = &stdout
c.Stderr = &stderr
if err := c.Run(); err != nil {
return nil, fmt.Errorf("%s: %s", string(stderr.Bytes()), err)
}
return stdout.Bytes(), nil
}
func WithNetNSByPid(pid int, cmd string, args ...string) ([]byte, error) {
return WithNetNS(NSPathByPid(pid), cmd, args...)
}
func NSPathByPid(pid int) string {
return NSPathByPidWithRoot("/", pid)
}
func NSPathByPidWithRoot(root string, pid int) string {
return filepath.Join(root, fmt.Sprintf("/proc/%d/ns/net", pid))
}
| 1 | 15,494 | I'd add `// +build go1.10` to prevent accidentally compiling with older Go. | weaveworks-weave | go |
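The reviewer's suggestion refers to a Go build constraint: a `// +build` comment placed above the package clause, separated from it by a blank line, keeps the file out of builds done with toolchains older than the named release. A minimal sketch of how the top of the file would then start:

    // +build go1.10

    // The constraint above means "compile this file only with Go 1.10 or newer";
    // the blank line before the package clause is required for it to take effect.
    package net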
@@ -1,10 +1,12 @@
+from __future__ import unicode_literals
+
import numpy as np
import param
from ..core import (HoloMap, DynamicMap, CompositeOverlay, Layout,
GridSpace, NdLayout, Store)
from ..core.util import (match_spec, is_number, wrap_tuple,
- get_overlay_spec, unique_iterator)
+ get_overlay_spec, unique_iterator, safe_unicode)
def displayable(obj): | 1 | import numpy as np
import param
from ..core import (HoloMap, DynamicMap, CompositeOverlay, Layout,
GridSpace, NdLayout, Store)
from ..core.util import (match_spec, is_number, wrap_tuple,
get_overlay_spec, unique_iterator)
def displayable(obj):
"""
Predicate that returns whether the object is displayable or not
(i.e. whether the object obeys the nesting hierarchy)
"""
if isinstance(obj, HoloMap):
return not (obj.type in [Layout, GridSpace, NdLayout])
if isinstance(obj, (GridSpace, Layout, NdLayout)):
for el in obj.values():
if not displayable(el):
return False
return True
return True
class Warning(param.Parameterized): pass
display_warning = Warning(name='Warning')
def collate(obj):
if isinstance(obj, HoloMap):
display_warning.warning("Nesting %ss within a HoloMap makes it difficult "
"to access your data or control how it appears; "
"we recommend calling .collate() on the HoloMap "
"in order to follow the recommended nesting "
"structure shown in the Composing Data tutorial"
"(http://git.io/vtIQh)" % obj.type.__name__)
return obj.collate()
elif isinstance(obj, (Layout, NdLayout)):
try:
display_warning.warning(
"Layout contains HoloMaps which are not nested in the "
"recommended format for accessing your data; calling "
".collate() on these objects will resolve any violations "
"of the recommended nesting presented in the Composing Data "
"tutorial (http://git.io/vqs03)")
expanded = []
for el in obj.values():
if isinstance(el, HoloMap) and not displayable(el):
collated_layout = Layout.from_values(el.collate())
expanded.extend(collated_layout.values())
return Layout(expanded)
except:
raise Exception(undisplayable_info(obj))
else:
raise Exception(undisplayable_info(obj))
def undisplayable_info(obj, html=False):
"Generate helpful message regarding an undisplayable object"
collate = '<tt>collate</tt>' if html else 'collate'
info = "For more information, please consult the Composing Data tutorial (http://git.io/vtIQh)"
if isinstance(obj, HoloMap):
error = "HoloMap of %s objects cannot be displayed." % obj.type.__name__
remedy = "Please call the %s method to generate a displayable object" % collate
elif isinstance(obj, Layout):
error = "Layout containing HoloMaps of Layout or GridSpace objects cannot be displayed."
remedy = "Please call the %s method on the appropriate elements." % collate
elif isinstance(obj, GridSpace):
error = "GridSpace containing HoloMaps of Layouts cannot be displayed."
remedy = "Please call the %s method on the appropriate elements." % collate
if not html:
return '\n'.join([error, remedy, info])
else:
return "<center>{msg}</center>".format(msg=('<br>'.join(
['<b>%s</b>' % error, remedy, '<i>%s</i>' % info])))
def compute_sizes(sizes, size_fn, scaling_factor, scaling_method, base_size):
"""
Scales point sizes according to a scaling factor,
base size and size_fn, which will be applied before
scaling.
"""
if scaling_method == 'area':
pass
elif scaling_method == 'width':
scaling_factor = scaling_factor**2
else:
raise ValueError(
'Invalid value for argument "scaling_method": "{}". '
'Valid values are: "width", "area".'.format(scaling_method))
sizes = size_fn(sizes)
return (base_size*scaling_factor*sizes)
def get_sideplot_ranges(plot, element, main, ranges):
"""
Utility to find the range for an adjoined
plot given the plot, the element, the
Element the plot is adjoined to and the
dictionary of ranges.
"""
key = plot.current_key
dims = element.dimensions(label=True)
dim = dims[1] if dims[1] != 'Frequency' else dims[0]
range_item = main
if isinstance(main, HoloMap):
if issubclass(main.type, CompositeOverlay):
range_item = [hm for hm in main.split_overlays()[1]
if dim in hm.dimensions('all', label=True)][0]
else:
range_item = HoloMap({0: main}, kdims=['Frame'])
ranges = match_spec(range_item.last, ranges)
if dim in ranges:
main_range = ranges[dim]
else:
framewise = plot.lookup_options(range_item.last, 'norm').options.get('framewise')
if framewise and range_item.get(key, False):
main_range = range_item[key].range(dim)
else:
main_range = range_item.range(dim)
# If .main is an NdOverlay or a HoloMap of Overlays get the correct style
if isinstance(range_item, HoloMap):
range_item = range_item.last
if isinstance(range_item, CompositeOverlay):
range_item = [ov for ov in range_item
if dim in ov.dimensions('all', label=True)][0]
return range_item, main_range, dim
def within_range(range1, range2):
"""Checks whether range1 is within the range specified by range2."""
return ((range1[0] is None or range2[0] is None or range1[0] >= range2[0]) and
(range1[1] is None or range2[1] is None or range1[1] <= range2[1]))
def validate_sampled_mode(holomaps, dynmaps):
composite = HoloMap(enumerate(holomaps), kdims=['testing_kdim'])
holomap_kdims = set(unique_iterator([kd.name for dm in holomaps for kd in dm.kdims]))
hmranges = {d: composite.range(d) for d in holomap_kdims}
if any(not set(d.name for d in dm.kdims) <= holomap_kdims
for dm in dynmaps):
raise Exception('In sampled mode DynamicMap key dimensions must be a '
'subset of dimensions of the HoloMap(s) defining the sampling.')
elif not all(within_range(hmrange, dm.range(d)) for dm in dynmaps
for d, hmrange in hmranges.items() if d in dm.kdims):
raise Exception('HoloMap(s) have keys outside the ranges specified on '
'the DynamicMap(s).')
def get_dynamic_mode(composite):
"Returns the common mode of the dynamic maps in given composite object"
dynmaps = composite.traverse(lambda x: x, [DynamicMap])
holomaps = composite.traverse(lambda x: x, ['HoloMap'])
dynamic_modes = [m.call_mode for m in dynmaps]
dynamic_sampled = any(m.sampled for m in dynmaps)
if holomaps:
validate_sampled_mode(holomaps, dynmaps)
elif dynamic_sampled and not holomaps:
raise Exception("DynamicMaps in sampled mode must be displayed alongside "
"a HoloMap to define the sampling.")
if len(set(dynamic_modes)) > 1:
raise Exception("Cannot display composites of DynamicMap objects "
"with different interval modes (i.e open or bounded mode).")
elif dynamic_modes and not holomaps:
return 'bounded' if dynamic_modes[0] == 'key' else 'open', dynamic_sampled
else:
return None, dynamic_sampled
def initialize_sampled(obj, dimensions, key):
"""
Initializes any DynamicMaps in sampled mode.
"""
select = dict(zip([d.name for d in dimensions], key))
try:
obj.select([DynamicMap], **select)
except KeyError:
pass
def save_frames(obj, filename, fmt=None, backend=None, options=None):
"""
Utility to export object to files frame by frame, numbered individually.
Will use default backend and figure format by default.
"""
backend = Store.current_backend if backend is None else backend
renderer = Store.renderers[backend]
fmt = renderer.params('fig').objects[0] if fmt is None else fmt
plot = renderer.get_plot(obj)
for i in range(len(plot)):
plot.update(i)
renderer.save(plot, '%s_%s' % (filename, i), fmt=fmt, options=options)
def dynamic_update(plot, subplot, key, overlay, items):
"""
Given a plot, subplot and dynamically generated (Nd)Overlay
find the closest matching Element for that plot.
"""
match_spec = get_overlay_spec(overlay,
wrap_tuple(key),
subplot.current_frame)
specs = [(i, get_overlay_spec(overlay, wrap_tuple(k), el))
for i, (k, el) in enumerate(items)]
return closest_match(match_spec, specs)
def closest_match(match, specs, depth=0):
"""
Recursively iterates over type, group, label and overlay key,
finding the closest matching spec.
"""
new_specs = []
match_lengths = []
for i, spec in specs:
if spec[0] == match[0]:
new_specs.append((i, spec[1:]))
else:
if is_number(match[0]) and is_number(spec[0]):
match_length = -abs(match[0]-spec[0])
elif all(isinstance(s[0], basestring) for s in [spec, match]):
match_length = max(i for i in range(len(match[0]))
if match[0].startswith(spec[0][:i]))
else:
match_length = 0
match_lengths.append((i, match_length, spec[0]))
if len(new_specs) == 1:
return new_specs[0][0]
elif new_specs:
depth = depth+1
return closest_match(match[1:], new_specs, depth)
else:
if depth == 0 or not match_lengths:
return None
else:
return sorted(match_lengths, key=lambda x: -x[1])[0][0]
def map_colors(arr, crange, cmap, hex=True):
"""
Maps an array of values to RGB hex strings, given
a color range and colormap.
"""
if crange:
cmin, cmax = crange
else:
cmin, cmax = np.nanmin(arr), np.nanmax(arr)
arr = (arr - cmin) / (cmax-cmin)
arr = np.ma.array(arr, mask=np.logical_not(np.isfinite(arr)))
arr = cmap(arr)
if hex:
arr *= 255
return ["#{0:02x}{1:02x}{2:02x}".format(*(int(v) for v in c[:-1]))
for c in arr]
else:
return arr
| 1 | 14,548 | Not sure how this import relates to the PR but seems like a good idea anyway... | holoviz-holoviews | py |
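For background on the `from __future__ import unicode_literals` line the reviewer is reacting to: under Python 2 it makes bare string literals `unicode` instead of byte strings, which is why it usually travels together with helpers such as `safe_unicode`. A generic illustration, not taken from this codebase:

    # -*- coding: utf-8 -*-
    from __future__ import unicode_literals

    # With the future import, this literal is `unicode` on Python 2 and `str` on
    # Python 3, so non-ASCII labels behave the same on both interpreters.
    label = 'σ-range'
    print(type(label).__name__)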
@@ -5,6 +5,7 @@
#include "../CustomBuild/ESPEasyLimits.h"
#include "../DataStructs/DeviceStruct.h"
#include "../../ESPEasy_common.h"
+#include "../WebServer/HardwarePage.h"
template<unsigned int N_TASKS>
SettingsStruct_tmpl<N_TASKS>::SettingsStruct_tmpl() : ResetFactoryDefaultPreference(0) { | 1 | #include "../DataStructs/SettingsStruct.h"
#include "../Globals/Plugins.h"
#include "../Globals/CPlugins.h"
#include "../CustomBuild/ESPEasyLimits.h"
#include "../DataStructs/DeviceStruct.h"
#include "../../ESPEasy_common.h"
template<unsigned int N_TASKS>
SettingsStruct_tmpl<N_TASKS>::SettingsStruct_tmpl() : ResetFactoryDefaultPreference(0) {
clearAll();
clearNetworkSettings();
}
// VariousBits1 defaults to 0, keep in mind when adding bit lookups.
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::appendUnitToHostname() const {
return !bitRead(VariousBits1, 1);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::appendUnitToHostname(bool value) {
bitWrite(VariousBits1, 1, !value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::uniqueMQTTclientIdReconnect_unused() const {
return bitRead(VariousBits1, 2);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::uniqueMQTTclientIdReconnect_unused(bool value) {
bitWrite(VariousBits1, 2, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::OldRulesEngine() const {
#ifdef WEBSERVER_NEW_RULES
return !bitRead(VariousBits1, 3);
#else
return true;
#endif
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::OldRulesEngine(bool value) {
bitWrite(VariousBits1, 3, !value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::ForceWiFi_bg_mode() const {
return bitRead(VariousBits1, 4);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::ForceWiFi_bg_mode(bool value) {
bitWrite(VariousBits1, 4, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::WiFiRestart_connection_lost() const {
return bitRead(VariousBits1, 5);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::WiFiRestart_connection_lost(bool value) {
bitWrite(VariousBits1, 5, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::EcoPowerMode() const {
return bitRead(VariousBits1, 6);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::EcoPowerMode(bool value) {
bitWrite(VariousBits1, 6, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::WifiNoneSleep() const {
return bitRead(VariousBits1, 7);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::WifiNoneSleep(bool value) {
bitWrite(VariousBits1, 7, value);
}
// Enable send gratuitous ARP by default, so invert the values (default = 0)
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::gratuitousARP() const {
return !bitRead(VariousBits1, 8);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::gratuitousARP(bool value) {
bitWrite(VariousBits1, 8, !value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::TolerantLastArgParse() const {
return bitRead(VariousBits1, 9);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::TolerantLastArgParse(bool value) {
bitWrite(VariousBits1, 9, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::SendToHttp_ack() const {
return bitRead(VariousBits1, 10);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::SendToHttp_ack(bool value) {
bitWrite(VariousBits1, 10, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::UseESPEasyNow() const {
return bitRead(VariousBits1, 11);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::UseESPEasyNow(bool value) {
bitWrite(VariousBits1, 11, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::IncludeHiddenSSID() const {
return bitRead(VariousBits1, 12);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::IncludeHiddenSSID(bool value) {
bitWrite(VariousBits1, 12, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::UseMaxTXpowerForSending() const {
return bitRead(VariousBits1, 13);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::UseMaxTXpowerForSending(bool value) {
bitWrite(VariousBits1, 13, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::ApDontForceSetup() const {
return bitRead(VariousBits1, 14);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::ApDontForceSetup(bool value) {
bitWrite(VariousBits1, 14, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::PeriodicalScanWiFi() const {
// Invert to enable it by default
return !bitRead(VariousBits1, 15);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::PeriodicalScanWiFi(bool value) {
// Invert to enable it by default
bitWrite(VariousBits1, 15, !value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::JSONBoolWithoutQuotes() const {
return bitRead(VariousBits1, 16);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::JSONBoolWithoutQuotes(bool value) {
bitWrite(VariousBits1, 16, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::CombineTaskValues_SingleEvent(taskIndex_t taskIndex) const {
if (validTaskIndex(taskIndex)) {
return bitRead(TaskDeviceSendDataFlags[taskIndex], 0);
}
return false;
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::CombineTaskValues_SingleEvent(taskIndex_t taskIndex, bool value) {
if (validTaskIndex(taskIndex)) {
bitWrite(TaskDeviceSendDataFlags[taskIndex], 0, value);
}
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::DoNotStartAP() const {
return bitRead(VariousBits1, 17);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::DoNotStartAP(bool value) {
bitWrite(VariousBits1, 17, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::UseAlternativeDeepSleep() const {
return bitRead(VariousBits1, 18);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::UseAlternativeDeepSleep(bool value) {
bitWrite(VariousBits1, 18, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::UseLastWiFiFromRTC() const {
return bitRead(VariousBits1, 19);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::UseLastWiFiFromRTC(bool value) {
bitWrite(VariousBits1, 19, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::EnableTimingStats() const {
return bitRead(VariousBits1, 20);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::EnableTimingStats(bool value) {
bitWrite(VariousBits1, 20, value);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::AllowTaskValueSetAllPlugins() const {
return bitRead(VariousBits1, 21);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::AllowTaskValueSetAllPlugins(bool value) {
bitWrite(VariousBits1, 21, value);
}
template<unsigned int N_TASKS>
ExtTimeSource_e SettingsStruct_tmpl<N_TASKS>::ExtTimeSource() const {
return static_cast<ExtTimeSource_e>(ExternalTimeSource >> 1);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::ExtTimeSource(ExtTimeSource_e value) {
uint8_t newValue = static_cast<uint8_t>(value) << 1;
if (UseNTP()) {
newValue += 1;
}
ExternalTimeSource = newValue;
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::UseNTP() const {
return bitRead(ExternalTimeSource, 0);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::UseNTP(bool value) {
bitWrite(ExternalTimeSource, 0, value);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::validate() {
if (UDPPort > 65535) { UDPPort = 0; }
if ((Latitude < -90.0f) || (Latitude > 90.0f)) { Latitude = 0.0f; }
if ((Longitude < -180.0f) || (Longitude > 180.0f)) { Longitude = 0.0f; }
if (VariousBits1 > (1 << 30)) { VariousBits1 = 0; }
ZERO_TERMINATE(Name);
ZERO_TERMINATE(NTPHost);
if ((I2C_clockSpeed == 0) || (I2C_clockSpeed > 3400000)) { I2C_clockSpeed = DEFAULT_I2C_CLOCK_SPEED; }
if (WebserverPort == 0) { WebserverPort = 80;}
if (SyslogPort == 0) { SyslogPort = 514; }
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::networkSettingsEmpty() const {
return IP[0] == 0 && Gateway[0] == 0 && Subnet[0] == 0 && DNS[0] == 0;
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearNetworkSettings() {
for (uint8_t i = 0; i < 4; ++i) {
IP[i] = 0;
Gateway[i] = 0;
Subnet[i] = 0;
DNS[i] = 0;
ETH_IP[i] = 0;
ETH_Gateway[i] = 0;
ETH_Subnet[i] = 0;
ETH_DNS[i] = 0;
}
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearTimeSettings() {
ExternalTimeSource = 0;
ZERO_FILL(NTPHost);
TimeZone = 0;
DST = false;
DST_Start = 0;
DST_End = 0;
Latitude = 0.0f;
Longitude = 0.0f;
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearNotifications() {
for (uint8_t i = 0; i < NOTIFICATION_MAX; ++i) {
Notification[i] = 0;
NotificationEnabled[i] = false;
}
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearControllers() {
for (controllerIndex_t i = 0; i < CONTROLLER_MAX; ++i) {
Protocol[i] = 0;
ControllerEnabled[i] = false;
}
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearTasks() {
for (taskIndex_t task = 0; task < N_TASKS; ++task) {
clearTask(task);
}
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearLogSettings() {
SyslogLevel = 0;
SerialLogLevel = 0;
WebLogLevel = 0;
SDLogLevel = 0;
SyslogFacility = DEFAULT_SYSLOG_FACILITY;
for (uint8_t i = 0; i < 4; ++i) { Syslog_IP[i] = 0; }
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearUnitNameSettings() {
Unit = 0;
ZERO_FILL(Name);
UDPPort = 0;
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearMisc() {
PID = 0;
Version = 0;
Build = 0;
IP_Octet = 0;
Delay = 0;
Pin_i2c_sda = DEFAULT_PIN_I2C_SDA;
Pin_i2c_scl = DEFAULT_PIN_I2C_SCL;
Pin_status_led = DEFAULT_PIN_STATUS_LED;
Pin_sd_cs = -1;
ETH_Phy_Addr = DEFAULT_ETH_PHY_ADDR;
ETH_Pin_mdc = DEFAULT_ETH_PIN_MDC;
ETH_Pin_mdio = DEFAULT_ETH_PIN_MDIO;
ETH_Pin_power = DEFAULT_ETH_PIN_POWER;
ETH_Phy_Type = DEFAULT_ETH_PHY_TYPE;
ETH_Clock_Mode = DEFAULT_ETH_CLOCK_MODE;
NetworkMedium = DEFAULT_NETWORK_MEDIUM;
I2C_clockSpeed_Slow = DEFAULT_I2C_CLOCK_SPEED_SLOW;
I2C_Multiplexer_Type = I2C_MULTIPLEXER_NONE;
I2C_Multiplexer_Addr = -1;
for (taskIndex_t x = 0; x < TASKS_MAX; x++) {
I2C_Multiplexer_Channel[x] = -1;
}
I2C_Multiplexer_ResetPin = -1;
{
// Here we initialize all data to 0, so this is the ONLY reason why PinBootStates
// can now be directly accessed.
// In all other use cases, use the get and set functions for it.
constexpr uint8_t maxStates = sizeof(PinBootStates) / sizeof(PinBootStates[0]);
for (uint8_t i = 0; i < maxStates; ++i) {
PinBootStates[i] = 0;
}
#ifdef ESP32
constexpr uint8_t maxStatesesp32 = sizeof(PinBootStates_ESP32) / sizeof(PinBootStates_ESP32[0]);
for (uint8_t i = 0; i < maxStatesesp32; ++i) {
PinBootStates_ESP32[i] = 0;
}
#endif
}
BaudRate = 0;
MessageDelay_unused = 0;
deepSleep_wakeTime = 0;
CustomCSS = false;
WDI2CAddress = 0;
UseRules = false;
UseSerial = true;
UseSSDP = false;
WireClockStretchLimit = 0;
I2C_clockSpeed = 400000;
WebserverPort = 80;
SyslogPort = 514;
GlobalSync = false;
ConnectionFailuresThreshold = 0;
MQTTRetainFlag_unused = false;
InitSPI = DEFAULT_SPI;
Pin_status_led_Inversed = false;
deepSleepOnFail = false;
UseValueLogger = false;
ArduinoOTAEnable = false;
UseRTOSMultitasking = false;
Pin_Reset = -1;
StructSize = sizeof(SettingsStruct_tmpl<N_TASKS>);
MQTTUseUnitNameAsClientId_unused = 0;
VariousBits1 = 0;
OldRulesEngine(DEFAULT_RULES_OLDENGINE);
ForceWiFi_bg_mode(DEFAULT_WIFI_FORCE_BG_MODE);
WiFiRestart_connection_lost(DEFAULT_WIFI_RESTART_WIFI_CONN_LOST);
EcoPowerMode(DEFAULT_ECO_MODE);
WifiNoneSleep(DEFAULT_WIFI_NONE_SLEEP);
gratuitousARP(DEFAULT_GRATUITOUS_ARP);
TolerantLastArgParse(DEFAULT_TOLERANT_LAST_ARG_PARSE);
SendToHttp_ack(DEFAULT_SEND_TO_HTTP_ACK);
ApDontForceSetup(DEFAULT_AP_DONT_FORCE_SETUP);
DoNotStartAP(DEFAULT_DONT_ALLOW_START_AP);
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearAll() {
clearMisc();
clearTimeSettings();
clearNetworkSettings();
clearNotifications();
clearControllers();
clearTasks();
clearLogSettings();
clearUnitNameSettings();
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::clearTask(taskIndex_t task) {
if (task >= N_TASKS) { return; }
for (controllerIndex_t i = 0; i < CONTROLLER_MAX; ++i) {
TaskDeviceID[i][task] = 0;
TaskDeviceSendData[i][task] = false;
}
TaskDeviceNumber[task] = 0;
OLD_TaskDeviceID[task] = 0; // UNUSED: this can be removed
TaskDevicePin1[task] = -1;
TaskDevicePin2[task] = -1;
TaskDevicePin3[task] = -1;
TaskDevicePort[task] = 0;
TaskDevicePin1PullUp[task] = false;
for (uint8_t cv = 0; cv < PLUGIN_CONFIGVAR_MAX; ++cv) {
TaskDevicePluginConfig[task][cv] = 0;
}
TaskDevicePin1Inversed[task] = false;
for (uint8_t cv = 0; cv < PLUGIN_CONFIGFLOATVAR_MAX; ++cv) {
TaskDevicePluginConfigFloat[task][cv] = 0.0f;
}
for (uint8_t cv = 0; cv < PLUGIN_CONFIGLONGVAR_MAX; ++cv) {
TaskDevicePluginConfigLong[task][cv] = 0;
}
TaskDeviceSendDataFlags[task] = 0;
OLD_TaskDeviceGlobalSync[task]= 0;
TaskDeviceDataFeed[task] = 0;
TaskDeviceTimer[task] = 0;
TaskDeviceEnabled[task] = false;
I2C_Multiplexer_Channel[task] = -1;
}
template<unsigned int N_TASKS>
String SettingsStruct_tmpl<N_TASKS>::getHostname() const {
return this->getHostname(this->appendUnitToHostname());
}
template<unsigned int N_TASKS>
String SettingsStruct_tmpl<N_TASKS>::getHostname(bool appendUnit) const {
String hostname = this->Name;
if ((this->Unit != 0) && appendUnit) { // only append non-zero unit number
hostname += '_';
hostname += this->Unit;
}
return hostname;
}
template<unsigned int N_TASKS>
PinBootState SettingsStruct_tmpl<N_TASKS>::getPinBootState(uint8_t gpio_pin) const {
constexpr uint8_t maxStates = sizeof(PinBootStates) / sizeof(PinBootStates[0]);
if (gpio_pin < maxStates) {
return static_cast<PinBootState>(PinBootStates[gpio_pin]);
}
#ifdef ESP32
constexpr uint8_t maxStatesesp32 = sizeof(PinBootStates_ESP32) / sizeof(PinBootStates_ESP32[0]);
const uint8_t addr = gpio_pin - maxStates;
if (addr < maxStatesesp32) {
return static_cast<PinBootState>(PinBootStates_ESP32[addr]);
}
#endif
return PinBootState::Default_state;
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::setPinBootState(uint8_t gpio_pin, PinBootState state) {
constexpr uint8_t maxStates = sizeof(PinBootStates) / sizeof(PinBootStates[0]);
if (gpio_pin < maxStates) {
PinBootStates[gpio_pin] = static_cast<int8_t>(state);
}
#ifdef ESP32
constexpr uint8_t maxStatesesp32 = sizeof(PinBootStates_ESP32) / sizeof(PinBootStates_ESP32[0]);
const uint8_t addr = gpio_pin - maxStates;
if (addr < maxStatesesp32) {
PinBootStates_ESP32[addr] = static_cast<int8_t>(state);
}
#endif
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::getSPI_pins(int8_t spi_gpios[3]) const {
spi_gpios[0] = -1;
spi_gpios[1] = -1;
spi_gpios[2] = -1;
if (InitSPI > 0) {
# ifdef ESP32
switch (InitSPI) {
case 1:
{
spi_gpios[0] = 18; spi_gpios[1] = 19; spi_gpios[2] = 23;
break;
}
case 2:
{
spi_gpios[0] = 14; // HSPI_SCLK
spi_gpios[1] = 12; // HSPI_MISO
spi_gpios[2] = 13; // HSPI_MOSI
break;
}
default:
return false;
}
# endif // ifdef ESP32
# ifdef ESP8266
spi_gpios[0] = 14; spi_gpios[1] = 12; spi_gpios[2] = 13;
# endif // ifdef ESP8266
return true;
}
return false;
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::isSPI_pin(int8_t pin) const {
if (pin < 0) return false;
int8_t spi_gpios[3];
if (getSPI_pins(spi_gpios)) {
for (uint8_t i = 0; i < 3; ++i) {
if (spi_gpios[i] == pin) return true;
}
}
return false;
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::isI2C_pin(int8_t pin) const {
if (pin < 0) return false;
return Pin_i2c_sda == pin || Pin_i2c_scl == pin;
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::isI2CEnabled() const {
return (Pin_i2c_sda != -1) &&
(Pin_i2c_scl != -1) &&
(I2C_clockSpeed > 0) &&
(I2C_clockSpeed_Slow > 0);
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::isEthernetPin(int8_t pin) const {
#ifdef HAS_ETHERNET
if (pin < 0) return false;
if (NetworkMedium == NetworkMedium_t::Ethernet) {
if (19 == pin) return true; // ETH TXD0
if (21 == pin) return true; // ETH TX EN
if (22 == pin) return true; // ETH TXD1
if (25 == pin) return true; // ETH RXD0
if (26 == pin) return true; // ETH RXD1
if (27 == pin) return true; // ETH CRS_DV
}
#endif
return false;
}
template<unsigned int N_TASKS>
bool SettingsStruct_tmpl<N_TASKS>::isEthernetPinOptional(int8_t pin) const {
#ifdef HAS_ETHERNET
if (pin < 0) return false;
if (NetworkMedium == NetworkMedium_t::Ethernet) {
if (ETH_Pin_mdc == pin) return true;
if (ETH_Pin_mdio == pin) return true;
if (ETH_Pin_power == pin) return true;
}
#endif
return false;
}
template<unsigned int N_TASKS>
int8_t SettingsStruct_tmpl<N_TASKS>::getTaskDevicePin(taskIndex_t taskIndex, uint8_t pinnr) const {
if (validTaskIndex(taskIndex)) {
switch(pinnr) {
case 1: return TaskDevicePin1[taskIndex];
case 2: return TaskDevicePin2[taskIndex];
case 3: return TaskDevicePin3[taskIndex];
}
}
return -1;
}
template<unsigned int N_TASKS>
float SettingsStruct_tmpl<N_TASKS>::getWiFi_TX_power() const {
return WiFi_TX_power / 4.0f;
}
template<unsigned int N_TASKS>
void SettingsStruct_tmpl<N_TASKS>::setWiFi_TX_power(float dBm) {
WiFi_TX_power = dBm * 4.0f;
}
| 1 | 22,639 | Hmm, this is strange: why should the SettingsStruct import something from `WebServer`? If that's really needed, then we must move things around, as it makes no sense to have code related to viewing things included in a settings struct. It should be the other way around. | letscontrolit-ESPEasy | cpp |
@@ -4,13 +4,16 @@
using System;
using Microsoft.AspNet.Builder;
using Microsoft.AspNet.Http;
+using Microsoft.Framework.Logging;
namespace SampleApp
{
public class Startup
{
- public void Configure(IApplicationBuilder app)
+ public void Configure(IApplicationBuilder app, ILoggerFactory loggerFactory)
{
+ loggerFactory.MinimumLevel = LogLevel.Debug;
+ loggerFactory.AddConsole(LogLevel.Debug);
app.Run(context =>
{
Console.WriteLine("{0} {1}{2}{3}", | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using Microsoft.AspNet.Builder;
using Microsoft.AspNet.Http;
namespace SampleApp
{
public class Startup
{
public void Configure(IApplicationBuilder app)
{
app.Run(context =>
{
Console.WriteLine("{0} {1}{2}{3}",
context.Request.Method,
context.Request.PathBase,
context.Request.Path,
context.Request.QueryString);
context.Response.ContentLength = 11;
context.Response.ContentType = "text/plain";
return context.Response.WriteAsync("Hello world");
});
}
}
}
| 1 | 5,748 | nit: Space between these lines. | aspnet-KestrelHttpServer | .cs |
@@ -353,7 +353,7 @@ class ResultsProvider(object):
:type listeners: list[AggregatorListener]
"""
- def __init__(self):
+ def __init__(self, translator=None):
super(ResultsProvider, self).__init__()
self.cumulative = BetterDict()
self.track_percentiles = [] | 1 | """
Aggregating results into DataPoints
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import logging
import math
import re
from abc import abstractmethod
from collections import Counter
from bzt.engine import EngineModule
from bzt.six import iteritems
from bzt.utils import BetterDict, dehumanize_time
class KPISet(BetterDict):
"""
Main entity in results, contains all KPIs for single label,
capable of merging other KPISet's into it to compose cumulative results
"""
ERRORS = "errors"
SAMPLE_COUNT = "throughput"
CONCURRENCY = "concurrency"
SUCCESSES = "succ"
FAILURES = "fail"
RESP_TIMES = "rt"
AVG_RESP_TIME = "avg_rt"
STDEV_RESP_TIME = "stdev_rt"
AVG_LATENCY = "avg_lt"
AVG_CONN_TIME = "avg_ct"
PERCENTILES = "perc"
RESP_CODES = "rc"
ERRTYPE_ERROR = 0
ERRTYPE_ASSERT = 1
def __init__(self, perc_levels=()):
super(KPISet, self).__init__()
self.sum_rt = 0
self.sum_lt = 0
self.sum_cn = 0
self.perc_levels = perc_levels
# scalars
self.get(self.SAMPLE_COUNT, 0)
self.get(self.CONCURRENCY, 0)
self.get(self.SUCCESSES, 0)
self.get(self.FAILURES, 0)
self.get(self.AVG_RESP_TIME, 0)
self.get(self.STDEV_RESP_TIME, 0)
self.get(self.AVG_LATENCY, 0)
self.get(self.AVG_CONN_TIME, 0)
# vectors
self.get(self.ERRORS, [])
self.get(self.RESP_TIMES, Counter())
self.get(self.RESP_CODES, Counter())
self.get(self.PERCENTILES)
self._concurrencies = BetterDict() # NOTE: shouldn't it be Counter?
def __deepcopy__(self, memo):
mycopy = KPISet(self.perc_levels)
mycopy.sum_rt = self.sum_rt
mycopy.sum_lt = self.sum_lt
mycopy.sum_cn = self.sum_cn
for key, val in iteritems(self):
mycopy[key] = copy.deepcopy(val, memo)
return mycopy
@staticmethod
def error_item_skel(error, ret_c, cnt, errtype, urls):
"""
:type error: str
:type ret_c: str
:type cnt: int
:type errtype: int
:type urls: Counter
:rtype: dict
"""
return {
"cnt": cnt,
"msg": error,
"rc": ret_c,
"type": errtype,
"urls": urls
}
def add_sample(self, sample):
"""
Add sample, consisting of: cnc, rt, cn, lt, rc, error, trname
:type sample: tuple
"""
# TODO: introduce a flag to not count failed in resp times? or offer it always?
cnc, r_time, con_time, latency, r_code, error, trname = sample
self[self.SAMPLE_COUNT] += 1
if cnc:
self._concurrencies[trname] = cnc
if r_code is not None:
self[self.RESP_CODES][r_code] += 1
# count times only if we have RCs
if con_time:
self.sum_cn += con_time
self.sum_lt += latency
self.sum_rt += r_time
if error is not None:
self[self.FAILURES] += 1
item = self.error_item_skel(error, r_code, 1, KPISet.ERRTYPE_ERROR, Counter())
self.inc_list(self[self.ERRORS], ("msg", error), item)
else:
self[self.SUCCESSES] += 1
self[self.RESP_TIMES][r_time] += 1
# TODO: max/min rt? there is percentiles...
# TODO: throughput if interval is not 1s
@staticmethod
def inc_list(values, selector, value):
"""
Increment list item, based on selector criteria
:param values: list to update
:type values: list
:param selector: tuple of 2 values, field name and value to match
:type selector: tuple
:param value: dict to put into list
:type value: dict
"""
found = False
for item in values:
if item[selector[0]] == selector[1]:
item['cnt'] += value['cnt']
item['urls'] += value['urls']
found = True
break
if not found:
values.append(value)
def recalculate(self):
"""
Recalculate averages, stdev and percentiles
:return:
"""
if self[self.SAMPLE_COUNT]:
self[self.AVG_CONN_TIME] = self.sum_cn / self[self.SAMPLE_COUNT]
self[self.AVG_LATENCY] = self.sum_lt / self[self.SAMPLE_COUNT]
self[self.AVG_RESP_TIME] = self.sum_rt / self[self.SAMPLE_COUNT]
if len(self._concurrencies):
self[self.CONCURRENCY] = sum(self._concurrencies.values())
perc, stdev = self.__perc_and_stdev(self[self.RESP_TIMES], self.perc_levels, self[self.AVG_RESP_TIME])
for level, val in perc:
self[self.PERCENTILES][str(float(level))] = val
self[self.STDEV_RESP_TIME] = stdev
return self
def merge_kpis(self, src, sid=None):
"""
Merge other instance into self
:param sid: source ID to use when summing up concurrency
:type src: KPISet
:return:
"""
src.recalculate()
self.sum_cn += src.sum_cn
self.sum_lt += src.sum_lt
self.sum_rt += src.sum_rt
self[self.SAMPLE_COUNT] += src[self.SAMPLE_COUNT]
self[self.SUCCESSES] += src[self.SUCCESSES]
self[self.FAILURES] += src[self.FAILURES]
# NOTE: should it be average? mind the timestamp gaps
if src[self.CONCURRENCY]:
self._concurrencies[sid] = src[self.CONCURRENCY]
if src[self.RESP_TIMES]:
# using raw times to calculate percentiles
self[self.RESP_TIMES].update(src[self.RESP_TIMES])
elif not self[self.PERCENTILES]:
# using existing percentiles
# FIXME: it's not valid to overwrite, better take average
self[self.PERCENTILES] = copy.deepcopy(src[self.PERCENTILES])
self[self.RESP_CODES].update(src[self.RESP_CODES])
for src_item in src[self.ERRORS]:
self.inc_list(self[self.ERRORS], ('msg', src_item['msg']), src_item)
@staticmethod
def from_dict(obj):
"""
:type obj: dict
:rtype: KPISet
"""
inst = KPISet()
for key, val in iteritems(obj):
inst[key] = val
inst.sum_cn = obj[inst.AVG_CONN_TIME] * obj[inst.SAMPLE_COUNT]
inst.sum_lt = obj[inst.AVG_LATENCY] * obj[inst.SAMPLE_COUNT]
inst.sum_rt = obj[inst.AVG_RESP_TIME] * obj[inst.SAMPLE_COUNT]
inst.perc_levels = [float(x) for x in inst[inst.PERCENTILES].keys()]
inst[inst.RESP_TIMES] = {float(level): inst[inst.RESP_TIMES][level] for level in inst[inst.RESP_TIMES].keys()}
for error in inst[KPISet.ERRORS]:
error['urls'] = Counter(error['urls'])
return inst
@staticmethod
def __perc_and_stdev(cnts_dict, percentiles_to_calc=(), avg=0):
"""
from http://stackoverflow.com/questions/25070086/percentiles-from-counts-of-values
Returns [(percentile, value)] with nearest rank percentiles.
Percentile 0: <min_value>, 100: <max_value>.
cnts_dict: { <value>: <count> }
percentiles_to_calc: iterable for percentiles to calculate; 0 <= ~ <= 100
Note: stdev calculation is folded in here so both values are computed in a single pass, for efficiency
:type percentiles_to_calc: list(float)
:type cnts_dict: collections.Counter
"""
assert all(0 <= percentile <= 100 for percentile in percentiles_to_calc)
percentiles = []
if not cnts_dict:
return percentiles, 0
num = sum(cnts_dict.values())
cnts = sorted(cnts_dict.items())
curr_cnts_pos = 0 # current position in cnts
curr_pos = cnts[0][1] # sum of freqs up to current_cnts_pos
sqr_diffs = 0
for percentile in sorted(percentiles_to_calc):
if percentile < 100:
percentile_pos = percentile / 100.0 * num
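# walk the sorted (value, count) pairs until the cumulative count reaches this percentile's rank, accumulating squared deviations for the stdev along the way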
while curr_pos <= percentile_pos and curr_cnts_pos < len(cnts):
sqr_diffs += cnts[curr_cnts_pos][1] * math.pow(cnts[curr_cnts_pos][0] - avg, 2)
curr_cnts_pos += 1
curr_pos += cnts[curr_cnts_pos][1]
percentiles.append((percentile, cnts[curr_cnts_pos][0]))
else:
percentiles.append((percentile, cnts[-1][0])) # we could add a small value
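# for the 100th percentile, drain the remaining buckets so the stdev sum covers every value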
while curr_cnts_pos < len(cnts):
sqr_diffs += cnts[curr_cnts_pos][1] * math.pow(cnts[curr_cnts_pos][0] - avg, 2)
curr_cnts_pos += 1
stdev = math.sqrt(sqr_diffs / len(cnts))
return percentiles, stdev
class DataPoint(BetterDict):
"""
Represents an aggregate data point
:param ts: timestamp of this point
"""
SOURCE_ID = 'id'
TIMESTAMP = "ts"
CURRENT = "current"
CUMULATIVE = "cumulative"
SUBRESULTS = "subresults"
def __init__(self, ts, perc_levels=()):
"""
:type ts: int
:type perc_levels: list[float]
"""
super(DataPoint, self).__init__()
self.perc_levels = perc_levels
self[self.SOURCE_ID] = None
self[self.TIMESTAMP] = ts
self[self.CUMULATIVE] = BetterDict()
self[self.CURRENT] = BetterDict()
self[self.SUBRESULTS] = []
def __deepcopy__(self, memo):
new = DataPoint(self[self.TIMESTAMP], self.perc_levels)
for key in self.keys():
new[key] = copy.deepcopy(self[key])
return new
def __merge_kpis(self, src, dst, sid):
"""
:param src: KPISet
:param dst: KPISet
:param sid: int
:return:
"""
for label, val in iteritems(src):
dest = dst.get(label, KPISet(self.perc_levels))
if not isinstance(val, KPISet):
val = KPISet.from_dict(val)
val.perc_levels = self.perc_levels
dest.merge_kpis(val, sid)
def recalculate(self):
"""
Recalculate all KPISet's
"""
for val in self[self.CURRENT].values():
val.recalculate()
for val in self[self.CUMULATIVE].values():
val.recalculate()
def merge_point(self, src):
"""
:type src: DataPoint
"""
if self[self.TIMESTAMP] != src[self.TIMESTAMP]:
self.log.warning("Tried to merge data for %s and %s", self[self.TIMESTAMP], src[self.TIMESTAMP])
raise ValueError("Cannot merge different timestamps")
self[DataPoint.SUBRESULTS].append(src)
self.__merge_kpis(src[self.CURRENT], self[self.CURRENT], src[DataPoint.SOURCE_ID])
self.__merge_kpis(src[self.CUMULATIVE], self[self.CUMULATIVE], src[DataPoint.SOURCE_ID])
self.recalculate()
class ResultsProvider(object):
"""
:type listeners: list[AggregatorListener]
"""
def __init__(self):
super(ResultsProvider, self).__init__()
self.cumulative = BetterDict()
self.track_percentiles = []
self.listeners = []
self.buffer_len = 2
self.min_buffer_len = 2
self.max_buffer_len = float('inf')
self.buffer_multiplier = 2
self.buffer_scale_idx = None
def add_listener(self, listener):
"""
Add aggregate results listener
:type listener: AggregatorListener
"""
self.listeners.append(listener)
def __merge_to_cumulative(self, current):
"""
Merge current KPISet to cumulative
:param current: KPISet
"""
for label, data in iteritems(current):
cumul = self.cumulative.get(label, KPISet(self.track_percentiles))
cumul.merge_kpis(data)
cumul.recalculate()
def datapoints(self, final_pass=False):
"""
Generator object that returns datapoints from the reader
:type final_pass: bool
"""
for datapoint in self._calculate_datapoints(final_pass):
current = datapoint[DataPoint.CURRENT]
self.__merge_to_cumulative(current)
datapoint[DataPoint.CUMULATIVE] = copy.deepcopy(self.cumulative)
datapoint.recalculate()
for listener in self.listeners:
listener.aggregated_second(datapoint)
yield datapoint
@abstractmethod
def _calculate_datapoints(self, final_pass=False):
"""
:rtype : tuple
"""
pass
class ResultsReader(ResultsProvider):
"""
Aggregator that reads samples one by one,
supposed to be attached to every executor
"""
label_generalize_regexps = [
(re.compile(r"\b[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\b"), "U"),
(re.compile(r"\b[0-9a-fA-F]{2,}\b"), "U"),
# (re.compile(r"\b[0-9a-fA-F]{32}\b"), "U"), # implied by previous, maybe prev is too wide
(re.compile(r"\b\d{2,}\b"), "N")
]
def __init__(self, perc_levels=()):
super(ResultsReader, self).__init__()
self.generalize_labels = False
self.ignored_labels = []
self.log = logging.getLogger(self.__class__.__name__)
self.buffer = {}
self.min_timestamp = 0
self.track_percentiles = perc_levels
def __process_readers(self, final_pass=False):
"""
:param final_pass: True if in post-process stage
:return:
"""
for result in self._read(final_pass):
if result is None:
self.log.debug("No data from reader")
break
elif isinstance(result, list) or isinstance(result, tuple):
t_stamp, label, conc, r_time, con_time, latency, r_code, error, trname = result
if label in self.ignored_labels:
continue
if t_stamp < self.min_timestamp:
self.log.debug("Putting sample %s into %s", t_stamp, self.min_timestamp)
t_stamp = self.min_timestamp
if t_stamp not in self.buffer:
self.buffer[t_stamp] = []
self.buffer[t_stamp].append((label, conc, r_time, con_time, latency, r_code, error, trname))
else:
raise ValueError("Unsupported results from reader: %s" % result)
def __aggregate_current(self, datapoint, samples):
"""
:param datapoint: DataPoint
:param samples: list of samples
:return:
"""
current = datapoint[DataPoint.CURRENT]
for sample in samples:
label, r_time, concur, con_time, latency, r_code, error, trname = sample
if label == '':
label = '[empty]'
if self.generalize_labels:
label = self.__generalize_label(label)
if label in current:
label = current[label]
else:
label = current.get(label, KPISet(self.track_percentiles))
# empty means overall
label.add_sample((r_time, concur, con_time, latency, r_code, error, trname))
overall = KPISet(self.track_percentiles)
for label in current.values():
overall.merge_kpis(label, datapoint[DataPoint.SOURCE_ID])
current[''] = overall
return current
def _calculate_datapoints(self, final_pass=False):
"""
A generator to read available datapoints
:type final_pass: bool
:rtype: DataPoint
"""
self.__process_readers(final_pass)
self.log.debug("Buffer len: %s", len(self.buffer))
if not self.buffer:
return
if self.cumulative and self.track_percentiles:
old_len = self.buffer_len
chosen_timing = self.cumulative[''][KPISet.PERCENTILES][self.buffer_scale_idx]
self.buffer_len = round(chosen_timing * self.buffer_multiplier)
self.buffer_len = max(self.min_buffer_len, self.buffer_len)
self.buffer_len = min(self.max_buffer_len, self.buffer_len)
if self.buffer_len != old_len:
self.log.info("Changed data analysis delay to %ds", self.buffer_len)
timestamps = sorted(self.buffer.keys())
while final_pass or (timestamps[-1] >= (timestamps[0] + self.buffer_len)):
timestamp = timestamps.pop(0)
self.min_timestamp = timestamp + 1
self.log.debug("Aggregating: %s", timestamp)
samples = self.buffer.pop(timestamp)
datapoint = self.__get_new_datapoint(timestamp)
self.__aggregate_current(datapoint, samples)
yield datapoint
if not timestamps:
break
def __get_new_datapoint(self, timestamp):
"""
:rtype: DataPoint
"""
point = DataPoint(timestamp, self.track_percentiles)
point[DataPoint.SOURCE_ID] = id(self)
return point
@abstractmethod
def _read(self, final_pass=False):
"""
:param final_pass: True if called from post-process stage, when reader
should report any remaining results
:rtype: list
:return: timestamp, label, concurrency, rt, con_time, latency, rc, error, trname
"""
yield
def __generalize_label(self, label):
for regexp, replacement in self.label_generalize_regexps:
label = regexp.sub(replacement, label)
return label
class ConsolidatingAggregator(EngineModule, ResultsProvider):
"""
:type underlings: list[bzt.modules.aggregator.ResultsProvider]
"""
# TODO: switch to underling-count-based completeness criteria
def __init__(self):
EngineModule.__init__(self)
ResultsProvider.__init__(self)
self.generalize_labels = False
self.ignored_labels = []
self.underlings = []
self.buffer = BetterDict()
def prepare(self):
"""
Read aggregation options
"""
super(ConsolidatingAggregator, self).prepare()
# make unique & sort
percentiles = self.settings.get("percentiles", self.track_percentiles)
percentiles = list(set(percentiles))
percentiles.sort()
self.track_percentiles = percentiles
self.settings['percentiles'] = percentiles
self.ignored_labels = self.settings.get("ignore-labels", self.ignored_labels)
self.generalize_labels = self.settings.get("generalize-labels", self.generalize_labels)
self.min_buffer_len = dehumanize_time(self.settings.get("min-buffer-len", self.min_buffer_len))
max_buffer_len = self.settings.get("max-buffer-len", self.max_buffer_len)
try: # for max_buffer_len == float('inf')
self.max_buffer_len = dehumanize_time(max_buffer_len)
except ValueError as verr:
if str(verr).find('inf') != -1:
self.max_buffer_len = max_buffer_len
else:
raise
self.buffer_multiplier = self.settings.get("buffer-multiplier", self.buffer_multiplier)
percentile = self.settings.get("buffer-scale-choice", 0.5)
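# buffer-scale-choice is a 0..1 fraction that picks an entry from the sorted percentile list (0.5 -> the middle one); that percentile's timing later drives buffer_len in ResultsReader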
count = len(self.track_percentiles)
if count == 1:
self.buffer_scale_idx = str(float(self.track_percentiles[0]))
if count > 1:
percentiles = [i / (count - 1.0) for i in range(count)]
distances = [abs(percentile - percentiles[i]) for i in range(count)]
index_position = distances.index(min(distances))
self.buffer_scale_idx = str(float(self.track_percentiles[index_position]))
debug_str = 'Buffer scaling setup: percentile %s from %s selected'
self.log.debug(debug_str, self.buffer_scale_idx, self.track_percentiles)
def add_underling(self, underling):
"""
Add source for aggregating
:type underling: ResultsProvider
"""
underling.track_percentiles = self.track_percentiles
if isinstance(underling, ResultsReader):
underling.ignored_labels = self.ignored_labels
underling.generalize_labels = self.generalize_labels
underling.min_buffer_len = self.min_buffer_len
underling.max_buffer_len = self.max_buffer_len
underling.buffer_multiplier = self.buffer_multiplier
underling.buffer_scale_idx = self.buffer_scale_idx
self.underlings.append(underling)
def check(self):
"""
Check if there is next aggregate data present
:rtype: bool
"""
for point in self.datapoints():
self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])
return super(ConsolidatingAggregator, self).check()
def post_process(self):
"""
Process all remaining aggregate data
"""
super(ConsolidatingAggregator, self).post_process()
for point in self.datapoints(True):
self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])
def _process_underlings(self, final_pass):
for underling in self.underlings:
for data in underling.datapoints(final_pass):
tstamp = data[DataPoint.TIMESTAMP]
if self.buffer:
mints = min(self.buffer.keys())
if tstamp < mints:
self.log.warning("Putting datapoint %s into %s", tstamp, mints)
data[DataPoint.TIMESTAMP] = mints
tstamp = mints
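# BetterDict.get() stores the empty list under tstamp, so the datapoint is kept in the buffer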
self.buffer.get(tstamp, []).append(data)
def _calculate_datapoints(self, final_pass=False):
"""
Override ResultsProvider._calculate_datapoints
"""
self._process_underlings(final_pass)
self.log.debug("Consolidator buffer[%s]: %s", len(self.buffer), self.buffer.keys())
if not self.buffer:
return
timestamps = sorted(self.buffer.keys())
while timestamps and (final_pass or (timestamps[-1] >= timestamps[0] + self.buffer_len)):
tstamp = timestamps.pop(0)
self.log.debug("Merging into %s", tstamp)
points_to_consolidate = self.buffer.pop(tstamp)
point = DataPoint(tstamp, self.track_percentiles)
for subresult in points_to_consolidate:
self.log.debug("Merging %s", subresult[DataPoint.TIMESTAMP])
point.merge_point(subresult)
point.recalculate()
yield point
class NoneAggregator(EngineModule, ResultsProvider):
"""
Dummy aggregator
"""
def __init__(self):
EngineModule.__init__(self)
ResultsProvider.__init__(self)
def _calculate_datapoints(self, final_pass=False):
pass
class AggregatorListener(object):
"""
Mixin for listeners of aggregator data
"""
@abstractmethod
def aggregated_second(self, data):
"""
Notification about new data point
:param data: bzt.modules.reporting.DataPoint
"""
pass
def finalize(self):
"""
This method is called at the end of run
to close open file descriptors etc.
"""
pass
| 1 | 13,874 | Why aggregator should have this translator? It's Selenium specifics for now... | Blazemeter-taurus | py |
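If the "translator" in the review comment refers to the label generalization in ResultsReader (an assumption on my part), the following standalone Python demo — not part of the bzt sources — shows what it does: the regexps collapse embedded IDs so per-URL labels can be merged under one generalized label. The helper name generalize is hypothetical; the regexp list is copied from ResultsReader.label_generalize_regexps.

import re

label_generalize_regexps = [
    (re.compile(r"\b[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\b"), "U"),
    (re.compile(r"\b[0-9a-fA-F]{2,}\b"), "U"),
    (re.compile(r"\b\d{2,}\b"), "N"),
]

def generalize(label):
    # apply each regexp in turn, exactly as ResultsReader.__generalize_label does
    for regexp, replacement in label_generalize_regexps:
        label = regexp.sub(replacement, label)
    return label

print(generalize("/api/users/3f2504e0-4f89-11d3-9a0c-0305e82c3301/avatar"))  # /api/users/U/avatar
print(generalize("/session/deadbeef42/close"))                               # /session/U/close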
@@ -136,6 +136,13 @@ public abstract class FieldComparator<T> {
}
}
+ /**
+ * Informs the comparator that sorting is done in reverse.
+ * This is necessary only for skipping functionality.
+ */
+ public void setReverse() {
+ }
+
/**
* Base FieldComparator class for numeric types | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;
import java.io.IOException;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
/**
* Expert: a FieldComparator compares hits so as to determine their
* sort order when collecting the top results with {@link
* TopFieldCollector}. The concrete public FieldComparator
* classes here correspond to the SortField types.
*
* <p>The document IDs passed to these methods must only
* move forwards, since they are using doc values iterators
* to retrieve sort values.</p>
*
* <p>This API is designed to achieve high performance
* sorting, by exposing a tight interaction with {@link
* FieldValueHitQueue} as it visits hits. Whenever a hit is
* competitive, it's enrolled into a virtual slot, which is
* an int ranging from 0 to numHits-1. Segment transitions are
* handled by creating a dedicated per-segment
* {@link LeafFieldComparator} which also needs to interact
* with the {@link FieldValueHitQueue} but can optimize based
* on the segment to collect.</p>
*
* <p>The following functions need to be implemented</p>
* <ul>
* <li> {@link #compare} Compare a hit at 'slot a'
* with hit 'slot b'.
*
* <li> {@link #setTopValue} This method is called by
* {@link TopFieldCollector} to notify the
* FieldComparator of the top most value, which is
* used by future calls to
* {@link LeafFieldComparator#compareTop}.
*
* <li> {@link #getLeafComparator(org.apache.lucene.index.LeafReaderContext)} Invoked
* when the search is switching to the next segment.
* You may need to update internal state of the
* comparator, for example retrieving new values from
* DocValues.
*
* <li> {@link #value} Return the sort value stored in
* the specified slot. This is only called at the end
* of the search, in order to populate {@link
* FieldDoc#fields} when returning the top results.
* </ul>
*
* @see LeafFieldComparator
* @lucene.experimental
*/
public abstract class FieldComparator<T> {
/**
* Compare hit at slot1 with hit at slot2.
*
* @param slot1 first slot to compare
* @param slot2 second slot to compare
* @return any {@code N < 0} if slot2's value is sorted after
* slot1, any {@code N > 0} if the slot2's value is sorted before
* slot1 and {@code 0} if they are equal
*/
public abstract int compare(int slot1, int slot2);
/**
* Record the top value, for future calls to {@link
* LeafFieldComparator#compareTop}. This is only called for searches that
* use searchAfter (deep paging), and is called before any
* calls to {@link #getLeafComparator(LeafReaderContext)}.
*/
public abstract void setTopValue(T value);
/**
* Return the actual value in the slot.
*
* @param slot the value
* @return value in this slot
*/
public abstract T value(int slot);
/**
* Get a per-segment {@link LeafFieldComparator} to collect the given
* {@link org.apache.lucene.index.LeafReaderContext}. All docIDs supplied to
* this {@link LeafFieldComparator} are relative to the current reader (you
* must add docBase if you need to map it to a top-level docID).
*
* @param context current reader context
* @return the comparator to use for this segment
* @throws IOException if there is a low-level IO error
*/
public abstract LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException;
/** Returns a negative integer if first is less than second,
* 0 if they are equal and a positive integer otherwise. Default
* impl to assume the type implements Comparable and
* invoke .compareTo; be sure to override this method if
* your FieldComparator's type isn't a Comparable or
* if your values may sometimes be null */
@SuppressWarnings("unchecked")
public int compareValues(T first, T second) {
if (first == null) {
if (second == null) {
return 0;
} else {
return -1;
}
} else if (second == null) {
return 1;
} else {
return ((Comparable<T>) first).compareTo(second);
}
}
/**
* Base FieldComparator class for numeric types
*/
public static abstract class NumericComparator<T extends Number> extends SimpleFieldComparator<T> {
protected final T missingValue;
protected final String field;
protected NumericDocValues currentReaderValues;
public NumericComparator(String field, T missingValue) {
this.field = field;
this.missingValue = missingValue;
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
currentReaderValues = getNumericDocValues(context, field);
}
/** Retrieves the NumericDocValues for the field in this segment */
protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
return DocValues.getNumeric(context.reader(), field);
}
}
/** Parses field's values as double (using {@link
* org.apache.lucene.index.LeafReader#getNumericDocValues} and sorts by ascending value */
public static class DoubleComparator extends NumericComparator<Double> {
private final double[] values;
protected double bottom;
protected double topValue;
/**
* Creates a new comparator based on {@link Double#compare} for {@code numHits}.
* When a document has no value for the field, {@code missingValue} is substituted.
*/
public DoubleComparator(int numHits, String field, Double missingValue) {
super(field, missingValue != null ? missingValue : 0.0);
values = new double[numHits];
}
private double getValueForDoc(int doc) throws IOException {
if (currentReaderValues.advanceExact(doc)) {
return Double.longBitsToDouble(currentReaderValues.longValue());
} else {
return missingValue;
}
}
@Override
public int compare(int slot1, int slot2) {
return Double.compare(values[slot1], values[slot2]);
}
@Override
public int compareBottom(int doc) throws IOException {
return Double.compare(bottom, getValueForDoc(doc));
}
@Override
public void copy(int slot, int doc) throws IOException {
values[slot] = getValueForDoc(doc);
}
@Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
@Override
public void setTopValue(Double value) {
topValue = value;
}
@Override
public Double value(int slot) {
return Double.valueOf(values[slot]);
}
@Override
public int compareTop(int doc) throws IOException {
return Double.compare(topValue, getValueForDoc(doc));
}
}
/** Parses field's values as float (using {@link
* org.apache.lucene.index.LeafReader#getNumericDocValues(String)} and sorts by ascending value */
public static class FloatComparator extends NumericComparator<Float> {
private final float[] values;
protected float bottom;
protected float topValue;
/**
* Creates a new comparator based on {@link Float#compare} for {@code numHits}.
* When a document has no value for the field, {@code missingValue} is substituted.
*/
public FloatComparator(int numHits, String field, Float missingValue) {
super(field, missingValue != null ? missingValue : 0.0f);
values = new float[numHits];
}
private float getValueForDoc(int doc) throws IOException {
if (currentReaderValues.advanceExact(doc)) {
return Float.intBitsToFloat((int) currentReaderValues.longValue());
} else {
return missingValue;
}
}
@Override
public int compare(int slot1, int slot2) {
return Float.compare(values[slot1], values[slot2]);
}
@Override
public int compareBottom(int doc) throws IOException {
return Float.compare(bottom, getValueForDoc(doc));
}
@Override
public void copy(int slot, int doc) throws IOException {
values[slot] = getValueForDoc(doc);
}
@Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
@Override
public void setTopValue(Float value) {
topValue = value;
}
@Override
public Float value(int slot) {
return Float.valueOf(values[slot]);
}
@Override
public int compareTop(int doc) throws IOException {
return Float.compare(topValue, getValueForDoc(doc));
}
}
/** Parses field's values as int (using {@link
* org.apache.lucene.index.LeafReader#getNumericDocValues(String)} and sorts by ascending value */
public static class IntComparator extends NumericComparator<Integer> {
private final int[] values;
protected int bottom; // Value of bottom of queue
protected int topValue;
/**
* Creates a new comparator based on {@link Integer#compare} for {@code numHits}.
* When a document has no value for the field, {@code missingValue} is substituted.
*/
public IntComparator(int numHits, String field, Integer missingValue) {
super(field, missingValue != null ? missingValue : 0);
//System.out.println("IntComparator.init");
//new Throwable().printStackTrace(System.out);
values = new int[numHits];
}
private int getValueForDoc(int doc) throws IOException {
if (currentReaderValues.advanceExact(doc)) {
return (int) currentReaderValues.longValue();
} else {
return missingValue;
}
}
@Override
public int compare(int slot1, int slot2) {
return Integer.compare(values[slot1], values[slot2]);
}
@Override
public int compareBottom(int doc) throws IOException {
return Integer.compare(bottom, getValueForDoc(doc));
}
@Override
public void copy(int slot, int doc) throws IOException {
values[slot] = getValueForDoc(doc);
}
@Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
@Override
public void setTopValue(Integer value) {
topValue = value;
}
@Override
public Integer value(int slot) {
return Integer.valueOf(values[slot]);
}
@Override
public int compareTop(int doc) throws IOException {
return Integer.compare(topValue, getValueForDoc(doc));
}
}
/** Parses field's values as long (using {@link
* org.apache.lucene.index.LeafReader#getNumericDocValues(String)} and sorts by ascending value */
public static class LongComparator extends NumericComparator<Long> {
private final long[] values;
protected long bottom;
protected long topValue;
/**
* Creates a new comparator based on {@link Long#compare} for {@code numHits}.
* When a document has no value for the field, {@code missingValue} is substituted.
*/
public LongComparator(int numHits, String field, Long missingValue) {
super(field, missingValue != null ? missingValue : 0L);
values = new long[numHits];
}
private long getValueForDoc(int doc) throws IOException {
if (currentReaderValues.advanceExact(doc)) {
return currentReaderValues.longValue();
} else {
return missingValue;
}
}
@Override
public int compare(int slot1, int slot2) {
return Long.compare(values[slot1], values[slot2]);
}
@Override
public int compareBottom(int doc) throws IOException {
return Long.compare(bottom, getValueForDoc(doc));
}
@Override
public void copy(int slot, int doc) throws IOException {
values[slot] = getValueForDoc(doc);
}
@Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
@Override
public void setTopValue(Long value) {
topValue = value;
}
@Override
public Long value(int slot) {
return Long.valueOf(values[slot]);
}
@Override
public int compareTop(int doc) throws IOException {
return Long.compare(topValue, getValueForDoc(doc));
}
}
/** Sorts by descending relevance. NOTE: if you are
* sorting only by descending relevance and then
* secondarily by ascending docID, performance is faster
* using {@link TopScoreDocCollector} directly (which {@link
* IndexSearcher#search} uses when no {@link Sort} is
* specified). */
public static final class RelevanceComparator extends FieldComparator<Float> implements LeafFieldComparator {
private final float[] scores;
private float bottom;
private Scorable scorer;
private float topValue;
/** Creates a new comparator based on relevance for {@code numHits}. */
public RelevanceComparator(int numHits) {
scores = new float[numHits];
}
@Override
public int compare(int slot1, int slot2) {
return Float.compare(scores[slot2], scores[slot1]);
}
@Override
public int compareBottom(int doc) throws IOException {
float score = scorer.score();
assert !Float.isNaN(score);
return Float.compare(score, bottom);
}
@Override
public void copy(int slot, int doc) throws IOException {
scores[slot] = scorer.score();
assert !Float.isNaN(scores[slot]);
}
@Override
public LeafFieldComparator getLeafComparator(LeafReaderContext context) {
return this;
}
@Override
public void setBottom(final int bottom) {
this.bottom = scores[bottom];
}
@Override
public void setTopValue(Float value) {
topValue = value;
}
@Override
public void setScorer(Scorable scorer) {
// wrap with a ScoreCachingWrappingScorer so that successive calls to
// score() will not incur score computation over and
// over again.
if (!(scorer instanceof ScoreCachingWrappingScorer)) {
this.scorer = new ScoreCachingWrappingScorer(scorer);
} else {
this.scorer = scorer;
}
}
@Override
public Float value(int slot) {
return Float.valueOf(scores[slot]);
}
// Override because we sort reverse of natural Float order:
@Override
public int compareValues(Float first, Float second) {
// Reversed intentionally because relevance by default
// sorts descending:
return second.compareTo(first);
}
@Override
public int compareTop(int doc) throws IOException {
float docValue = scorer.score();
assert !Float.isNaN(docValue);
return Float.compare(docValue, topValue);
}
}
/** Sorts by ascending docID */
public static final class DocComparator extends FieldComparator<Integer> implements LeafFieldComparator {
private final int[] docIDs;
private int docBase;
private int bottom;
private int topValue;
/** Creates a new comparator based on document ids for {@code numHits} */
public DocComparator(int numHits) {
docIDs = new int[numHits];
}
@Override
public int compare(int slot1, int slot2) {
// No overflow risk because docIDs are non-negative
return docIDs[slot1] - docIDs[slot2];
}
@Override
public int compareBottom(int doc) {
// No overflow risk because docIDs are non-negative
return bottom - (docBase + doc);
}
@Override
public void copy(int slot, int doc) {
docIDs[slot] = docBase + doc;
}
@Override
public LeafFieldComparator getLeafComparator(LeafReaderContext context) {
// TODO: can we "map" our docIDs to the current
// reader? saves having to then subtract on every
// compare call
this.docBase = context.docBase;
return this;
}
@Override
public void setBottom(final int bottom) {
this.bottom = docIDs[bottom];
}
@Override
public void setTopValue(Integer value) {
topValue = value;
}
@Override
public Integer value(int slot) {
return Integer.valueOf(docIDs[slot]);
}
@Override
public int compareTop(int doc) {
int docValue = docBase + doc;
return Integer.compare(topValue, docValue);
}
@Override
public void setScorer(Scorable scorer) {}
}
/** Sorts by field's natural Term sort order, using
* ordinals. This is functionally equivalent to {@link
* org.apache.lucene.search.FieldComparator.TermValComparator}, but it first resolves the string
* to their relative ordinal positions (using the index
* returned by {@link org.apache.lucene.index.LeafReader#getSortedDocValues(String)}), and
* does most comparisons using the ordinals. For medium
* to large results, this comparator will be much faster
* than {@link org.apache.lucene.search.FieldComparator.TermValComparator}. For very small
* result sets it may be slower. */
public static class TermOrdValComparator extends FieldComparator<BytesRef> implements LeafFieldComparator {
/* Ords for each slot.
@lucene.internal */
final int[] ords;
/* Values for each slot.
@lucene.internal */
final BytesRef[] values;
private final BytesRefBuilder[] tempBRs;
/* Which reader last copied a value into the slot. When
we compare two slots, we just compare-by-ord if the
readerGen is the same; else we must compare the
values (slower).
@lucene.internal */
final int[] readerGen;
/* Gen of current reader we are on.
@lucene.internal */
int currentReaderGen = -1;
/* Current reader's doc ord/values.
@lucene.internal */
SortedDocValues termsIndex;
private final String field;
/* Bottom slot, or -1 if queue isn't full yet
@lucene.internal */
int bottomSlot = -1;
/* Bottom ord (same as ords[bottomSlot] once bottomSlot
is set). Cached for faster compares.
@lucene.internal */
int bottomOrd;
/* True if current bottom slot matches the current
reader.
@lucene.internal */
boolean bottomSameReader;
/* Bottom value (same as values[bottomSlot] once
bottomSlot is set). Cached for faster compares.
@lucene.internal */
BytesRef bottomValue;
/** Set by setTopValue. */
BytesRef topValue;
boolean topSameReader;
int topOrd;
/** -1 if missing values are sorted first, 1 if they are
* sorted last */
final int missingSortCmp;
/** Which ordinal to use for a missing value. */
final int missingOrd;
/** Creates this, sorting missing values first. */
public TermOrdValComparator(int numHits, String field) {
this(numHits, field, false);
}
/** Creates this, with control over how missing values
* are sorted. Pass sortMissingLast=true to put
* missing values at the end. */
public TermOrdValComparator(int numHits, String field, boolean sortMissingLast) {
ords = new int[numHits];
values = new BytesRef[numHits];
tempBRs = new BytesRefBuilder[numHits];
readerGen = new int[numHits];
this.field = field;
if (sortMissingLast) {
missingSortCmp = 1;
missingOrd = Integer.MAX_VALUE;
} else {
missingSortCmp = -1;
missingOrd = -1;
}
}
private int getOrdForDoc(int doc) throws IOException {
if (termsIndex.advanceExact(doc)) {
return termsIndex.ordValue();
} else {
return -1;
}
}
@Override
public int compare(int slot1, int slot2) {
if (readerGen[slot1] == readerGen[slot2]) {
return ords[slot1] - ords[slot2];
}
final BytesRef val1 = values[slot1];
final BytesRef val2 = values[slot2];
if (val1 == null) {
if (val2 == null) {
return 0;
}
return missingSortCmp;
} else if (val2 == null) {
return -missingSortCmp;
}
return val1.compareTo(val2);
}
@Override
public int compareBottom(int doc) throws IOException {
assert bottomSlot != -1;
int docOrd = getOrdForDoc(doc);
if (docOrd == -1) {
docOrd = missingOrd;
}
if (bottomSameReader) {
// ord is precisely comparable, even in the equal case
return bottomOrd - docOrd;
} else if (bottomOrd >= docOrd) {
// the equals case always means bottom is > doc
// (because we set bottomOrd to the lower bound in
// setBottom):
return 1;
} else {
return -1;
}
}
@Override
public void copy(int slot, int doc) throws IOException {
int ord = getOrdForDoc(doc);
if (ord == -1) {
ord = missingOrd;
values[slot] = null;
} else {
assert ord >= 0;
if (tempBRs[slot] == null) {
tempBRs[slot] = new BytesRefBuilder();
}
tempBRs[slot].copyBytes(termsIndex.lookupOrd(ord));
values[slot] = tempBRs[slot].get();
}
ords[slot] = ord;
readerGen[slot] = currentReaderGen;
}
/** Retrieves the SortedDocValues for the field in this segment */
protected SortedDocValues getSortedDocValues(LeafReaderContext context, String field) throws IOException {
return DocValues.getSorted(context.reader(), field);
}
@Override
public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException {
termsIndex = getSortedDocValues(context, field);
currentReaderGen++;
if (topValue != null) {
// Recompute topOrd/SameReader
int ord = termsIndex.lookupTerm(topValue);
if (ord >= 0) {
topSameReader = true;
topOrd = ord;
} else {
topSameReader = false;
topOrd = -ord-2;
}
} else {
topOrd = missingOrd;
topSameReader = true;
}
//System.out.println(" getLeafComparator topOrd=" + topOrd + " topSameReader=" + topSameReader);
if (bottomSlot != -1) {
// Recompute bottomOrd/SameReader
setBottom(bottomSlot);
}
return this;
}
@Override
public void setBottom(final int bottom) throws IOException {
bottomSlot = bottom;
bottomValue = values[bottomSlot];
if (currentReaderGen == readerGen[bottomSlot]) {
bottomOrd = ords[bottomSlot];
bottomSameReader = true;
} else {
if (bottomValue == null) {
// missingOrd is null for all segments
assert ords[bottomSlot] == missingOrd;
bottomOrd = missingOrd;
bottomSameReader = true;
readerGen[bottomSlot] = currentReaderGen;
} else {
final int ord = termsIndex.lookupTerm(bottomValue);
if (ord < 0) {
bottomOrd = -ord - 2;
bottomSameReader = false;
} else {
bottomOrd = ord;
// exact value match
bottomSameReader = true;
readerGen[bottomSlot] = currentReaderGen;
ords[bottomSlot] = bottomOrd;
}
}
}
}
@Override
public void setTopValue(BytesRef value) {
// null is fine: it means the last doc of the prior
// search was missing this value
topValue = value;
//System.out.println("setTopValue " + topValue);
}
@Override
public BytesRef value(int slot) {
return values[slot];
}
@Override
public int compareTop(int doc) throws IOException {
int ord = getOrdForDoc(doc);
if (ord == -1) {
ord = missingOrd;
}
if (topSameReader) {
// ord is precisely comparable, even in the equal
// case
//System.out.println("compareTop doc=" + doc + " ord=" + ord + " ret=" + (topOrd-ord));
return topOrd - ord;
} else if (ord <= topOrd) {
// the equals case always means doc is < value
// (because we set lastOrd to the lower bound)
return 1;
} else {
return -1;
}
}
@Override
public int compareValues(BytesRef val1, BytesRef val2) {
if (val1 == null) {
if (val2 == null) {
return 0;
}
return missingSortCmp;
} else if (val2 == null) {
return -missingSortCmp;
}
return val1.compareTo(val2);
}
@Override
public void setScorer(Scorable scorer) {}
}
/** Sorts by field's natural Term sort order. All
* comparisons are done using BytesRef.compareTo, which is
* slow for medium to large result sets but possibly
* very fast for very small results sets. */
public static class TermValComparator extends FieldComparator<BytesRef> implements LeafFieldComparator {
private final BytesRef[] values;
private final BytesRefBuilder[] tempBRs;
private BinaryDocValues docTerms;
private final String field;
private BytesRef bottom;
private BytesRef topValue;
private final int missingSortCmp;
/** Sole constructor. */
public TermValComparator(int numHits, String field, boolean sortMissingLast) {
values = new BytesRef[numHits];
tempBRs = new BytesRefBuilder[numHits];
this.field = field;
missingSortCmp = sortMissingLast ? 1 : -1;
}
private BytesRef getValueForDoc(int doc) throws IOException {
if (docTerms.advanceExact(doc)) {
return docTerms.binaryValue();
} else {
return null;
}
}
@Override
public int compare(int slot1, int slot2) {
final BytesRef val1 = values[slot1];
final BytesRef val2 = values[slot2];
return compareValues(val1, val2);
}
@Override
public int compareBottom(int doc) throws IOException {
final BytesRef comparableBytes = getValueForDoc(doc);
return compareValues(bottom, comparableBytes);
}
@Override
public void copy(int slot, int doc) throws IOException {
final BytesRef comparableBytes = getValueForDoc(doc);
if (comparableBytes == null) {
values[slot] = null;
} else {
if (tempBRs[slot] == null) {
tempBRs[slot] = new BytesRefBuilder();
}
tempBRs[slot].copyBytes(comparableBytes);
values[slot] = tempBRs[slot].get();
}
}
/** Retrieves the BinaryDocValues for the field in this segment */
protected BinaryDocValues getBinaryDocValues(LeafReaderContext context, String field) throws IOException {
return DocValues.getBinary(context.reader(), field);
}
@Override
public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException {
docTerms = getBinaryDocValues(context, field);
return this;
}
@Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
@Override
public void setTopValue(BytesRef value) {
// null is fine: it means the last doc of the prior
// search was missing this value
topValue = value;
}
@Override
public BytesRef value(int slot) {
return values[slot];
}
@Override
public int compareValues(BytesRef val1, BytesRef val2) {
// missing always sorts first:
if (val1 == null) {
if (val2 == null) {
return 0;
}
return missingSortCmp;
} else if (val2 == null) {
return -missingSortCmp;
}
return val1.compareTo(val2);
}
@Override
public int compareTop(int doc) throws IOException {
return compareValues(topValue, getValueForDoc(doc));
}
@Override
public void setScorer(Scorable scorer) {}
}
}
| 1 | 36,122 | I don't understand why this function is needed ? Can't you just pass the information when the `DocComparator` is created in the SortField ? | apache-lucene-solr | java |
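For context, a minimal standalone Java sketch (hypothetical illustration only, not Lucene code) of the alternative the reviewer hints at: handing the sort direction to the comparator at construction time instead of mutating it later through a setReverse() call. The class name and shape are invented for the example.

// Hypothetical illustration: a comparator that receives the sort direction up front,
// so no setReverse() mutator is needed.
final class ReverseAwareComparator {
    private final boolean reverse;

    ReverseAwareComparator(boolean reverse) {
        this.reverse = reverse;
    }

    int compare(int a, int b) {
        int cmp = Integer.compare(a, b);
        return reverse ? -cmp : cmp;
    }

    public static void main(String[] args) {
        // ascending comparator says 1 < 2; the reversed one flips the sign
        System.out.println(new ReverseAwareComparator(false).compare(1, 2)); // -1
        System.out.println(new ReverseAwareComparator(true).compare(1, 2));  // 1
    }
}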
@@ -11,5 +11,7 @@ class MailchimpFulfillmentJob < MailchimpJob
def subscribe(list_id, email)
client.list_subscribe(id: list_id, email_address: email, double_optin: false)
+ rescue Gibbon::MailChimpError => e
+ raise e unless MAILCHIMP_EMAIL_ERROR_CODES.include?(e.code)
end
end | 1 | class MailchimpFulfillmentJob < MailchimpJob
MASTER_LIST_ID = '66f4d45e54'
def perform
lists = client.lists(filters: { list_name: list_name })
subscribe(lists['data'].first['id'], email)
subscribe(MASTER_LIST_ID, email)
end
private
def subscribe(list_id, email)
client.list_subscribe(id: list_id, email_address: email, double_optin: false)
end
end
| 1 | 7,646 | This rescue is repeated exactly in `app/jobs/mailchimp_removal_job.rb`. Could we extract a method and pull it up into `MailChimpJob` for reuse? | thoughtbot-upcase | rb |
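A minimal Ruby sketch of the refactor the reviewer proposes, not the actual repository code: the shared rescue moves into the MailchimpJob base class so both jobs can reuse it. It assumes MailchimpJob already provides client and that MAILCHIMP_EMAIL_ERROR_CODES is visible to the base class; the helper name ignoring_email_errors is invented for the example.

class MailchimpJob
  private

  # Run a Gibbon call, swallowing only the benign "bad email" error codes.
  def ignoring_email_errors
    yield
  rescue Gibbon::MailChimpError => e
    raise e unless MAILCHIMP_EMAIL_ERROR_CODES.include?(e.code)
  end
end

class MailchimpFulfillmentJob < MailchimpJob
  private

  def subscribe(list_id, email)
    ignoring_email_errors do
      client.list_subscribe(id: list_id, email_address: email, double_optin: false)
    end
  end
end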
@@ -90,6 +90,9 @@ function fetchWithTimeout(input, init) {
url: xhr.responseURL,
headers: xhr.responseHeaders
};
+ if (!options.headers) {
+ options.headers = {'content-type': xhr.getResponseHeader('content-type')};
+ }
const body = 'response' in xhr ? xhr.response : xhr.responseText;
resolve(new Response(body, options));
}; | 1 | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 2016 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
'use strict';
/* global fetch */
const AuthError = require('./errors').AuthError;
const permissionApis = require('./permission-api');
const merge = require('deepmerge');
const require_method = require;
const URL = require('url-parse');
const refreshTimers = {};
const retryInterval = 5 * 1000; // Amount of time between retrying authentication requests, if the first request failed.
const refreshBuffer = 20 * 1000; // A "safe" amount of time before a token expires that allow us to refresh it.
const refreshLowerBound = 10 * 1000; // Lower bound for refreshing tokens.
function node_require(module) {
return require_method(module);
}
function checkTypes(args, types) {
args = Array.prototype.slice.call(args);
for (var i = 0; i < types.length; ++i) {
if (args.length > i && typeof args[i] !== types[i]) {
throw new TypeError('param ' + i + ' must be of type ' + types[i]);
}
}
}
function checkObjectTypes(obj, types) {
for (const name of Object.getOwnPropertyNames(types)) {
const actualType = typeof obj[name];
let targetType = types[name];
const isOptional = targetType[targetType.length - 1] === '?';
if (isOptional) {
targetType = targetType.slice(0, -1);
}
if (!isOptional && actualType === 'undefined') {
throw new Error(`${name} is required, but a value was not provided.`);
}
if (actualType !== targetType) {
throw new TypeError(`${name} must be of type '${targetType}' but was of type '${actualType}' instead.`);
}
}
}
function normalizeSyncUrl(authUrl, syncUrl) {
const parsedAuthUrl = new URL(authUrl);
const realmProtocol = (parsedAuthUrl.protocol === "https:") ? "realms" : "realm";
// Inherit ports from the Auth url
const port = parsedAuthUrl.port ? `:${parsedAuthUrl.port}` : "";
const baseUrl = `${realmProtocol}://${parsedAuthUrl.hostname}${port}`;
if (!syncUrl) {
syncUrl = "/default";
}
return new URL(syncUrl, baseUrl, false).toString();
}
// node-fetch supports setting a timeout as a nonstandard extension, but normal fetch doesn't
function fetchWithTimeout(input, init) {
const request = new Request(input, init);
const xhr = new XMLHttpRequest();
xhr.timeout = init.timeout || 0;
return new Promise(function(resolve, reject) {
xhr.onload = () => {
const options = {
status: xhr.status,
statusText: xhr.statusText,
url: xhr.responseURL,
headers: xhr.responseHeaders
};
const body = 'response' in xhr ? xhr.response : xhr.responseText;
resolve(new Response(body, options));
};
xhr.onerror = () => reject(new TypeError('Network request failed'));
xhr.ontimeout = () => reject(new TypeError('Network request failed'));
xhr.open(request.method, request.url, true);
request.headers.forEach((value, name) => xhr.setRequestHeader(name, value));
xhr.send(typeof request._bodyInit === 'undefined' ? null : request._bodyInit);
});
}
// Perform a HTTP request, enqueuing it if too many requests are already in
// progress to avoid hammering the server.
const performFetch = (function() {
const doFetch = typeof fetch === 'undefined' ? node_require('node-fetch') : fetchWithTimeout;
const queue = [];
let count = 0;
const maxCount = 5;
const next = () => {
if (count >= maxCount) {
return;
}
const req = queue.shift();
if (!req) {
return;
}
const [url, options, resolve, reject] = req;
++count;
// node doesn't support Promise.prototype.finally until 10
doFetch(url, options)
.then(response => {
--count;
next();
resolve(response);
})
.catch(error => {
--count;
next();
reject(error);
});
};
return (url, options) => {
return new Promise((resolve, reject) => {
queue.push([url, options, resolve, reject]);
next();
});
};
})();
const url_parse = require('url-parse');
const postHeaders = {
'content-type': 'application/json;charset=utf-8',
'accept': 'application/json'
};
function append_url(server, path) {
return server + (server.charAt(server.length - 1) != '/' ? '/' : '') + path;
}
function scheduleAccessTokenRefresh(user, localRealmPath, realmUrl, expirationDate) {
let userTimers = refreshTimers[user.identity];
if (!userTimers) {
refreshTimers[user.identity] = userTimers = {};
}
// We assume that access tokens have ~ the same expiration time, so if someone already
// scheduled a refresh, it's likely to complete before the one we would have scheduled
if (!userTimers[localRealmPath]) {
const timeout = Math.max(expirationDate - Date.now() - refreshBuffer, refreshLowerBound);
userTimers[localRealmPath] = setTimeout(() => {
delete userTimers[localRealmPath];
refreshAccessToken(user, localRealmPath, realmUrl);
}, timeout);
}
}
function print_error() {
(console.error || console.log).apply(console, arguments);
}
function validateRefresh(user, localRealmPath, response, json) {
let session = user._sessionForOnDiskPath(localRealmPath);
if (!session) {
return;
}
const errorHandler = session.config.error;
if (response.status != 200) {
let error = new AuthError(json);
if (errorHandler) {
errorHandler(session, error);
} else {
print_error(`Unhandled session token refresh error for user ${user.identity} at path ${localRealmPath}`, error);
}
return;
}
if (session.state === 'invalid') {
return;
}
return session;
}
function refreshAdminToken(user, localRealmPath, realmUrl) {
const token = user.token;
const server = user.server;
// We don't need to actually refresh the token, but we need to let ROS know
// we're accessing the file and get the sync label for multiplexing
let parsedRealmUrl = url_parse(realmUrl);
const url = append_url(user.server, 'realms/files/' + encodeURIComponent(parsedRealmUrl.pathname));
performFetch(url, {method: 'GET', timeout: 10000.0, headers: {Authorization: user.token}})
.then((response) => {
// There may not be a Realm Directory Service running on the server
// we're talking to. If we're talking directly to the sync service
// we'll get a 404, and if we're running inside ROS we'll get a 503 if
// the directory service hasn't started yet (perhaps because we got
// called due to the directory service itself opening some Realms).
//
// In both of these cases we can just pretend we got a valid response.
if (response.status === 404 || response.status === 503) {
return {response: {status: 200}, json: {path: parsedRealmUrl.pathname, syncLabel: '_direct'}};
}
else {
return response.json().then((json) => { return { response, json }; });
}
})
.then((responseAndJson) => {
const response = responseAndJson.response;
const json = responseAndJson.json;
const credentials = credentialsMethods.adminToken(token)
const newUser = user.constructor.login(server, credentials);
const session = validateRefresh(newUser, localRealmPath, response, json);
if (session) {
parsedRealmUrl.set('pathname', json.path);
session._refreshAccessToken(user.token, parsedRealmUrl.href, json.syncLabel);
}
})
.catch((e) => {
print_error(e);
setTimeout(() => refreshAccessToken(user, localRealmPath, realmUrl), retryInterval);
});
}
function refreshAccessToken(user, localRealmPath, realmUrl) {
if (!user._sessionForOnDiskPath(localRealmPath)) {
// We're trying to refresh the token for a session that's closed. This could happen, for example,
// when the server is not reachable and we periodically try to refresh the token, but the user has
// already closed the Realm file.
return;
}
if (!user.server) {
throw new Error("Server for user must be specified");
}
const parsedRealmUrl = url_parse(realmUrl);
const path = parsedRealmUrl.pathname;
if (!path) {
throw new Error(`Unexpected Realm path inferred from url '${realmUrl}'. The path section of the url should be a non-empty string.`);
}
if (user.isAdminToken) {
return refreshAdminToken(user, localRealmPath, realmUrl);
}
const url = append_url(user.server, 'auth');
const options = {
method: 'POST',
body: JSON.stringify({
data: user.token,
path,
provider: 'realm',
app_id: ''
}),
headers: postHeaders,
// FIXME: This timeout appears to be necessary in order for some requests to be sent at all.
// See https://github.com/realm/realm-js-private/issues/338 for details.
timeout: 10000.0
};
const server = user.server;
const identity = user.identity;
performFetch(url, options)
.then((response) => response.json().then((json) => { return { response, json }; }))
.then((responseAndJson) => {
const response = responseAndJson.response;
const json = responseAndJson.json;
// Look up a fresh instance of the user.
// We do this because in React Native Remote Debugging
// `Realm.clearTestState()` will have invalidated the user object
let newUser = user.constructor._getExistingUser(server, identity);
if (!newUser) {
return;
}
const session = validateRefresh(newUser, localRealmPath, response, json);
if (!session) {
return;
}
const tokenData = json.access_token.token_data;
parsedRealmUrl.set('pathname', tokenData.path);
session._refreshAccessToken(json.access_token.token, parsedRealmUrl.href, tokenData.sync_label);
const errorHandler = session.config.error;
if (errorHandler && errorHandler._notifyOnAccessTokenRefreshed) {
errorHandler(session, errorHandler._notifyOnAccessTokenRefreshed)
}
const tokenExpirationDate = new Date(tokenData.expires * 1000);
scheduleAccessTokenRefresh(newUser, localRealmPath, realmUrl, tokenExpirationDate);
})
.catch((e) => {
print_error(e);
// in case something lower in the HTTP stack breaks, try again in `retryInterval` seconds
setTimeout(() => refreshAccessToken(user, localRealmPath, realmUrl), retryInterval);
})
}
/**
* The base authentication method. It fires a JSON POST to the server parameter plus the auth url
* For example, if the server parameter is `http://myapp.com`, this url will post to `http://myapp.com/auth`
* @param {object} userConstructor
* @param {string} server the http or https server url
* @param {object} json the json to post to the auth endpoint
 * @param {number} retries the number of retries already attempted after network errors
 * @returns {Promise} a promise that resolves with the logged-in user
*/
function _authenticate(userConstructor, server, json, retries) {
json.app_id = '';
const url = append_url(server, 'auth');
const options = {
method: 'POST',
body: JSON.stringify(json),
headers: postHeaders,
open_timeout: 5000,
timeout: 5000
};
let request = performFetch(url, options)
if (retries < 3) {
// Retry on network errors (which are different from the auth endpoint returning an error)
request = request.catch(() => {
return _authenticate(userConstructor, server, json, retries + 1);
});
}
return request.then((response) => {
const contentType = response.headers.get('Content-Type');
if (contentType.indexOf('application/json') === -1) {
return response.text().then((body) => {
throw new AuthError({
title: `Could not authenticate: Realm Object Server didn't respond with valid JSON`,
body,
});
});
} else if (!response.ok) {
return response.json().then((body) => Promise.reject(new AuthError(body)));
} else {
return response.json().then(function (body) {
// TODO: validate JSON
const token = body.refresh_token.token;
const identity = body.refresh_token.token_data.identity;
const isAdmin = body.refresh_token.token_data.is_admin;
return userConstructor.createUser(server, identity, token, false, isAdmin);
});
}
});
}
function _updateAccount(userConstructor, server, json) {
const url = append_url(server, 'auth/password/updateAccount');
const options = {
method: 'POST',
body: JSON.stringify(json),
headers: postHeaders,
open_timeout: 5000
};
return performFetch(url, options)
.then((response) => {
const contentType = response.headers.get('Content-Type');
if (contentType.indexOf('application/json') === -1) {
return response.text().then((body) => {
throw new AuthError({
title: `Could not update user account: Realm Object Server didn't respond with valid JSON`,
body,
});
});
}
if (!response.ok) {
return response.json().then((body) => Promise.reject(new AuthError(body)));
}
});
}
const credentialsMethods = {
usernamePassword(username, password, createUser) {
checkTypes(arguments, ['string', 'string', 'boolean']);
return new Credentials('password', username, { register: createUser, password });
},
facebook(token) {
checkTypes(arguments, ['string']);
return new Credentials('facebook', token);
},
google(token) {
checkTypes(arguments, ['string']);
return new Credentials('google', token);
},
anonymous() {
return new Credentials('anonymous');
},
nickname(value, isAdmin) {
checkTypes(arguments, ['string', 'boolean']);
return new Credentials('nickname', value, { is_admin: isAdmin || false });
},
azureAD(token) {
checkTypes(arguments, ['string']);
return new Credentials('azuread', token)
},
jwt(token, providerName) {
checkTypes(arguments, ['string', 'string']);
return new Credentials(providerName || 'jwt', token);
},
adminToken(token) {
checkTypes(arguments, ['string']);
return new Credentials('adminToken', token);
},
custom(providerName, token, userInfo) {
if (userInfo) {
checkTypes(arguments, ['string', 'string', 'object']);
} else {
checkTypes(arguments, ['string', 'string']);
}
return new Credentials(providerName, token, userInfo);
}
}
const staticMethods = {
get current() {
const allUsers = this.all;
const keys = Object.keys(allUsers);
if (keys.length === 0) {
return undefined;
} else if (keys.length > 1) {
throw new Error("Multiple users are logged in");
}
return allUsers[keys[0]];
},
login(server, credentials) {
if (arguments.length === 3) {
// Deprecated legacy signature.
checkTypes(arguments, ['string', 'string', 'string']);
console.warn("User.login is deprecated. Please use User.login(server, Credentials.usernamePassword(...)) instead.");
const newCredentials = credentialsMethods.usernamePassword(arguments[1], arguments[2], /* createUser */ false);
return this.login(server, newCredentials);
}
checkTypes(arguments, ['string', 'object']);
if (credentials.identityProvider === 'adminToken') {
let u = this._adminUser(server, credentials.token);
return u;
}
return _authenticate(this, server, credentials, 0);
},
deserialize(serialized) {
if (serialized.adminToken) {
checkObjectTypes(serialized, {
server: 'string',
adminToken: 'string',
});
return this._adminUser(serialized.server, serialized.adminToken);
}
checkObjectTypes(serialized, {
server: 'string',
identity: 'string',
refreshToken: 'string',
isAdmin: 'boolean',
});
return this.createUser(serialized.server, serialized.identity, serialized.refreshToken, false, serialized.isAdmin || false);
},
requestPasswordReset(server, email) {
checkTypes(arguments, ['string', 'string']);
const json = {
provider_id: email,
data: { action: 'reset_password' }
};
return _updateAccount(this, server, json);
},
completePasswordReset(server, resetToken, newPassword) {
checkTypes(arguments, ['string', 'string']);
const json = {
data: {
action: 'complete_reset',
token: resetToken,
new_password: newPassword
}
};
return _updateAccount(this, server, json);
},
requestEmailConfirmation(server, email) {
checkTypes(arguments, ['string', 'string']);
const json = {
provider_id: email,
data: { action: 'request_email_confirmation' }
};
return _updateAccount(this, server, json);
},
confirmEmail(server, confirmationToken) {
checkTypes(arguments, ['string', 'string']);
const json = {
data: {
action: 'confirm_email',
token: confirmationToken
}
};
return _updateAccount(this, server, json);
},
_refreshAccessToken: refreshAccessToken,
// Deprecated...
adminUser(token, server) {
checkTypes(arguments, ['string', 'string']);
console.warn("User.adminUser is deprecated. Please use User.login(server, Credentials.adminToken(token)) instead.");
const credentials = credentialsMethods.adminToken(token);
return this.login(server, credentials);
},
register(server, username, password) {
checkTypes(arguments, ['string', 'string', 'string']);
console.warn("User.register is deprecated. Please use User.login(server, Credentials.usernamePassword(...)) instead.");
const credentials = credentialsMethods.usernamePassword(username, password, /* createUser */ true);
return this.login(server, credentials);
},
registerWithProvider(server, options) {
checkTypes(arguments, ['string', 'object']);
console.warn("User.registerWithProvider is deprecated. Please use User.login(server, Credentials.SOME-PROVIDER(...)) instead.");
const credentials = credentialsMethods.custom(options.provider, options.providerToken, options.userInfo);
return this.login(server, credentials);
},
authenticate(server, provider, options) {
        checkTypes(arguments, ['string', 'string', 'object']);
console.warn("User.authenticate is deprecated. Please use User.login(server, Credentials.SOME-PROVIDER(...)) instead.");
let credentials;
switch (provider.toLowerCase()) {
case 'jwt':
credentials = credentialsMethods.jwt(options.token, 'jwt');
                break;
case 'password':
credentials = credentialsMethods.usernamePassword(options.username, options.password);
                break;
default:
credentials = credentialsMethods.custom(provider, options.data, options.user_info || options.userInfo);
break;
}
return this.login(server, credentials);
},
};
const instanceMethods = {
logout() {
this._logout();
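        // Cancel any scheduled access-token refreshes for this user before revoking the token.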
const userTimers = refreshTimers[this.identity];
if (userTimers) {
Object.keys(userTimers).forEach((key) => {
clearTimeout(userTimers[key]);
});
delete refreshTimers[this.identity];
}
const url = url_parse(this.server);
url.set('pathname', '/auth/revoke');
const headers = {
Authorization: this.token
};
const body = JSON.stringify({
token: this.token
});
const options = {
method: 'POST',
headers,
body: body,
open_timeout: 5000
};
return performFetch(url.href, options)
.catch((e) => print_error('An error occurred while logging out a user', e));
},
serialize() {
if (this.isAdminToken) {
return {
server: this.server,
adminToken: this.token,
}
}
return {
server: this.server,
refreshToken: this.token,
identity: this.identity,
isAdmin: this.isAdmin,
};
},
openManagementRealm() {
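        // The management realm lives on the auth server's host, reached over the realm(s) protocol at /~/__management.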
let url = url_parse(this.server);
if (url.protocol === 'http:') {
url.set('protocol', 'realm:');
} else if (url.protocol === 'https:') {
url.set('protocol', 'realms:');
} else {
throw new Error(`Unexpected user auth url: ${this.server}`);
}
url.set('pathname', '/~/__management');
return new this.constructor._realmConstructor({
schema: require('./management-schema'),
sync: {
user: this,
url: url.href
}
});
},
retrieveAccount(provider, provider_id) {
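        // Fetch account information for the given provider/provider_id from the auth server; non-200 responses are surfaced as AuthError.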
checkTypes(arguments, ['string', 'string']);
const url = url_parse(this.server);
url.set('pathname', `/auth/users/${provider}/${provider_id}`);
const headers = {
Authorization: this.token
};
const options = {
method: 'GET',
headers,
open_timeout: 5000
};
return performFetch(url.href, options)
.then((response) => {
if (response.status !== 200) {
return response.json()
.then(body => {
throw new AuthError(body);
});
} else {
return response.json();
}
});
},
createConfiguration(config) {
if (config && config.sync) {
if (config.sync.user && console.warn !== undefined) {
console.warn(`'user' property will be overridden by ${this.identity}`);
}
if (config.sync.partial !== undefined && config.sync.fullSynchronization !== undefined) {
throw new Error("'partial' and 'fullSynchronization' were both set. 'partial' has been deprecated, use only 'fullSynchronization'");
}
}
let defaultConfig = {
sync: {
user: this,
},
};
        // Set query-based sync as the default if the user hasn't specified any other behaviour.
if (!(config && config.sync && config.sync.partial)) {
defaultConfig.sync.fullSynchronization = false;
}
        // Merge the default configuration with the user-provided config. User-defined properties should always win.
        // Doing a naive merge in JS breaks objects that are backed by native objects, so these need to
        // be merged manually. This is currently only `sync.user`.
let mergedConfig = (config === undefined) ? defaultConfig : merge(defaultConfig, config);
mergedConfig.sync.user = this;
        // Parsing the URL requires extra handling as some forms of input (e.g. relative URLs) should not completely
        // override the default URL.
mergedConfig.sync.url = normalizeSyncUrl(this.server, (config && config.sync) ? config.sync.url : undefined);
return mergedConfig;
},
};
class Credentials {
constructor(identityProvider, token, userInfo) {
this.identityProvider = identityProvider;
this.token = token;
this.userInfo = userInfo || {};
}
toJSON() {
return {
data: this.token,
provider: this.identityProvider,
user_info: this.userInfo,
};
}
}
// Append the permission apis
Object.assign(instanceMethods, permissionApis);
module.exports = {
static: staticMethods,
instance: instanceMethods,
credentials: credentialsMethods,
};
| 1 | 17,610 | It's not directly related to this PR, but it would be nice to flow the original error as we're going to print that if the promise is rejected and it'll be more informative than the vague `Network request failed`. Also, I'm not a js dev, so maybe I'm wrong, but throwing a TypeError feels a bit odd. | realm-realm-js | js |
@@ -107,8 +107,16 @@ noreturn static void exec_xwayland(struct wlr_xwayland_server *server) {
dup2(devnull, STDERR_FILENO);
}
+ const char *xwayland_path = getenv("WLR_XWAYLAND");
+ if (xwayland_path) {
+		wlr_log(WLR_INFO, "Using Xwayland binary '%s' due to WLR_XWAYLAND",
+ xwayland_path);
+ } else {
+ xwayland_path = "Xwayland";
+ }
+
// This returns if and only if the call fails
- execvp("Xwayland", argv);
+ execvp(xwayland_path, argv);
wlr_log_errno(WLR_ERROR, "failed to exec Xwayland");
close(devnull); | 1 | #define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdnoreturn.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <wayland-server-core.h>
#include <wlr/util/log.h>
#include <wlr/xwayland.h>
#include "sockets.h"
#include "util/signal.h"
static void safe_close(int fd) {
if (fd >= 0) {
close(fd);
}
}
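/* Fill the next unused (NULL) slot of the argv template with a heap-allocated,
 * printf-formatted string. On return *argv points at the slot that was filled;
 * returns the formatted length, or -1 if allocation failed. */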
static int fill_arg(char ***argv, const char *fmt, ...) {
int len;
char **cur_arg = *argv;
va_list args;
va_start(args, fmt);
len = vsnprintf(NULL, 0, fmt, args) + 1;
va_end(args);
while (*cur_arg) {
cur_arg++;
}
*cur_arg = malloc(len);
if (!*cur_arg) {
return -1;
}
*argv = cur_arg;
va_start(args, fmt);
len = vsnprintf(*cur_arg, len, fmt, args);
va_end(args);
return len;
}
noreturn static void exec_xwayland(struct wlr_xwayland_server *server) {
if (!set_cloexec(server->x_fd[0], false) ||
!set_cloexec(server->x_fd[1], false) ||
!set_cloexec(server->wl_fd[1], false)) {
wlr_log(WLR_ERROR, "Failed to unset CLOEXEC on FD");
_exit(EXIT_FAILURE);
}
if (server->enable_wm && !set_cloexec(server->wm_fd[1], false)) {
wlr_log(WLR_ERROR, "Failed to unset CLOEXEC on FD");
_exit(EXIT_FAILURE);
}
/* Make Xwayland signal us when it's ready */
signal(SIGUSR1, SIG_IGN);
char *argv[] = {
"Xwayland", NULL /* display, e.g. :1 */,
"-rootless", "-terminate",
"-listen", NULL /* x_fd[0] */,
"-listen", NULL /* x_fd[1] */,
"-wm", NULL /* wm_fd[1] */,
NULL,
};
char **cur_arg = argv;
if (fill_arg(&cur_arg, ":%d", server->display) < 0 ||
fill_arg(&cur_arg, "%d", server->x_fd[0]) < 0 ||
fill_arg(&cur_arg, "%d", server->x_fd[1]) < 0) {
wlr_log_errno(WLR_ERROR, "alloc/print failure");
_exit(EXIT_FAILURE);
}
if (server->enable_wm) {
if (fill_arg(&cur_arg, "%d", server->wm_fd[1]) < 0) {
wlr_log_errno(WLR_ERROR, "alloc/print failure");
_exit(EXIT_FAILURE);
}
} else {
cur_arg++;
*cur_arg = NULL;
}
char wayland_socket_str[16];
snprintf(wayland_socket_str, sizeof(wayland_socket_str), "%d", server->wl_fd[1]);
setenv("WAYLAND_SOCKET", wayland_socket_str, true);
wlr_log(WLR_INFO, "WAYLAND_SOCKET=%d Xwayland :%d -rootless -terminate -listen %d -listen %d -wm %d",
server->wl_fd[1], server->display, server->x_fd[0],
server->x_fd[1], server->wm_fd[1]);
// Closes stdout/stderr depending on log verbosity
enum wlr_log_importance verbosity = wlr_log_get_verbosity();
int devnull = open("/dev/null", O_WRONLY | O_CREAT | O_CLOEXEC, 0666);
if (devnull < 0) {
wlr_log_errno(WLR_ERROR, "XWayland: failed to open /dev/null");
_exit(EXIT_FAILURE);
}
if (verbosity < WLR_INFO) {
dup2(devnull, STDOUT_FILENO);
}
if (verbosity < WLR_ERROR) {
dup2(devnull, STDERR_FILENO);
}
// This returns if and only if the call fails
execvp("Xwayland", argv);
wlr_log_errno(WLR_ERROR, "failed to exec Xwayland");
close(devnull);
_exit(EXIT_FAILURE);
}
static void server_finish_process(struct wlr_xwayland_server *server) {
if (!server || server->display == -1) {
return;
}
if (server->x_fd_read_event[0]) {
wl_event_source_remove(server->x_fd_read_event[0]);
wl_event_source_remove(server->x_fd_read_event[1]);
server->x_fd_read_event[0] = server->x_fd_read_event[1] = NULL;
}
if (server->client) {
wl_list_remove(&server->client_destroy.link);
wl_client_destroy(server->client);
}
if (server->sigusr1_source) {
wl_event_source_remove(server->sigusr1_source);
}
safe_close(server->wl_fd[0]);
safe_close(server->wl_fd[1]);
safe_close(server->wm_fd[0]);
safe_close(server->wm_fd[1]);
memset(server, 0, offsetof(struct wlr_xwayland_server, display));
server->wl_fd[0] = server->wl_fd[1] = -1;
server->wm_fd[0] = server->wm_fd[1] = -1;
	/* We do not kill the Xwayland process, it dies due to a broken pipe
* after we close our side of the wm/wl fds. This is more reliable
* than trying to kill something that might no longer be Xwayland.
*/
}
static void server_finish_display(struct wlr_xwayland_server *server) {
if (!server) {
return;
}
wl_list_remove(&server->display_destroy.link);
if (server->display == -1) {
return;
}
safe_close(server->x_fd[0]);
safe_close(server->x_fd[1]);
server->x_fd[0] = server->x_fd[1] = -1;
unlink_display_sockets(server->display);
server->display = -1;
server->display_name[0] = '\0';
}
static bool server_start(struct wlr_xwayland_server *server);
static bool server_start_lazy(struct wlr_xwayland_server *server);
static void handle_client_destroy(struct wl_listener *listener, void *data) {
struct wlr_xwayland_server *server =
wl_container_of(listener, server, client_destroy);
if (server->sigusr1_source) {
// Xwayland failed to start, let the sigusr1 handler deal with it
return;
}
// Don't call client destroy: it's being destroyed already
server->client = NULL;
wl_list_remove(&server->client_destroy.link);
server_finish_process(server);
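	/* Only restart if the previous instance ran for a while, to avoid
	 * respawning Xwayland in a tight loop when it exits immediately. */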
if (time(NULL) - server->server_start > 5) {
if (server->lazy) {
wlr_log(WLR_INFO, "Restarting Xwayland (lazy)");
server_start_lazy(server);
} else {
wlr_log(WLR_INFO, "Restarting Xwayland");
server_start(server);
}
}
}
static void handle_display_destroy(struct wl_listener *listener, void *data) {
struct wlr_xwayland_server *server =
wl_container_of(listener, server, display_destroy);
// Don't call client destroy: the display is being destroyed, it's too late
if (server->client) {
server->client = NULL;
wl_list_remove(&server->client_destroy.link);
}
wlr_xwayland_server_destroy(server);
}
static int xserver_handle_ready(int signal_number, void *data) {
struct wlr_xwayland_server *server = data;
int stat_val = -1;
while (waitpid(server->pid, &stat_val, 0) < 0) {
if (errno == EINTR) {
continue;
}
wlr_log_errno(WLR_ERROR, "waitpid for Xwayland fork failed");
goto error;
}
if (stat_val) {
wlr_log(WLR_ERROR, "Xwayland startup failed, not setting up xwm");
goto error;
}
wlr_log(WLR_DEBUG, "Xserver is ready");
wl_event_source_remove(server->sigusr1_source);
server->sigusr1_source = NULL;
struct wlr_xwayland_server_ready_event event = {
.server = server,
.wm_fd = server->wm_fd[0],
};
wlr_signal_emit_safe(&server->events.ready, &event);
return 1; /* wayland event loop dispatcher's count */
error:
/* clean up */
server_finish_process(server);
server_finish_display(server);
return 1;
}
static bool server_start_display(struct wlr_xwayland_server *server,
struct wl_display *wl_display) {
server->display_destroy.notify = handle_display_destroy;
wl_display_add_destroy_listener(wl_display, &server->display_destroy);
server->display = open_display_sockets(server->x_fd);
if (server->display < 0) {
server_finish_display(server);
return false;
}
snprintf(server->display_name, sizeof(server->display_name),
":%d", server->display);
return true;
}
static bool server_start(struct wlr_xwayland_server *server) {
if (socketpair(AF_UNIX, SOCK_STREAM, 0, server->wl_fd) != 0) {
wlr_log_errno(WLR_ERROR, "socketpair failed");
server_finish_process(server);
return false;
}
if (!set_cloexec(server->wl_fd[0], true) ||
!set_cloexec(server->wl_fd[1], true)) {
wlr_log(WLR_ERROR, "Failed to set O_CLOEXEC on socket");
server_finish_process(server);
return false;
}
if (server->enable_wm) {
if (socketpair(AF_UNIX, SOCK_STREAM, 0, server->wm_fd) != 0) {
wlr_log_errno(WLR_ERROR, "socketpair failed");
server_finish_process(server);
return false;
}
if (!set_cloexec(server->wm_fd[0], true) ||
!set_cloexec(server->wm_fd[1], true)) {
wlr_log(WLR_ERROR, "Failed to set O_CLOEXEC on socket");
server_finish_process(server);
return false;
}
}
server->server_start = time(NULL);
server->client = wl_client_create(server->wl_display, server->wl_fd[0]);
if (!server->client) {
wlr_log_errno(WLR_ERROR, "wl_client_create failed");
server_finish_process(server);
return false;
}
server->wl_fd[0] = -1; /* not ours anymore */
server->client_destroy.notify = handle_client_destroy;
wl_client_add_destroy_listener(server->client, &server->client_destroy);
struct wl_event_loop *loop = wl_display_get_event_loop(server->wl_display);
server->sigusr1_source = wl_event_loop_add_signal(loop, SIGUSR1,
xserver_handle_ready, server);
server->pid = fork();
if (server->pid < 0) {
wlr_log_errno(WLR_ERROR, "fork failed");
server_finish_process(server);
return false;
} else if (server->pid == 0) {
/* Double-fork, but we need to forward SIGUSR1 once Xserver(1)
* is ready, or error if there was one. */
pid_t ppid = getppid();
sigset_t sigset;
sigemptyset(&sigset);
sigaddset(&sigset, SIGUSR1);
sigaddset(&sigset, SIGCHLD);
sigprocmask(SIG_BLOCK, &sigset, NULL);
pid_t pid = fork();
if (pid < 0) {
wlr_log_errno(WLR_ERROR, "second fork failed");
_exit(EXIT_FAILURE);
} else if (pid == 0) {
exec_xwayland(server);
}
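		/* Wait until Xwayland either signals readiness (SIGUSR1) or exits
		 * (SIGCHLD), then forward SIGUSR1 to the compositor; our exit status
		 * lets xserver_handle_ready() tell the two cases apart. */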
int sig;
sigwait(&sigset, &sig);
kill(ppid, SIGUSR1);
wlr_log(WLR_DEBUG, "sent SIGUSR1 to process %d", ppid);
if (sig == SIGCHLD) {
waitpid(pid, NULL, 0);
_exit(EXIT_FAILURE);
}
_exit(EXIT_SUCCESS);
}
/* close child fds */
	/* keep managing the X sockets for lazy start */
close(server->wl_fd[1]);
safe_close(server->wm_fd[1]);
server->wl_fd[1] = server->wm_fd[1] = -1;
return true;
}
static int xwayland_socket_connected(int fd, uint32_t mask, void *data) {
struct wlr_xwayland_server *server = data;
wl_event_source_remove(server->x_fd_read_event[0]);
wl_event_source_remove(server->x_fd_read_event[1]);
server->x_fd_read_event[0] = server->x_fd_read_event[1] = NULL;
server_start(server);
return 0;
}
static bool server_start_lazy(struct wlr_xwayland_server *server) {
struct wl_event_loop *loop = wl_display_get_event_loop(server->wl_display);
if (!(server->x_fd_read_event[0] = wl_event_loop_add_fd(loop, server->x_fd[0],
WL_EVENT_READABLE, xwayland_socket_connected, server))) {
return false;
}
if (!(server->x_fd_read_event[1] = wl_event_loop_add_fd(loop, server->x_fd[1],
WL_EVENT_READABLE, xwayland_socket_connected, server))) {
wl_event_source_remove(server->x_fd_read_event[0]);
server->x_fd_read_event[0] = NULL;
return false;
}
return true;
}
void wlr_xwayland_server_destroy(struct wlr_xwayland_server *server) {
if (!server) {
return;
}
server_finish_process(server);
server_finish_display(server);
wlr_signal_emit_safe(&server->events.destroy, NULL);
free(server);
}
struct wlr_xwayland_server *wlr_xwayland_server_create(
struct wl_display *wl_display,
struct wlr_xwayland_server_options *options) {
struct wlr_xwayland_server *server =
calloc(1, sizeof(struct wlr_xwayland_server));
if (!server) {
return NULL;
}
server->wl_display = wl_display;
server->lazy = options->lazy;
server->enable_wm = options->enable_wm;
server->x_fd[0] = server->x_fd[1] = -1;
server->wl_fd[0] = server->wl_fd[1] = -1;
server->wm_fd[0] = server->wm_fd[1] = -1;
wl_signal_init(&server->events.ready);
wl_signal_init(&server->events.destroy);
if (!server_start_display(server, wl_display)) {
goto error_alloc;
}
if (server->lazy) {
if (!server_start_lazy(server)) {
goto error_display;
}
} else {
if (!server_start(server)) {
goto error_display;
}
}
return server;
error_display:
server_finish_display(server);
error_alloc:
free(server);
return NULL;
}
| 1 | 16,078 | Can we print a debug message when this is used? Since it's a debugging variable, I wouldn't want users to have issues because of a left-over env. | swaywm-wlroots | c |
@@ -0,0 +1,13 @@
+// This object is imported into the documentation site. An example for the documentation site should be part of the pull request for the component. The object key is the kabob case of the "URL folder". In the case of `http://localhost:8080/components/app-launcher/`, `app-launcher` is the `key`. The folder name is created by `components.component` value in `package.json`. Keep in mind, some components like `forms/checkbox` will be changed to `forms-checkbox`. The following uses webpack's raw-loader plugin to get "text files" that will be eval()'d by CodeMirror within the documentation site on page load.
+
+/* eslint-env node */
+/* eslint-disable global-require */
+
+const siteStories = [
+ require('raw-loader!design-system-react/components/pill/__examples__/base.jsx'),
+ require('raw-loader!design-system-react/components/pill/__examples__/icon.jsx'),
+ require('raw-loader!design-system-react/components/pill/__examples__/container.jsx'),
+ require('raw-loader!design-system-react/components/pill/__examples__/listbox.jsx')
+];
+
+module.exports = siteStories; | 1 | 1 | 11,074 | Should we have an avatar, truncate, etc examples, too? | salesforce-design-system-react | js |
|
@@ -31,7 +31,6 @@ class _MissingPandasLikeIndex(object):
# Properties
nbytes = unsupported_property('nbytes')
- shape = unsupported_property('shape')
# Deprecated properties
strides = unsupported_property('strides', deprecated=True) | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from databricks.koalas.missing import _unsupported_function, _unsupported_property, common
def unsupported_function(method_name, deprecated=False, reason=""):
return _unsupported_function(class_name='pd.Index', method_name=method_name,
deprecated=deprecated, reason=reason)
def unsupported_property(property_name, deprecated=False, reason=""):
return _unsupported_property(class_name='pd.Index', property_name=property_name,
deprecated=deprecated, reason=reason)
class _MissingPandasLikeIndex(object):
# Properties
nbytes = unsupported_property('nbytes')
shape = unsupported_property('shape')
# Deprecated properties
strides = unsupported_property('strides', deprecated=True)
data = unsupported_property('data', deprecated=True)
itemsize = unsupported_property('itemsize', deprecated=True)
base = unsupported_property('base', deprecated=True)
flags = unsupported_property('flags', deprecated=True)
# Functions
append = unsupported_function('append')
argmax = unsupported_function('argmax')
argmin = unsupported_function('argmin')
argsort = unsupported_function('argsort')
asof = unsupported_function('asof')
asof_locs = unsupported_function('asof_locs')
delete = unsupported_function('delete')
difference = unsupported_function('difference')
drop = unsupported_function('drop')
drop_duplicates = unsupported_function('drop_duplicates')
droplevel = unsupported_function('droplevel')
duplicated = unsupported_function('duplicated')
equals = unsupported_function('equals')
factorize = unsupported_function('factorize')
fillna = unsupported_function('fillna')
format = unsupported_function('format')
get_indexer = unsupported_function('get_indexer')
get_indexer_for = unsupported_function('get_indexer_for')
get_indexer_non_unique = unsupported_function('get_indexer_non_unique')
get_level_values = unsupported_function('get_level_values')
get_loc = unsupported_function('get_loc')
get_slice_bound = unsupported_function('get_slice_bound')
get_value = unsupported_function('get_value')
groupby = unsupported_function('groupby')
holds_integer = unsupported_function('holds_integer')
identical = unsupported_function('identical')
insert = unsupported_function('insert')
intersection = unsupported_function('intersection')
is_ = unsupported_function('is_')
is_lexsorted_for_tuple = unsupported_function('is_lexsorted_for_tuple')
is_mixed = unsupported_function('is_mixed')
is_type_compatible = unsupported_function('is_type_compatible')
join = unsupported_function('join')
map = unsupported_function('map')
max = unsupported_function('max')
min = unsupported_function('min')
nunique = unsupported_function('nunique')
putmask = unsupported_function('putmask')
ravel = unsupported_function('ravel')
reindex = unsupported_function('reindex')
repeat = unsupported_function('repeat')
searchsorted = unsupported_function('searchsorted')
set_names = unsupported_function('set_names')
set_value = unsupported_function('set_value')
slice_indexer = unsupported_function('slice_indexer')
slice_locs = unsupported_function('slice_locs')
sort = unsupported_function('sort')
sort_values = unsupported_function('sort_values')
sortlevel = unsupported_function('sortlevel')
take = unsupported_function('take')
to_flat_index = unsupported_function('to_flat_index')
to_frame = unsupported_function('to_frame')
to_native_types = unsupported_function('to_native_types')
union = unsupported_function('union')
value_counts = unsupported_function('value_counts')
view = unsupported_function('view')
where = unsupported_function('where')
# Deprecated functions
get_duplicates = unsupported_function('get_duplicates', deprecated=True)
summary = unsupported_function('summary', deprecated=True)
get_values = unsupported_function('get_values', deprecated=True)
item = unsupported_function('item', deprecated=True)
contains = unsupported_function('contains', deprecated=True)
# Properties we won't support.
values = common.values(unsupported_property)
array = common.array(unsupported_property)
# Functions we won't support.
memory_usage = common.memory_usage(unsupported_function)
to_list = common.to_list(unsupported_function)
tolist = common.tolist(unsupported_function)
__iter__ = common.__iter__(unsupported_function)
class _MissingPandasLikeMultiIndex(object):
# Properties
is_all_dates = unsupported_property('is_all_dates')
levshape = unsupported_property('levshape')
shape = unsupported_property('shape')
# Deprecated properties
strides = unsupported_property('strides', deprecated=True)
data = unsupported_property('data', deprecated=True)
base = unsupported_property('base', deprecated=True)
itemsize = unsupported_property('itemsize', deprecated=True)
labels = unsupported_property('labels', deprecated=True)
flags = unsupported_property('flags', deprecated=True)
# Functions
append = unsupported_function('append')
argmax = unsupported_function('argmax')
argmin = unsupported_function('argmin')
argsort = unsupported_function('argsort')
asof = unsupported_function('asof')
asof_locs = unsupported_function('asof_locs')
delete = unsupported_function('delete')
difference = unsupported_function('difference')
drop = unsupported_function('drop')
drop_duplicates = unsupported_function('drop_duplicates')
droplevel = unsupported_function('droplevel')
duplicated = unsupported_function('duplicated')
equal_levels = unsupported_function('equal_levels')
equals = unsupported_function('equals')
factorize = unsupported_function('factorize')
fillna = unsupported_function('fillna')
format = unsupported_function('format')
get_indexer = unsupported_function('get_indexer')
get_indexer_for = unsupported_function('get_indexer_for')
get_indexer_non_unique = unsupported_function('get_indexer_non_unique')
get_level_values = unsupported_function('get_level_values')
get_loc = unsupported_function('get_loc')
get_loc_level = unsupported_function('get_loc_level')
get_locs = unsupported_function('get_locs')
get_slice_bound = unsupported_function('get_slice_bound')
get_value = unsupported_function('get_value')
groupby = unsupported_function('groupby')
holds_integer = unsupported_function('holds_integer')
identical = unsupported_function('identical')
insert = unsupported_function('insert')
intersection = unsupported_function('intersection')
is_ = unsupported_function('is_')
is_lexsorted = unsupported_function('is_lexsorted')
is_lexsorted_for_tuple = unsupported_function('is_lexsorted_for_tuple')
is_mixed = unsupported_function('is_mixed')
is_type_compatible = unsupported_function('is_type_compatible')
join = unsupported_function('join')
map = unsupported_function('map')
max = unsupported_function('max')
min = unsupported_function('min')
nunique = unsupported_function('nunique')
putmask = unsupported_function('putmask')
ravel = unsupported_function('ravel')
reindex = unsupported_function('reindex')
remove_unused_levels = unsupported_function('remove_unused_levels')
reorder_levels = unsupported_function('reorder_levels')
repeat = unsupported_function('repeat')
searchsorted = unsupported_function('searchsorted')
set_codes = unsupported_function('set_codes')
set_labels = unsupported_function('set_labels')
set_levels = unsupported_function('set_levels')
set_names = unsupported_function('set_names')
set_value = unsupported_function('set_value')
slice_indexer = unsupported_function('slice_indexer')
slice_locs = unsupported_function('slice_locs')
sort = unsupported_function('sort')
sort_values = unsupported_function('sort_values')
sortlevel = unsupported_function('sortlevel')
swaplevel = unsupported_function('swaplevel')
take = unsupported_function('take')
to_flat_index = unsupported_function('to_flat_index')
to_frame = unsupported_function('to_frame')
to_native_types = unsupported_function('to_native_types')
truncate = unsupported_function('truncate')
union = unsupported_function('union')
value_counts = unsupported_function('value_counts')
view = unsupported_function('view')
where = unsupported_function('where')
# Deprecated functions
get_duplicates = unsupported_function('get_duplicates', deprecated=True)
summary = unsupported_function('summary', deprecated=True)
to_hierarchical = unsupported_function('to_hierarchical', deprecated=True)
get_values = unsupported_function('get_values', deprecated=True)
contains = unsupported_function('contains', deprecated=True)
item = unsupported_function('item', deprecated=True)
# Functions we won't support.
values = common.values(unsupported_property)
array = common.array(unsupported_property)
codes = unsupported_property(
'codes',
reason="'codes' requires to collect all data into the driver which is against the "
"design principle of Koalas. Alternatively, you could call 'to_pandas()' and"
" use 'codes' property in pandas.")
levels = unsupported_property(
'levels',
reason="'levels' requires to collect all data into the driver which is against the "
"design principle of Koalas. Alternatively, you could call 'to_pandas()' and"
" use 'levels' property in pandas.")
__iter__ = common.__iter__(unsupported_function)
# Properties we won't support.
memory_usage = common.memory_usage(unsupported_function)
to_list = common.to_list(unsupported_function)
tolist = common.tolist(unsupported_function)
| 1 | 13,254 | could you add this to `docs/source/reference/indexing.rst` ? | databricks-koalas | py |
@@ -3,10 +3,10 @@
# Copyright (C) 2003 Rational Discovery LLC
# All Rights Reserved
#
-import sys
+
+import six
from rdkit.VLib.Node import VLibNode
-from rdkit import six
class FilterNode(VLibNode): | 1 | # $Id$
#
# Copyright (C) 2003 Rational Discovery LLC
# All Rights Reserved
#
import sys
from rdkit.VLib.Node import VLibNode
from rdkit import six
class FilterNode(VLibNode):
""" base class for nodes which filter their input
Assumptions:
- filter function takes a number of arguments equal to the
number of inputs we have. It returns a bool
- inputs (parents) can be stepped through in lockstep
- we return a tuple if there's more than one input
Usage Example:
>>> from rdkit.VLib.Supply import SupplyNode
>>> def func(a,b):
... return a+b < 5
>>> filt = FilterNode(func=func)
>>> suppl1 = SupplyNode(contents=[1,2,3,3])
>>> suppl2 = SupplyNode(contents=[1,2,3,1])
>>> filt.AddParent(suppl1)
>>> filt.AddParent(suppl2)
>>> v = [x for x in filt]
>>> v
[(1, 1), (2, 2), (3, 1)]
>>> filt.reset()
>>> v = [x for x in filt]
>>> v
[(1, 1), (2, 2), (3, 1)]
>>> filt.Destroy()
Negation is also possible:
>>> filt = FilterNode(func=func,negate=1)
>>> suppl1 = SupplyNode(contents=[1,2,3,3])
>>> suppl2 = SupplyNode(contents=[1,2,3,1])
>>> filt.AddParent(suppl1)
>>> filt.AddParent(suppl2)
>>> v = [x for x in filt]
>>> v
[(3, 3)]
>>> filt.Destroy()
With no function, just return the inputs:
>>> filt = FilterNode()
>>> suppl1 = SupplyNode(contents=[1,2,3,3])
>>> filt.AddParent(suppl1)
>>> v = [x for x in filt]
>>> v
[1, 2, 3, 3]
>>> filt.Destroy()
"""
def __init__(self, func=None, negate=0, **kwargs):
VLibNode.__init__(self, **kwargs)
self._func = func
self._negate = negate
def SetNegate(self, state):
self._negate = state
def Negate(self):
return self._negate
def next(self):
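    # Pull one value from each parent in lockstep; keep pulling until the
    # (optionally negated) filter function accepts the tuple of values.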
done = 0
parents = self.GetParents()
while 1:
args = []
try:
for parent in parents:
args.append(next(parent))
except StopIteration:
raise StopIteration
args = tuple(args)
if self._func is not None:
r = self._func(*args)
if self._negate:
r = not r
#sys.stderr.write('\t\tNEGATE -> %d\n'%(r))
if r:
res = args
break
else:
res = args
break
if len(parents) == 1:
res = res[0]
return res
if six.PY3:
FilterNode.__next__ = FilterNode.next
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest, sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed, tried = _test()
sys.exit(failed)
| 1 | 15,846 | Why the change from rdkit.six to six? | rdkit-rdkit | cpp |
@@ -78,6 +78,12 @@ def renderView(request):
# Now we prepare the requested data
if requestOptions['graphType'] == 'pie':
+ targets = [target for target in requestOptions['targets'] if target.find(':') < 0]
+ if settings.REMOTE_PREFETCH_DATA and not requestOptions.get('localOnly'):
+ log.rendering("Prefetching remote data")
+ pathExpressions = extractPathExpressions(targets)
+ prefetchRemoteData(STORE.remote_stores, requestContext, pathExpressions)
+
for target in requestOptions['targets']:
if target.find(':') >= 0:
try: | 1 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import csv
import math
import pytz
import httplib
from datetime import datetime
from time import time
from random import shuffle
from urllib import urlencode
from urlparse import urlsplit, urlunsplit
from cgi import parse_qs
from cStringIO import StringIO
try:
import cPickle as pickle
except ImportError:
import pickle
from graphite.compat import HttpResponse
from graphite.user_util import getProfileByUsername
from graphite.util import json, unpickle
from graphite.remote_storage import extractForwardHeaders, prefetchRemoteData
from graphite.storage import STORE
from graphite.logger import log
from graphite.render.evaluator import evaluateTarget, extractPathExpressions
from graphite.render.attime import parseATTime
from graphite.render.functions import PieFunctions
from graphite.render.hashing import hashRequest, hashData
from graphite.render.glyph import GraphTypes
from django.http import HttpResponseServerError, HttpResponseRedirect
from django.template import Context, loader
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.utils.cache import add_never_cache_headers, patch_response_headers
def renderView(request):
start = time()
(graphOptions, requestOptions) = parseOptions(request)
useCache = 'noCache' not in requestOptions
cacheTimeout = requestOptions['cacheTimeout']
requestContext = {
'startTime' : requestOptions['startTime'],
'endTime' : requestOptions['endTime'],
'now': requestOptions['now'],
'localOnly' : requestOptions['localOnly'],
'template' : requestOptions['template'],
'tzinfo' : requestOptions['tzinfo'],
'forwardHeaders': extractForwardHeaders(request),
'data' : []
}
data = requestContext['data']
# First we check the request cache
if useCache:
requestKey = hashRequest(request)
cachedResponse = cache.get(requestKey)
if cachedResponse:
log.cache('Request-Cache hit [%s]' % requestKey)
log.rendering('Returned cached response in %.6f' % (time() - start))
return cachedResponse
else:
log.cache('Request-Cache miss [%s]' % requestKey)
# Now we prepare the requested data
if requestOptions['graphType'] == 'pie':
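    # Pie targets of the form "name:value" are static slices; anything else is
    # evaluated as a normal target expression and reduced to one value per series.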
for target in requestOptions['targets']:
if target.find(':') >= 0:
try:
name,value = target.split(':',1)
value = float(value)
except:
raise ValueError("Invalid target '%s'" % target)
data.append( (name,value) )
else:
seriesList = evaluateTarget(requestContext, target)
for series in seriesList:
func = PieFunctions[requestOptions['pieMode']]
data.append( (series.name, func(requestContext, series) or 0 ))
elif requestOptions['graphType'] == 'line':
# Let's see if at least our data is cached
if useCache:
targets = requestOptions['targets']
startTime = requestOptions['startTime']
endTime = requestOptions['endTime']
dataKey = hashData(targets, startTime, endTime)
cachedData = cache.get(dataKey)
if cachedData:
log.cache("Data-Cache hit [%s]" % dataKey)
else:
log.cache("Data-Cache miss [%s]" % dataKey)
else:
cachedData = None
if cachedData is not None:
requestContext['data'] = data = cachedData
else: # Have to actually retrieve the data now
targets = requestOptions['targets']
if settings.REMOTE_PREFETCH_DATA and not requestOptions.get('localOnly'):
log.rendering("Prefetching remote data")
pathExpressions = extractPathExpressions(targets)
prefetchRemoteData(STORE.remote_stores, requestContext, pathExpressions)
for target in targets:
if not target.strip():
continue
t = time()
seriesList = evaluateTarget(requestContext, target)
log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
data.extend(seriesList)
if useCache:
cache.add(dataKey, data, cacheTimeout)
# If data is all we needed, we're done
format = requestOptions.get('format')
if format == 'csv':
response = HttpResponse(content_type='text/csv')
writer = csv.writer(response, dialect='excel')
for series in data:
for i, value in enumerate(series):
timestamp = datetime.fromtimestamp(series.start + (i * series.step), requestOptions['tzinfo'])
writer.writerow((series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"), value))
return response
if format == 'json':
jsonStart = time()
series_data = []
if 'maxDataPoints' in requestOptions and any(data):
startTime = min([series.start for series in data])
endTime = max([series.end for series in data])
timeRange = endTime - startTime
maxDataPoints = requestOptions['maxDataPoints']
for series in data:
numberOfDataPoints = timeRange/series.step
if maxDataPoints < numberOfDataPoints:
valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
secondsPerPoint = int(valuesPerPoint * series.step)
# Nudge start over a little bit so that the consolidation bands align with each call
# removing 'jitter' seen when refreshing.
nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
series.start = series.start + nudge
valuesToLose = int(nudge/series.step)
for r in range(1, valuesToLose):
del series[0]
series.consolidate(valuesPerPoint)
timestamps = range(int(series.start), int(series.end) + 1, int(secondsPerPoint))
else:
timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
datapoints = zip(series, timestamps)
series_data.append(dict(target=series.name, datapoints=datapoints))
elif 'noNullPoints' in requestOptions and any(data):
for series in data:
values = []
for (index,v) in enumerate(series):
if v is not None:
timestamp = series.start + (index * series.step)
values.append((v,timestamp))
if len(values) > 0:
series_data.append(dict(target=series.name, datapoints=values))
else:
for series in data:
timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
datapoints = zip(series, timestamps)
series_data.append(dict(target=series.name, datapoints=datapoints))
output = json.dumps(series_data).replace('None,', 'null,').replace('NaN,', 'null,').replace('Infinity,', '1e9999,')
if 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], output),
content_type='text/javascript')
else:
response = HttpResponse(
content=output,
content_type='application/json')
if useCache:
cache.add(requestKey, response, cacheTimeout)
patch_response_headers(response, cache_timeout=cacheTimeout)
else:
add_never_cache_headers(response)
log.rendering('JSON rendering time %6f' % (time() - jsonStart))
log.rendering('Total request processing time %6f' % (time() - start))
return response
if format == 'dygraph':
labels = ['Time']
result = '{}'
if data:
datapoints = [[ts] for ts in range(data[0].start, data[0].end, data[0].step)]
for series in data:
labels.append(series.name)
for i, point in enumerate(series):
if point is None:
point = 'null'
elif point == float('inf'):
point = 'Infinity'
elif point == float('-inf'):
point = '-Infinity'
elif math.isnan(point):
point = 'null'
datapoints[i].append(point)
line_template = '[%%s000%s]' % ''.join([', %s'] * len(data))
lines = [line_template % tuple(points) for points in datapoints]
result = '{"labels" : %s, "data" : [%s]}' % (json.dumps(labels), ', '.join(lines))
response = HttpResponse(content=result, content_type='application/json')
if useCache:
cache.add(requestKey, response, cacheTimeout)
patch_response_headers(response, cache_timeout=cacheTimeout)
else:
add_never_cache_headers(response)
log.rendering('Total dygraph rendering time %.6f' % (time() - start))
return response
if format == 'rickshaw':
series_data = []
for series in data:
timestamps = range(series.start, series.end, series.step)
datapoints = [{'x' : x, 'y' : y} for x, y in zip(timestamps, series)]
series_data.append( dict(target=series.name, datapoints=datapoints) )
if 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
mimetype='text/javascript')
else:
response = HttpResponse(content=json.dumps(series_data),
content_type='application/json')
if useCache:
cache.add(requestKey, response, cacheTimeout)
patch_response_headers(response, cache_timeout=cacheTimeout)
else:
add_never_cache_headers(response)
log.rendering('Total rickshaw rendering time %.6f' % (time() - start))
return response
if format == 'raw':
response = HttpResponse(content_type='text/plain')
for series in data:
response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
response.write( ','.join(map(repr,series)) )
response.write('\n')
log.rendering('Total rawData rendering time %.6f' % (time() - start))
return response
if format == 'svg':
graphOptions['outputFormat'] = 'svg'
elif format == 'pdf':
graphOptions['outputFormat'] = 'pdf'
if format == 'pickle':
response = HttpResponse(content_type='application/pickle')
seriesInfo = [series.getInfo() for series in data]
pickle.dump(seriesInfo, response, protocol=-1)
log.rendering('Total pickle rendering time %.6f' % (time() - start))
return response
# We've got the data, now to render it
graphOptions['data'] = data
if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
image = delegateRendering(requestOptions['graphType'], graphOptions, requestContext['forwardHeaders'])
else:
image = doImageRender(requestOptions['graphClass'], graphOptions)
useSVG = graphOptions.get('outputFormat') == 'svg'
if useSVG and 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
content_type='text/javascript')
elif graphOptions.get('outputFormat') == 'pdf':
response = buildResponse(image, 'application/x-pdf')
else:
response = buildResponse(image, 'image/svg+xml' if useSVG else 'image/png')
if useCache:
cache.add(requestKey, response, cacheTimeout)
patch_response_headers(response, cache_timeout=cacheTimeout)
else:
add_never_cache_headers(response)
log.rendering('Total rendering time %.6f seconds' % (time() - start))
return response
def parseOptions(request):
queryParams = request.GET.copy()
queryParams.update(request.POST)
# Start with some defaults
graphOptions = {'width' : 330, 'height' : 250}
requestOptions = {}
graphType = queryParams.get('graphType','line')
assert graphType in GraphTypes, "Invalid graphType '%s', must be one of %s" % (graphType,GraphTypes.keys())
graphClass = GraphTypes[graphType]
# Fill in the requestOptions
requestOptions['graphType'] = graphType
requestOptions['graphClass'] = graphClass
requestOptions['pieMode'] = queryParams.get('pieMode', 'average')
cacheTimeout = int( queryParams.get('cacheTimeout', settings.DEFAULT_CACHE_DURATION) )
requestOptions['targets'] = []
# Extract the targets out of the queryParams
mytargets = []
# Normal format: ?target=path.1&target=path.2
if len(queryParams.getlist('target')) > 0:
mytargets = queryParams.getlist('target')
# Rails/PHP/jQuery common practice format: ?target[]=path.1&target[]=path.2
elif len(queryParams.getlist('target[]')) > 0:
mytargets = queryParams.getlist('target[]')
# Collect the targets
for target in mytargets:
requestOptions['targets'].append(target)
template = dict()
for key, val in queryParams.items():
if key.startswith("template["):
template[key[9:-1]] = val
requestOptions['template'] = template
if 'pickle' in queryParams:
requestOptions['format'] = 'pickle'
if 'rawData' in queryParams:
requestOptions['format'] = 'raw'
if 'format' in queryParams:
requestOptions['format'] = queryParams['format']
if 'jsonp' in queryParams:
requestOptions['jsonp'] = queryParams['jsonp']
if 'noCache' in queryParams:
requestOptions['noCache'] = True
if 'maxDataPoints' in queryParams and queryParams['maxDataPoints'].isdigit():
requestOptions['maxDataPoints'] = int(queryParams['maxDataPoints'])
if 'noNullPoints' in queryParams:
requestOptions['noNullPoints'] = True
requestOptions['localOnly'] = queryParams.get('local') == '1'
# Fill in the graphOptions
for opt in graphClass.customizable:
if opt in queryParams:
val = queryParams[opt]
if (val.isdigit() or (val.startswith('-') and val[1:].isdigit())) and 'color' not in opt.lower():
val = int(val)
elif '.' in val and (val.replace('.','',1).isdigit() or (val.startswith('-') and val[1:].replace('.','',1).isdigit())):
val = float(val)
elif val.lower() in ('true','false'):
val = val.lower() == 'true'
elif val.lower() == 'default' or val == '':
continue
graphOptions[opt] = val
tzinfo = pytz.timezone(settings.TIME_ZONE)
if 'tz' in queryParams:
try:
tzinfo = pytz.timezone(queryParams['tz'])
except pytz.UnknownTimeZoneError:
pass
requestOptions['tzinfo'] = tzinfo
# Get the time interval for time-oriented graph types
if graphType == 'line' or graphType == 'pie':
if 'now' in queryParams:
now = parseATTime(queryParams['now'])
else:
now = datetime.now(tzinfo)
if 'until' in queryParams:
untilTime = parseATTime(queryParams['until'], tzinfo, now)
else:
untilTime = now
if 'from' in queryParams:
fromTime = parseATTime(queryParams['from'], tzinfo, now)
else:
fromTime = parseATTime('-1d', tzinfo, now)
startTime = min(fromTime, untilTime)
endTime = max(fromTime, untilTime)
assert startTime != endTime, "Invalid empty time range"
requestOptions['startTime'] = startTime
requestOptions['endTime'] = endTime
timeRange = endTime - startTime
queryTime = timeRange.days * 86400 + timeRange.seconds # convert the time delta to seconds
if settings.DEFAULT_CACHE_POLICY and not queryParams.get('cacheTimeout'):
timeouts = [timeout for period,timeout in settings.DEFAULT_CACHE_POLICY if period <= queryTime]
cacheTimeout = max(timeouts or (0,))
requestOptions['now'] = now
if cacheTimeout == 0:
requestOptions['noCache'] = True
requestOptions['cacheTimeout'] = cacheTimeout
return (graphOptions, requestOptions)
connectionPools = {}
def connector_class_selector(https_support=False):
return httplib.HTTPSConnection if https_support else httplib.HTTPConnection
def delegateRendering(graphType, graphOptions, headers=None):
if headers is None:
headers = {}
start = time()
postData = graphType + '\n' + pickle.dumps(graphOptions)
servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
shuffle(servers)
connector_class = connector_class_selector(settings.INTRACLUSTER_HTTPS)
for server in servers:
start2 = time()
try:
# Get a connection
try:
pool = connectionPools[server]
except KeyError: #happens the first time
pool = connectionPools[server] = set()
try:
connection = pool.pop()
except KeyError: #No available connections, have to make a new one
connection = connector_class(server)
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
# Send the request
try:
connection.request('POST','/render/local/', postData, headers)
except httplib.CannotSendRequest:
connection = connector_class(server) #retry once
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
connection.request('POST', '/render/local/', postData, headers)
# Read the response
try: # Python 2.7+, use buffering of HTTP responses
response = connection.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
response = connection.getresponse()
assert response.status == 200, "Bad response code %d from %s" % (response.status,server)
contentType = response.getheader('Content-Type')
imageData = response.read()
assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
assert imageData, "Received empty response from %s" % server
# Wrap things up
log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
pool.add(connection)
return imageData
except:
log.exception("Exception while attempting remote rendering request on %s" % server)
log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
continue
def renderLocalView(request):
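  # Remote-rendering endpoint used by delegateRendering(): the POST body is the
  # graph type on the first line followed by the pickled graph options.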
try:
start = time()
reqParams = StringIO(request.body)
graphType = reqParams.readline().strip()
optionsPickle = reqParams.read()
reqParams.close()
graphClass = GraphTypes[graphType]
options = unpickle.loads(optionsPickle)
image = doImageRender(graphClass, options)
log.rendering("Delegated rendering request took %.6f seconds" % (time() - start))
response = buildResponse(image)
add_never_cache_headers(response)
return response
except:
log.exception("Exception in graphite.render.views.rawrender")
return HttpResponseServerError()
def renderMyGraphView(request,username,graphName):
profile = getProfileByUsername(username)
if not profile:
return errorPage("No such user '%s'" % username)
try:
graph = profile.mygraph_set.get(name=graphName)
except ObjectDoesNotExist:
return errorPage("User %s doesn't have a MyGraph named '%s'" % (username,graphName))
request_params = request.GET.copy()
request_params.update(request.POST)
if request_params:
url_parts = urlsplit(graph.url)
query_string = url_parts[3]
if query_string:
url_params = parse_qs(query_string)
# Remove lists so that we can do an update() on the dict
for param, value in url_params.items():
if isinstance(value, list) and param != 'target':
url_params[param] = value[-1]
url_params.update(request_params)
# Handle 'target' being a list - we want duplicate &target params out of it
url_param_pairs = []
for key,val in url_params.items():
if isinstance(val, list):
for v in val:
url_param_pairs.append( (key,v) )
else:
url_param_pairs.append( (key,val) )
query_string = urlencode(url_param_pairs)
url = urlunsplit(url_parts[:3] + (query_string,) + url_parts[4:])
else:
url = graph.url
return HttpResponseRedirect(url)
def doImageRender(graphClass, graphOptions):
pngData = StringIO()
t = time()
img = graphClass(**graphOptions)
img.output(pngData)
log.rendering('Rendered PNG in %.6f seconds' % (time() - t))
imageData = pngData.getvalue()
pngData.close()
return imageData
def buildResponse(imageData, content_type="image/png"):
return HttpResponse(imageData, content_type=content_type)
def errorPage(message):
template = loader.get_template('500.html')
context = Context(dict(message=message))
return HttpResponseServerError( template.render(context) )
| 1 | 11,687 | Looking good, let's just move this inside the `if` to avoid doing the extra work if we don't need it. | graphite-project-graphite-web | py |
@@ -1892,7 +1892,7 @@ describe('Bulk', function () {
expect(result).to.exist;
bulk.execute(err => {
- expect(err).to.match(/Batch cannot be re-executed/);
+ expect(err).to.match(/This batch has already been executed, create new batch to execute/);
done();
});
}); | 1 | 'use strict';
const {
withClient,
withClientV2,
withMonitoredClient,
setupDatabase,
ignoreNsNotFound
} = require('./shared');
const test = require('./shared').assert;
const { MongoDriverError } = require('../../src/error');
const { Long } = require('../../src');
const crypto = require('crypto');
const chai = require('chai');
const expect = chai.expect;
chai.use(require('chai-subset'));
const MAX_BSON_SIZE = 16777216;
describe('Bulk', function () {
before(function () {
return setupDatabase(this.configuration);
});
it('should correctly handle ordered single batch api write command error', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_ordered_ops_10');
      // Add unique index on field `a` causing all updates to fail
col.createIndex({ a: 1 }, { unique: true, sparse: false }, function (err) {
expect(err).to.not.exist;
var batch = col.initializeOrderedBulkOp();
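        // The upsert below collides with the first insert on the unique 'a' index;
        // since the batch is ordered, execution stops there and the last insert never runs.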
batch.insert({ b: 1, a: 1 });
batch
.find({ b: 2 })
.upsert()
.updateOne({ $set: { a: 1 } });
batch.insert({ b: 3, a: 2 });
batch.execute(function (err, result) {
expect(err).to.exist;
expect(result).to.not.exist;
result = err.result;
// Basic properties check
test.equal(1, result.nInserted);
test.equal(true, result.hasWriteErrors());
test.equal(1, result.getWriteErrorCount());
// Get the write error
var error = result.getWriteErrorAt(0);
test.equal(11000, error.code);
test.ok(error.errmsg != null);
// Get the operation that caused the error
var op = error.getOperation();
test.equal(2, op.q.b);
test.equal(1, op.u['$set'].a);
expect(op.multi).to.not.be.true;
test.equal(true, op.upsert);
// Get the first error
error = result.getWriteErrorAt(1);
expect(error).to.not.exist;
// Finish up test
client.close(done);
});
});
});
}
});
it('should use arrayFilters for updateMany', {
metadata: { requires: { mongodb: '>=3.6.x' } },
test: function (done) {
const configuration = this.configuration;
const client = configuration.newClient({ w: 1 });
client.connect((err, client) => {
const db = client.db(configuration.db);
const collection = db.collection('arrayfilterstest');
const docs = [{ a: [{ x: 1 }, { x: 2 }] }, { a: [{ x: 3 }, { x: 4 }] }];
const close = e => client.close(() => done(e));
collection.insertMany(docs).then(() =>
collection.updateMany(
{},
{ $set: { 'a.$[i].x': 5 } },
{ arrayFilters: [{ 'i.x': 5 }] },
(err, data) => {
expect(err).to.not.exist;
expect(data.matchedCount).to.equal(2);
close(err);
}
)
);
});
}
});
it('should ignore undefined values in unordered bulk operation if `ignoreUndefined` specified', {
metadata: {
requires: { topology: ['single'] }
},
test: function () {
const client = this.configuration.newClient(this.configuration.writeConcernMax(), {
maxPoolSize: 1
});
return client
.connect()
.then(client => {
const db = client.db(this.configuration.db);
const col = db.collection('batch_write_unordered_ops_1');
return col
.initializeUnorderedBulkOp({ ignoreUndefined: true })
.insert({ a: 1, b: undefined })
.execute()
.then(() => col.find({}).toArray())
.then(docs => {
expect(docs[0]['a']).to.equal(1);
expect(docs[0]['b']).to.not.exist;
});
})
.then(() => client.close());
}
});
it('should ignore undefined values in ordered bulk operation if `ignoreUndefined` specified', {
metadata: {
requires: { topology: ['single'] }
},
test: function () {
var client = this.configuration.newClient(this.configuration.writeConcernMax(), {
maxPoolSize: 1
});
return client.connect().then(client => {
var db = client.db(this.configuration.db);
var col = db.collection('batch_write_ordered_ops_3');
return col
.initializeOrderedBulkOp({ ignoreUndefined: true })
.insert({ a: 1, b: undefined })
.execute()
.then(() => col.find({}).toArray())
.then(docs => {
expect(docs[0]['a']).to.equal(1);
expect(docs[0]['b']).to.not.exist;
})
.then(() => client.close());
});
}
});
it('should inherit promote long false from db during unordered bulk operation', function () {
const client = this.configuration.newClient(this.configuration.writeConcernMax(), {
promoteLongs: true
});
return withClient.call(this, client, (client, done) => {
const db = client.db('shouldInheritPromoteLongFalseFromDb1', { promoteLongs: false });
const coll = db.collection('test');
const batch = coll.initializeUnorderedBulkOp();
batch.insert({ a: Long.fromNumber(10) });
batch.execute((err, result) => {
expect(err).to.not.exist;
expect(result).to.exist;
coll.findOne((err, item) => {
expect(err).to.not.exist;
expect(item.a).to.not.be.a('number');
expect(item.a).to.have.property('_bsontype');
expect(item.a._bsontype).to.be.equal('Long');
done();
});
});
});
});
it(
'should inherit promote long false from collection during unordered bulk operation',
withClient(function (client, done) {
const db = client.db('shouldInheritPromoteLongFalseFromColl1', { promoteLongs: true });
const coll = db.collection('test', { promoteLongs: false });
const batch = coll.initializeUnorderedBulkOp();
batch.insert({ a: Long.fromNumber(10) });
batch.execute((err, result) => {
expect(err).to.not.exist;
expect(result).to.exist;
coll.findOne((err, item) => {
expect(err).to.not.exist;
expect(item.a).to.not.be.a('number');
expect(item.a).to.have.property('_bsontype');
expect(item.a._bsontype).to.be.equal('Long');
done();
});
});
})
);
it('should inherit promote long false from db during ordered bulk operation', function () {
const client = this.configuration.newClient(this.configuration.writeConcernMax(), {
promoteLongs: true
});
return withClient.call(this, client, (client, done) => {
const db = client.db('shouldInheritPromoteLongFalseFromDb2', { promoteLongs: false });
const coll = db.collection('test');
const batch = coll.initializeOrderedBulkOp();
batch.insert({ a: Long.fromNumber(10) });
batch.execute((err, result) => {
expect(err).to.not.exist;
expect(result).to.exist;
coll.findOne((err, item) => {
expect(err).to.not.exist;
expect(item.a).to.not.be.a('number');
expect(item.a).to.have.property('_bsontype');
expect(item.a._bsontype).to.be.equal('Long');
done();
});
});
});
});
it(
'should inherit promote long false from collection during ordered bulk operation',
withClient(function (client, done) {
const db = client.db('shouldInheritPromoteLongFalseFromColl2', { promoteLongs: true });
const coll = db.collection('test', { promoteLongs: false });
const batch = coll.initializeOrderedBulkOp();
batch.insert({ a: Long.fromNumber(10) });
batch.execute((err, result) => {
expect(err).to.not.exist;
expect(result).to.exist;
coll.findOne((err, item) => {
expect(err).to.not.exist;
expect(item.a).to.not.be.a('number');
expect(item.a).to.have.property('_bsontype');
expect(item.a._bsontype).to.be.equal('Long');
done();
});
});
})
);
it('should correctly handle ordered multiple batch api write command errors', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_ordered_ops_2');
// Add unique index on field `a` causing all updates to fail
col.createIndex({ a: 1 }, { unique: true, sparse: false }, function (err) {
expect(err).to.not.exist;
var batch = col.initializeOrderedBulkOp();
batch.insert({ b: 1, a: 1 });
batch
.find({ b: 2 })
.upsert()
.updateOne({ $set: { a: 1 } });
batch
.find({ b: 3 })
.upsert()
.updateOne({ $set: { a: 2 } });
batch
.find({ b: 2 })
.upsert()
.updateOne({ $set: { a: 1 } });
batch.insert({ b: 4, a: 3 });
batch.insert({ b: 5, a: 1 });
batch.execute(function (err, result) {
expect(err).to.exist;
expect(result).to.not.exist;
// Basic properties check
result = err.result;
test.equal(err instanceof Error, true);
test.equal(1, result.nInserted);
test.equal(true, result.hasWriteErrors());
test.ok(1, result.getWriteErrorCount());
// Individual error checking
var error = result.getWriteErrorAt(0);
test.equal(1, error.index);
test.equal(11000, error.code);
test.ok(error.errmsg != null);
test.equal(2, error.getOperation().q.b);
test.equal(1, error.getOperation().u['$set'].a);
expect(error.getOperation().multi).to.not.be.true;
test.equal(true, error.getOperation().upsert);
// Finish up test
client.close(done);
});
});
});
}
});
it('should fail due to ordered document being too big', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var coll = db.collection('batch_write_ordered_ops_3');
// Set up a giant string to blow through the max message size
var hugeString = '';
// Create it bigger than 16MB
for (var i = 0; i < 1024 * 1100; i++) {
hugeString = hugeString + '1234567890123456';
}
// Set up the batch
var batch = coll.initializeOrderedBulkOp();
batch.insert({ b: 1, a: 1 });
// should fail on insert due to string being too big
try {
batch.insert({ string: hugeString });
test.ok(false);
} catch (err) {} // eslint-disable-line
// Finish up test
client.close(done);
});
}
});
it('should correctly split up ordered messages into more batches', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var coll = db.collection('batch_write_ordered_ops_4');
// Set up a giant string to blow through the max message size
var hugeString = '';
// Create it bigger than 16MB
for (var i = 0; i < 1024 * 256; i++) {
hugeString = hugeString + '1234567890123456';
}
// Insert the string a couple of times, should force split into multiple batches
var batch = coll.initializeOrderedBulkOp();
batch.insert({ a: 1, b: hugeString });
batch.insert({ a: 2, b: hugeString });
batch.insert({ a: 3, b: hugeString });
batch.insert({ a: 4, b: hugeString });
batch.insert({ a: 5, b: hugeString });
batch.insert({ a: 6, b: hugeString });
// Execute the operations
batch.execute(function (err, result) {
// Basic properties check
test.equal(6, result.nInserted);
test.equal(false, result.hasWriteErrors());
// Finish up test
client.close(done);
});
});
}
});
it(
'should Correctly Execute Ordered Batch of Write Operations with duplicate key errors on updates',
{
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_ordered_ops_6');
// Add unique index on b field causing all updates to fail
col.createIndex({ b: 1 }, { unique: true, sparse: false }, function (err) {
expect(err).to.not.exist;
var batch = col.initializeOrderedBulkOp();
// Add some operations to be executed in order
batch.insert({ a: 1 });
batch.find({ a: 1 }).update({ $set: { b: 1 } });
batch.insert({ b: 1 });
// Execute the operations
batch.execute(function (err, result) {
expect(err).to.exist;
expect(result).to.not.exist;
// Test basic settings
result = err.result;
test.equal(1, result.nInserted);
test.equal(1, result.nMatched);
test.ok(1 === result.nModified || result.nModified == null);
test.equal(true, result.hasWriteErrors());
test.ok(1, result.getWriteErrorCount());
// Individual error checking
var error = result.getWriteErrorAt(0);
test.equal(2, error.index);
test.equal(11000, error.code);
test.ok(error.errmsg != null);
test.equal(1, error.getOperation().b);
client.close(done);
});
});
});
}
}
);
it(
'should Correctly Execute Ordered Batch of Write Operations with upserts causing duplicate key errors on updates',
{
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_ordered_ops_7');
// Add unique index on b field causing all updates to fail
col.createIndex({ b: 1 }, { unique: true, sparse: false }, function (err) {
expect(err).to.not.exist;
var batch = col.initializeOrderedBulkOp();
batch.insert({ a: 1 });
batch.find({ a: 1 }).update({ $set: { b: 1 } });
batch
.find({ a: 2 })
.upsert()
.update({ $set: { b: 2 } });
batch
.find({ a: 3 })
.upsert()
.update({ $set: { b: 3 } });
batch.insert({ b: 1 });
// Execute the operations
batch.execute(function (err, result) {
expect(err).to.exist;
expect(result).to.not.exist;
// Test basic settings
result = err.result;
test.equal(1, result.nInserted);
test.equal(2, result.nUpserted);
test.equal(1, result.nMatched);
test.ok(1 === result.nModified || result.nModified == null);
test.equal(true, result.hasWriteErrors());
test.ok(1, result.getWriteErrorCount());
// Individual error checking
var error = result.getWriteErrorAt(0);
test.equal(4, error.index);
test.equal(11000, error.code);
test.ok(error.errmsg != null);
test.equal(1, error.getOperation().b);
// Check for upserted values
var ids = result.getUpsertedIds();
test.equal(2, ids.length);
test.equal(2, ids[0].index);
test.ok(ids[0]._id != null);
test.equal(3, ids[1].index);
test.ok(ids[1]._id != null);
client.close(done);
});
});
});
}
}
);
it('should correctly perform ordered upsert with custom _id', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_ordered_ops_8');
var batch = col.initializeOrderedBulkOp();
// Add some operations to be executed in order
batch
.find({ _id: 2 })
.upsert()
.updateOne({ $set: { b: 2 } });
// Execute the operations
batch.execute(function (err, result) {
// Check state of result
test.equal(1, result.nUpserted);
test.equal(0, result.nInserted);
test.equal(0, result.nMatched);
test.ok(0 === result.nModified || result.nModified == null);
test.equal(0, result.nRemoved);
var upserts = result.getUpsertedIds();
test.equal(1, upserts.length);
test.equal(0, upserts[0].index);
test.equal(2, upserts[0]._id);
// Finish up test
client.close(done);
});
});
}
});
it('should return an error when no operations in ordered batch', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var self = this;
var client = self.configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_ordered_ops_8');
col.initializeOrderedBulkOp().execute(function (err) {
expect(err).to.be.instanceOf(MongoDriverError);
expect(err).to.have.property('message', 'Invalid BulkOperation, Batch cannot be empty');
client.close(done);
});
});
}
});
it('should correctly execute ordered batch using w:0', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_ordered_ops_9');
var bulk = col.initializeOrderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({ a: 1 });
}
bulk.find({ b: 1 }).upsert().update({ b: 1 });
bulk.find({ c: 1 }).delete();
bulk.execute({ writeConcern: { w: 0 } }, function (err, result) {
expect(err).to.not.exist;
test.equal(0, result.nUpserted);
test.equal(0, result.nInserted);
test.equal(0, result.nMatched);
test.ok(0 === result.nModified || result.nModified == null);
test.equal(0, result.nRemoved);
test.equal(false, result.hasWriteErrors());
client.close(done);
});
});
}
});
it('should correctly handle single unordered batch API', {
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_unordered_ops_legacy_1');
// Add unique index on a field causing the upsert to fail
col.createIndex({ a: 1 }, { unique: true, sparse: false }, function (err) {
expect(err).to.not.exist;
// Initialize the unordered Batch
var batch = col.initializeUnorderedBulkOp();
// Add some operations to be executed in order
batch.insert({ b: 1, a: 1 });
batch
.find({ b: 2 })
.upsert()
.updateOne({ $set: { a: 1 } });
batch.insert({ b: 3, a: 2 });
// Execute the operations
batch.execute(function (err, result) {
expect(err).to.exist;
expect(result).to.not.exist;
// Basic properties check
result = err.result;
test.equal(err instanceof Error, true);
test.equal(2, result.nInserted);
test.equal(0, result.nUpserted);
test.equal(0, result.nMatched);
test.ok(0 === result.nModified || result.nModified == null);
test.equal(true, result.hasWriteErrors());
test.equal(1, result.getWriteErrorCount());
// Get the first error
var error = result.getWriteErrorAt(0);
test.equal(11000, error.code);
test.ok(error.errmsg != null);
// Get the operation that caused the error
var op = error.getOperation();
test.equal(2, op.q.b);
test.equal(1, op.u['$set'].a);
expect(op.multi).to.not.be.true;
test.equal(true, op.upsert);
// Get the first error
error = result.getWriteErrorAt(1);
expect(error).to.not.exist;
// Finish up test
client.close(done);
});
});
});
}
});
it('should correctly handle multiple unordered batch API', function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect((err, client) => {
const db = client.db(configuration.db);
const col = db.collection('batch_write_unordered_ops_legacy_2');
// Add unique index on a field causing the second insert to fail
col.createIndex({ a: 1 }, { unique: true, sparse: false }, err => {
expect(err).to.not.exist;
// Initialize the unordered Batch
const batch = col.initializeUnorderedBulkOp({ useLegacyOps: true });
// Add some operations to be executed in order
batch.insert({ b: 1, a: 1 });
batch.insert({ b: 5, a: 1 });
// Execute the operations
batch.execute((err, result) => {
expect(err).to.exist;
expect(result).to.not.exist;
// Basic properties check
result = err.result;
expect(result.nInserted).to.equal(1);
expect(result.hasWriteErrors()).to.equal(true);
expect(result.getWriteErrorCount()).to.equal(1);
// Go over the error
const error = result.getWriteErrorAt(0);
expect(error.code).to.equal(11000);
expect(error.errmsg).to.exist;
expect(error.getOperation().b).to.equal(5);
expect(error.getOperation().a).to.equal(1);
// Finish up test
client.close(done);
});
});
});
});
it('should fail due to document being too big for unordered batch', {
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var coll = db.collection('batch_write_unordered_ops_legacy_3');
// Set up a giant string to blow through the max message size
var hugeString = '';
// Create it bigger than 16MB
for (var i = 0; i < 1024 * 1100; i++) {
hugeString = hugeString + '1234567890123456';
}
// Set up the batch
var batch = coll.initializeUnorderedBulkOp();
batch.insert({ b: 1, a: 1 });
// should fail on insert due to string being too big
try {
batch.insert({ string: hugeString });
test.ok(false);
} catch (err) {} // eslint-disable-line
// Finish up test
client.close(done);
});
}
});
it('should correctly split up messages into more batches for unordered batches', {
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var coll = db.collection('batch_write_unordered_ops_legacy_4');
// Set up a giant string to blow through the max message size
var hugeString = '';
// Create it bigger than 16MB
for (var i = 0; i < 1024 * 256; i++) {
hugeString = hugeString + '1234567890123456';
}
// Insert the string a couple of times, should force split into multiple batches
var batch = coll.initializeUnorderedBulkOp();
batch.insert({ a: 1, b: hugeString });
batch.insert({ a: 2, b: hugeString });
batch.insert({ a: 3, b: hugeString });
batch.insert({ a: 4, b: hugeString });
batch.insert({ a: 5, b: hugeString });
batch.insert({ a: 6, b: hugeString });
// Execute the operations
batch.execute(function (err, result) {
// Basic properties check
test.equal(6, result.nInserted);
test.equal(false, result.hasWriteErrors());
// Finish up test
client.close(done);
});
});
}
});
it('should Correctly Execute Unordered Batch with duplicate key errors on updates', {
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_unordered_ops_legacy_6');
// Write concern
var writeConcern = self.configuration.writeConcernMax();
writeConcern.unique = true;
writeConcern.sparse = false;
// Add unique index on b field causing all updates to fail
col.createIndex({ b: 1 }, writeConcern, function (err) {
expect(err).to.not.exist;
// Initialize the unordered Batch
var batch = col.initializeUnorderedBulkOp();
// Add some operations to be executed in order
batch.insert({ a: 1 });
batch.find({ a: 1 }).update({ $set: { b: 1 } });
batch.insert({ b: 1 });
batch.insert({ b: 1 });
batch.insert({ b: 1 });
batch.insert({ b: 1 });
// Execute the operations
batch.execute(self.configuration.writeConcernMax(), function (err, result) {
expect(err).to.exist;
expect(result).to.not.exist;
// Test basic settings
result = err.result;
test.equal(2, result.nInserted);
test.equal(true, result.hasWriteErrors());
test.ok(result.getWriteErrorCount() === 4 || result.getWriteErrorCount() === 3);
// Individual error checking
var error = result.getWriteErrorAt(0);
test.ok(error.code === 11000 || error.code === 11001);
test.ok(error.errmsg != null);
client.close(done);
});
});
});
}
});
it('should provide descriptive error message for unordered batch with duplicate key errors on inserts', function (done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect((err, client) => {
const db = client.db(configuration.db);
const col = db.collection('err_batch_write_unordered_ops_legacy_6');
// Add unique index on a field causing all inserts to fail
col.createIndexes(
[
{
name: 'err_batch_write_unordered_ops_legacy_6',
key: { a: 1 },
unique: true
}
],
err => {
expect(err).to.not.exist;
// Initialize the unordered Batch
const batch = col.initializeUnorderedBulkOp();
// Add some operations to be executed in order
batch.insert({ a: 1 });
batch.insert({ a: 1 });
// Execute the operations
batch.execute(configuration.writeConcernMax(), (err, result) => {
expect(err).to.exist;
expect(result).to.not.exist;
// Test basic settings
result = err.result;
expect(result.nInserted).to.equal(1);
expect(result.hasWriteErrors()).to.equal(true);
expect(result.getWriteErrorCount() === 1).to.equal(true);
// Individual error checking
const error = result.getWriteErrorAt(0);
expect(error.code === 11000).to.equal(true);
expect(error.errmsg).to.exist;
expect(err.message).to.equal(error.errmsg);
client.close(done);
});
}
);
});
});
it(
'should Correctly Execute Unordered Batch with upserts causing duplicate key errors on updates',
{
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_unordered_ops_legacy_7');
// Add unique index on b field causing all updates to fail
col.createIndex({ b: 1 }, { unique: true, sparse: false }, function (err) {
expect(err).to.not.exist;
// Initialize the unordered Batch
var batch = col.initializeUnorderedBulkOp();
// Add some operations to be executed in order
batch.insert({ a: 1 });
batch.find({ a: 1 }).update({ $set: { b: 1 } });
batch
.find({ a: 2 })
.upsert()
.update({ $set: { b: 2 } });
batch
.find({ a: 3 })
.upsert()
.update({ $set: { b: 3 } });
batch.find({ a: 1 }).update({ $set: { b: 1 } });
batch.insert({ b: 1 });
// Execute the operations
batch.execute(self.configuration.writeConcernMax(), function (err, result) {
expect(err).to.exist;
expect(result).to.not.exist;
// Test basic settings
result = err.result;
test.equal(2, result.nInserted);
test.equal(2, result.nUpserted);
test.ok(0 === result.nModified || result.nModified == null);
test.equal(0, result.nRemoved);
test.equal(true, result.hasWriteErrors());
test.ok(1, result.getWriteErrorCount());
// Individual error checking
var error = result.getWriteErrorAt(0);
test.ok(error.code === 11000 || error.code === 11001);
test.ok(error.errmsg != null);
test.equal(1, error.getOperation().u['$set'].b);
// Check for upserted values
var ids = result.getUpsertedIds();
test.equal(2, ids.length);
test.equal(2, ids[0].index);
test.ok(ids[0]._id != null);
test.equal(3, ids[1].index);
test.ok(ids[1]._id != null);
client.close(done);
});
});
});
}
}
);
it('should correctly perform unordered upsert with custom _id', {
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_unordered_ops_legacy_8');
var batch = col.initializeUnorderedBulkOp();
// Add some operations to be executed in order
batch
.find({ _id: 2 })
.upsert()
.updateOne({ $set: { b: 2 } });
// Execute the operations
batch.execute(self.configuration.writeConcernMax(), function (err, result) {
// Check state of result
test.equal(1, result.nUpserted);
test.equal(0, result.nInserted);
test.equal(0, result.nMatched);
test.ok(0 === result.nModified || result.nModified == null);
test.equal(0, result.nRemoved);
var upserts = result.getUpsertedIds();
test.equal(1, upserts.length);
test.equal(0, upserts[0].index);
test.equal(2, upserts[0]._id);
// Finish up test
client.close(done);
});
});
}
});
it('should prohibit batch finds with no selector', {
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
expect(err).to.not.exist;
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_unordered_ops_legacy_9');
var unorderedBatch = col.initializeUnorderedBulkOp();
var orderedBatch = col.initializeOrderedBulkOp();
try {
unorderedBatch.find();
test.ok(false);
} catch (e) {
expect(e).to.match(/Bulk find operation must specify a selector/);
}
try {
orderedBatch.find();
test.ok(false);
} catch (e) {
expect(e).to.match(/Bulk find operation must specify a selector/);
}
client.close(done);
});
}
});
it('should return an error when no operations in unordered batch', {
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
test: function (done) {
var self = this;
var client = self.configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_ordered_ops_8');
col
.initializeUnorderedBulkOp()
.execute(self.configuration.writeConcernMax(), function (err) {
expect(err).to.be.instanceOf(MongoDriverError);
expect(err).to.have.property('message', 'Invalid BulkOperation, Batch cannot be empty');
client.close(done);
});
});
}
});
it('should correctly execute unordered batch using w:0', {
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_ordered_ops_9');
var bulk = col.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
bulk.insert({ a: 1 });
}
bulk.find({ b: 1 }).upsert().update({ b: 1 });
bulk.find({ c: 1 }).delete();
bulk.execute({ writeConcern: { w: 0 } }, function (err, result) {
expect(err).to.not.exist;
test.equal(0, result.nUpserted);
test.equal(0, result.nInserted);
test.equal(0, result.nMatched);
test.ok(0 === result.nModified || result.nModified == null);
test.equal(0, result.nRemoved);
test.equal(false, result.hasWriteErrors());
client.close(done);
});
});
}
});
/*******************************************************************
*
* Ordered
*
*******************************************************************/
it('should provide an accessor for operations on ordered bulk ops', function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('bulk_get_operations_test');
var batch = col.initializeOrderedBulkOp();
batch.insert({ b: 1, a: 1 });
batch
.find({ b: 2 })
.upsert()
.updateOne({ $set: { a: 1 } });
batch.insert({ b: 3, a: 2 });
const batches = batch.batches;
expect(batches).to.have.lengthOf(3);
expect(batches[0].operations[0]).to.containSubset({ b: 1, a: 1 });
expect(batches[1].operations[0]).to.containSubset({
q: { b: 2 },
u: { $set: { a: 1 } },
upsert: true
});
expect(batches[2].operations[0]).to.containSubset({ b: 3, a: 2 });
client.close(done);
});
});
it('should fail with w:2 and wtimeout write concern due to single mongod instance ordered', {
metadata: { requires: { topology: 'single', mongodb: '>2.5.4' } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_concerns_ops_1');
var batch = col.initializeOrderedBulkOp();
batch.insert({ a: 1 });
batch.insert({ a: 2 });
batch.execute({ writeConcern: { w: 2, wtimeoutMS: 1000 } }, function (err) {
test.ok(err != null);
test.ok(err.code != null);
test.ok(err.errmsg != null);
client.close(done);
});
});
}
});
it('should correctly handle bulk operation split for ordered bulk operation', {
// Add a tag that our runner can trigger on
// in this case we require MongoDB 2.6.0 or higher on a single-server topology
metadata: {
requires: {
mongodb: '>=2.6.0',
topology: 'single'
}
},
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var docs = [];
for (var i = 0; i < 5; i++) {
docs.push({
s: new Array(6000000).join('x')
});
}
db.collection('bigdocs_ordered').insertMany(docs, function (err) {
expect(err).to.not.exist;
db.collection('bigdocs_ordered').count(function (err, c) {
expect(err).to.not.exist;
test.equal(5, c);
client.close(done);
});
});
});
}
});
/*******************************************************************
*
* Unordered
*
*******************************************************************/
it('should provide an accessor for operations on unordered bulk ops', function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('bulk_get_operations_test');
var batch = col.initializeUnorderedBulkOp();
batch.insert({ b: 1, a: 1 });
batch
.find({ b: 2 })
.upsert()
.updateOne({ $set: { a: 1 } });
batch.insert({ b: 3, a: 2 });
const batches = batch.batches;
expect(batches).to.have.lengthOf(2);
expect(batches[0].operations[0]).to.containSubset({ b: 1, a: 1 });
expect(batches[0].operations[1]).to.containSubset({ b: 3, a: 2 });
expect(batches[1].operations[0]).to.containSubset({
q: { b: 2 },
u: { $set: { a: 1 } },
upsert: true
});
client.close(done);
});
});
it('should fail with w:2 and wtimeout write concern due to single mongod instance unordered', {
metadata: { requires: { topology: 'single', mongodb: '>2.5.4' } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_concerns_ops_1');
var batch = col.initializeUnorderedBulkOp();
batch.insert({ a: 1 });
batch.insert({ a: 2 });
batch.execute({ writeConcern: { w: 2, wtimeoutMS: 1000 } }, function (err) {
test.ok(err != null);
test.ok(err.code != null);
test.ok(err.errmsg != null);
client.close(done);
});
});
}
});
it('should correctly return the number of operations in the bulk', {
metadata: { requires: { topology: 'single', mongodb: '>2.5.4' } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var col = db.collection('batch_write_concerns_ops_1');
var batch = col.initializeOrderedBulkOp();
batch.insert({ a: 1 });
batch
.find({})
.upsert()
.update({ $set: { b: 1 } });
test.equal(2, batch.length);
batch = col.initializeUnorderedBulkOp();
batch.insert({ a: 1 });
batch
.find({})
.upsert()
.update({ $set: { b: 1 } });
test.equal(2, batch.length);
client.close(done);
});
}
});
it('should correctly split unordered bulk batch', {
metadata: { requires: { topology: 'single', mongodb: '>2.5.4' } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var insertFirst = false;
var batchSize = 1000;
var collection = db.collection('batch_write_unordered_split_test');
var operation = collection.initializeUnorderedBulkOp(),
documents = [];
for (var i = 0; i < 10000; i++) {
var document = { name: 'bob' + i };
documents.push(document);
operation.insert(document);
}
operation.execute(function (err) {
expect(err).to.not.exist;
operation = collection.initializeUnorderedBulkOp();
if (insertFirst) {
// if you add the inserts to the batch first, it works fine.
insertDocuments();
replaceDocuments();
} else {
// if you add the updates to the batch first, it fails with the error "insert must contain at least one document"
replaceDocuments();
insertDocuments();
}
operation.execute(function (err) {
expect(err).to.not.exist;
client.close(done);
});
});
function insertDocuments() {
for (i = 10000; i < 10200; i++) {
operation.insert({ name: 'bob' + i });
}
}
function replaceDocuments() {
for (var i = 0; i < batchSize; i++) {
operation.find({ _id: documents[i]._id }).replaceOne({ name: 'joe' + i });
}
}
});
}
});
it('should correctly split ordered bulk batch', {
metadata: { requires: { topology: 'single', mongodb: '>2.5.4' } },
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var insertFirst = false;
var batchSize = 1000;
var collection = db.collection('batch_write_ordered_split_test');
var operation = collection.initializeOrderedBulkOp(),
documents = [];
for (var i = 0; i < 10000; i++) {
var document = { name: 'bob' + i };
documents.push(document);
operation.insert(document);
}
operation.execute(function (err) {
expect(err).to.not.exist;
operation = collection.initializeOrderedBulkOp();
if (insertFirst) {
// if you add the inserts to the batch first, it works fine.
insertDocuments();
replaceDocuments();
} else {
// if you add the updates to the batch first, it fails with the error "insert must contain at least one document"
replaceDocuments();
insertDocuments();
}
operation.execute(function (err) {
expect(err).to.not.exist;
client.close(done);
});
});
function insertDocuments() {
for (i = 10000; i < 10200; i++) {
operation.insert({ name: 'bob' + i });
}
}
function replaceDocuments() {
for (var i = 0; i < batchSize; i++) {
operation.find({ _id: documents[i]._id }).replaceOne({ name: 'joe' + i });
}
}
});
}
});
it('should correctly handle bulk operation split for unordered bulk operation', {
// Add a tag that our runner can trigger on
// in this case we require MongoDB 2.6.0 or higher on a single-server topology
metadata: {
requires: {
mongodb: '>=2.6.0',
topology: 'single'
}
},
test: function (done) {
var self = this;
var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
var docs = [];
for (var i = 0; i < 5; i++) {
docs.push({
s: new Array(6000000).join('x')
});
}
db.collection('bigdocs_unordered').insertMany(docs, { ordered: false }, function (err) {
expect(err).to.not.exist;
db.collection('bigdocs_unordered').count(function (err, c) {
expect(err).to.not.exist;
test.equal(5, c);
client.close(done);
});
});
});
}
});
it(
'should return an error instead of throwing when no operations are provided for ordered bulk operation execute',
{
metadata: { requires: { mongodb: '>=2.6.0', topology: 'single' } },
test: function (done) {
var self = this;
var client = self.configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
db.collection('doesnt_matter').insertMany([], function (err) {
expect(err).to.be.instanceOf(MongoDriverError);
expect(err).to.have.property('message', 'Invalid BulkOperation, Batch cannot be empty');
client.close(done);
});
});
}
}
);
it(
'should return an error instead of throwing when no operations are provided for unordered bulk operation execute',
{
metadata: { requires: { mongodb: '>=2.6.0', topology: 'single' } },
test: function (done) {
var self = this;
var client = self.configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
client.connect(function (err, client) {
var db = client.db(self.configuration.db);
db.collection('doesnt_matter').insertMany([], { ordered: false }, function (err) {
expect(err).to.be.instanceOf(MongoDriverError);
expect(err).to.have.property('message', 'Invalid BulkOperation, Batch cannot be empty');
client.close(done);
});
});
}
}
);
it('should return an error instead of throwing when an empty bulk operation is submitted (with promise)', function () {
var self = this;
var client = self.configuration.newClient({ w: 1 }, { maxPoolSize: 1 });
return client
.connect()
.then(function () {
var db = client.db(self.configuration.db);
return db.collection('doesnt_matter').insertMany([]);
})
.then(function () {
test.equal(false, true); // this should not happen!
})
.catch(function (err) {
expect(err).to.be.instanceOf(MongoDriverError);
expect(err).to.have.property('message', 'Invalid BulkOperation, Batch cannot be empty');
})
.then(function () {
return client.close();
});
});
it('should properly account for array key size in bulk unordered inserts', function (done) {
const client = this.configuration.newClient();
const documents = new Array(20000).fill('').map(() => ({
arr: new Array(19).fill('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
}));
let db;
client
.connect()
// NOTE: Hack to get around unrelated strange error in bulkWrites for right now.
.then(() => {
db = client.db(this.configuration.db);
return db.dropCollection('doesnt_matter').catch(() => {});
})
.then(() => {
return db.createCollection('doesnt_matter');
})
.then(() => {
const coll = db.collection('doesnt_matter');
coll.insertMany(documents, { ordered: false }, err => {
client.close(() => {
done(err);
});
});
});
});
it('should properly account for array key size in bulk ordered inserts', function (done) {
const client = this.configuration.newClient();
const documents = new Array(20000).fill('').map(() => ({
arr: new Array(19).fill('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
}));
let db;
client
.connect()
// NOTE: Hack to get around unrelated strange error in bulkWrites for right now.
.then(() => {
db = client.db(this.configuration.db);
return db.dropCollection('doesnt_matter').catch(() => {});
})
.then(() => {
return db.createCollection('doesnt_matter');
})
.then(() => {
const coll = db.collection('doesnt_matter');
coll.insertMany(documents, { ordered: true }, err => {
client.close(() => {
done(err);
});
});
});
});
it('properly accounts for bson size in bytes in bulk ordered inserts', function () {
const client = this.configuration.newClient();
const size = MAX_BSON_SIZE / 2;
const largeString = crypto.randomBytes(size - 100).toString('hex');
const documents = [{ s: largeString }, { s: largeString }];
let db;
return client
.connect()
.then(() => {
db = client.db(this.configuration.db);
return db.dropCollection('doesnt_matter').catch(() => {});
})
.then(() => {
return db.createCollection('doesnt_matter');
})
.then(() => {
const coll = db.collection('doesnt_matter');
return coll.insertMany(documents, { ordered: true });
})
.finally(() => client.close());
});
it('properly accounts for bson size in bytes in bulk unordered inserts', function () {
const client = this.configuration.newClient();
const size = MAX_BSON_SIZE / 2;
const largeString = crypto.randomBytes(size - 100).toString('hex');
const documents = [{ s: largeString }, { s: largeString }];
let db;
return client
.connect()
.then(() => {
db = client.db(this.configuration.db);
return db.dropCollection('doesnt_matter').catch(() => {});
})
.then(() => {
return db.createCollection('doesnt_matter');
})
.then(() => {
const coll = db.collection('doesnt_matter');
return coll.insertMany(documents, { ordered: false });
})
.finally(() => client.close());
});
function testPropagationOfBulkWriteError(bulk) {
return bulk.execute().then(
() => {
throw new Error('Expected execute to error but it passed');
},
err => {
expect(err).to.be.an.instanceOf(MongoDriverError);
}
);
}
it('should propagate the proper error from executing an empty ordered batch', function () {
const client = this.configuration.newClient();
return client
.connect()
.then(() => {
const collection = client.db(this.configuration.db).collection('doesnt_matter');
return testPropagationOfBulkWriteError(collection.initializeOrderedBulkOp());
})
.then(() => client.close());
});
it('should propagate the proper error from executing an empty unordered batch', function () {
const client = this.configuration.newClient();
return client
.connect()
.then(() => {
const collection = client.db(this.configuration.db).collection('doesnt_matter');
return testPropagationOfBulkWriteError(collection.initializeUnorderedBulkOp());
})
.then(() => client.close());
});
it('should promote a single error to the top-level message, and preserve writeErrors', function () {
const client = this.configuration.newClient();
return client.connect().then(() => {
this.defer(() => client.close());
const coll = client.db().collection('single_bulk_write_error');
return coll
.drop()
.catch(ignoreNsNotFound)
.then(() => coll.insert(Array.from({ length: 4 }, (_, i) => ({ _id: i, a: i }))))
.then(() =>
coll.bulkWrite([{ insertOne: { _id: 5, a: 0 } }, { insertOne: { _id: 5, a: 0 } }])
)
.then(
() => {
throw new Error('expected a bulk error');
},
err => {
expect(err)
.property('message')
.to.match(/E11000/);
expect(err).to.have.property('writeErrors').with.length(1);
}
);
});
});
it('should preserve order of operation index in unordered bulkWrite', function () {
const client = this.configuration.newClient();
return client.connect().then(() => {
this.defer(() => client.close());
const coll = client.db().collection('bulk_write_ordering_test');
return coll
.drop()
.catch(ignoreNsNotFound)
.then(() => coll.insert(Array.from({ length: 4 }, (_, i) => ({ _id: i, a: i }))))
.then(() =>
coll
.createIndex({ a: 1 }, { unique: true })
.then(() =>
coll.bulkWrite(
[
{ insertOne: { _id: 5, a: 0 } },
{ updateOne: { filter: { _id: 1 }, update: { $set: { a: 15 } } } },
{ insertOne: { _id: 6, a: 0 } },
{ updateOne: { filter: { _id: 2 }, update: { $set: { a: 42 } } } }
],
{ ordered: false }
)
)
)
.then(
() => {
throw new Error('expected a bulk error');
},
err => {
expect(err).to.have.property('writeErrors').with.length(2);
expect(err).to.have.nested.property('writeErrors[0].err.index', 0);
expect(err).to.have.nested.property('writeErrors[1].err.index', 2);
}
);
});
});
it('should preserve order of operation index in unordered bulk operation', function () {
const client = this.configuration.newClient();
return client.connect().then(() => {
this.defer(() => client.close());
const coll = client.db().collection('unordered_preserve_order');
return coll
.drop()
.catch(ignoreNsNotFound)
.then(() => {
const batch = coll.initializeUnorderedBulkOp();
batch.insert({ _id: 1, a: 0 });
batch.insert({ _id: 1, a: 0 });
batch.insert({ _id: 2, a: 0 });
batch.insert({ _id: 2, a: 0 });
return batch.execute();
})
.then(
() => {
throw new Error('expected a bulk error');
},
err => {
expect(err).to.have.property('writeErrors').with.length(2);
expect(err).to.have.nested.property('writeErrors[0].err.index', 1);
expect(err).to.have.nested.property('writeErrors[1].err.index', 3);
}
);
});
});
it('should not fail on the first error in an unordered bulkWrite', function () {
const client = this.configuration.newClient();
return client.connect().then(() => {
this.defer(() => client.close());
const coll = client.db().collection('bulk_op_ordering_test');
return coll
.drop()
.catch(ignoreNsNotFound)
.then(() => coll.createIndex({ email: 1 }, { unique: 1, background: false }))
.then(() =>
Promise.all([
coll.updateOne(
{ email: '[email protected]' },
{ $set: { name: 'Adam Smith', age: 29 } },
{ upsert: true }
),
coll.updateOne(
{ email: '[email protected]' },
{ $set: { name: 'John Doe', age: 32 } },
{ upsert: true }
)
])
)
.then(() =>
coll.bulkWrite(
[
{
updateOne: {
filter: { email: '[email protected]' },
update: { $set: { age: 39 } }
}
},
{
insertOne: {
document: {
email: '[email protected]'
}
}
}
],
{ ordered: false }
)
)
.then(
() => {
throw new Error('expected a bulk error');
},
err => expect(err).property('code').to.equal(11000)
)
.then(() => coll.findOne({ email: '[email protected]' }))
.then(updatedAdam => expect(updatedAdam).property('age').to.equal(39));
});
});
it(
'should return correct ids for documents with generated ids',
withClientV2(function (client, done) {
const bulk = client.db().collection('coll').initializeUnorderedBulkOp();
for (let i = 0; i < 2; i++) bulk.insert({ x: 1 });
bulk.execute((err, result) => {
expect(err).to.not.exist;
expect(result).property('insertedIds').to.exist;
expect(Object.keys(result.insertedIds)).to.have.length(2);
expect(result.insertedIds[0]).to.exist;
expect(result.insertedIds[1]).to.exist;
done();
});
})
);
it(
'should throw an error if bulk execute is called more than once',
withClientV2(function (client, done) {
const bulk = client.db().collection('coll').initializeUnorderedBulkOp();
bulk.insert({});
bulk.execute((err, result) => {
expect(err).to.not.exist;
expect(result).to.exist;
bulk.execute(err => {
expect(err).to.match(/Batch cannot be re-executed/);
done();
});
});
})
);
it('should apply collation via FindOperators', {
metadata: { requires: { mongodb: '>= 3.4' } },
test: withMonitoredClient(['update', 'delete'], function (client, events, done) {
const locales = ['fr', 'de', 'es'];
const bulk = client.db().collection('coll').initializeOrderedBulkOp();
// updates
bulk
.find({ b: 1 })
.collation({ locale: locales[0] })
.updateOne({ $set: { b: 2 } });
bulk
.find({ b: 2 })
.collation({ locale: locales[1] })
.update({ $set: { b: 3 } });
bulk.find({ b: 3 }).collation({ locale: locales[2] }).replaceOne({ b: 2 });
// deletes
bulk.find({ b: 2 }).collation({ locale: locales[0] }).deleteOne();
bulk.find({ b: 1 }).collation({ locale: locales[1] }).delete();
bulk.execute(err => {
expect(err).to.not.exist;
expect(events).to.be.an('array').with.length.at.least(1);
expect(events[0]).property('commandName').to.equal('update');
const updateCommand = events[0].command;
expect(updateCommand).property('updates').to.be.an('array').with.length(3);
updateCommand.updates.forEach((statement, idx) => {
expect(statement).property('collation').to.eql({ locale: locales[idx] });
});
expect(events[1]).property('commandName').to.equal('delete');
const deleteCommand = events[1].command;
expect(deleteCommand).property('deletes').to.be.an('array').with.length(2);
deleteCommand.deletes.forEach((statement, idx) => {
expect(statement).property('collation').to.eql({ locale: locales[idx] });
});
client.close(done);
});
})
});
it('should apply arrayFilters to bulk updates via FindOperators', {
metadata: { requires: { mongodb: '>= 3.6' } },
test: withMonitoredClient(['update', 'delete'], function (client, events, done) {
client.db().dropCollection('bulkArrayFilters', () => {
const coll = client.db().collection('bulkArrayFilters');
const bulk = coll.initializeOrderedBulkOp();
bulk.insert({ person: 'Foo', scores: [4, 9, 12] });
bulk.insert({ person: 'Bar', scores: [13, 0, 52] });
bulk
.find({ scores: { $lt: 1 } })
.arrayFilters([{ e: { $lt: 1 } }])
.updateOne({ $set: { 'scores.$[e]': 1 } });
bulk
.find({ scores: { $gte: 10 } })
.arrayFilters([{ e: { $gte: 10 } }])
.update({ $set: { 'scores.$[e]': 10 } });
bulk.execute(err => {
expect(err).to.not.exist;
expect(events).to.be.an('array').with.lengthOf(1);
expect(events[0]).to.have.property('commandName', 'update');
const updateCommand = events[0].command;
expect(updateCommand).property('updates').to.be.an('array').with.lengthOf(2);
updateCommand.updates.forEach(update => expect(update).to.have.property('arrayFilters'));
coll.find({}).toArray((err, result) => {
expect(err).to.not.exist;
expect(result[0]).to.containSubset({
person: 'Foo',
scores: [4, 9, 10]
});
expect(result[1]).to.containSubset({
person: 'Bar',
scores: [10, 1, 10]
});
client.close(done);
});
});
});
})
});
it('should throw an error if raw operations are passed to bulkWrite', function () {
const client = this.configuration.newClient();
return client.connect().then(() => {
this.defer(() => client.close());
const coll = client.db().collection('single_bulk_write_error');
return coll
.bulkWrite([
{ updateOne: { q: { a: 2 }, u: { $set: { a: 2 } }, upsert: true } },
{ deleteOne: { q: { c: 1 } } }
])
.then(
() => {
throw new Error('expected a bulk error');
},
err => {
expect(err).to.match(/Raw operations are not allowed/);
}
);
});
});
describe('Bulk operation transaction rollback', () => {
/** @type {import('../../src/index').MongoClient} */
let client;
/** @type {import('../../src/index').Collection<{ answer: number }>} */
let collection;
beforeEach(async function () {
const config = this.configuration;
client = config.newClient();
await client.connect();
try {
await client
.db('bulk_operation_writes_test')
.collection('bulk_write_transaction_test')
.drop();
} catch (_) {
// do not care
}
collection = await client
.db('bulk_operation_writes_test')
.createCollection('bulk_write_transaction_test');
await collection.deleteMany({});
});
afterEach(async () => {
if (client) await client.close();
});
it('should abort ordered bulk operation writes', {
metadata: { requires: { mongodb: '>= 4.2', topology: ['replicaset'] } },
async test() {
const session = client.startSession();
session.startTransaction({
readConcern: { level: 'local' },
writeConcern: { w: 'majority' }
});
let bulk = undefined;
bulk = collection.initializeOrderedBulkOp({ session });
bulk.insert({ answer: 42 });
await bulk.execute();
await session.abortTransaction();
await session.endSession();
const documents = await collection.find().toArray();
expect(documents).to.have.lengthOf(
0,
'bulk operation writes were made outside of transaction'
);
}
});
it('should abort unordered bulk operation writes', {
metadata: { requires: { mongodb: '>= 4.2', topology: ['replicaset'] } },
async test() {
const session = client.startSession();
session.startTransaction({
readConcern: { level: 'local' },
writeConcern: { w: 'majority' }
});
let bulk = undefined;
bulk = collection.initializeUnorderedBulkOp({ session });
bulk.insert({ answer: 42 });
await bulk.execute();
await session.abortTransaction();
await session.endSession();
const documents = await collection.find().toArray();
expect(documents).to.have.lengthOf(
0,
'bulk operation writes were made outside of transaction'
);
}
});
it('should abort unordered bulk operation writes using withTransaction', {
metadata: { requires: { mongodb: '>= 4.2', topology: ['replicaset'] } },
async test() {
const session = client.startSession();
await session.withTransaction(
async () => {
let bulk = undefined;
bulk = collection.initializeUnorderedBulkOp({ session });
bulk.insert({ answer: 42 });
await bulk.execute();
await session.abortTransaction();
},
{ readConcern: { level: 'local' }, writeConcern: { w: 'majority' } }
);
await session.endSession();
const documents = await collection.find().toArray();
expect(documents).to.have.lengthOf(
0,
'bulk operation writes were made outside of transaction'
);
}
});
it('should abort ordered bulk operation writes using withTransaction', {
metadata: { requires: { mongodb: '>= 4.2', topology: ['replicaset'] } },
async test() {
const session = client.startSession();
await session.withTransaction(
async () => {
let bulk = undefined;
bulk = collection.initializeOrderedBulkOp({ session });
bulk.insert({ answer: 42 });
await bulk.execute();
await session.abortTransaction();
},
{ readConcern: { level: 'local' }, writeConcern: { w: 'majority' } }
);
await session.endSession();
const documents = await collection.find().toArray();
expect(documents).to.have.lengthOf(
0,
'bulk operation writes were made outside of transaction'
);
}
});
});
});
| 1 | 20,693 | I think we should change this to an instanceOf check for MongoBatchReExecutionError | mongodb-node-mongodb-native | js |
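The review note above suggests asserting on the error class rather than its message. A minimal sketch of how that test could read, assuming the driver exports MongoBatchReExecutionError (the '../../src/error' import path is a guess) and reusing the withClientV2 helper from this file:

// Sketch only: the MongoBatchReExecutionError export and its import path are assumptions, not verified against the codebase.
const { MongoBatchReExecutionError } = require('../../src/error');

it(
  'should throw a MongoBatchReExecutionError if bulk execute is called more than once',
  withClientV2(function (client, done) {
    const bulk = client.db().collection('coll').initializeUnorderedBulkOp();
    bulk.insert({});
    bulk.execute((err, result) => {
      expect(err).to.not.exist;
      expect(result).to.exist;
      bulk.execute(err => {
        // Assert on the error class instead of matching the message text
        expect(err).to.be.instanceOf(MongoBatchReExecutionError);
        done();
      });
    });
  })
);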
@@ -34,7 +34,7 @@ type ProfileDecoder struct {
}
func NewProfileDecoder(callbacks passthruCallbacks) *ProfileDecoder {
- return &ProfileDecoder{callbacks: callbacks, converter: conversion.Converter{}}
+ return &ProfileDecoder{callbacks: callbacks, converter: conversion.NewConverter()}
}
func (p *ProfileDecoder) RegisterWith(d *dispatcher.Dispatcher) { | 1 | // Copyright (c) 2018 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package calc
import (
"strings"
"github.com/projectcalico/felix/dispatcher"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/libcalico-go/lib/backend/api"
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
"github.com/projectcalico/libcalico-go/lib/backend/model"
log "github.com/sirupsen/logrus"
)
// ProfileDecoder takes updates from a dispatcher, determines if the profile is a Kubernetes Service Account or
// Kubernetes Namespace, and if it is, generates a dataplane update or remove for it.
type ProfileDecoder struct {
callbacks passthruCallbacks
converter conversion.Converter
}
func NewProfileDecoder(callbacks passthruCallbacks) *ProfileDecoder {
return &ProfileDecoder{callbacks: callbacks, converter: conversion.Converter{}}
}
func (p *ProfileDecoder) RegisterWith(d *dispatcher.Dispatcher) {
d.Register(model.ProfileLabelsKey{}, p.OnUpdate)
}
func (p *ProfileDecoder) OnUpdate(update api.Update) (filterOut bool) {
// This type assertion is safe because we only registered for ProfileLabels updates.
key := update.Key.(model.ProfileLabelsKey)
log.WithField("key", key.String()).Debug("Decoding ProfileLabels")
idInterface := p.classifyProfile(key)
switch id := idInterface.(type) {
case nil:
log.WithField("key", key.String()).Debug("Ignoring ProfileLabels")
case proto.ServiceAccountID:
if update.Value == nil {
p.callbacks.OnServiceAccountRemove(id)
} else {
labels := update.Value.(map[string]string)
msg := proto.ServiceAccountUpdate{
Id: &id, Labels: decodeLabels(conversion.ServiceAccountLabelPrefix, labels)}
p.callbacks.OnServiceAccountUpdate(&msg)
}
case proto.NamespaceID:
if update.Value == nil {
p.callbacks.OnNamespaceRemove(id)
} else {
labels := update.Value.(map[string]string)
msg := proto.NamespaceUpdate{
Id: &id, Labels: decodeLabels(conversion.NamespaceLabelPrefix, labels)}
p.callbacks.OnNamespaceUpdate(&msg)
}
}
return false
}
func (p *ProfileDecoder) classifyProfile(key model.ProfileLabelsKey) interface{} {
namespace, name, err := p.converter.ProfileNameToServiceAccount(key.Name)
if err == nil {
return proto.ServiceAccountID{Name: name, Namespace: namespace}
}
name, err = p.converter.ProfileNameToNamespace(key.Name)
if err == nil {
return proto.NamespaceID{Name: name}
}
return nil
}
// decodeLabels strips the special prefix we add to Profile labels when converting. This gives us the original labels on
// the ServiceAccount or Namespace object.
func decodeLabels(prefix string, in map[string]string) map[string]string {
out := make(map[string]string)
for k, v := range in {
k = strings.TrimPrefix(k, prefix)
out[k] = v
}
return out
}
| 1 | 17,619 | Required by the libcalico-go changes | projectcalico-felix | go |
@@ -1193,3 +1193,12 @@ type MutableBareRootMetadataNoImplError struct {
func (e MutableBareRootMetadataNoImplError) Error() string {
return "Does not implement MutableBareRootMetadata"
}
+
+// blockNonExistentError is returned when a block doesn't exist.
+type blockNonExistentError struct {
+ id BlockID
+}
+
+func (e blockNonExistentError) Error() string {
+ return fmt.Sprintf("block %s does not exist", e.id)
+} | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
)
// ErrorFile is the name of the virtual file in KBFS that should
// contain the last reported error(s).
var ErrorFile = ".kbfs_error"
// WrapError simply wraps an error in a fmt.Stringer interface, so
// that it can be reported.
type WrapError struct {
Err error
}
// String implements the fmt.Stringer interface for WrapError
func (e WrapError) String() string {
return e.Err.Error()
}
// NameExistsError indicates that the user tried to create an entry
// for a name that already existed in a subdirectory.
type NameExistsError struct {
Name string
}
// Error implements the error interface for NameExistsError
func (e NameExistsError) Error() string {
return fmt.Sprintf("%s already exists", e.Name)
}
// NoSuchNameError indicates that the user tried to access a
// subdirectory entry that doesn't exist.
type NoSuchNameError struct {
Name string
}
// Error implements the error interface for NoSuchNameError
func (e NoSuchNameError) Error() string {
return fmt.Sprintf("%s doesn't exist", e.Name)
}
// NoSuchUserError indicates that the given user couldn't be resolved.
type NoSuchUserError struct {
Input string
}
// Error implements the error interface for NoSuchUserError
func (e NoSuchUserError) Error() string {
return fmt.Sprintf("%s is not a Keybase user", e.Input)
}
// ToStatus implements the keybase1.ToStatusAble interface for NoSuchUserError
func (e NoSuchUserError) ToStatus() keybase1.Status {
return keybase1.Status{
Name: "NotFound",
Code: int(keybase1.StatusCode_SCNotFound),
Desc: e.Error(),
}
}
// BadTLFNameError indicates a top-level folder name that has an
// incorrect format.
type BadTLFNameError struct {
Name string
}
// Error implements the error interface for BadTLFNameError.
func (e BadTLFNameError) Error() string {
return fmt.Sprintf("TLF name %s is in an incorrect format", e.Name)
}
// InvalidBlockRefError indicates an invalid block reference was
// encountered.
type InvalidBlockRefError struct {
ref blockRef
}
func (e InvalidBlockRefError) Error() string {
return fmt.Sprintf("Invalid block ref %s", e.ref)
}
// InvalidPathError indicates an invalid path was encountered.
type InvalidPathError struct {
p path
}
// Error implements the error interface for InvalidPathError.
func (e InvalidPathError) Error() string {
return fmt.Sprintf("Invalid path %s", e.p.DebugString())
}
// InvalidParentPathError indicates a path without a valid parent was
// encountered.
type InvalidParentPathError struct {
p path
}
// Error implements the error interface for InvalidParentPathError.
func (e InvalidParentPathError) Error() string {
return fmt.Sprintf("Path with invalid parent %s", e.p.DebugString())
}
// DirNotEmptyError indicates that the user tried to unlink a
// subdirectory that was not empty.
type DirNotEmptyError struct {
Name string
}
// Error implements the error interface for DirNotEmptyError
func (e DirNotEmptyError) Error() string {
return fmt.Sprintf("Directory %s is not empty and can't be removed", e.Name)
}
// TlfAccessError indicates that the user tried to perform an
// unpermitted operation on a top-level folder.
type TlfAccessError struct {
ID TlfID
}
// Error implements the error interface for TlfAccessError
func (e TlfAccessError) Error() string {
return fmt.Sprintf("Operation not permitted on folder %s", e.ID)
}
// RenameAcrossDirsError indicates that the user tried to do an atomic
// rename across directories.
type RenameAcrossDirsError struct {
}
// Error implements the error interface for RenameAcrossDirsError
func (e RenameAcrossDirsError) Error() string {
return fmt.Sprintf("Cannot rename across directories")
}
// ErrorFileAccessError indicates that the user tried to perform an
// operation on the ErrorFile that is not allowed.
type ErrorFileAccessError struct {
}
// Error implements the error interface for ErrorFileAccessError
func (e ErrorFileAccessError) Error() string {
return fmt.Sprintf("Operation not allowed on file %s", ErrorFile)
}
// ReadAccessError indicates that the user tried to read from a
// top-level folder without read permission.
type ReadAccessError struct {
User libkb.NormalizedUsername
Tlf CanonicalTlfName
Public bool
}
// Error implements the error interface for ReadAccessError
func (e ReadAccessError) Error() string {
return fmt.Sprintf("%s does not have read access to directory %s",
e.User, buildCanonicalPath(e.Public, e.Tlf))
}
// WriteAccessError indicates that the user tried to write to a
// top-level folder without write permission.
type WriteAccessError struct {
User libkb.NormalizedUsername
Tlf CanonicalTlfName
Public bool
}
// Error implements the error interface for WriteAccessError
func (e WriteAccessError) Error() string {
return fmt.Sprintf("%s does not have write access to directory %s",
e.User, buildCanonicalPath(e.Public, e.Tlf))
}
// NewReadAccessError constructs a ReadAccessError for the given
// directory and user.
func NewReadAccessError(h *TlfHandle, username libkb.NormalizedUsername) error {
tlfname := h.GetCanonicalName()
return ReadAccessError{username, tlfname, h.IsPublic()}
}
// NewWriteAccessError constructs a WriteAccessError for the given
// directory and user.
func NewWriteAccessError(h *TlfHandle, username libkb.NormalizedUsername) error {
tlfname := h.GetCanonicalName()
return WriteAccessError{username, tlfname, h.IsPublic()}
}
// NeedSelfRekeyError indicates that the folder in question needs to
// be rekeyed for the local device, and that this can be done by one of
// the user's other devices.
type NeedSelfRekeyError struct {
Tlf CanonicalTlfName
}
// Error implements the error interface for NeedSelfRekeyError
func (e NeedSelfRekeyError) Error() string {
return fmt.Sprintf("This device does not yet have read access to "+
"directory %s, log into Keybase from one of your other "+
"devices to grant access", buildCanonicalPath(false, e.Tlf))
}
// NeedOtherRekeyError indicates that the folder in question needs to
// be rekeyed for the local device, and that this can only be done by one
// of the other users.
type NeedOtherRekeyError struct {
Tlf CanonicalTlfName
}
// Error implements the error interface for NeedOtherRekeyError
func (e NeedOtherRekeyError) Error() string {
return fmt.Sprintf("This device does not yet have read access to "+
"directory %s, ask one of the other directory participants to "+
"log into Keybase to grant you access automatically",
buildCanonicalPath(false, e.Tlf))
}
// NotFileBlockError indicates that a file block was expected but a
// block of a different type was found.
//
// ptr and branch should be filled in, but p may be empty.
type NotFileBlockError struct {
ptr BlockPointer
branch BranchName
p path
}
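// Error implements the error interface for NotFileBlockError.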
func (e NotFileBlockError) Error() string {
return fmt.Sprintf("The block at %s is not a file block (branch=%s, path=%s)", e.ptr, e.branch, e.p)
}
// NotDirBlockError indicates that a dir block was expected but a
// block of a different type was found.
//
// ptr and branch should be filled in, but p may be empty.
type NotDirBlockError struct {
ptr BlockPointer
branch BranchName
p path
}
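// Error implements the error interface for NotDirBlockError.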
func (e NotDirBlockError) Error() string {
return fmt.Sprintf("The block at %s is not a dir block (branch=%s, path=%s)", e.ptr, e.branch, e.p)
}
// NotFileError indicates that the user tried to perform a
// file-specific operation on something that isn't a file.
type NotFileError struct {
path path
}
// Error implements the error interface for NotFileError
func (e NotFileError) Error() string {
return fmt.Sprintf("%s is not a file (folder %s)", e.path, e.path.Tlf)
}
// NotDirError indicates that the user tried to perform a
// dir-specific operation on something that isn't a directory.
type NotDirError struct {
path path
}
// Error implements the error interface for NotDirError
func (e NotDirError) Error() string {
return fmt.Sprintf("%s is not a directory (folder %s)", e.path, e.path.Tlf)
}
// BlockDecodeError indicates that a block couldn't be decoded as
// expected; probably it is the wrong type.
type BlockDecodeError struct {
decodeErr error
}
// Error implements the error interface for BlockDecodeError
func (e BlockDecodeError) Error() string {
return fmt.Sprintf("Decode error for a block: %v", e.decodeErr)
}
// BadDataError indicates that KBFS is storing corrupt data for a block.
type BadDataError struct {
ID BlockID
}
// Error implements the error interface for BadDataError
func (e BadDataError) Error() string {
return fmt.Sprintf("Bad data for block %v", e.ID)
}
// NoSuchBlockError indicates that a block for the associated ID doesn't exist.
type NoSuchBlockError struct {
ID BlockID
}
// Error implements the error interface for NoSuchBlockError
func (e NoSuchBlockError) Error() string {
return fmt.Sprintf("Couldn't get block %v", e.ID)
}
// BadCryptoError indicates that KBFS performed a bad crypto operation.
type BadCryptoError struct {
ID BlockID
}
// Error implements the error interface for BadCryptoError
func (e BadCryptoError) Error() string {
return fmt.Sprintf("Bad crypto for block %v", e.ID)
}
// BadCryptoMDError indicates that KBFS performed a bad crypto
// operation, specifically on a MD object.
type BadCryptoMDError struct {
ID TlfID
}
// Error implements the error interface for BadCryptoMDError
func (e BadCryptoMDError) Error() string {
return fmt.Sprintf("Bad crypto for the metadata of directory %v", e.ID)
}
// BadMDError indicates that the system is storing corrupt MD object
// for the given TLF ID.
type BadMDError struct {
ID TlfID
}
// Error implements the error interface for BadMDError
func (e BadMDError) Error() string {
return fmt.Sprintf("Wrong format for metadata for directory %v", e.ID)
}
// MDMissingDataError indicates that we are trying to get the
// metadata ID of an MD object with no serialized data field.
type MDMissingDataError struct {
ID TlfID
}
// Error implements the error interface for MDMissingDataError
func (e MDMissingDataError) Error() string {
return fmt.Sprintf("No serialized private data in the metadata "+
"for directory %v", e.ID)
}
// MDMismatchError indicates an inconsistent or unverifiable MD object
// for the given top-level folder.
type MDMismatchError struct {
Revision MetadataRevision
Dir string
TlfID TlfID
Err error
}
// Error implements the error interface for MDMismatchError
func (e MDMismatchError) Error() string {
return fmt.Sprintf("Could not verify metadata (revision=%d) for directory %s (id=%s): %s",
e.Revision, e.Dir, e.TlfID, e.Err)
}
// NoSuchMDError indicates that there is no MD object for the given
// folder, revision, and merged status.
type NoSuchMDError struct {
Tlf TlfID
Rev MetadataRevision
BID BranchID
}
// Error implements the error interface for NoSuchMDError
func (e NoSuchMDError) Error() string {
return fmt.Sprintf("Couldn't get metadata for folder %v, revision %d, "+
"%s", e.Tlf, e.Rev, e.BID)
}
// InvalidMetadataVersionError indicates that an invalid metadata version was
// used.
type InvalidMetadataVersionError struct {
Tlf TlfID
MetadataVer MetadataVer
}
// Error implements the error interface for InvalidMetadataVersionError.
func (e InvalidMetadataVersionError) Error() string {
return fmt.Sprintf("Invalid metadata version %d for folder %s",
int(e.MetadataVer), e.Tlf)
}
// NewMetadataVersionError indicates that the metadata for the given
// folder has been written using a new metadata version that our
// client doesn't understand.
type NewMetadataVersionError struct {
Tlf TlfID
MetadataVer MetadataVer
}
// Error implements the error interface for NewMetadataVersionError.
func (e NewMetadataVersionError) Error() string {
return fmt.Sprintf(
"The metadata for folder %s is of a version (%d) that we can't read",
e.Tlf, e.MetadataVer)
}
// InvalidDataVersionError indicates that an invalid data version was
// used.
type InvalidDataVersionError struct {
DataVer DataVer
}
// Error implements the error interface for InvalidDataVersionError.
func (e InvalidDataVersionError) Error() string {
return fmt.Sprintf("Invalid data version %d", int(e.DataVer))
}
// NewDataVersionError indicates that the data at the given path has
// been written using a new data version that our client doesn't
// understand.
type NewDataVersionError struct {
path path
DataVer DataVer
}
// Error implements the error interface for NewDataVersionError.
func (e NewDataVersionError) Error() string {
return fmt.Sprintf(
"The data at path %s is of a version (%d) that we can't read "+
"(in folder %s)",
e.path, e.DataVer, e.path.Tlf)
}
// OutdatedVersionError indicates that we have encountered some new
// data version we don't understand, and the user should be prompted
// to upgrade.
type OutdatedVersionError struct {
}
// Error implements the error interface for OutdatedVersionError.
func (e OutdatedVersionError) Error() string {
return "Your software is out of date, and cannot read this data. " +
"Please use `keybase update check` to upgrade your software."
}
// InvalidKeyGenerationError indicates that an invalid key generation
// was used.
type InvalidKeyGenerationError struct {
TlfID TlfID
KeyGen KeyGen
}
// Error implements the error interface for InvalidKeyGenerationError.
func (e InvalidKeyGenerationError) Error() string {
return fmt.Sprintf("Invalid key generation %d for %s", int(e.KeyGen), e.TlfID)
}
// NewKeyGenerationError indicates that the data at the given path has
// been written using keys that our client doesn't have.
type NewKeyGenerationError struct {
TlfID TlfID
KeyGen KeyGen
}
// Error implements the error interface for NewKeyGenerationError.
func (e NewKeyGenerationError) Error() string {
return fmt.Sprintf(
"The data for %v is keyed with a key generation (%d) that "+
"we don't know", e.TlfID, e.KeyGen)
}
// BadSplitError indicates that the BlockSplitter has an error.
type BadSplitError struct {
}
// Error implements the error interface for BadSplitError
func (e BadSplitError) Error() string {
return "Unexpected bad block split"
}
// TooLowByteCountError indicates that the size of a block is smaller
// than the expected size.
type TooLowByteCountError struct {
ExpectedMinByteCount int
ByteCount int
}
// Error implements the error interface for TooLowByteCountError
func (e TooLowByteCountError) Error() string {
return fmt.Sprintf("Expected at least %d bytes, got %d bytes",
e.ExpectedMinByteCount, e.ByteCount)
}
// InconsistentEncodedSizeError is raised when a dirty block has a
// non-zero encoded size.
type InconsistentEncodedSizeError struct {
info BlockInfo
}
// Error implements the error interface for InconsistentEncodedSizeError
func (e InconsistentEncodedSizeError) Error() string {
return fmt.Sprintf("Block pointer to dirty block %v with non-zero "+
"encoded size = %d bytes", e.info.ID, e.info.EncodedSize)
}
// MDWriteNeededInRequest indicates that the system needs MD write
// permissions to successfully complete an operation, so it should
// retry in mdWrite mode.
type MDWriteNeededInRequest struct {
}
// Error implements the error interface for MDWriteNeededInRequest
func (e MDWriteNeededInRequest) Error() string {
return "This request needs MD write access, but doesn't have it."
}
// UnknownSigVer indicates that we can't process a signature because
// it has an unknown version.
type UnknownSigVer struct {
sigVer SigVer
}
// Error implements the error interface for UnknownSigVer
func (e UnknownSigVer) Error() string {
return fmt.Sprintf("Unknown signature version %d", int(e.sigVer))
}
// KeyNotFoundError indicates that a key matching the given KID
// couldn't be found.
type KeyNotFoundError struct {
kid keybase1.KID
}
// Error implements the error interface for KeyNotFoundError.
func (e KeyNotFoundError) Error() string {
return fmt.Sprintf("Could not find key with kid=%s", e.kid)
}
// UnverifiableTlfUpdateError indicates that a MD update could not be
// verified.
type UnverifiableTlfUpdateError struct {
Tlf string
User libkb.NormalizedUsername
Err error
}
// Error implements the error interface for UnverifiableTlfUpdateError.
func (e UnverifiableTlfUpdateError) Error() string {
return fmt.Sprintf("%s was last written by an unknown device claiming "+
"to belong to user %s. The device has possibly been revoked by the "+
"user. Use `keybase log send` to file an issue with the Keybase "+
"admins.", e.Tlf, e.User)
}
// KeyCacheMissError indicates that a key matching the given TlfID
// and key generation wasn't found in cache.
type KeyCacheMissError struct {
tlf TlfID
keyGen KeyGen
}
// Error implements the error interface for KeyCacheMissError.
func (e KeyCacheMissError) Error() string {
return fmt.Sprintf("Could not find key with tlf=%s, keyGen=%d", e.tlf, e.keyGen)
}
// KeyCacheHitError indicates that a key matching the given TlfID
// and key generation was found in cache but the object type was unknown.
type KeyCacheHitError struct {
tlf TlfID
keyGen KeyGen
}
// Error implements the error interface for KeyCacheHitError.
func (e KeyCacheHitError) Error() string {
return fmt.Sprintf("Invalid key with tlf=%s, keyGen=%d", e.tlf, e.keyGen)
}
// UnexpectedShortCryptoRandRead indicates that fewer bytes were read
// from crypto.rand.Read() than expected.
type UnexpectedShortCryptoRandRead struct {
}
// Error implements the error interface for UnexpectedShortCryptoRandRead.
func (e UnexpectedShortCryptoRandRead) Error() string {
return "Unexpected short read from crypto.rand.Read()"
}
// UnknownEncryptionVer indicates that we can't decrypt an
// encryptedData object because it has an unknown version.
type UnknownEncryptionVer struct {
ver EncryptionVer
}
// Error implements the error interface for UnknownEncryptionVer.
func (e UnknownEncryptionVer) Error() string {
return fmt.Sprintf("Unknown encryption version %d", int(e.ver))
}
// InvalidNonceError indicates that an invalid cryptographic nonce was
// detected.
type InvalidNonceError struct {
nonce []byte
}
// Error implements the error interface for InvalidNonceError.
func (e InvalidNonceError) Error() string {
return fmt.Sprintf("Invalid nonce %v", e.nonce)
}
// NoKeysError indicates that no keys were provided for a decryption allowing
// multiple device keys
type NoKeysError struct{}
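// Error implements the error interface for NoKeysError.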
func (e NoKeysError) Error() string {
return "No keys provided"
}
// InvalidPublicTLFOperation indicates that an invalid operation was
// attempted on a public TLF.
type InvalidPublicTLFOperation struct {
id TlfID
opName string
}
// Error implements the error interface for InvalidPublicTLFOperation.
func (e InvalidPublicTLFOperation) Error() string {
return fmt.Sprintf("Tried to do invalid operation %s on public TLF %v",
e.opName, e.id)
}
// WrongOpsError indicates that an unexpected path got passed into a
// FolderBranchOps instance
type WrongOpsError struct {
nodeFB FolderBranch
opsFB FolderBranch
}
// Error implements the error interface for WrongOpsError.
func (e WrongOpsError) Error() string {
return fmt.Sprintf("Ops for folder %v, branch %s, was given path %s, "+
"branch %s", e.opsFB.Tlf, e.opsFB.Branch, e.nodeFB.Tlf, e.nodeFB.Branch)
}
// NodeNotFoundError indicates that we tried to find a node for the
// given BlockPointer and failed.
type NodeNotFoundError struct {
ptr BlockPointer
}
// Error implements the error interface for NodeNotFoundError.
func (e NodeNotFoundError) Error() string {
return fmt.Sprintf("No node found for pointer %v", e.ptr)
}
// ParentNodeNotFoundError indicates that we tried to update a Node's
// parent with a BlockPointer that we don't yet know about.
type ParentNodeNotFoundError struct {
parent blockRef
}
// Error implements the error interface for ParentNodeNotFoundError.
func (e ParentNodeNotFoundError) Error() string {
return fmt.Sprintf("No such parent node found for %v", e.parent)
}
// EmptyNameError indicates that the user tried to use an empty name
// for the given blockRef.
type EmptyNameError struct {
ref blockRef
}
// Error implements the error interface for EmptyNameError.
func (e EmptyNameError) Error() string {
return fmt.Sprintf("Cannot use empty name for %v", e.ref)
}
// PaddedBlockReadError occurs if the number of bytes read does not
// equal the number of bytes specified.
type PaddedBlockReadError struct {
ActualLen int
ExpectedLen int
}
// Error implements the error interface of PaddedBlockReadError.
func (e PaddedBlockReadError) Error() string {
return fmt.Sprintf("Reading block data out of padded block resulted in %d bytes, expected %d",
e.ActualLen, e.ExpectedLen)
}
// NotDirectFileBlockError indicates that a direct file block was
// expected, but something else (e.g., an indirect file block) was
// given instead.
type NotDirectFileBlockError struct {
}
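// Error implements the error interface for NotDirectFileBlockError.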
func (e NotDirectFileBlockError) Error() string {
return fmt.Sprintf("Unexpected block type; expected a direct file block")
}
// KeyHalfMismatchError is returned when the key server doesn't return the expected key half.
type KeyHalfMismatchError struct {
Expected TLFCryptKeyServerHalfID
Actual TLFCryptKeyServerHalfID
}
// Error implements the error interface for KeyHalfMismatchError.
func (e KeyHalfMismatchError) Error() string {
return fmt.Sprintf("Key mismatch, expected ID: %s, actual ID: %s",
e.Expected, e.Actual)
}
// InvalidHashError is returned whenever an invalid hash is
// detected.
type InvalidHashError struct {
H Hash
}
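// Error implements the error interface for InvalidHashError.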
func (e InvalidHashError) Error() string {
return fmt.Sprintf("Invalid hash %s", e.H)
}
// InvalidTlfID indicates that the TLF ID string is not parseable
// or is otherwise invalid.
type InvalidTlfID struct {
id string
}
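// Error implements the error interface for InvalidTlfID.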
func (e InvalidTlfID) Error() string {
return fmt.Sprintf("Invalid TLF ID %q", e.id)
}
// InvalidBranchID indicates that the branch ID string is not
// parseable or is otherwise invalid.
type InvalidBranchID struct {
id string
}
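// Error implements the error interface for InvalidBranchID.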
func (e InvalidBranchID) Error() string {
return fmt.Sprintf("Invalid branch ID %q", e.id)
}
// UnknownHashTypeError is returned whenever an attempt is made to use
// a hash with an unknown hash type for verification.
type UnknownHashTypeError struct {
T HashType
}
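// Error implements the error interface for UnknownHashTypeError.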
func (e UnknownHashTypeError) Error() string {
return fmt.Sprintf("Unknown hash type %s", e.T)
}
// HashMismatchError is returned whenever a hash mismatch is detected.
type HashMismatchError struct {
ExpectedH Hash
ActualH Hash
}
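// Error implements the error interface for HashMismatchError.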
func (e HashMismatchError) Error() string {
return fmt.Sprintf("Hash mismatch: expected %s, got %s",
e.ExpectedH, e.ActualH)
}
// MDServerDisconnected indicates the MDServer has been disconnected for clients waiting
// on an update channel.
type MDServerDisconnected struct {
}
// Error implements the error interface for MDServerDisconnected.
func (e MDServerDisconnected) Error() string {
return "MDServer is disconnected"
}
// MDRevisionMismatch indicates that we tried to apply a revision that
// was not the next in line.
type MDRevisionMismatch struct {
rev MetadataRevision
curr MetadataRevision
}
// Error implements the error interface for MDRevisionMismatch.
func (e MDRevisionMismatch) Error() string {
return fmt.Sprintf("MD revision %d isn't next in line for our "+
"current revision %d", e.rev, e.curr)
}
// MDTlfIDMismatch indicates that the ID field of a successor MD
// doesn't match the ID field of its predecessor.
type MDTlfIDMismatch struct {
currID TlfID
nextID TlfID
}
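// Error implements the error interface for MDTlfIDMismatch.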
func (e MDTlfIDMismatch) Error() string {
return fmt.Sprintf("TLF ID %s doesn't match successor TLF ID %s",
e.currID, e.nextID)
}
// MDPrevRootMismatch indicates that the PrevRoot field of a successor
// MD doesn't match the metadata ID of its predecessor.
type MDPrevRootMismatch struct {
prevRoot MdID
expectedPrevRoot MdID
}
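// Error implements the error interface for MDPrevRootMismatch.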
func (e MDPrevRootMismatch) Error() string {
return fmt.Sprintf("PrevRoot %s doesn't match expected %s",
e.prevRoot, e.expectedPrevRoot)
}
// MDDiskUsageMismatch indicates an inconsistency in the DiskUsage
// field of a RootMetadata object.
type MDDiskUsageMismatch struct {
expectedDiskUsage uint64
actualDiskUsage uint64
}
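// Error implements the error interface for MDDiskUsageMismatch.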
func (e MDDiskUsageMismatch) Error() string {
return fmt.Sprintf("Disk usage %d doesn't match expected %d",
e.actualDiskUsage, e.expectedDiskUsage)
}
// MDUpdateInvertError indicates that we tried to apply a revision that
// was not the next in line.
type MDUpdateInvertError struct {
rev MetadataRevision
curr MetadataRevision
}
// Error implements the error interface for MDUpdateInvertError.
func (e MDUpdateInvertError) Error() string {
return fmt.Sprintf("MD revision %d isn't next in line for our "+
"current revision %d while inverting", e.rev, e.curr)
}
// NotPermittedWhileDirtyError indicates that some operation failed
// because of outstanding dirty files, and may be retried later.
type NotPermittedWhileDirtyError struct {
}
// Error implements the error interface for NotPermittedWhileDirtyError.
func (e NotPermittedWhileDirtyError) Error() string {
return "Not permitted while writes are dirty"
}
// NoChainFoundError indicates that a conflict resolution chain
// corresponding to the given pointer could not be found.
type NoChainFoundError struct {
ptr BlockPointer
}
// Error implements the error interface for NoChainFoundError.
func (e NoChainFoundError) Error() string {
return fmt.Sprintf("No chain found for %v", e.ptr)
}
// DisallowedPrefixError indicates that the user attempted to create
// an entry using a name with a disallowed prefix.
type DisallowedPrefixError struct {
name string
prefix string
}
// Error implements the error interface for DisallowedPrefixError.
func (e DisallowedPrefixError) Error() string {
return fmt.Sprintf("Cannot create %s because it has the prefix %s",
e.name, e.prefix)
}
// FileTooBigError indicates that the user tried to write a file that
// would be bigger than KBFS's supported size.
type FileTooBigError struct {
p path
size int64
maxAllowedBytes uint64
}
// Error implements the error interface for FileTooBigError.
func (e FileTooBigError) Error() string {
return fmt.Sprintf("File %s would have increased to %d bytes, which is "+
"over the supported limit of %d bytes", e.p, e.size, e.maxAllowedBytes)
}
// NameTooLongError indicates that the user tried to write a directory
// entry name that would be bigger than KBFS's supported size.
type NameTooLongError struct {
name string
maxAllowedBytes uint32
}
// Error implements the error interface for NameTooLongError.
func (e NameTooLongError) Error() string {
return fmt.Sprintf("New directory entry name %s has more than the maximum "+
"allowed number of bytes (%d)", e.name, e.maxAllowedBytes)
}
// DirTooBigError indicates that the user tried to write a directory
// that would be bigger than KBFS's supported size.
type DirTooBigError struct {
p path
size uint64
maxAllowedBytes uint64
}
// Error implements the error interface for DirTooBigError.
func (e DirTooBigError) Error() string {
return fmt.Sprintf("Directory %s would have increased to at least %d "+
"bytes, which is over the supported limit of %d bytes", e.p,
e.size, e.maxAllowedBytes)
}
// TlfNameNotCanonical indicates that a name isn't canonical, and
// that another (not necessarily canonical) name should be tried.
type TlfNameNotCanonical struct {
Name, NameToTry string
}
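// Error implements the error interface for TlfNameNotCanonical.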
func (e TlfNameNotCanonical) Error() string {
return fmt.Sprintf("TLF name %s isn't canonical: try %s instead",
e.Name, e.NameToTry)
}
// NoCurrentSessionError indicates that the daemon has no current
// session. This is basically a wrapper for session.ErrNoSession,
// needed to give the correct return error code to the OS.
type NoCurrentSessionError struct {
}
// Error implements the error interface for NoCurrentSessionError.
func (e NoCurrentSessionError) Error() string {
return "You are not logged into Keybase. Try `keybase login`."
}
// NoCurrentSessionExpectedError is the error text that will get
// converted into a NoCurrentSessionError.
var NoCurrentSessionExpectedError = "no current session"
// RekeyPermissionError indicates that the user tried to rekey a
// top-level folder in a manner inconsistent with their permissions.
type RekeyPermissionError struct {
User libkb.NormalizedUsername
Dir string
}
// Error implements the error interface for RekeyPermissionError
func (e RekeyPermissionError) Error() string {
return fmt.Sprintf("%s is trying to rekey directory %s in a manner "+
"inconsistent with their role", e.User, e.Dir)
}
// NewRekeyPermissionError constructs a RekeyPermissionError for the given
// directory and user.
func NewRekeyPermissionError(
dir *TlfHandle, username libkb.NormalizedUsername) error {
dirname := dir.GetCanonicalPath()
return RekeyPermissionError{username, dirname}
}
// RekeyIncompleteError is returned when a rekey is partially done but
// needs a writer to finish it.
type RekeyIncompleteError struct{}
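// Error implements the error interface for RekeyIncompleteError.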
func (e RekeyIncompleteError) Error() string {
return fmt.Sprintf("Rekey did not complete due to insufficient user permissions")
}
// InvalidKIDError is returned whenever an invalid KID is detected.
type InvalidKIDError struct {
kid keybase1.KID
}
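// Error implements the error interface for InvalidKIDError.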
func (e InvalidKIDError) Error() string {
return fmt.Sprintf("Invalid KID %s", e.kid)
}
// InvalidByte32DataError is returned whenever invalid data for a
// 32-byte type is detected.
type InvalidByte32DataError struct {
data []byte
}
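// Error implements the error interface for InvalidByte32DataError.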
func (e InvalidByte32DataError) Error() string {
return fmt.Sprintf("Invalid byte32 data %v", e.data)
}
// TimeoutError is just a replacement for context.DeadlineExceeded
// with a more friendly error string.
type TimeoutError struct {
}
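// Error implements the error interface for TimeoutError.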
func (e TimeoutError) Error() string {
return "Operation timed out"
}
// InvalidOpError is returned when an operation is called that isn't supported
// by the current implementation.
type InvalidOpError struct {
op string
}
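// Error implements the error interface for InvalidOpError.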
func (e InvalidOpError) Error() string {
return fmt.Sprintf("Invalid operation: %s", e.op)
}
// CRAbandonStagedBranchError indicates that conflict resolution had to
// abandon a staged branch due to an unresolvable error.
type CRAbandonStagedBranchError struct {
Err error
Bid BranchID
}
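// Error implements the error interface for CRAbandonStagedBranchError.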
func (e CRAbandonStagedBranchError) Error() string {
return fmt.Sprintf("Abandoning staged branch %s due to an error: %v",
e.Bid, e.Err)
}
// NoSuchFolderListError indicates that the user tried to access a
// subdirectory of /keybase that doesn't exist.
type NoSuchFolderListError struct {
Name string
PrivName string
PubName string
}
// Error implements the error interface for NoSuchFolderListError
func (e NoSuchFolderListError) Error() string {
return fmt.Sprintf("/keybase/%s is not a Keybase folder. "+
"All folders begin with /keybase/%s or /keybase/%s.",
e.Name, e.PrivName, e.PubName)
}
// UnexpectedUnmergedPutError indicates that we tried to do an
// unmerged put when that was disallowed.
type UnexpectedUnmergedPutError struct {
}
// Error implements the error interface for UnexpectedUnmergedPutError
func (e UnexpectedUnmergedPutError) Error() string {
return "Unmerged puts are not allowed"
}
// NoSuchTlfHandleError indicates we were unable to resolve a folder
// ID to a folder handle.
type NoSuchTlfHandleError struct {
ID TlfID
}
// Error implements the error interface for NoSuchTlfHandleError
func (e NoSuchTlfHandleError) Error() string {
return fmt.Sprintf("Folder handle for %s not found", e.ID)
}
// TlfHandleExtensionMismatchError indicates the expected extension
// doesn't match the server's extension for the given handle.
type TlfHandleExtensionMismatchError struct {
Expected TlfHandleExtension
// Actual may be nil.
Actual *TlfHandleExtension
}
// Error implements the error interface for TlfHandleExtensionMismatchError
func (e TlfHandleExtensionMismatchError) Error() string {
return fmt.Sprintf("Folder handle extension mismatch, "+
"expected: %s, actual: %s", e.Expected, e.Actual)
}
// MetadataIsFinalError indicates that we tried to make or set a
// successor to a finalized folder.
type MetadataIsFinalError struct {
}
// Error implements the error interface for MetadataIsFinalError.
func (e MetadataIsFinalError) Error() string {
return "Metadata is final"
}
// IncompatibleHandleError indicates that something tried to update
// the head of a TLF with a RootMetadata that has an incompatible handle.
type IncompatibleHandleError struct {
oldName CanonicalTlfName
partiallyResolvedOldName CanonicalTlfName
newName CanonicalTlfName
}
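// Error implements the error interface for IncompatibleHandleError.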
func (e IncompatibleHandleError) Error() string {
return fmt.Sprintf(
"old head %q resolves to %q instead of new head %q",
e.oldName, e.partiallyResolvedOldName, e.newName)
}
// ShutdownHappenedError indicates that shutdown has happened.
type ShutdownHappenedError struct {
}
// Error implements the error interface for ShutdownHappenedError.
func (e ShutdownHappenedError) Error() string {
return "Shutdown happened"
}
// UnmergedError indicates that fbo is on an unmerged local revision
type UnmergedError struct {
}
// Error implements the error interface for UnmergedError.
func (e UnmergedError) Error() string {
return "fbo is on an unmerged local revision"
}
// ExclOnUnmergedError happens when an operation with O_EXCL set is
// attempted while fbo is on an unmerged local revision.
type ExclOnUnmergedError struct {
}
// Error implements the error interface for ExclOnUnmergedError.
func (e ExclOnUnmergedError) Error() string {
return "an operation with O_EXCL set is called but fbo is on an unmerged local version"
}
// OverQuotaWarning indicates that the user is over their quota, and
// is being slowed down by the server.
type OverQuotaWarning struct {
UsageBytes int64
LimitBytes int64
}
// Error implements the error interface for OverQuotaWarning.
func (w OverQuotaWarning) Error() string {
return fmt.Sprintf("You are using %d bytes, and your plan limits you "+
"to %d bytes. Please delete some data.", w.UsageBytes, w.LimitBytes)
}
// OpsCantHandleFavorite means that folderBranchOps wasn't able to
// deal with a favorites request.
type OpsCantHandleFavorite struct {
Msg string
}
// Error implements the error interface for OpsCantHandleFavorite.
func (e OpsCantHandleFavorite) Error() string {
return fmt.Sprintf("Couldn't handle the favorite operation: %s", e.Msg)
}
// TlfHandleFinalizedError is returned when something attempts to modify
// a finalized TLF handle.
type TlfHandleFinalizedError struct {
}
// Error implements the error interface for TlfHandleFinalizedError.
func (e TlfHandleFinalizedError) Error() string {
return "Attempt to modify finalized TLF handle"
}
// NoSigChainError means that a user we were trying to identify does
// not have a sigchain.
type NoSigChainError struct {
User libkb.NormalizedUsername
}
// Error implements the error interface for NoSigChainError.
func (e NoSigChainError) Error() string {
return fmt.Sprintf("%s has not yet installed Keybase and set up the "+
"Keybase filesystem. Please ask them to.", e.User)
}
// RekeyConflictError indicates a conflict happened while trying to rekey.
type RekeyConflictError struct {
Err error
}
// Error implements the error interface for RekeyConflictError.
func (e RekeyConflictError) Error() string {
return fmt.Sprintf("Conflict during a rekey, not retrying: %v", e.Err)
}
// UnmergedSelfConflictError indicates that we hit a conflict on the
// unmerged branch, so a previous MD PutUnmerged we thought had
// failed, had actually succeeded.
type UnmergedSelfConflictError struct {
Err error
}
// Error implements the error interface for UnmergedSelfConflictError.
func (e UnmergedSelfConflictError) Error() string {
return fmt.Sprintf("Unmerged self conflict: %v", e.Err)
}
// MutableBareRootMetadataNoImplError is returned when an interface expected
// to implement MutableBareRootMetadata does not do so.
type MutableBareRootMetadataNoImplError struct {
}
// Error implements the error interface for MutableBareRootMetadataNoImplError
func (e MutableBareRootMetadataNoImplError) Error() string {
return "Does not implement MutableBareRootMetadata"
}
| 1 | 13,008 | Explain why we don't reuse the corresponding server error locally? | keybase-kbfs | go |
@@ -417,7 +417,7 @@ public class SolrSearchResult {
.add("is_unpublished_state", this.isUnpublishedState())
.add("is_published", this.isPublishedState())
.add("is_deaccesioned", this.isDeaccessionedState())
- .add("date_to_display_on_card", this.dateToDisplayOnCard);
+ .add("date_to_display_on_card", DateUtil.formatDate(this.releaseOrCreateDate));
// Add is_deaccessioned attribute, even though MyData currently screens any deaccessioned info out
// | 1 | package edu.harvard.iq.dataverse.search;
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.DvObject;
import edu.harvard.iq.dataverse.api.Util;
import edu.harvard.iq.dataverse.dataset.DatasetThumbnail;
import edu.harvard.iq.dataverse.util.DateUtil;
import edu.harvard.iq.dataverse.util.json.JsonPrinter;
import edu.harvard.iq.dataverse.util.json.NullSafeJsonBuilder;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.logging.Logger;
import javax.json.Json;
import javax.json.JsonArrayBuilder;
import javax.json.JsonObject;
import javax.json.JsonObjectBuilder;
import static edu.harvard.iq.dataverse.util.json.NullSafeJsonBuilder.jsonObjectBuilder;
public class SolrSearchResult {
private static final Logger logger = Logger.getLogger(SolrSearchResult.class.getCanonicalName());
private String id;
private Long entityId;
private DvObject entity;
private String identifier;
private String type;
private String htmlUrl;
private String persistentUrl;
private String downloadUrl;
private String apiUrl;
/**
* This is called "imageUrl" because it used to really be a URL. While
* performance improvements were being made in the 4.2 timeframe, we started
* putting base64 representations of images in this String instead, which
* broke the Search API and probably things built on top of it such as
* MyData. See "`image_url` from Search API results no longer yields a
* downloadable image" at https://github.com/IQSS/dataverse/issues/3616
*/
private String imageUrl;
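    /**
     * Illustrative sketch, not part of the original class: given the caveat
     * above, consumers of the Search API cannot assume image_url is an actual
     * downloadable URL. A defensive check like this one (an assumption for
     * illustration, not Dataverse's real handling) distinguishes http(s)
     * links from inline base64 payloads.
     */
    private static boolean looksLikeDownloadableImageUrl(String imageUrl) {
        if (imageUrl == null) {
            return false;
        }
        // Base64 thumbnails arrive as raw encoded data rather than http(s) links.
        return imageUrl.startsWith("http://") || imageUrl.startsWith("https://");
    }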
private DatasetThumbnail datasetThumbnail;
private String query;
private String name;
private String nameSort;
private String status;
private Date releaseOrCreateDate;
private String dateToDisplayOnCard;
private List<String> publicationStatuses = new ArrayList<>();
/**
* @todo: how important is it to differentiate between name and title?
*/
private String title;
private String descriptionNoSnippet;
private List<String> datasetAuthors = new ArrayList<>();
private String deaccessionReason;
private List<Highlight> highlightsAsList = new ArrayList<>();
private Map<SolrField, Highlight> highlightsMap;
private Map<String, Highlight> highlightsAsMap;
// parent can be dataverse or dataset, store the name and id
/**
* The "identifier" of a file's parent (a dataset) is a globalId (often a
* doi).
*/
public static String PARENT_IDENTIFIER = "identifier";
private Map<String, String> parent;
private String dataverseAffiliation;
private String citation;
private String citationHtml;
/**
* Files and datasets might have a UNF. Dataverses don't.
*/
private String unf;
private String filetype;
private String fileContentType;
private Long fileSizeInBytes;
/**
* fileMD5 is here for legacy and backward-compatibility reasons. It might be deprecated some day in favor of "fileChecksumType" and "fileChecksumValue"
*/
private String fileMd5;
private DataFile.ChecksumType fileChecksumType;
private String fileChecksumValue;
private String dataverseAlias;
private String dataverseParentAlias;
// private boolean statePublished;
/**
* @todo Investigate/remove this "unpublishedState" variable. For files that
* have been published along with a dataset it says "true", which makes no
* sense.
*/
private boolean publishedState = false;
private boolean unpublishedState = false;
private boolean draftState = false;
private boolean inReviewState = false;
private boolean deaccessionedState = false;
private long datasetVersionId;
private String versionNumberFriendly;
//Determine if the search result is owned by any of the dvs in the tree of the DV displayed
private boolean isInTree;
private float score;
private List<String> userRole;
private boolean harvested = false;
private String dvTree;
private String harvestingDescription = null;
private List<String> fileCategories = null;
private List<String> tabularDataTags = null;
private String identifierOfDataverse = null;
private String nameOfDataverse = null;
private String filePersistentId = null;
public String getDvTree() {
return dvTree;
}
public void setDvTree(String dvTree) {
this.dvTree = dvTree;
}
public boolean isIsInTree() {
return isInTree;
}
public void setIsInTree(boolean isInTree) {
this.isInTree = isInTree;
}
public boolean isHarvested() {
return harvested;
}
public void setHarvested(boolean harvested) {
this.harvested = harvested;
}
public String getHarvestingDescription() {
//if (this.isHarvested()) {
return harvestingDescription;
//}
//return null;
}
public void setHarvestingDescription(String harvestingDescription) {
this.harvestingDescription = harvestingDescription;
}
// public boolean isStatePublished() {
// return statePublished;
// }
// public void setStatePublished(boolean statePublished) {
// this.statePublished = statePublished;
// }
public boolean isPublishedState() {
return publishedState;
}
public void setPublishedState(boolean publishedState) {
this.publishedState = publishedState;
}
public boolean isUnpublishedState() {
return unpublishedState;
}
public void setUnpublishedState(boolean unpublishedState) {
this.unpublishedState = unpublishedState;
}
public void setPublicationStatuses(List<String> statuses) {
if (statuses == null) {
this.publicationStatuses = new ArrayList<>();
return;
}
this.publicationStatuses = statuses;
// set booleans for individual statuses
//
for (String status : this.publicationStatuses) {
if (status.equals(IndexServiceBean.getUNPUBLISHED_STRING())) {
this.setUnpublishedState(true);
} else if (status.equals(IndexServiceBean.getPUBLISHED_STRING())) {
this.setPublishedState(true);
} else if (status.equals(IndexServiceBean.getDRAFT_STRING())) {
this.setDraftState(true);
} else if (status.equals(IndexServiceBean.getIN_REVIEW_STRING())) {
this.setInReviewState(true);
} else if (status.equals(IndexServiceBean.getDEACCESSIONED_STRING())) {
this.setDeaccessionedState(true);
}
}
} // setPublicationStatuses
/**
* Never return null, return an empty list instead
*
* @return
*/
public List<String> getPublicationStatuses() {
if (this.publicationStatuses == null) {
this.publicationStatuses = new ArrayList<>();
}
return this.publicationStatuses;
}
public JsonArrayBuilder getPublicationStatusesAsJSON() {
JsonArrayBuilder statuses = Json.createArrayBuilder();
for (String status : this.getPublicationStatuses()) {
statuses.add(status);
}
return statuses;
}
public boolean isDraftState() {
return draftState;
}
public void setDraftState(boolean draftState) {
this.draftState = draftState;
}
public boolean isInReviewState() {
return inReviewState;
}
public void setInReviewState(boolean inReviewState) {
this.inReviewState = inReviewState;
}
public boolean isDeaccessionedState() {
return deaccessionedState;
}
public void setDeaccessionedState(boolean deaccessionedState) {
this.deaccessionedState = deaccessionedState;
}
/**
* @todo: used? remove
*/
private List<String> matchedFields;
/**
* @todo: remove name?
*/
SolrSearchResult(String queryFromUser, String name) {
this.query = queryFromUser;
// this.name = name;
}
public Map<String, Highlight> getHighlightsAsMap() {
return highlightsAsMap;
}
public void setHighlightsAsMap(Map<String, Highlight> highlightsAsMap) {
this.highlightsAsMap = highlightsAsMap;
}
public String getNameHighlightSnippet() {
Highlight highlight = highlightsAsMap.get(SearchFields.NAME);
if (highlight != null) {
String firstSnippet = highlight.getSnippets().get(0);
if (firstSnippet != null) {
return firstSnippet;
}
}
return null;
}
public String getDataverseAffiliationHighlightSnippet() {
Highlight highlight = highlightsAsMap.get(SearchFields.AFFILIATION);
if (highlight != null) {
String firstSnippet = highlight.getSnippets().get(0);
if (firstSnippet != null) {
return firstSnippet;
}
}
return null;
}
public String getFileTypeHighlightSnippet() {
Highlight highlight = highlightsAsMap.get(SearchFields.FILE_TYPE_FRIENDLY);
if (highlight != null) {
String firstSnippet = highlight.getSnippets().get(0);
if (firstSnippet != null) {
return firstSnippet;
}
}
return null;
}
public String getTitleHighlightSnippet() {
/**
* @todo: don't hard-code title, look it up properly... or start
* indexing titles as names:
* https://redmine.hmdc.harvard.edu/issues/3798#note-2
*/
Highlight highlight = highlightsAsMap.get("title");
if (highlight != null) {
String firstSnippet = highlight.getSnippets().get(0);
if (firstSnippet != null) {
return firstSnippet;
}
}
return null;
}
public List<String> getDescriptionSnippets() {
for (Map.Entry<SolrField, Highlight> entry : highlightsMap.entrySet()) {
SolrField solrField = entry.getKey();
Highlight highlight = entry.getValue();
logger.fine("SolrSearchResult class: " + solrField.getNameSearchable() + ":" + highlight.getSnippets());
}
Highlight highlight = highlightsAsMap.get(SearchFields.DESCRIPTION);
if (type.equals("datasets")) {
highlight = highlightsAsMap.get(SearchFields.DATASET_DESCRIPTION);
}
if (highlight != null) {
return highlight.getSnippets();
} else {
return new ArrayList<>();
}
}
public Map<SolrField, Highlight> getHighlightsMap() {
return highlightsMap;
}
public void setHighlightsMap(Map<SolrField, Highlight> highlightsMap) {
this.highlightsMap = highlightsMap;
}
public List<String> getMatchedFields() {
return matchedFields;
}
public void setMatchedFields(List<String> matchedFields) {
this.matchedFields = matchedFields;
}
@Override
public String toString() {
if (this.name != null) {
return this.id + ":" + this.name + ":" + this.entityId;
} else {
return this.id + ":" + this.title + ":" + this.entityId;
}
}
public JsonArrayBuilder getRelevance() {
JsonArrayBuilder matchedFieldsArray = Json.createArrayBuilder();
JsonObjectBuilder matchedFieldObject = Json.createObjectBuilder();
for (Map.Entry<SolrField, Highlight> entry : highlightsMap.entrySet()) {
SolrField solrField = entry.getKey();
Highlight snippets = entry.getValue();
JsonArrayBuilder snippetArrayBuilder = Json.createArrayBuilder();
JsonObjectBuilder matchedFieldDetails = Json.createObjectBuilder();
for (String highlight : snippets.getSnippets()) {
snippetArrayBuilder.add(highlight);
}
/**
* @todo for the Search API, it might be nice to return offset
* numbers rather than html snippets surrounded by span tags or
* whatever.
*
* That's what the GitHub Search API does: "Requests can opt to
* receive those text fragments in the response, and every fragment
* is accompanied by numeric offsets identifying the exact location
* of each matching search term."
* https://developer.github.com/v3/search/#text-match-metadata
*
* It's not clear if getting the offset values is possible with
* Solr, however:
* stackoverflow.com/questions/13863118/can-solr-highlighting-also-indicate-the-position-or-offset-of-the-returned-fragments-within-the-original-field
*/
matchedFieldDetails.add("snippets", snippetArrayBuilder);
/**
* @todo In addition to the name of the field used by Solr , it
* would be nice to show the "friendly" name of the field we show in
* the GUI.
*/
// matchedFieldDetails.add("friendly", "FIXME");
matchedFieldObject.add(solrField.getNameSearchable(), matchedFieldDetails);
matchedFieldsArray.add(matchedFieldObject);
}
return matchedFieldsArray;
}
public JsonObject toJsonObject(boolean showRelevance, boolean showEntityIds, boolean showApiUrls) {
return json(showRelevance, showEntityIds, showApiUrls).build();
}
/**
* Add additional fields for the MyData page
*
* @return
*/
public JsonObjectBuilder getJsonForMyData() {
JsonObjectBuilder myDataJson = json(true, true, true);//boolean showRelevance, boolean showEntityIds, boolean showApiUrls)
myDataJson.add("publication_statuses", this.getPublicationStatusesAsJSON())
.add("is_draft_state", this.isDraftState())
.add("is_in_review_state", this.isInReviewState())
.add("is_unpublished_state", this.isUnpublishedState())
.add("is_published", this.isPublishedState())
.add("is_deaccesioned", this.isDeaccessionedState())
.add("date_to_display_on_card", this.dateToDisplayOnCard);
// Add is_deaccessioned attribute, even though MyData currently screens any deaccessioned info out
//
if ((this.isDeaccessionedState()) && (this.getPublicationStatuses().size() == 1)) {
myDataJson.add("deaccesioned_is_only_pubstatus", true);
}
if ((this.getParent() != null) && (!this.getParent().isEmpty())) {
//System.out.println("keys:" + parent.keySet().toString());
if (this.entity.isInstanceofDataFile()) {
myDataJson.add("parentIdentifier", this.getParent().get(SolrSearchResult.PARENT_IDENTIFIER))
.add("parentName", this.getParent().get("name"));
} else {
// for Dataverse and Dataset, get parent which is a Dataverse
myDataJson.add("parentId", this.getParent().get("id"))
.add("parentName", this.getParent().get("name"));
}
}
return myDataJson;
} //getJsonForMydata
public JsonObjectBuilder json(boolean showRelevance, boolean showEntityIds, boolean showApiUrls) {
if (this.type == null) {
return jsonObjectBuilder();
}
String displayName = null;
String identifierLabel = null;
String datasetCitation = null;
String datasetName = null;
String datasetId = null;
String datasetPersistentId = null;
String filePersistentId = null;
String preferredUrl = null;
String apiUrl = null;
if (this.type.equals(SearchConstants.DATAVERSES)) {
displayName = this.name;
identifierLabel = "identifier";
preferredUrl = getHtmlUrl();
} else if (this.type.equals(SearchConstants.DATASETS)) {
displayName = this.title;
identifierLabel = "global_id";
preferredUrl = getPersistentUrl();
/**
* @todo Should we show the name of the parent dataverse?
*/
} else if (this.type.equals(SearchConstants.FILES)) {
displayName = this.name;
identifierLabel = "file_id";
preferredUrl = getDownloadUrl();
/**
* @todo show more information for a file's parent, such as the
* title of the dataset it belongs to.
*/
datasetCitation = parent.get("citation");
datasetName = parent.get("name");
datasetId = parent.get("id");
datasetPersistentId = parent.get(SolrSearchResult.PARENT_IDENTIFIER);
}
//displayName = null; // testing NullSafeJsonBuilder
// because we are using NullSafeJsonBuilder key/value pairs will be dropped if the value is null
NullSafeJsonBuilder nullSafeJsonBuilder = jsonObjectBuilder()
.add("name", displayName)
.add("type", getDisplayType(getType()))
.add("url", preferredUrl)
.add("image_url", getImageUrl())
// .add("persistent_url", this.persistentUrl)
// .add("download_url", this.downloadUrl)
/**
* @todo How much value is there in exposing the identifier for
* dataverses? For
*/
.add(identifierLabel, this.identifier)
/**
* @todo Get dataset description from dsDescriptionValue. Also,
* is descriptionNoSnippet the right field to use generally?
*
* @todo What about the fact that datasets can now have multiple
* descriptions? Should we create an array called
* "additional_descriptions" that gets populated if there is
* more than one dataset description?
*
* @todo Why aren't file descriptions ever null? They always
* have an empty string at least.
*/
.add("description", this.descriptionNoSnippet)
/**
* @todo In the future we'd like to support non-public datasets
* per https://github.com/IQSS/dataverse/issues/1299 but for now
* we are only supporting non-public searches.
*/
.add("published_at", getDateTimePublished())
/**
* @todo Expose MIME Type:
* https://github.com/IQSS/dataverse/issues/1595
*/
.add("file_type", this.filetype)
.add("file_content_type", this.fileContentType)
.add("size_in_bytes", getFileSizeInBytes())
/**
* "md5" was the only possible value so it's hard-coded here but
* we might want to deprecate it someday since we now put the
* MD5 or SHA-1 in "checksum".
*/
.add("md5", getFileMd5())
.add("checksum", JsonPrinter.getChecksumTypeAndValue(getFileChecksumType(), getFileChecksumValue()))
.add("unf", getUnf())
.add("file_persistent_id", this.filePersistentId)
.add("dataset_name", datasetName)
.add("dataset_id", datasetId)
.add("dataset_persistent_id", datasetPersistentId)
.add("dataset_citation", datasetCitation)
.add("deaccession_reason", this.deaccessionReason)
.add("citationHtml", this.citationHtml)
.add("identifier_of_dataverse", this.identifierOfDataverse)
.add("name_of_dataverse", this.nameOfDataverse)
.add("citation", this.citation);
// Now that nullSafeJsonBuilder has been instantiated, check for null before adding to it!
if (showRelevance) {
nullSafeJsonBuilder.add("matches", getRelevance());
nullSafeJsonBuilder.add("score", getScore());
}
if (showEntityIds) {
if (this.entityId != null) {
nullSafeJsonBuilder.add("entity_id", this.entityId);
}
}
if (showApiUrls) {
/**
* @todo We should probably have a metadata_url or api_url concept
* enabled by default, not hidden behind an undocumented boolean.
* For datasets, this would be http://example.com/api/datasets/10 or
* whatever (to get more detailed JSON), but right now this requires
* an API token. Discuss at
* https://docs.google.com/document/d/1d8sT2GLSavgiAuMTVX8KzTCX0lROEET1edhvHHRDZOs/edit?usp=sharing";
*/
if (getApiUrl() != null) {
nullSafeJsonBuilder.add("api_url", getApiUrl());
}
}
// NullSafeJsonBuilder is awesome but can't build null safe arrays. :(
if (!datasetAuthors.isEmpty()) {
JsonArrayBuilder authors = Json.createArrayBuilder();
for (String datasetAuthor : datasetAuthors) {
authors.add(datasetAuthor);
}
nullSafeJsonBuilder.add("authors", authors);
}
return nullSafeJsonBuilder;
}
private String getDateTimePublished() {
String datePublished = null;
if (draftState == false) {
datePublished = releaseOrCreateDate == null ? null : Util.getDateTimeFormat().format(releaseOrCreateDate);
}
return datePublished;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public Long getEntityId() {
return entityId;
}
public void setEntityId(Long entityId) {
this.entityId = entityId;
}
public DvObject getEntity() {
return entity;
}
public void setEntity(DvObject entity) {
this.entity = entity;
}
public String getIdentifier() {
return identifier;
}
public void setIdentifier(String identifier) {
this.identifier = identifier;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public String getHtmlUrl() {
return htmlUrl;
}
public void setHtmlUrl(String htmlUrl) {
this.htmlUrl = htmlUrl;
}
public String getPersistentUrl() {
return persistentUrl;
}
public void setPersistentUrl(String persistentUrl) {
this.persistentUrl = persistentUrl;
}
public String getDownloadUrl() {
return downloadUrl;
}
public void setDownloadUrl(String downloadUrl) {
this.downloadUrl = downloadUrl;
}
public String getApiUrl() {
return apiUrl;
}
public void setApiUrl(String apiUrl) {
this.apiUrl = apiUrl;
}
public String getImageUrl() {
return imageUrl;
}
public void setImageUrl(String imageUrl) {
this.imageUrl = imageUrl;
}
public DatasetThumbnail getDatasetThumbnail() {
return datasetThumbnail;
}
public void setDatasetThumbnail(DatasetThumbnail datasetThumbnail) {
this.datasetThumbnail = datasetThumbnail;
}
public String getQuery() {
return query;
}
public void setQuery(String query) {
this.query = query;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
public String getDescriptionNoSnippet() {
return descriptionNoSnippet;
}
public void setDescriptionNoSnippet(String descriptionNoSnippet) {
this.descriptionNoSnippet = descriptionNoSnippet;
}
public List<String> getDatasetAuthors() {
return datasetAuthors;
}
public void setDatasetAuthors(List<String> datasetAuthors) {
this.datasetAuthors = datasetAuthors;
}
public String getDeaccessionReason() {
return deaccessionReason;
}
public void setDeaccessionReason(String deaccessionReason) {
this.deaccessionReason = deaccessionReason;
}
public List<Highlight> getHighlightsAsListOrig() {
return highlightsAsList;
}
public List<Highlight> getHighlightsAsList() {
List<Highlight> filtered = new ArrayList<>();
for (Highlight highlight : highlightsAsList) {
String field = highlight.getSolrField().getNameSearchable();
/**
* @todo don't hard code "title" here. And should we collapse name
* and title together anyway?
*/
if (!field.equals(SearchFields.NAME)
&& !field.equals(SearchFields.DESCRIPTION)
&& !field.equals(SearchFields.DATASET_DESCRIPTION)
&& !field.equals(SearchFields.AFFILIATION)
&& !field.equals("title")) {
filtered.add(highlight);
}
}
return filtered;
}
public void setHighlightsAsList(List<Highlight> highlightsAsList) {
this.highlightsAsList = highlightsAsList;
}
public List<String> getFileCategories() {
return fileCategories;
}
public void setFileCategories(List<String> fileCategories) {
this.fileCategories = fileCategories;
}
public List<String> getTabularDataTags() {
return tabularDataTags;
}
public void setTabularDataTags(List<String> tabularDataTags) {
this.tabularDataTags = tabularDataTags;
}
public Map<String, String> getParent() {
return parent;
}
public Long getParentIdAsLong() {
if (this.getParent() == null) {
return null;
}
if (!this.getParent().containsKey("id")) {
return null;
}
String parentIdString = getParent().get("id");
if (parentIdString == null) {
return null;
}
try {
return Long.parseLong(parentIdString);
} catch (NumberFormatException ex) {
return null;
}
}
public void setParent(Map<String, String> parent) {
this.parent = parent;
}
public String getDataverseAffiliation() {
return dataverseAffiliation;
}
public void setDataverseAffiliation(String dataverseAffiliation) {
this.dataverseAffiliation = dataverseAffiliation;
}
public String getCitation() {
return citation;
}
public void setCitation(String citation) {
this.citation = citation;
}
public String getCitationHtml() {
return citationHtml;
}
public void setCitationHtml(String citationHtml) {
this.citationHtml = citationHtml;
}
public String getFiletype() {
return filetype;
}
public void setFiletype(String filetype) {
this.filetype = filetype;
}
public String getFileContentType() {
return fileContentType;
}
public void setFileContentType(String fileContentType) {
this.fileContentType = fileContentType;
}
public String getUnf() {
return unf;
}
public void setUnf(String unf) {
this.unf = unf;
}
public Long getFileSizeInBytes() {
return fileSizeInBytes;
}
public void setFileSizeInBytes(Long fileSizeInBytes) {
this.fileSizeInBytes = fileSizeInBytes;
}
public String getFileMd5() {
if (DataFile.ChecksumType.MD5.equals(getFileChecksumType())) {
return fileMd5;
} else {
return null;
}
}
public void setFileMd5(String fileMd5) {
this.fileMd5 = fileMd5;
}
public DataFile.ChecksumType getFileChecksumType() {
return fileChecksumType;
}
public void setFileChecksumType(DataFile.ChecksumType fileChecksumType) {
this.fileChecksumType = fileChecksumType;
}
public String getFileChecksumValue() {
return fileChecksumValue;
}
public void setFileChecksumValue(String fileChecksumValue) {
this.fileChecksumValue = fileChecksumValue;
}
public String getNameSort() {
return nameSort;
}
public void setNameSort(String nameSort) {
this.nameSort = nameSort;
}
public String getStatus() {
return status;
}
void setStatus(String status) {
this.status = status;
}
public Date getReleaseOrCreateDate() {
return releaseOrCreateDate;
}
public void setReleaseOrCreateDate(Date releaseOrCreateDate) {
this.releaseOrCreateDate = releaseOrCreateDate;
}
public String getDateToDisplayOnCard() {
return DateUtil.formatDate(dateToDisplayOnCard,"MMM dd, yyyy");
}
public void setDateToDisplayOnCard(String dateToDisplayOnCard) {
this.dateToDisplayOnCard = dateToDisplayOnCard;
}
public long getDatasetVersionId() {
return datasetVersionId;
}
public void setDatasetVersionId(long datasetVersionId) {
this.datasetVersionId = datasetVersionId;
}
public String getVersionNumberFriendly() {
return versionNumberFriendly;
}
public void setVersionNumberFriendly(String versionNumberFriendly) {
this.versionNumberFriendly = versionNumberFriendly;
}
public String getDatasetUrl() {
String failSafeUrl = "/dataset.xhtml?id=" + entityId + "&versionId=" + datasetVersionId;
if (identifier != null) {
/**
* Unfortunately, colons in the globalId (doi:10...) are converted
* to %3A (doi%3A10...). To prevent this we switched many JSF tags
* to a plain "a" tag with an href as suggested at
* http://stackoverflow.com/questions/24733959/houtputlink-value-escaped
*/
String badString = "null";
if (!identifier.contains(badString)) {
if (entity != null && entity instanceof Dataset) {
if (this.isHarvested() && ((Dataset)entity).getHarvestedFrom() != null) {
String remoteArchiveUrl = ((Dataset) entity).getRemoteArchiveURL();
if (remoteArchiveUrl != null) {
return remoteArchiveUrl;
}
return null;
}
}
if (isDraftState()) {
return "/dataset.xhtml?persistentId=" + identifier + "&version=DRAFT";
}
return "/dataset.xhtml?persistentId=" + identifier;
} else {
logger.info("Dataset identifier/globalId contains \"" + badString + "\" perhaps due to https://github.com/IQSS/dataverse/issues/1147 . Fix data in database and reindex. Returning failsafe URL: " + failSafeUrl);
return failSafeUrl;
}
} else {
logger.info("Dataset identifier/globalId was null. Returning failsafe URL: " + failSafeUrl);
return failSafeUrl;
}
}
public String getFileParentIdentifier() {
if (entity == null) {
return null;
}
if (entity instanceof DataFile) {
return parent.get(PARENT_IDENTIFIER); // Dataset globalID
}
return null;
//if (entity)
}
public String getFilePersistentId() {
return filePersistentId;
}
public void setFilePersistentId(String pid) {
filePersistentId = pid;
}
public String getFileUrl() {
// Nothing special needs to be done for harvested file URLs:
// simply directing these to the local dataset.xhtml for this dataset
// will take care of it - because DatasetPage will issue a redirect
// to the remote archive URL.
// This is true AS OF 4.2.4, FEB. 2016! - We'll probably want to make
// .getRemoteArchiveURL() methods, both in DataFile and Dataset objects,
// work again at some point in the future.
/*
if (entity != null && entity instanceof DataFile && this.isHarvested()) {
String remoteArchiveUrl = ((DataFile) entity).getRemoteArchiveURL();
if (remoteArchiveUrl != null) {
return remoteArchiveUrl;
}
return null;
}*/
if (entity.getIdentifier() != null){
return "/file.xhtml?persistentId=" + entity.getGlobalIdString();
}
return "/file.xhtml?fileId=" + entity.getId() + "&datasetVersionId=" + datasetVersionId;
/*
if (parentDatasetGlobalId != null) {
return "/dataset.xhtml?persistentId=" + parentDatasetGlobalId;
} else {
return "/dataset.xhtml?id=" + parent.get(SearchFields.ID) + "&versionId=" + datasetVersionId;
}*/
}
public String getFileDatasetUrl() {
// See the comment in the getFileUrl() method above. -- L.A. 4.2.4
/*
if (entity != null && entity instanceof DataFile && this.isHarvested()) {
String remoteArchiveUrl = ((DataFile) entity).getRemoteArchiveURL();
if (remoteArchiveUrl != null) {
return remoteArchiveUrl;
}
return null;
}*/
String parentDatasetGlobalId = parent.get(PARENT_IDENTIFIER);
if (parentDatasetGlobalId != null) {
return "/dataset.xhtml?persistentId=" + parentDatasetGlobalId;
} else {
return "/dataset.xhtml?id=" + parent.get(SearchFields.ID) + "&versionId=" + datasetVersionId;
}
}
/**
* @return the dataverseAlias
*/
public String getDataverseAlias() {
return dataverseAlias;
}
/**
* @param dataverseAlias the dataverseAlias to set
*/
public void setDataverseAlias(String dataverseAlias) {
this.dataverseAlias = dataverseAlias;
}
/**
* @return the dataverseParentAlias
*/
public String getDataverseParentAlias() {
return dataverseParentAlias;
}
/**
* @param dataverseParentAlias the dataverseParentAlias to set
*/
public void setDataverseParentAlias(String dataverseParentAlias) {
this.dataverseParentAlias = dataverseParentAlias;
}
public float getScore() {
return score;
}
public void setScore(float score) {
this.score = score;
}
private String getDisplayType(String type) {
if (type.equals(SearchConstants.DATAVERSES)) {
return SearchConstants.DATAVERSE;
} else if (type.equals(SearchConstants.DATASETS)) {
return SearchConstants.DATASET;
} else if (type.equals(SearchConstants.FILES)) {
return SearchConstants.FILE;
} else {
return null;
}
}
/*
public JsonArrayBuilder getUserRolesAsJson() {
JsonArrayBuilder jsonRoleStrings = Json.createArrayBuilder();
for (String role : this.getUserRole()) {
jsonRoleStrings.add(role);
}
return jsonRoleStrings;
}*/
public List<String> getUserRole() {
return userRole;
}
public void setUserRole(List<String> userRole) {
this.userRole = userRole;
}
public String getIdentifierOfDataverse() {
return identifierOfDataverse;
}
public void setIdentifierOfDataverse(String id) {
this.identifierOfDataverse = id;
}
public String getNameOfDataverse() {
return nameOfDataverse;
}
public void setNameOfDataverse(String id) {
this.nameOfDataverse = id;
}
}
| 1 | 39,514 | @JayanthyChengan This change is still missing (and should be last one!): change this line to call getDateToDisplayOnCard(), that way the logic is in only one place (that method). | IQSS-dataverse | java |
@@ -20,8 +20,6 @@ var (
ErrNoConnection = errors.New("no connection exists")
// ErrAlreadyExists error indicates that action applied to manager expects no active connection (i.e. connect)
ErrAlreadyExists = errors.New("connection already exists")
- // ErrConnectionCancelled indicates that connection in progress was cancelled by request of api user
- ErrConnectionCancelled = errors.New("connection was cancelled")
// ErrOpenvpnProcessDied indicates that Connect method didn't reach "Connected" phase due to openvpn error
ErrOpenvpnProcessDied = errors.New("openvpn process died")
) | 1 | package connection
import (
"errors"
log "github.com/cihub/seelog"
"github.com/mysterium/node/communication"
"github.com/mysterium/node/identity"
"github.com/mysterium/node/openvpn"
"github.com/mysterium/node/openvpn/middlewares/client/bytescount"
"github.com/mysterium/node/server"
"github.com/mysterium/node/service_discovery/dto"
"github.com/mysterium/node/session"
"github.com/mysterium/node/utils"
)
const managerLogPrefix = "[connection-manager] "
var (
// ErrNoConnection error indicates that action applied to manager expects active connection (i.e. disconnect)
ErrNoConnection = errors.New("no connection exists")
// ErrAlreadyExists error indicates that action applied to manager expects no active connection (i.e. connect)
ErrAlreadyExists = errors.New("connection already exists")
// ErrConnectionCancelled indicates that connection in progress was cancelled by request of api user
ErrConnectionCancelled = errors.New("connection was cancelled")
// ErrOpenvpnProcessDied indicates that Connect method didn't reach "Connected" phase due to openvpn error
ErrOpenvpnProcessDied = errors.New("openvpn process died")
)
type connectionManager struct {
//these are passed on creation
mysteriumClient server.Client
newDialog DialogCreator
newVpnClient VpnClientCreator
statsKeeper bytescount.SessionStatsKeeper
//these are populated by Connect at runtime
status ConnectionStatus
cleanConnection func()
}
// NewManager creates connection manager with given dependencies
func NewManager(mysteriumClient server.Client, dialogCreator DialogCreator,
vpnClientCreator VpnClientCreator, statsKeeper bytescount.SessionStatsKeeper) *connectionManager {
return &connectionManager{
mysteriumClient: mysteriumClient,
newDialog: dialogCreator,
newVpnClient: vpnClientCreator,
statsKeeper: statsKeeper,
status: statusNotConnected(),
cleanConnection: warnOnClean,
}
}
func (manager *connectionManager) Connect(consumerID, providerID identity.Identity) (err error) {
if manager.status.State != NotConnected {
return ErrAlreadyExists
}
manager.status = statusConnecting()
defer func() {
if err != nil {
manager.status = statusNotConnected()
}
}()
cancelable := utils.NewCancelable()
manager.cleanConnection = utils.CallOnce(func() {
log.Info(managerLogPrefix, "Canceling connection initiation")
manager.status = statusDisconnecting()
cancelable.Cancel()
})
val, err := cancelable.
NewRequest(func() (interface{}, error) {
return manager.findProposalByProviderID(providerID)
}).
Call()
if err != nil {
return err
}
proposal := val.(*dto.ServiceProposal)
val, err = cancelable.
NewRequest(func() (interface{}, error) {
return manager.newDialog(consumerID, providerID, proposal.ProviderContacts[0])
}).
Cleanup(utils.InvokeOnSuccess(func(val interface{}) {
val.(communication.Dialog).Close()
})).
Call()
if err != nil {
return err
}
dialog := val.(communication.Dialog)
val, err = cancelable.
NewRequest(func() (interface{}, error) {
return session.RequestSessionCreate(dialog, proposal.ID)
}).
Call()
if err != nil {
dialog.Close()
return err
}
vpnSession := val.(*session.SessionDto)
stateChannel := make(chan openvpn.State, 10)
val, err = cancelable.
NewRequest(func() (interface{}, error) {
return manager.startOpenvpnClient(*vpnSession, consumerID, providerID, stateChannel)
}).
Cleanup(utils.InvokeOnSuccess(func(val interface{}) {
val.(openvpn.Client).Stop()
})).
Call()
if err != nil {
dialog.Close()
return err
}
openvpnClient := val.(openvpn.Client)
err = manager.waitForConnectedState(stateChannel, vpnSession.ID, cancelable.Cancelled)
if err != nil {
dialog.Close()
openvpnClient.Stop()
return err
}
manager.cleanConnection = func() {
log.Info(managerLogPrefix, "Closing active connection")
manager.status = statusDisconnecting()
if err := openvpnClient.Stop(); err != nil {
log.Warn(managerLogPrefix, "Openvpn client stopped with error: ", err)
} else {
log.Info(managerLogPrefix, "Openvpn client stopped")
}
}
go openvpnClientWaiter(openvpnClient, dialog)
go manager.consumeOpenvpnStates(stateChannel, vpnSession.ID)
return nil
}
func (manager *connectionManager) Status() ConnectionStatus {
return manager.status
}
func (manager *connectionManager) Disconnect() error {
if manager.status.State == NotConnected {
return ErrNoConnection
}
manager.cleanConnection()
return nil
}
func warnOnClean() {
log.Warn(managerLogPrefix, "Trying to close when there is nothing to close. Possible bug or race condition")
}
// TODO this can be extracted as a dependency later when node selection criteria become clear
func (manager *connectionManager) findProposalByProviderID(providerID identity.Identity) (*dto.ServiceProposal, error) {
proposals, err := manager.mysteriumClient.FindProposals(providerID.Address)
if err != nil {
return nil, err
}
if len(proposals) == 0 {
err = errors.New("provider has no service proposals")
return nil, err
}
return &proposals[0], nil
}
func openvpnClientWaiter(openvpnClient openvpn.Client, dialog communication.Dialog) {
err := openvpnClient.Wait()
if err != nil {
log.Warn(managerLogPrefix, "Openvpn client exited with error: ", err)
} else {
log.Info(managerLogPrefix, "Openvpn client exited")
}
dialog.Close()
}
func (manager *connectionManager) startOpenvpnClient(vpnSession session.SessionDto, consumerID, providerID identity.Identity, stateChannel chan openvpn.State) (openvpn.Client, error) {
openvpnClient, err := manager.newVpnClient(
vpnSession,
consumerID,
providerID,
channelToStateCallbackAdapter(stateChannel),
)
if err != nil {
return nil, err
}
if err = openvpnClient.Start(); err != nil {
return nil, err
}
return openvpnClient, nil
}
func (manager *connectionManager) waitForConnectedState(stateChannel <-chan openvpn.State, sessionID session.SessionID, cancelRequest utils.CancelChannel) error {
for {
select {
case state, more := <-stateChannel:
if !more {
return ErrOpenvpnProcessDied
}
switch state {
case openvpn.ConnectedState:
manager.onStateChanged(state, sessionID)
return nil
default:
manager.onStateChanged(state, sessionID)
}
case <-cancelRequest:
return ErrConnectionCancelled
}
}
}
func (manager *connectionManager) consumeOpenvpnStates(stateChannel <-chan openvpn.State, sessionID session.SessionID) {
for state := range stateChannel {
manager.onStateChanged(state, sessionID)
}
manager.status = statusNotConnected()
log.Debug(managerLogPrefix, "State updater stopped")
}
func (manager *connectionManager) onStateChanged(state openvpn.State, sessionID session.SessionID) {
switch state {
case openvpn.ConnectedState:
manager.statsKeeper.MarkSessionStart()
manager.status = statusConnected(sessionID)
case openvpn.ExitingState:
manager.statsKeeper.MarkSessionEnd()
case openvpn.ReconnectingState:
manager.status = statusReconnecting()
}
}
| 1 | 10,898 | What was wrong with more concrete error? | mysteriumnetwork-node | go |
@@ -14,3 +14,17 @@ function roots_get_search_form($form) {
return $form;
}
add_filter('get_search_form', 'roots_get_search_form');
+
+/**
+ * Add page slug to body_class() classes if it doesn't exist
+ */
+function roots_body_class($classes) {
+ // Add post/page slug
+ if (is_single() || is_page() && !is_front_page()) {
+ if (!in_array(basename(get_permalink()), $classes)) {
+ $classes[] = basename(get_permalink());
+ }
+ }
+ return $classes;
+}
+add_filter('body_class', 'roots_body_class'); | 1 | <?php
/**
* Utility functions
*/
function is_element_empty($element) {
$element = trim($element);
return !empty($element);
}
// Tell WordPress to use searchform.php from the templates/ directory
function roots_get_search_form($form) {
$form = '';
locate_template('/templates/searchform.php', true, false);
return $form;
}
add_filter('get_search_form', 'roots_get_search_form');
| 1 | 8,536 | can you replace the tabs with 2 spaces please to match the rest of the project? | roots-sage | php |
@@ -14,6 +14,8 @@ const IntegralKind = "integral"
type IntegralOpSpec struct {
Unit flux.Duration `json:"unit"`
+ TimeSrc string `json:"timeSrc"`
+ TimeDst string `json:"timeDst"`
execute.AggregateConfig
}
| 1 | package functions
import (
"fmt"
"time"
"github.com/influxdata/flux"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/semantic"
)
const IntegralKind = "integral"
type IntegralOpSpec struct {
Unit flux.Duration `json:"unit"`
execute.AggregateConfig
}
var integralSignature = execute.DefaultAggregateSignature()
func init() {
integralSignature.Params["unit"] = semantic.Duration
flux.RegisterFunction(IntegralKind, createIntegralOpSpec, integralSignature)
flux.RegisterOpSpec(IntegralKind, newIntegralOp)
plan.RegisterProcedureSpec(IntegralKind, newIntegralProcedure, IntegralKind)
execute.RegisterTransformation(IntegralKind, createIntegralTransformation)
}
func createIntegralOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) {
if err := a.AddParentFromArgs(args); err != nil {
return nil, err
}
spec := new(IntegralOpSpec)
if unit, ok, err := args.GetDuration("unit"); err != nil {
return nil, err
} else if ok {
spec.Unit = unit
} else {
//Default is 1s
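// Illustrative only (not from this repository): a Flux call such as `integral(unit: 10s)` would override this default.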
spec.Unit = flux.Duration(time.Second)
}
if err := spec.AggregateConfig.ReadArgs(args); err != nil {
return nil, err
}
return spec, nil
}
func newIntegralOp() flux.OperationSpec {
return new(IntegralOpSpec)
}
func (s *IntegralOpSpec) Kind() flux.OperationKind {
return IntegralKind
}
type IntegralProcedureSpec struct {
Unit flux.Duration `json:"unit"`
execute.AggregateConfig
}
func newIntegralProcedure(qs flux.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) {
spec, ok := qs.(*IntegralOpSpec)
if !ok {
return nil, fmt.Errorf("invalid spec type %T", qs)
}
return &IntegralProcedureSpec{
Unit: spec.Unit,
AggregateConfig: spec.AggregateConfig,
}, nil
}
func (s *IntegralProcedureSpec) Kind() plan.ProcedureKind {
return IntegralKind
}
func (s *IntegralProcedureSpec) Copy() plan.ProcedureSpec {
ns := new(IntegralProcedureSpec)
*ns = *s
ns.AggregateConfig = s.AggregateConfig.Copy()
return ns
}
func createIntegralTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) {
s, ok := spec.(*IntegralProcedureSpec)
if !ok {
return nil, nil, fmt.Errorf("invalid spec type %T", spec)
}
cache := execute.NewTableBuilderCache(a.Allocator())
d := execute.NewDataset(id, mode, cache)
t := NewIntegralTransformation(d, cache, s)
return t, d, nil
}
type integralTransformation struct {
d execute.Dataset
cache execute.TableBuilderCache
spec IntegralProcedureSpec
}
func NewIntegralTransformation(d execute.Dataset, cache execute.TableBuilderCache, spec *IntegralProcedureSpec) *integralTransformation {
return &integralTransformation{
d: d,
cache: cache,
spec: *spec,
}
}
func (t *integralTransformation) RetractTable(id execute.DatasetID, key flux.GroupKey) error {
return t.d.RetractTable(key)
}
func (t *integralTransformation) Process(id execute.DatasetID, tbl flux.Table) error {
builder, created := t.cache.TableBuilder(tbl.Key())
if !created {
return fmt.Errorf("integral found duplicate table with key: %v", tbl.Key())
}
execute.AddTableKeyCols(tbl.Key(), builder)
builder.AddCol(flux.ColMeta{
Label: t.spec.TimeDst,
Type: flux.TTime,
})
cols := tbl.Cols()
integrals := make([]*integral, len(cols))
colMap := make([]int, len(cols))
for j, c := range cols {
if execute.ContainsStr(t.spec.Columns, c.Label) {
integrals[j] = newIntegral(time.Duration(t.spec.Unit))
colMap[j] = builder.AddCol(flux.ColMeta{
Label: c.Label,
Type: flux.TFloat,
})
}
}
if err := execute.AppendAggregateTime(t.spec.TimeSrc, t.spec.TimeDst, tbl.Key(), builder); err != nil {
return err
}
timeIdx := execute.ColIdx(t.spec.TimeDst, cols)
if timeIdx < 0 {
return fmt.Errorf("no column %q exists", t.spec.TimeSrc)
}
err := tbl.Do(func(cr flux.ColReader) error {
for j, in := range integrals {
if in == nil {
continue
}
l := cr.Len()
for i := 0; i < l; i++ {
tm := cr.Times(timeIdx)[i]
in.updateFloat(tm, cr.Floats(j)[i])
}
}
return nil
})
if err != nil {
return err
}
execute.AppendKeyValues(tbl.Key(), builder)
for j, in := range integrals {
if in == nil {
continue
}
builder.AppendFloat(colMap[j], in.value())
}
return nil
}
func (t *integralTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error {
return t.d.UpdateWatermark(mark)
}
func (t *integralTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error {
return t.d.UpdateProcessingTime(pt)
}
func (t *integralTransformation) Finish(id execute.DatasetID, err error) {
t.d.Finish(err)
}
func newIntegral(unit time.Duration) *integral {
return &integral{
first: true,
unit: float64(unit),
}
}
type integral struct {
first bool
unit float64
pFloatValue float64
pTime execute.Time
sum float64
}
func (in *integral) value() float64 {
return in.sum
}
func (in *integral) updateFloat(t execute.Time, v float64) {
if in.first {
in.pTime = t
in.pFloatValue = v
in.first = false
return
}
elapsed := float64(t-in.pTime) / in.unit
in.sum += 0.5 * (v + in.pFloatValue) * elapsed
in.pTime = t
in.pFloatValue = v
}
| 1 | 8,433 | Why are we adding it to the integral function? | influxdata-flux | go |
@@ -45,11 +45,11 @@ func XfrmStateAdd(state *XfrmState) error {
msg := &nl.XfrmUsersaInfo{}
msg.Family = uint16(nl.GetIPFamily(state.Dst))
- msg.Id.Daddr.FromIP(state.Dst)
+ msg.ID.Daddr.FromIP(state.Dst)
msg.Saddr.FromIP(state.Src)
- msg.Id.Proto = uint8(state.Proto)
+ msg.ID.Proto = uint8(state.Proto)
msg.Mode = uint8(state.Mode)
- msg.Id.Spi = nl.Swap32(uint32(state.Spi))
+ msg.ID.Spi = nl.Swap32(uint32(state.Spi))
msg.Reqid = uint32(state.Reqid)
msg.ReplayWindow = uint8(state.ReplayWindow)
msg.Lft.SoftByteLimit = nl.XFRM_INF | 1 | package netlink
import (
"fmt"
"syscall"
"github.com/vishvananda/netlink/nl"
)
func writeStateAlgo(a *XfrmStateAlgo) []byte {
algo := nl.XfrmAlgo{
AlgKeyLen: uint32(len(a.Key) * 8),
AlgKey: a.Key,
}
end := len(a.Name)
if end > 64 {
end = 64
}
copy(algo.AlgName[:end], a.Name)
return algo.Serialize()
}
func writeStateAlgoAuth(a *XfrmStateAlgo) []byte {
algo := nl.XfrmAlgoAuth{
AlgKeyLen: uint32(len(a.Key) * 8),
AlgTruncLen: uint32(a.TruncateLen),
AlgKey: a.Key,
}
end := len(a.Name)
if end > 64 {
end = 64
}
copy(algo.AlgName[:end], a.Name)
return algo.Serialize()
}
// XfrmStateAdd will add an xfrm state to the system.
// Equivalent to: `ip xfrm state add $state`
func XfrmStateAdd(state *XfrmState) error {
// A state with spi 0 can't be deleted so don't allow it to be set
if state.Spi == 0 {
return fmt.Errorf("Spi must be set when adding xfrm state.")
}
req := nl.NewNetlinkRequest(nl.XFRM_MSG_NEWSA, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
msg := &nl.XfrmUsersaInfo{}
msg.Family = uint16(nl.GetIPFamily(state.Dst))
msg.Id.Daddr.FromIP(state.Dst)
msg.Saddr.FromIP(state.Src)
msg.Id.Proto = uint8(state.Proto)
msg.Mode = uint8(state.Mode)
msg.Id.Spi = nl.Swap32(uint32(state.Spi))
msg.Reqid = uint32(state.Reqid)
msg.ReplayWindow = uint8(state.ReplayWindow)
msg.Lft.SoftByteLimit = nl.XFRM_INF
msg.Lft.HardByteLimit = nl.XFRM_INF
msg.Lft.SoftPacketLimit = nl.XFRM_INF
msg.Lft.HardPacketLimit = nl.XFRM_INF
req.AddData(msg)
if state.Auth != nil {
out := nl.NewRtAttr(nl.XFRMA_ALG_AUTH_TRUNC, writeStateAlgoAuth(state.Auth))
req.AddData(out)
}
if state.Crypt != nil {
out := nl.NewRtAttr(nl.XFRMA_ALG_CRYPT, writeStateAlgo(state.Crypt))
req.AddData(out)
}
if state.Encap != nil {
encapData := make([]byte, nl.SizeofXfrmEncapTmpl)
encap := nl.DeserializeXfrmEncapTmpl(encapData)
encap.EncapType = uint16(state.Encap.Type)
encap.EncapSport = nl.Swap16(uint16(state.Encap.SrcPort))
encap.EncapDport = nl.Swap16(uint16(state.Encap.DstPort))
encap.EncapOa.FromIP(state.Encap.OriginalAddress)
out := nl.NewRtAttr(nl.XFRMA_ENCAP, encapData)
req.AddData(out)
}
_, err := req.Execute(syscall.NETLINK_XFRM, 0)
return err
}
// XfrmStateDel will delete an xfrm state from the system. Note that
// the Algos are ignored when matching the state to delete.
// Equivalent to: `ip xfrm state del $state`
func XfrmStateDel(state *XfrmState) error {
req := nl.NewNetlinkRequest(nl.XFRM_MSG_DELSA, syscall.NLM_F_ACK)
msg := &nl.XfrmUsersaId{}
msg.Daddr.FromIP(state.Dst)
msg.Family = uint16(nl.GetIPFamily(state.Dst))
msg.Proto = uint8(state.Proto)
msg.Spi = nl.Swap32(uint32(state.Spi))
req.AddData(msg)
saddr := nl.XfrmAddress{}
saddr.FromIP(state.Src)
srcdata := nl.NewRtAttr(nl.XFRMA_SRCADDR, saddr.Serialize())
req.AddData(srcdata)
_, err := req.Execute(syscall.NETLINK_XFRM, 0)
return err
}
// XfrmStateList gets a list of xfrm states in the system.
// Equivalent to: `ip xfrm state show`.
// The list can be filtered by ip family.
func XfrmStateList(family int) ([]XfrmState, error) {
req := nl.NewNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP)
msg := nl.NewIfInfomsg(family)
req.AddData(msg)
msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWSA)
if err != nil {
return nil, err
}
var res []XfrmState
for _, m := range msgs {
msg := nl.DeserializeXfrmUsersaInfo(m)
if family != FAMILY_ALL && family != int(msg.Family) {
continue
}
var state XfrmState
state.Dst = msg.Id.Daddr.ToIP()
state.Src = msg.Saddr.ToIP()
state.Proto = Proto(msg.Id.Proto)
state.Mode = Mode(msg.Mode)
state.Spi = int(nl.Swap32(msg.Id.Spi))
state.Reqid = int(msg.Reqid)
state.ReplayWindow = int(msg.ReplayWindow)
attrs, err := nl.ParseRouteAttr(m[msg.Len():])
if err != nil {
return nil, err
}
for _, attr := range attrs {
switch attr.Attr.Type {
case nl.XFRMA_ALG_AUTH, nl.XFRMA_ALG_CRYPT:
var resAlgo *XfrmStateAlgo
if attr.Attr.Type == nl.XFRMA_ALG_AUTH {
if state.Auth == nil {
state.Auth = new(XfrmStateAlgo)
}
resAlgo = state.Auth
} else {
state.Crypt = new(XfrmStateAlgo)
resAlgo = state.Crypt
}
algo := nl.DeserializeXfrmAlgo(attr.Value[:])
(*resAlgo).Name = nl.BytesToString(algo.AlgName[:])
(*resAlgo).Key = algo.AlgKey
case nl.XFRMA_ALG_AUTH_TRUNC:
if state.Auth == nil {
state.Auth = new(XfrmStateAlgo)
}
algo := nl.DeserializeXfrmAlgoAuth(attr.Value[:])
state.Auth.Name = nl.BytesToString(algo.AlgName[:])
state.Auth.Key = algo.AlgKey
state.Auth.TruncateLen = int(algo.AlgTruncLen)
case nl.XFRMA_ENCAP:
encap := nl.DeserializeXfrmEncapTmpl(attr.Value[:])
state.Encap = new(XfrmStateEncap)
state.Encap.Type = EncapType(encap.EncapType)
state.Encap.SrcPort = int(nl.Swap16(encap.EncapSport))
state.Encap.DstPort = int(nl.Swap16(encap.EncapDport))
state.Encap.OriginalAddress = encap.EncapOa.ToIP()
}
}
res = append(res, state)
}
return res, nil
}
| 1 | 17,795 | Same for this file | opencontainers-runc | go |
@@ -234,15 +234,14 @@ namespace NLog.Internal.FileAppenders
appender.Flush();
}
}
-
+
/// <summary>
/// Gets the file info for a particular appender.
/// </summary>
/// <param name="fileName">The file name associated with a particular appender.</param>
- /// <param name="lastWriteTime">The last file write time. The value must be of UTC kind.</param>
- /// <param name="fileLength">Length of the file.</param>
+ /// <param name="fileCharacteristics">The file characteristics, if the file information was retrieved successfully.</param>
/// <returns><see langword="true"/> when the operation succeeded; <see langword="false"/> otherwise.</returns>
- public bool GetFileInfo(string fileName, out DateTime lastWriteTime, out long fileLength)
+ public bool GetFileCharacteristics(string fileName, out FileCharacteristics fileCharacteristics)
{
foreach (BaseFileAppender appender in appenders)
{ | 1 | //
// Copyright (c) 2004-2011 Jaroslaw Kowalski <[email protected]>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.Internal.FileAppenders
{
using System;
/// <summary>
/// Maintains a collection of file appenders usually associated with file targets.
/// </summary>
internal sealed class FileAppenderCache
{
private BaseFileAppender[] appenders;
/// <summary>
/// Initializes a new "empty" instance of the <see cref="FileAppenderCache"/> class with zero size and empty
/// list of appenders.
/// </summary>
public static readonly FileAppenderCache Empty = new FileAppenderCache();
/// <summary>
/// Initializes a new "empty" instance of the <see cref="FileAppenderCache"/> class with zero size and empty
/// list of appenders.
/// </summary>
private FileAppenderCache()
{
Size = 0;
Factory = null;
CreateFileParameters = null;
appenders = new BaseFileAppender[0];
}
/// <summary>
/// Initializes a new instance of the <see cref="FileAppenderCache"/> class.
/// </summary>
/// <remarks>
/// The size of the list should be positive. No validations are performed during initialisation as it is an
/// internal class.
/// </remarks>
/// <param name="size">Total number of appenders allowed in list.</param>
/// <param name="appenderFactory">Factory used to create each appender.</param>
/// <param name="createFileParams">Parameters used for creating a file.</param>
public FileAppenderCache(int size, IFileAppenderFactory appenderFactory, ICreateFileParameters createFileParams)
{
Size = size;
Factory = appenderFactory;
CreateFileParameters = createFileParams;
appenders = new BaseFileAppender[Size];
}
/// <summary>
/// Gets the parameters which will be used for creating a file.
/// </summary>
public ICreateFileParameters CreateFileParameters { get; private set; }
/// <summary>
/// Gets the file appender factory used by all the appenders in this list.
/// </summary>
public IFileAppenderFactory Factory { get; private set; }
/// <summary>
/// Gets the number of appenders which the list can hold.
/// </summary>
public int Size { get; private set; }
/// <summary>
/// It allocates the first slot in the list when the file name is not already in the list and cleans up any
/// unused slots.
/// </summary>
/// <param name="fileName">File name associated with a single appender.</param>
/// <returns>The allocated appender.</returns>
/// <exception cref="NullReferenceException">
/// Thrown when <see cref="M:AllocateAppender"/> is called on an <c>Empty</c><see cref="FileAppenderCache"/> instance.
/// </exception>
public BaseFileAppender AllocateAppender(string fileName)
{
//
// BaseFileAppender.Write is the most expensive operation here
// so the in-memory data structure doesn't have to be
// very sophisticated. It's a table-based LRU, where we move
// the used element to become the first one.
// The number of items is usually very limited so the
// performance should be equivalent to the one of the hashtable.
//
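// Illustrative example (not part of the original source): with appenders
// [A, B, C], a write that hits C reorders the array to [C, A, B]; when the
// list is full, the appender in the last slot is closed and evicted.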
BaseFileAppender appenderToWrite = null;
int freeSpot = appenders.Length - 1;
for (int i = 0; i < appenders.Length; ++i)
{
// Use empty slot in recent appender list, if there is one.
if (appenders[i] == null)
{
freeSpot = i;
break;
}
if (appenders[i].FileName == fileName)
{
// found it, move it to the first place on the list
// (MRU)
// file open has a chance of failure
// if it fails in the constructor, we won't modify any data structures
BaseFileAppender app = appenders[i];
for (int j = i; j > 0; --j)
{
appenders[j] = appenders[j - 1];
}
appenders[0] = app;
appenderToWrite = app;
break;
}
}
if (appenderToWrite == null)
{
BaseFileAppender newAppender = Factory.Open(fileName, CreateFileParameters);
if (appenders[freeSpot] != null)
{
appenders[freeSpot].Close();
appenders[freeSpot] = null;
}
for (int j = freeSpot; j > 0; --j)
{
appenders[j] = appenders[j - 1];
}
appenders[0] = newAppender;
appenderToWrite = newAppender;
}
return appenderToWrite;
}
/// <summary>
/// Close all the allocated appenders.
/// </summary>
public void CloseAppenders()
{
if (appenders != null)
{
for (int i = 0; i < appenders.Length; ++i)
{
if (appenders[i] == null)
{
break;
}
appenders[i].Close();
appenders[i] = null;
}
}
}
/// <summary>
/// Close the allocated appenders initialised before the supplied time.
/// </summary>
/// <param name="expireTime">The time which prior the appenders considered expired</param>
public void CloseAppenders(DateTime expireTime)
{
for (int i = 0; i < this.appenders.Length; ++i)
{
if (this.appenders[i] == null)
{
break;
}
if (this.appenders[i].OpenTime < expireTime)
{
for (int j = i; j < this.appenders.Length; ++j)
{
if (this.appenders[j] == null)
{
break;
}
this.appenders[j].Close();
this.appenders[j] = null;
}
break;
}
}
}
/// <summary>
/// Flush all the allocated appenders.
/// </summary>
public void FlushAppenders()
{
foreach (BaseFileAppender appender in appenders)
{
if (appender == null)
{
break;
}
appender.Flush();
}
}
/// <summary>
/// Gets the file info for a particular appender.
/// </summary>
/// <param name="fileName">The file name associated with a particular appender.</param>
/// <param name="lastWriteTime">The last file write time. The value must be of UTC kind.</param>
/// <param name="fileLength">Length of the file.</param>
/// <returns><see langword="true"/> when the operation succeeded; <see langword="false"/> otherwise.</returns>
public bool GetFileInfo(string fileName, out DateTime lastWriteTime, out long fileLength)
{
foreach (BaseFileAppender appender in appenders)
{
if (appender == null)
{
break;
}
if (appender.FileName == fileName)
{
appender.GetFileInfo(out lastWriteTime, out fileLength);
return true;
}
}
// Return default values.
fileLength = -1;
lastWriteTime = DateTime.MinValue;
return false;
}
/// <summary>
/// Closes the specified appender and removes it from the list.
/// </summary>
/// <param name="fileName">File name of the appender to be closed.</param>
public void InvalidateAppender(string fileName)
{
for (int i = 0; i < appenders.Length; ++i)
{
if (appenders[i] == null)
{
break;
}
if (appenders[i].FileName == fileName)
{
appenders[i].Close();
for (int j = i; j < appenders.Length - 1; ++j)
{
appenders[j] = appenders[j + 1];
}
appenders[appenders.Length - 1] = null;
break;
}
}
}
}
}
| 1 | 12,275 | Looks good! But why it's still a out parameter? We can refactor the whole tree because it's internal? | NLog-NLog | .cs |
@@ -825,6 +825,11 @@ void roots_seat_set_focus(struct roots_seat *seat, struct roots_view *view) {
view_activate(view, true);
seat->has_focus = true;
+ // We want to unconditionally send keyboard input to the view we are
+ // focusing here, so cancel any existing grabs.
+ struct wlr_seat_keyboard_grab *curr_grab = seat->seat->keyboard_state.grab;
+ curr_grab->interface->cancel(curr_grab);
+
struct wlr_keyboard *keyboard = wlr_seat_get_keyboard(seat->seat);
if (keyboard != NULL) {
wlr_seat_keyboard_notify_enter(seat->seat, view->wlr_surface, | 1 | #define _POSIX_C_SOURCE 199309L
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <wayland-server.h>
#include <wlr/config.h>
#include <wlr/types/wlr_idle.h>
#include <wlr/types/wlr_layer_shell.h>
#include <wlr/types/wlr_xcursor_manager.h>
#include <wlr/util/log.h>
#include "rootston/cursor.h"
#include "rootston/input.h"
#include "rootston/keyboard.h"
#include "rootston/seat.h"
#include "rootston/xcursor.h"
static void handle_keyboard_key(struct wl_listener *listener, void *data) {
struct roots_keyboard *keyboard =
wl_container_of(listener, keyboard, keyboard_key);
struct roots_desktop *desktop = keyboard->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, keyboard->seat->seat);
struct wlr_event_keyboard_key *event = data;
roots_keyboard_handle_key(keyboard, event);
}
static void handle_keyboard_modifiers(struct wl_listener *listener,
void *data) {
struct roots_keyboard *keyboard =
wl_container_of(listener, keyboard, keyboard_modifiers);
struct roots_desktop *desktop = keyboard->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, keyboard->seat->seat);
roots_keyboard_handle_modifiers(keyboard);
}
static void handle_cursor_motion(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, motion);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_pointer_motion *event = data;
roots_cursor_handle_motion(cursor, event);
}
static void handle_cursor_motion_absolute(struct wl_listener *listener,
void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, motion_absolute);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_pointer_motion_absolute *event = data;
roots_cursor_handle_motion_absolute(cursor, event);
}
static void handle_cursor_button(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, button);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_pointer_button *event = data;
roots_cursor_handle_button(cursor, event);
}
static void handle_cursor_axis(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, axis);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_pointer_axis *event = data;
roots_cursor_handle_axis(cursor, event);
}
static void handle_touch_down(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, touch_down);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_touch_down *event = data;
roots_cursor_handle_touch_down(cursor, event);
}
static void handle_touch_up(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, touch_up);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_touch_up *event = data;
roots_cursor_handle_touch_up(cursor, event);
}
static void handle_touch_motion(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, touch_motion);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_touch_motion *event = data;
roots_cursor_handle_touch_motion(cursor, event);
}
static void handle_tool_axis(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, tool_axis);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_tablet_tool_axis *event = data;
roots_cursor_handle_tool_axis(cursor, event);
}
static void handle_tool_tip(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, tool_tip);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_tablet_tool_tip *event = data;
roots_cursor_handle_tool_tip(cursor, event);
}
static void handle_request_set_cursor(struct wl_listener *listener,
void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, request_set_cursor);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_seat_pointer_request_set_cursor_event *event = data;
roots_cursor_handle_request_set_cursor(cursor, event);
}
static void seat_reset_device_mappings(struct roots_seat *seat,
struct wlr_input_device *device) {
struct wlr_cursor *cursor = seat->cursor->cursor;
struct roots_config *config = seat->input->config;
wlr_cursor_map_input_to_output(cursor, device, NULL);
struct roots_device_config *dconfig;
if ((dconfig = roots_config_get_device(config, device))) {
wlr_cursor_map_input_to_region(cursor, device, dconfig->mapped_box);
}
}
static void seat_set_device_output_mappings(struct roots_seat *seat,
struct wlr_input_device *device, struct wlr_output *output) {
struct wlr_cursor *cursor = seat->cursor->cursor;
struct roots_config *config = seat->input->config;
struct roots_device_config *dconfig =
roots_config_get_device(config, device);
const char *mapped_output = NULL;
if (dconfig != NULL) {
mapped_output = dconfig->mapped_output;
}
if (mapped_output == NULL) {
mapped_output = device->output_name;
}
if (mapped_output && strcmp(mapped_output, output->name) == 0) {
wlr_cursor_map_input_to_output(cursor, device, output);
}
}
void roots_seat_configure_cursor(struct roots_seat *seat) {
struct roots_config *config = seat->input->config;
struct roots_desktop *desktop = seat->input->server->desktop;
struct wlr_cursor *cursor = seat->cursor->cursor;
struct roots_pointer *pointer;
struct roots_touch *touch;
struct roots_tablet_tool *tablet_tool;
struct roots_output *output;
// reset mappings
wlr_cursor_map_to_output(cursor, NULL);
wl_list_for_each(pointer, &seat->pointers, link) {
seat_reset_device_mappings(seat, pointer->device);
}
wl_list_for_each(touch, &seat->touch, link) {
seat_reset_device_mappings(seat, touch->device);
}
wl_list_for_each(tablet_tool, &seat->tablet_tools, link) {
seat_reset_device_mappings(seat, tablet_tool->device);
}
// configure device to output mappings
const char *mapped_output = NULL;
struct roots_cursor_config *cc =
roots_config_get_cursor(config, seat->seat->name);
if (cc != NULL) {
mapped_output = cc->mapped_output;
}
wl_list_for_each(output, &desktop->outputs, link) {
if (mapped_output &&
strcmp(mapped_output, output->wlr_output->name) == 0) {
wlr_cursor_map_to_output(cursor, output->wlr_output);
}
wl_list_for_each(pointer, &seat->pointers, link) {
seat_set_device_output_mappings(seat, pointer->device,
output->wlr_output);
}
wl_list_for_each(tablet_tool, &seat->tablet_tools, link) {
seat_set_device_output_mappings(seat, tablet_tool->device,
output->wlr_output);
}
wl_list_for_each(touch, &seat->touch, link) {
seat_set_device_output_mappings(seat, touch->device,
output->wlr_output);
}
}
}
static void roots_seat_init_cursor(struct roots_seat *seat) {
seat->cursor = roots_cursor_create(seat);
if (!seat->cursor) {
return;
}
seat->cursor->seat = seat;
struct wlr_cursor *wlr_cursor = seat->cursor->cursor;
struct roots_desktop *desktop = seat->input->server->desktop;
wlr_cursor_attach_output_layout(wlr_cursor, desktop->layout);
roots_seat_configure_cursor(seat);
roots_seat_configure_xcursor(seat);
// add input signals
wl_signal_add(&wlr_cursor->events.motion, &seat->cursor->motion);
seat->cursor->motion.notify = handle_cursor_motion;
wl_signal_add(&wlr_cursor->events.motion_absolute,
&seat->cursor->motion_absolute);
seat->cursor->motion_absolute.notify = handle_cursor_motion_absolute;
wl_signal_add(&wlr_cursor->events.button, &seat->cursor->button);
seat->cursor->button.notify = handle_cursor_button;
wl_signal_add(&wlr_cursor->events.axis, &seat->cursor->axis);
seat->cursor->axis.notify = handle_cursor_axis;
wl_signal_add(&wlr_cursor->events.touch_down, &seat->cursor->touch_down);
seat->cursor->touch_down.notify = handle_touch_down;
wl_signal_add(&wlr_cursor->events.touch_up, &seat->cursor->touch_up);
seat->cursor->touch_up.notify = handle_touch_up;
wl_signal_add(&wlr_cursor->events.touch_motion,
&seat->cursor->touch_motion);
seat->cursor->touch_motion.notify = handle_touch_motion;
wl_signal_add(&wlr_cursor->events.tablet_tool_axis,
&seat->cursor->tool_axis);
seat->cursor->tool_axis.notify = handle_tool_axis;
wl_signal_add(&wlr_cursor->events.tablet_tool_tip, &seat->cursor->tool_tip);
seat->cursor->tool_tip.notify = handle_tool_tip;
wl_signal_add(&seat->seat->events.request_set_cursor,
&seat->cursor->request_set_cursor);
seat->cursor->request_set_cursor.notify = handle_request_set_cursor;
}
static void roots_drag_icon_handle_surface_commit(struct wl_listener *listener,
void *data) {
struct roots_drag_icon *icon =
wl_container_of(listener, icon, surface_commit);
roots_drag_icon_update_position(icon);
}
static void roots_drag_icon_handle_map(struct wl_listener *listener,
void *data) {
struct roots_drag_icon *icon =
wl_container_of(listener, icon, map);
roots_drag_icon_damage_whole(icon);
}
static void roots_drag_icon_handle_unmap(struct wl_listener *listener,
void *data) {
struct roots_drag_icon *icon =
wl_container_of(listener, icon, unmap);
roots_drag_icon_damage_whole(icon);
}
static void roots_drag_icon_handle_destroy(struct wl_listener *listener,
void *data) {
struct roots_drag_icon *icon =
wl_container_of(listener, icon, destroy);
roots_drag_icon_damage_whole(icon);
wl_list_remove(&icon->link);
wl_list_remove(&icon->surface_commit.link);
wl_list_remove(&icon->unmap.link);
wl_list_remove(&icon->destroy.link);
free(icon);
}
static void roots_seat_handle_new_drag_icon(struct wl_listener *listener,
void *data) {
struct roots_seat *seat = wl_container_of(listener, seat, new_drag_icon);
struct wlr_drag_icon *wlr_drag_icon = data;
struct roots_drag_icon *icon = calloc(1, sizeof(struct roots_drag_icon));
if (icon == NULL) {
return;
}
icon->seat = seat;
icon->wlr_drag_icon = wlr_drag_icon;
icon->surface_commit.notify = roots_drag_icon_handle_surface_commit;
wl_signal_add(&wlr_drag_icon->surface->events.commit, &icon->surface_commit);
icon->unmap.notify = roots_drag_icon_handle_unmap;
wl_signal_add(&wlr_drag_icon->events.unmap, &icon->unmap);
icon->map.notify = roots_drag_icon_handle_map;
wl_signal_add(&wlr_drag_icon->events.map, &icon->map);
icon->destroy.notify = roots_drag_icon_handle_destroy;
wl_signal_add(&wlr_drag_icon->events.destroy, &icon->destroy);
wl_list_insert(&seat->drag_icons, &icon->link);
roots_drag_icon_update_position(icon);
}
void roots_drag_icon_update_position(struct roots_drag_icon *icon) {
roots_drag_icon_damage_whole(icon);
struct wlr_drag_icon *wlr_icon = icon->wlr_drag_icon;
struct roots_seat *seat = icon->seat;
struct wlr_cursor *cursor = seat->cursor->cursor;
if (wlr_icon->is_pointer) {
icon->x = cursor->x + wlr_icon->sx;
icon->y = cursor->y + wlr_icon->sy;
} else {
struct wlr_touch_point *point =
wlr_seat_touch_get_point(seat->seat, wlr_icon->touch_id);
if (point == NULL) {
return;
}
icon->x = seat->touch_x + wlr_icon->sx;
icon->y = seat->touch_y + wlr_icon->sy;
}
roots_drag_icon_damage_whole(icon);
}
void roots_drag_icon_damage_whole(struct roots_drag_icon *icon) {
struct roots_output *output;
wl_list_for_each(output, &icon->seat->input->server->desktop->outputs,
link) {
output_damage_whole_drag_icon(output, icon);
}
}
static void seat_view_destroy(struct roots_seat_view *seat_view);
static void roots_seat_handle_destroy(struct wl_listener *listener,
void *data) {
struct roots_seat *seat = wl_container_of(listener, seat, destroy);
// TODO: probably more to be freed here
wl_list_remove(&seat->destroy.link);
struct roots_seat_view *view, *nview;
wl_list_for_each_safe(view, nview, &seat->views, link) {
seat_view_destroy(view);
}
}
void roots_seat_destroy(struct roots_seat *seat) {
roots_seat_handle_destroy(&seat->destroy, seat->seat);
wlr_seat_destroy(seat->seat);
}
struct roots_seat *roots_seat_create(struct roots_input *input, char *name) {
struct roots_seat *seat = calloc(1, sizeof(struct roots_seat));
if (!seat) {
return NULL;
}
wl_list_init(&seat->keyboards);
wl_list_init(&seat->pointers);
wl_list_init(&seat->touch);
wl_list_init(&seat->tablet_tools);
wl_list_init(&seat->views);
wl_list_init(&seat->drag_icons);
seat->input = input;
seat->seat = wlr_seat_create(input->server->wl_display, name);
if (!seat->seat) {
free(seat);
return NULL;
}
roots_seat_init_cursor(seat);
if (!seat->cursor) {
wlr_seat_destroy(seat->seat);
free(seat);
return NULL;
}
wl_list_insert(&input->seats, &seat->link);
seat->new_drag_icon.notify = roots_seat_handle_new_drag_icon;
wl_signal_add(&seat->seat->events.new_drag_icon, &seat->new_drag_icon);
seat->destroy.notify = roots_seat_handle_destroy;
wl_signal_add(&seat->seat->events.destroy, &seat->destroy);
return seat;
}
static void seat_update_capabilities(struct roots_seat *seat) {
uint32_t caps = 0;
if (!wl_list_empty(&seat->keyboards)) {
caps |= WL_SEAT_CAPABILITY_KEYBOARD;
}
if (!wl_list_empty(&seat->pointers) || !wl_list_empty(&seat->tablet_tools)) {
caps |= WL_SEAT_CAPABILITY_POINTER;
}
if (!wl_list_empty(&seat->touch)) {
caps |= WL_SEAT_CAPABILITY_TOUCH;
}
wlr_seat_set_capabilities(seat->seat, caps);
// Hide cursor if seat doesn't have pointer capability
if ((caps & WL_SEAT_CAPABILITY_POINTER) == 0) {
wlr_cursor_set_image(seat->cursor->cursor, NULL, 0, 0, 0, 0, 0, 0);
} else {
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
seat->cursor->default_xcursor, seat->cursor->cursor);
}
}
static void handle_keyboard_destroy(struct wl_listener *listener, void *data) {
struct roots_keyboard *keyboard =
wl_container_of(listener, keyboard, device_destroy);
struct roots_seat *seat = keyboard->seat;
wl_list_remove(&keyboard->device_destroy.link);
wl_list_remove(&keyboard->keyboard_key.link);
wl_list_remove(&keyboard->keyboard_modifiers.link);
roots_keyboard_destroy(keyboard);
seat_update_capabilities(seat);
}
static void seat_add_keyboard(struct roots_seat *seat,
struct wlr_input_device *device) {
assert(device->type == WLR_INPUT_DEVICE_KEYBOARD);
struct roots_keyboard *keyboard =
roots_keyboard_create(device, seat->input);
if (keyboard == NULL) {
wlr_log(L_ERROR, "could not allocate keyboard for seat");
return;
}
keyboard->seat = seat;
wl_list_insert(&seat->keyboards, &keyboard->link);
keyboard->device_destroy.notify = handle_keyboard_destroy;
wl_signal_add(&keyboard->device->events.destroy, &keyboard->device_destroy);
keyboard->keyboard_key.notify = handle_keyboard_key;
wl_signal_add(&keyboard->device->keyboard->events.key,
&keyboard->keyboard_key);
keyboard->keyboard_modifiers.notify = handle_keyboard_modifiers;
wl_signal_add(&keyboard->device->keyboard->events.modifiers,
&keyboard->keyboard_modifiers);
wlr_seat_set_keyboard(seat->seat, device);
}
static void handle_pointer_destroy(struct wl_listener *listener, void *data) {
struct roots_pointer *pointer =
wl_container_of(listener, pointer, device_destroy);
struct roots_seat *seat = pointer->seat;
wl_list_remove(&pointer->link);
wlr_cursor_detach_input_device(seat->cursor->cursor, pointer->device);
wl_list_remove(&pointer->device_destroy.link);
free(pointer);
seat_update_capabilities(seat);
}
static void seat_add_pointer(struct roots_seat *seat,
struct wlr_input_device *device) {
struct roots_pointer *pointer = calloc(sizeof(struct roots_pointer), 1);
if (!pointer) {
wlr_log(L_ERROR, "could not allocate pointer for seat");
return;
}
device->data = pointer;
pointer->device = device;
pointer->seat = seat;
wl_list_insert(&seat->pointers, &pointer->link);
pointer->device_destroy.notify = handle_pointer_destroy;
wl_signal_add(&pointer->device->events.destroy, &pointer->device_destroy);
wlr_cursor_attach_input_device(seat->cursor->cursor, device);
roots_seat_configure_cursor(seat);
}
static void handle_touch_destroy(struct wl_listener *listener, void *data) {
struct roots_pointer *touch =
wl_container_of(listener, touch, device_destroy);
struct roots_seat *seat = touch->seat;
wl_list_remove(&touch->link);
wlr_cursor_detach_input_device(seat->cursor->cursor, touch->device);
wl_list_remove(&touch->device_destroy.link);
free(touch);
seat_update_capabilities(seat);
}
static void seat_add_touch(struct roots_seat *seat,
struct wlr_input_device *device) {
struct roots_touch *touch = calloc(sizeof(struct roots_touch), 1);
if (!touch) {
wlr_log(L_ERROR, "could not allocate touch for seat");
return;
}
device->data = touch;
touch->device = device;
touch->seat = seat;
wl_list_insert(&seat->touch, &touch->link);
touch->device_destroy.notify = handle_touch_destroy;
wl_signal_add(&touch->device->events.destroy, &touch->device_destroy);
wlr_cursor_attach_input_device(seat->cursor->cursor, device);
roots_seat_configure_cursor(seat);
}
static void seat_add_tablet_pad(struct roots_seat *seat,
struct wlr_input_device *device) {
// TODO
}
static void handle_tablet_tool_destroy(struct wl_listener *listener,
void *data) {
struct roots_tablet_tool *tablet_tool =
wl_container_of(listener, tablet_tool, device_destroy);
struct roots_seat *seat = tablet_tool->seat;
wlr_cursor_detach_input_device(seat->cursor->cursor, tablet_tool->device);
wl_list_remove(&tablet_tool->device_destroy.link);
wl_list_remove(&tablet_tool->link);
free(tablet_tool);
seat_update_capabilities(seat);
}
static void seat_add_tablet_tool(struct roots_seat *seat,
struct wlr_input_device *device) {
struct roots_tablet_tool *tablet_tool =
calloc(sizeof(struct roots_tablet_tool), 1);
if (!tablet_tool) {
wlr_log(L_ERROR, "could not allocate tablet_tool for seat");
return;
}
device->data = tablet_tool;
tablet_tool->device = device;
tablet_tool->seat = seat;
wl_list_insert(&seat->tablet_tools, &tablet_tool->link);
tablet_tool->device_destroy.notify = handle_tablet_tool_destroy;
wl_signal_add(&tablet_tool->device->events.destroy,
&tablet_tool->device_destroy);
wlr_cursor_attach_input_device(seat->cursor->cursor, device);
roots_seat_configure_cursor(seat);
}
void roots_seat_add_device(struct roots_seat *seat,
struct wlr_input_device *device) {
switch (device->type) {
case WLR_INPUT_DEVICE_KEYBOARD:
seat_add_keyboard(seat, device);
break;
case WLR_INPUT_DEVICE_POINTER:
seat_add_pointer(seat, device);
break;
case WLR_INPUT_DEVICE_TOUCH:
seat_add_touch(seat, device);
break;
case WLR_INPUT_DEVICE_TABLET_PAD:
seat_add_tablet_pad(seat, device);
break;
case WLR_INPUT_DEVICE_TABLET_TOOL:
seat_add_tablet_tool(seat, device);
break;
}
seat_update_capabilities(seat);
}
void roots_seat_configure_xcursor(struct roots_seat *seat) {
const char *cursor_theme = NULL;
struct roots_cursor_config *cc =
roots_config_get_cursor(seat->input->config, seat->seat->name);
if (cc != NULL) {
cursor_theme = cc->theme;
if (cc->default_image != NULL) {
seat->cursor->default_xcursor = cc->default_image;
}
}
if (!seat->cursor->xcursor_manager) {
seat->cursor->xcursor_manager =
wlr_xcursor_manager_create(cursor_theme, ROOTS_XCURSOR_SIZE);
if (seat->cursor->xcursor_manager == NULL) {
wlr_log(L_ERROR, "Cannot create XCursor manager for theme %s",
cursor_theme);
return;
}
}
struct roots_output *output;
wl_list_for_each(output, &seat->input->server->desktop->outputs, link) {
float scale = output->wlr_output->scale;
if (wlr_xcursor_manager_load(seat->cursor->xcursor_manager, scale)) {
wlr_log(L_ERROR, "Cannot load xcursor theme for output '%s' "
"with scale %f", output->wlr_output->name, scale);
}
}
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
seat->cursor->default_xcursor, seat->cursor->cursor);
wlr_cursor_warp(seat->cursor->cursor, NULL, seat->cursor->cursor->x,
seat->cursor->cursor->y);
}
bool roots_seat_has_meta_pressed(struct roots_seat *seat) {
struct roots_keyboard *keyboard;
wl_list_for_each(keyboard, &seat->keyboards, link) {
if (!keyboard->config->meta_key) {
continue;
}
uint32_t modifiers =
wlr_keyboard_get_modifiers(keyboard->device->keyboard);
if ((modifiers ^ keyboard->config->meta_key) == 0) {
return true;
}
}
return false;
}
struct roots_view *roots_seat_get_focus(struct roots_seat *seat) {
if (!seat->has_focus || wl_list_empty(&seat->views)) {
return NULL;
}
struct roots_seat_view *seat_view =
wl_container_of(seat->views.next, seat_view, link);
return seat_view->view;
}
static void seat_view_destroy(struct roots_seat_view *seat_view) {
struct roots_seat *seat = seat_view->seat;
if (seat_view->view == roots_seat_get_focus(seat)) {
seat->has_focus = false;
seat->cursor->mode = ROOTS_CURSOR_PASSTHROUGH;
}
if (seat_view == seat->cursor->pointer_view) {
seat->cursor->pointer_view = NULL;
}
wl_list_remove(&seat_view->view_unmap.link);
wl_list_remove(&seat_view->view_destroy.link);
wl_list_remove(&seat_view->link);
free(seat_view);
// Focus first view
if (!wl_list_empty(&seat->views)) {
struct roots_seat_view *first_seat_view = wl_container_of(
seat->views.next, first_seat_view, link);
roots_seat_set_focus(seat, first_seat_view->view);
}
}
static void seat_view_handle_unmap(struct wl_listener *listener, void *data) {
struct roots_seat_view *seat_view =
wl_container_of(listener, seat_view, view_unmap);
seat_view_destroy(seat_view);
}
static void seat_view_handle_destroy(struct wl_listener *listener, void *data) {
struct roots_seat_view *seat_view =
wl_container_of(listener, seat_view, view_destroy);
seat_view_destroy(seat_view);
}
static struct roots_seat_view *seat_add_view(struct roots_seat *seat,
struct roots_view *view) {
struct roots_seat_view *seat_view =
calloc(1, sizeof(struct roots_seat_view));
if (seat_view == NULL) {
return NULL;
}
seat_view->seat = seat;
seat_view->view = view;
wl_list_insert(seat->views.prev, &seat_view->link);
seat_view->view_unmap.notify = seat_view_handle_unmap;
wl_signal_add(&view->events.unmap, &seat_view->view_unmap);
seat_view->view_destroy.notify = seat_view_handle_destroy;
wl_signal_add(&view->events.destroy, &seat_view->view_destroy);
return seat_view;
}
struct roots_seat_view *roots_seat_view_from_view(
struct roots_seat *seat, struct roots_view *view) {
if (view == NULL) {
return NULL;
}
bool found = false;
struct roots_seat_view *seat_view = NULL;
wl_list_for_each(seat_view, &seat->views, link) {
if (seat_view->view == view) {
found = true;
break;
}
}
if (!found) {
seat_view = seat_add_view(seat, view);
if (seat_view == NULL) {
wlr_log(L_ERROR, "Allocation failed");
return NULL;
}
}
return seat_view;
}
bool roots_seat_allow_input(struct roots_seat *seat,
struct wl_resource *resource) {
return !seat->exclusive_client ||
wl_resource_get_client(resource) == seat->exclusive_client;
}
void roots_seat_set_focus(struct roots_seat *seat, struct roots_view *view) {
if (view && !roots_seat_allow_input(seat, view->wlr_surface->resource)) {
return;
}
// Make sure the view will be rendered on top of others, even if it's
// already focused in this seat
if (view != NULL) {
wl_list_remove(&view->link);
wl_list_insert(&seat->input->server->desktop->views, &view->link);
}
bool unfullscreen = true;
#ifdef WLR_HAS_XWAYLAND
if (view && view->type == ROOTS_XWAYLAND_VIEW &&
view->xwayland_surface->override_redirect) {
unfullscreen = false;
}
#endif
if (view && unfullscreen) {
struct roots_desktop *desktop = view->desktop;
struct roots_output *output;
struct wlr_box box;
view_get_box(view, &box);
wl_list_for_each(output, &desktop->outputs, link) {
if (output->fullscreen_view &&
output->fullscreen_view != view &&
wlr_output_layout_intersects(
desktop->layout,
output->wlr_output, &box)) {
view_set_fullscreen(output->fullscreen_view,
false, NULL);
}
}
}
struct roots_view *prev_focus = roots_seat_get_focus(seat);
if (view == prev_focus) {
return;
}
#ifdef WLR_HAS_XWAYLAND
if (view && view->type == ROOTS_XWAYLAND_VIEW &&
wlr_xwayland_surface_is_unmanaged(view->xwayland_surface)) {
return;
}
#endif
struct roots_seat_view *seat_view = NULL;
if (view != NULL) {
seat_view = roots_seat_view_from_view(seat, view);
if (seat_view == NULL) {
return;
}
}
seat->has_focus = false;
// Deactivate the old view if it is not focused by some other seat
if (prev_focus != NULL && !input_view_has_focus(seat->input, prev_focus)) {
view_activate(prev_focus, false);
}
if (view == NULL) {
seat->cursor->mode = ROOTS_CURSOR_PASSTHROUGH;
wlr_seat_keyboard_clear_focus(seat->seat);
return;
}
wl_list_remove(&seat_view->link);
wl_list_insert(&seat->views, &seat_view->link);
view_damage_whole(view);
if (seat->focused_layer) {
return;
}
view_activate(view, true);
seat->has_focus = true;
struct wlr_keyboard *keyboard = wlr_seat_get_keyboard(seat->seat);
if (keyboard != NULL) {
wlr_seat_keyboard_notify_enter(seat->seat, view->wlr_surface,
keyboard->keycodes, keyboard->num_keycodes,
&keyboard->modifiers);
} else {
wlr_seat_keyboard_notify_enter(seat->seat, view->wlr_surface,
NULL, 0, NULL);
}
}
/**
* Focus semantics of layer surfaces are somewhat detached from the normal focus
* flow. For layers above the shell layer, for example, you cannot unfocus them.
* You also cannot alt-tab between layer surfaces and shell surfaces.
*/
void roots_seat_set_focus_layer(struct roots_seat *seat,
struct wlr_layer_surface *layer) {
if (!layer) {
seat->focused_layer = NULL;
return;
}
struct wlr_keyboard *keyboard = wlr_seat_get_keyboard(seat->seat);
if (!roots_seat_allow_input(seat, layer->resource)) {
return;
}
if (seat->has_focus) {
struct roots_view *prev_focus = roots_seat_get_focus(seat);
wlr_seat_keyboard_clear_focus(seat->seat);
view_activate(prev_focus, false);
}
seat->has_focus = false;
if (layer->layer >= ZWLR_LAYER_SHELL_V1_LAYER_TOP) {
seat->focused_layer = layer;
}
if (keyboard != NULL) {
wlr_seat_keyboard_notify_enter(seat->seat, layer->surface,
keyboard->keycodes, keyboard->num_keycodes,
&keyboard->modifiers);
} else {
wlr_seat_keyboard_notify_enter(seat->seat, layer->surface,
NULL, 0, NULL);
}
}
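// Restricts input on this seat to the given client, clearing keyboard, layer,
// pointer and touch focus held by other clients. Passing NULL lifts the
// restriction and re-arranges layer focus.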
void roots_seat_set_exclusive_client(struct roots_seat *seat,
struct wl_client *client) {
if (!client) {
seat->exclusive_client = client;
// Triggers a refocus of the topmost surface layer if necessary
// TODO: Make layer surface focus per-output based on cursor position
struct roots_output *output;
wl_list_for_each(output, &seat->input->server->desktop->outputs, link) {
arrange_layers(output);
}
return;
}
if (seat->focused_layer) {
if (wl_resource_get_client(seat->focused_layer->resource) != client) {
roots_seat_set_focus_layer(seat, NULL);
}
}
if (seat->has_focus) {
struct roots_view *focus = roots_seat_get_focus(seat);
if (wl_resource_get_client(focus->wlr_surface->resource) != client) {
roots_seat_set_focus(seat, NULL);
}
}
if (seat->seat->pointer_state.focused_client) {
if (seat->seat->pointer_state.focused_client->client != client) {
wlr_seat_pointer_clear_focus(seat->seat);
}
}
struct timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
struct wlr_touch_point *point;
wl_list_for_each(point, &seat->seat->touch_state.touch_points, link) {
if (point->client->client != client) {
wlr_seat_touch_point_clear_focus(seat->seat,
now.tv_nsec / 1000, point->touch_id);
}
}
seat->exclusive_client = client;
}
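// Cycles keyboard focus to the next view in this seat's focus list; if the
// seat has no focus yet, the most recently focused view is picked instead.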
void roots_seat_cycle_focus(struct roots_seat *seat) {
if (wl_list_empty(&seat->views)) {
return;
}
struct roots_seat_view *first_seat_view = wl_container_of(
seat->views.next, first_seat_view, link);
if (!seat->has_focus) {
roots_seat_set_focus(seat, first_seat_view->view);
return;
}
if (wl_list_length(&seat->views) < 2) {
return;
}
// Focus the next view
struct roots_seat_view *next_seat_view = wl_container_of(
first_seat_view->link.next, next_seat_view, link);
roots_seat_set_focus(seat, next_seat_view->view);
// Move the first view to the end of the list
wl_list_remove(&first_seat_view->link);
wl_list_insert(seat->views.prev, &first_seat_view->link);
}
void roots_seat_begin_move(struct roots_seat *seat, struct roots_view *view) {
struct roots_cursor *cursor = seat->cursor;
cursor->mode = ROOTS_CURSOR_MOVE;
cursor->offs_x = cursor->cursor->x;
cursor->offs_y = cursor->cursor->y;
if (view->maximized) {
cursor->view_x = view->saved.x;
cursor->view_y = view->saved.y;
} else {
cursor->view_x = view->x;
cursor->view_y = view->y;
}
view_maximize(view, false);
wlr_seat_pointer_clear_focus(seat->seat);
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
ROOTS_XCURSOR_MOVE, seat->cursor->cursor);
}
void roots_seat_begin_resize(struct roots_seat *seat, struct roots_view *view,
uint32_t edges) {
struct roots_cursor *cursor = seat->cursor;
cursor->mode = ROOTS_CURSOR_RESIZE;
cursor->offs_x = cursor->cursor->x;
cursor->offs_y = cursor->cursor->y;
if (view->maximized) {
cursor->view_x = view->saved.x;
cursor->view_y = view->saved.y;
cursor->view_width = view->saved.width;
cursor->view_height = view->saved.height;
} else {
cursor->view_x = view->x;
cursor->view_y = view->y;
struct wlr_box box;
view_get_box(view, &box);
cursor->view_width = box.width;
cursor->view_height = box.height;
}
cursor->resize_edges = edges;
view_maximize(view, false);
wlr_seat_pointer_clear_focus(seat->seat);
const char *resize_name = wlr_xcursor_get_resize_name(edges);
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
resize_name, seat->cursor->cursor);
}
void roots_seat_begin_rotate(struct roots_seat *seat, struct roots_view *view) {
struct roots_cursor *cursor = seat->cursor;
cursor->mode = ROOTS_CURSOR_ROTATE;
cursor->offs_x = cursor->cursor->x;
cursor->offs_y = cursor->cursor->y;
cursor->view_rotation = view->rotation;
view_maximize(view, false);
wlr_seat_pointer_clear_focus(seat->seat);
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
ROOTS_XCURSOR_ROTATE, seat->cursor->cursor);
}
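// Ends an active move/resize/rotate grab, applying the geometry or rotation
// stored on the cursor to the focused view and returning the cursor to
// passthrough mode.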
void roots_seat_end_compositor_grab(struct roots_seat *seat) {
struct roots_cursor *cursor = seat->cursor;
struct roots_view *view = roots_seat_get_focus(seat);
if (view == NULL) {
return;
}
switch(cursor->mode) {
case ROOTS_CURSOR_MOVE:
view_move(view, cursor->view_x, cursor->view_y);
break;
case ROOTS_CURSOR_RESIZE:
view_move_resize(view, cursor->view_x, cursor->view_y, cursor->view_width, cursor->view_height);
break;
case ROOTS_CURSOR_ROTATE:
view->rotation = cursor->view_rotation;
break;
case ROOTS_CURSOR_PASSTHROUGH:
break;
}
cursor->mode = ROOTS_CURSOR_PASSTHROUGH;
}
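// Returns the seat that most recently received input, or NULL if there are no
// seats.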
struct roots_seat *input_last_active_seat(struct roots_input *input) {
struct roots_seat *seat = NULL, *_seat;
wl_list_for_each(_seat, &input->seats, link) {
		if (!seat || (_seat->seat->last_event.tv_sec > seat->seat->last_event.tv_sec ||
				(_seat->seat->last_event.tv_sec == seat->seat->last_event.tv_sec &&
				_seat->seat->last_event.tv_nsec > seat->seat->last_event.tv_nsec))) {
seat = _seat;
}
}
return seat;
}
| 1 | 11,778 | probably better to use `wlr_seat_keyboard_end_grab()`. | swaywm-wlroots | c |
@@ -142,6 +142,9 @@ public class TransactionPool implements BlockAddedObserver {
public ValidationResult<TransactionInvalidReason> addLocalTransaction(
final Transaction transaction) {
+ if (transaction.getChainId().isEmpty() && !configuration.isUnprotectedTransactionsAllowed()) {
+ return ValidationResult.invalid(TransactionInvalidReason.UNPROTECTED_TRANSACTION);
+ }
if (!configuration.getTxFeeCap().isZero()
&& minTransactionGasPrice(transaction).compareTo(configuration.getTxFeeCap()) > 0) {
return ValidationResult.invalid(TransactionInvalidReason.TX_FEECAP_EXCEEDED); | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.eth.transactions;
import static java.util.Collections.singletonList;
import static org.apache.logging.log4j.LogManager.getLogger;
import static org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason.CHAIN_HEAD_WORLD_STATE_NOT_AVAILABLE;
import org.hyperledger.besu.config.experimental.ExperimentalEIPs;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.chain.BlockAddedEvent;
import org.hyperledger.besu.ethereum.chain.BlockAddedObserver;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.core.Account;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.core.fees.BaseFee;
import org.hyperledger.besu.ethereum.core.fees.EIP1559;
import org.hyperledger.besu.ethereum.core.fees.TransactionPriceCalculator;
import org.hyperledger.besu.ethereum.eth.EthProtocol;
import org.hyperledger.besu.ethereum.eth.manager.EthContext;
import org.hyperledger.besu.ethereum.eth.manager.EthPeer;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.eth.transactions.PendingTransactions.TransactionAddedStatus;
import org.hyperledger.besu.ethereum.mainnet.MainnetTransactionValidator;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.TransactionValidationParams;
import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
import org.hyperledger.besu.ethereum.transaction.TransactionInvalidReason;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
import org.hyperledger.besu.plugin.services.metrics.LabelledMetric;
import java.util.Collection;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import org.apache.logging.log4j.Logger;
/**
* Maintains the set of pending transactions received from JSON-RPC or other nodes. Transactions are
* removed automatically when they are included in a block on the canonical chain and re-added if a
* re-org removes them from the canonical chain again.
*
* <p>This class is safe for use across multiple threads.
*/
public class TransactionPool implements BlockAddedObserver {
private static final Logger LOG = getLogger();
private static final long SYNC_TOLERANCE = 100L;
private static final String REMOTE = "remote";
private static final String LOCAL = "local";
private final PendingTransactions pendingTransactions;
private final ProtocolSchedule protocolSchedule;
private final ProtocolContext protocolContext;
private final TransactionBatchAddedListener transactionBatchAddedListener;
private final Optional<TransactionBatchAddedListener> pendingTransactionBatchAddedListener;
private final SyncState syncState;
private final Wei minTransactionGasPrice;
private final LabelledMetric<Counter> duplicateTransactionCounter;
private final PeerTransactionTracker peerTransactionTracker;
private final Optional<PeerPendingTransactionTracker> maybePeerPendingTransactionTracker;
private final Optional<EIP1559> eip1559;
private final TransactionPriceCalculator frontierPriceCalculator =
TransactionPriceCalculator.frontier();
private final TransactionPriceCalculator eip1559PriceCalculator =
TransactionPriceCalculator.eip1559();
private final TransactionPoolConfiguration configuration;
public TransactionPool(
final PendingTransactions pendingTransactions,
final ProtocolSchedule protocolSchedule,
final ProtocolContext protocolContext,
final TransactionBatchAddedListener transactionBatchAddedListener,
final Optional<TransactionBatchAddedListener> pendingTransactionBatchAddedListener,
final SyncState syncState,
final EthContext ethContext,
final PeerTransactionTracker peerTransactionTracker,
final Optional<PeerPendingTransactionTracker> maybePeerPendingTransactionTracker,
final Wei minTransactionGasPrice,
final MetricsSystem metricsSystem,
final Optional<EIP1559> eip1559,
final TransactionPoolConfiguration configuration) {
this.pendingTransactions = pendingTransactions;
this.protocolSchedule = protocolSchedule;
this.protocolContext = protocolContext;
this.transactionBatchAddedListener = transactionBatchAddedListener;
this.pendingTransactionBatchAddedListener = pendingTransactionBatchAddedListener;
this.syncState = syncState;
this.peerTransactionTracker = peerTransactionTracker;
this.maybePeerPendingTransactionTracker = maybePeerPendingTransactionTracker;
this.minTransactionGasPrice = minTransactionGasPrice;
this.eip1559 = eip1559;
this.configuration = configuration;
duplicateTransactionCounter =
metricsSystem.createLabelledCounter(
BesuMetricCategory.TRANSACTION_POOL,
"transactions_duplicates_total",
"Total number of duplicate transactions received",
"source");
ethContext.getEthPeers().subscribeConnect(this::handleConnect);
}
void handleConnect(final EthPeer peer) {
pendingTransactions
.getLocalTransactions()
.forEach(transaction -> peerTransactionTracker.addToPeerSendQueue(peer, transaction));
maybePeerPendingTransactionTracker
.filter(
peerPendingTransactionTracker ->
peerPendingTransactionTracker.isPeerSupported(peer, EthProtocol.ETH65))
.ifPresent(
peerPendingTransactionTracker ->
pendingTransactions
.getNewPooledHashes()
.forEach(hash -> peerPendingTransactionTracker.addToPeerSendQueue(peer, hash)));
}
public boolean addTransactionHash(final Hash transactionHash) {
return pendingTransactions.addTransactionHash(transactionHash);
}
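  /**
   * Attempts to add a locally submitted transaction to the pool. The transaction is rejected if
   * its gas price exceeds the configured fee cap or if it fails validation; otherwise it is added
   * and the batch-added listeners are notified.
   *
   * @param transaction the locally submitted transaction
   * @return the validation result; invalid if the transaction was rejected
   */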
public ValidationResult<TransactionInvalidReason> addLocalTransaction(
final Transaction transaction) {
if (!configuration.getTxFeeCap().isZero()
&& minTransactionGasPrice(transaction).compareTo(configuration.getTxFeeCap()) > 0) {
return ValidationResult.invalid(TransactionInvalidReason.TX_FEECAP_EXCEEDED);
}
final ValidationResult<TransactionInvalidReason> validationResult =
validateTransaction(transaction);
if (validationResult.isValid()) {
final TransactionAddedStatus transactionAddedStatus =
pendingTransactions.addLocalTransaction(transaction);
if (!transactionAddedStatus.equals(TransactionAddedStatus.ADDED)) {
duplicateTransactionCounter.labels(LOCAL).inc();
return ValidationResult.invalid(transactionAddedStatus.getInvalidReason().orElseThrow());
}
final Collection<Transaction> txs = singletonList(transaction);
transactionBatchAddedListener.onTransactionsAdded(txs);
pendingTransactionBatchAddedListener.ifPresent(it -> it.onTransactionsAdded(txs));
}
return validationResult;
}
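  /**
   * Validates transactions received from remote peers and adds the acceptable ones to the pool,
   * skipping duplicates and transactions priced below the configured minimum. Remote transactions
   * are ignored entirely while the node is not in sync.
   *
   * @param transactions the transactions received from peers
   */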
public void addRemoteTransactions(final Collection<Transaction> transactions) {
if (!syncState.isInSync(SYNC_TOLERANCE)) {
return;
}
final Set<Transaction> addedTransactions = new HashSet<>();
for (final Transaction transaction : transactions) {
pendingTransactions.tryEvictTransactionHash(transaction.getHash());
if (pendingTransactions.containsTransaction(transaction.getHash())) {
// We already have this transaction, don't even validate it.
duplicateTransactionCounter.labels(REMOTE).inc();
continue;
}
final Wei transactionGasPrice = minTransactionGasPrice(transaction);
if (transactionGasPrice.compareTo(minTransactionGasPrice) < 0) {
continue;
}
final ValidationResult<TransactionInvalidReason> validationResult =
validateTransaction(transaction);
if (validationResult.isValid()) {
final boolean added = pendingTransactions.addRemoteTransaction(transaction);
if (added) {
addedTransactions.add(transaction);
} else {
duplicateTransactionCounter.labels(REMOTE).inc();
}
} else {
LOG.trace(
"Validation failed ({}) for transaction {}. Discarding.",
validationResult.getInvalidReason(),
transaction);
}
}
if (!addedTransactions.isEmpty()) {
transactionBatchAddedListener.onTransactionsAdded(addedTransactions);
}
}
public long subscribePendingTransactions(final PendingTransactionListener listener) {
return pendingTransactions.subscribePendingTransactions(listener);
}
public void unsubscribePendingTransactions(final long id) {
pendingTransactions.unsubscribePendingTransactions(id);
}
public long subscribeDroppedTransactions(final PendingTransactionDroppedListener listener) {
return pendingTransactions.subscribeDroppedTransactions(listener);
}
public void unsubscribeDroppedTransactions(final long id) {
pendingTransactions.unsubscribeDroppedTransactions(id);
}
@Override
public void onBlockAdded(final BlockAddedEvent event) {
event.getAddedTransactions().forEach(pendingTransactions::transactionAddedToBlock);
addRemoteTransactions(event.getRemovedTransactions());
}
private MainnetTransactionValidator getTransactionValidator() {
return protocolSchedule
.getByBlockNumber(protocolContext.getBlockchain().getChainHeadBlockNumber())
.getTransactionValidator();
}
public PendingTransactions getPendingTransactions() {
return pendingTransactions;
}
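  /**
   * Runs stateless validation, checks the transaction against the chain head gas limit and then
   * validates the sender against the chain head world state.
   */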
private ValidationResult<TransactionInvalidReason> validateTransaction(
final Transaction transaction) {
final BlockHeader chainHeadBlockHeader = getChainHeadBlockHeader();
final ValidationResult<TransactionInvalidReason> basicValidationResult =
getTransactionValidator().validate(transaction, chainHeadBlockHeader.getBaseFee());
if (!basicValidationResult.isValid()) {
return basicValidationResult;
}
if (transaction.getGasLimit() > chainHeadBlockHeader.getGasLimit()) {
return ValidationResult.invalid(
TransactionInvalidReason.EXCEEDS_BLOCK_GAS_LIMIT,
String.format(
"Transaction gas limit of %s exceeds block gas limit of %s",
transaction.getGasLimit(), chainHeadBlockHeader.getGasLimit()));
}
return protocolContext
.getWorldStateArchive()
.get(chainHeadBlockHeader.getStateRoot(), chainHeadBlockHeader.getHash())
.map(
worldState -> {
final Account senderAccount = worldState.get(transaction.getSender());
return getTransactionValidator()
.validateForSender(
transaction, senderAccount, TransactionValidationParams.transactionPool());
})
.orElseGet(() -> ValidationResult.invalid(CHAIN_HEAD_WORLD_STATE_NOT_AVAILABLE));
}
public Optional<Transaction> getTransactionByHash(final Hash hash) {
return pendingTransactions.getTransactionByHash(hash);
}
private BlockHeader getChainHeadBlockHeader() {
final MutableBlockchain blockchain = protocolContext.getBlockchain();
return blockchain.getBlockHeader(blockchain.getChainHeadHash()).get();
}
public interface TransactionBatchAddedListener {
void onTransactionsAdded(Iterable<Transaction> transactions);
}
private Wei minTransactionGasPrice(final Transaction transaction) {
// EIP-1559 enablement guard block
if (!ExperimentalEIPs.eip1559Enabled || this.eip1559.isEmpty()) {
return frontierPriceCalculator.price(transaction, Optional.empty());
}
final BlockHeader chainHeadBlockHeader = getChainHeadBlockHeader();
// Compute transaction price using EIP-1559 rules if chain head is after fork
if (this.eip1559.get().isEIP1559(chainHeadBlockHeader.getNumber())) {
return BaseFee.minTransactionPriceInNextBlock(
transaction, eip1559PriceCalculator, chainHeadBlockHeader::getBaseFee);
} else { // Use frontier rules otherwise
return frontierPriceCalculator.price(transaction, Optional.empty());
}
}
}
| 1 | 24,735 | Such a large test impact for such a little 3 line change ;) | hyperledger-besu | java |
@@ -51,9 +51,9 @@ class Payment extends AbstractTranslatableEntity implements OrderableEntityInter
protected $vat;
/**
- * @var Collection
+ * @var \Shopsys\FrameworkBundle\Model\Transport\Transport[]|Collection
*
- * @ORM\ManyToMany(targetEntity="Shopsys\FrameworkBundle\Model\Transport\Transport")
+ * @ORM\ManyToMany(targetEntity="Shopsys\FrameworkBundle\Model\Transport\Transport", inversedBy="payments", cascade={"persist"})
* @ORM\JoinTable(name="payments_transports")
*/
protected $transports; | 1 | <?php
namespace Shopsys\FrameworkBundle\Model\Payment;
use Doctrine\Common\Collections\ArrayCollection;
use Doctrine\Common\Collections\Collection;
use Doctrine\ORM\Mapping as ORM;
use Gedmo\Mapping\Annotation as Gedmo;
use Prezent\Doctrine\Translatable\Annotation as Prezent;
use Shopsys\FrameworkBundle\Component\Gedmo\SortablePosition;
use Shopsys\FrameworkBundle\Component\Grid\Ordering\OrderableEntityInterface;
use Shopsys\FrameworkBundle\Model\Localization\AbstractTranslatableEntity;
use Shopsys\FrameworkBundle\Model\Pricing\Currency\Currency;
use Shopsys\FrameworkBundle\Model\Transport\Transport;
/**
* @ORM\Table(name="payments")
* @ORM\Entity
*/
class Payment extends AbstractTranslatableEntity implements OrderableEntityInterface
{
/**
* @var int
*
* @ORM\Column(type="integer")
* @ORM\Id
* @ORM\GeneratedValue(strategy="IDENTITY")
*/
protected $id;
/**
* @var \Shopsys\FrameworkBundle\Model\Payment\PaymentTranslation[]
*
* @Prezent\Translations(targetEntity="Shopsys\FrameworkBundle\Model\Payment\PaymentTranslation")
*/
protected $translations;
/**
* @var \Shopsys\FrameworkBundle\Model\Payment\PaymentPrice[]
*
* @ORM\OneToMany(targetEntity="Shopsys\FrameworkBundle\Model\Payment\PaymentPrice", mappedBy="payment", cascade={"persist"})
*/
protected $prices;
/**
* @var \Shopsys\FrameworkBundle\Model\Pricing\Vat\Vat
*
* @ORM\ManyToOne(targetEntity="Shopsys\FrameworkBundle\Model\Pricing\Vat\Vat")
* @ORM\JoinColumn(nullable=false)
*/
protected $vat;
/**
* @var Collection
*
* @ORM\ManyToMany(targetEntity="Shopsys\FrameworkBundle\Model\Transport\Transport")
* @ORM\JoinTable(name="payments_transports")
*/
protected $transports;
/**
* @var bool
*
* @ORM\Column(type="boolean")
*/
protected $hidden;
/**
* @var bool
*
* @ORM\Column(type="boolean")
*/
protected $deleted;
/**
* @var int|null
*
* @Gedmo\SortablePosition
* @ORM\Column(type="integer", nullable=false)
*/
protected $position;
/**
* @var bool
*
* @ORM\Column(type="boolean")
*/
protected $czkRounding;
/**
* @param \Shopsys\FrameworkBundle\Model\Payment\PaymentData $paymentData
*/
public function __construct(PaymentData $paymentData)
{
$this->translations = new ArrayCollection();
$this->vat = $paymentData->vat;
$this->transports = new ArrayCollection();
$this->hidden = $paymentData->hidden;
$this->deleted = false;
$this->setTranslations($paymentData);
$this->prices = new ArrayCollection();
$this->czkRounding = $paymentData->czkRounding;
$this->position = SortablePosition::LAST_POSITION;
}
/**
* @param \Shopsys\FrameworkBundle\Model\Transport\Transport $transport
*/
public function addTransport(Transport $transport)
{
if (!$this->transports->contains($transport)) {
$this->transports->add($transport);
}
}
/**
* @param \Shopsys\FrameworkBundle\Model\Transport\Transport[] $transports
*/
public function setTransports(array $transports)
{
$this->transports->clear();
foreach ($transports as $transport) {
$this->addTransport($transport);
}
}
/**
* @return \Doctrine\Common\Collections\Collection
*/
public function getTransports()
{
return $this->transports;
}
/**
* @param \Shopsys\FrameworkBundle\Model\Payment\PaymentData $paymentData
*/
protected function setTranslations(PaymentData $paymentData)
{
foreach ($paymentData->name as $locale => $name) {
$this->translation($locale)->setName($name);
}
foreach ($paymentData->description as $locale => $description) {
$this->translation($locale)->setDescription($description);
}
foreach ($paymentData->instructions as $locale => $instructions) {
$this->translation($locale)->setInstructions($instructions);
}
}
/**
* @param \Shopsys\FrameworkBundle\Model\Payment\PaymentData $paymentData
*/
public function edit(PaymentData $paymentData)
{
$this->vat = $paymentData->vat;
$this->hidden = $paymentData->hidden;
$this->czkRounding = $paymentData->czkRounding;
$this->setTranslations($paymentData);
}
/**
* @param \Shopsys\FrameworkBundle\Model\Payment\PaymentPriceFactoryInterface $paymentPriceFactory
* @param \Shopsys\FrameworkBundle\Model\Pricing\Currency\Currency $currency
* @param string $price
*/
public function setPrice(
PaymentPriceFactoryInterface $paymentPriceFactory,
Currency $currency,
$price
) {
foreach ($this->prices as $paymentInputPrice) {
if ($paymentInputPrice->getCurrency() === $currency) {
$paymentInputPrice->setPrice($price);
return;
}
}
$this->prices[] = $paymentPriceFactory->create($this, $currency, $price);
}
/**
* @return int
*/
public function getId()
{
return $this->id;
}
/**
* @param string|null $locale
* @return string
*/
public function getName($locale = null)
{
return $this->translation($locale)->getName();
}
/**
* @return \Shopsys\FrameworkBundle\Model\Payment\PaymentPrice[]
*/
public function getPrices()
{
return $this->prices;
}
/**
* @param \Shopsys\FrameworkBundle\Model\Pricing\Currency\Currency $currency
* @return \Shopsys\FrameworkBundle\Model\Payment\PaymentPrice
*/
public function getPrice(Currency $currency)
{
foreach ($this->prices as $price) {
if ($price->getCurrency() === $currency) {
return $price;
}
}
        $message = 'Payment price with currency ID ' . $currency->getId() . ' from payment with ID ' . $this->getId() . ' not found.';
throw new \Shopsys\FrameworkBundle\Model\Payment\Exception\PaymentPriceNotFoundException($message);
}
/**
* @return \Shopsys\FrameworkBundle\Model\Pricing\Vat\Vat
*/
public function getVat()
{
return $this->vat;
}
/**
* @param string|null $locale
* @return string|null
*/
public function getDescription($locale = null)
{
return $this->translation($locale)->getDescription();
}
/**
* @param string|null $locale
* @return string|null
*/
public function getInstructions($locale = null)
{
return $this->translation($locale)->getInstructions();
}
/**
* @return bool
*/
public function isHidden()
{
return $this->hidden;
}
/**
* @return bool
*/
public function isDeleted()
{
return $this->deleted;
}
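    /**
     * Marks the payment as deleted and detaches it from all transports.
     */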
public function markAsDeleted()
{
$this->deleted = true;
$this->transports->clear();
}
/**
* @return int|null
*/
public function getPosition()
{
return $this->position;
}
/**
* @param int $position
*/
public function setPosition($position)
{
$this->position = $position;
}
/**
* @return bool
*/
public function isCzkRounding()
{
return $this->czkRounding;
}
/**
* @return \Shopsys\FrameworkBundle\Model\Payment\PaymentTranslation
*/
protected function createTranslation()
{
return new PaymentTranslation();
}
}
| 1 | 10,111 | Is the `cascade` necessary? It should work also without the casdade, because Payments are always already persisted when use cases with them. If it doesn't work, please tell me the reason. It is as same in Transport. | shopsys-shopsys | php |
@@ -0,0 +1,11 @@
+<?php
+
+declare(strict_types=1);
+
+namespace Psalm\Issue;
+
+final class NonInvariantPropertyType extends CodeIssue
+{
+ public const ERROR_LEVEL = -1;
+ public const SHORTCODE = 235159;
+} | 1 | 1 | 9,941 | This is not very short. I'm not sure sure if there's a system for assigning shortcodes to issues. | vimeo-psalm | php |
|
@@ -20,6 +20,9 @@ const (
// the default configuration provided to ingress-annotation should be
// created.
tlsACMEAnnotation = "kubernetes.io/tls-acme"
+ // editInPlaceAnnotation is used to toggle the use of ingressClass instead
+ // of ingress on the created Certificate resource
+ editInPlaceAnnotation = "certmanager.k8s.io/edit-in-place"
// issuerNameAnnotation can be used to override the issuer specified on the
// created Certificate resource.
issuerNameAnnotation = "certmanager.k8s.io/issuer" | 1 | package controller
import (
"context"
"fmt"
"strconv"
"github.com/golang/glog"
corev1 "k8s.io/api/core/v1"
extv1beta1 "k8s.io/api/extensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1"
)
const (
// tlsACMEAnnotation is here for compatibility with kube-lego style
// ingress resources. When set to "true", a Certificate resource with
// the default configuration provided to ingress-annotation should be
// created.
tlsACMEAnnotation = "kubernetes.io/tls-acme"
// issuerNameAnnotation can be used to override the issuer specified on the
// created Certificate resource.
issuerNameAnnotation = "certmanager.k8s.io/issuer"
// clusterIssuerNameAnnotation can be used to override the issuer specified on the
// created Certificate resource. The Certificate will reference the
// specified *ClusterIssuer* instead of normal issuer.
clusterIssuerNameAnnotation = "certmanager.k8s.io/cluster-issuer"
// acmeIssuerChallengeTypeAnnotation can be used to override the default ACME challenge
// type to be used when the specified issuer is an ACME issuer
acmeIssuerChallengeTypeAnnotation = "certmanager.k8s.io/acme-challenge-type"
// acmeIssuerDNS01ProviderNameAnnotation can be used to override the default dns01 provider
// configured on the issuer if the challenge type is set to dns01
acmeIssuerDNS01ProviderNameAnnotation = "certmanager.k8s.io/acme-dns01-provider"
)
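// As an illustrative example (the issuer and provider names below are made
// up), an Ingress requesting a dns01-validated certificate from a
// ClusterIssuer could be annotated with:
//   certmanager.k8s.io/cluster-issuer: letsencrypt-prod
//   certmanager.k8s.io/acme-challenge-type: dns01
//   certmanager.k8s.io/acme-dns01-provider: prod-dns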
var ingressGVK = extv1beta1.SchemeGroupVersion.WithKind("Ingress")
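// Sync reconciles the given Ingress, creating or updating the Certificate
// resources described by its TLS sections and cert-manager annotations.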
func (c *Controller) Sync(ctx context.Context, ing *extv1beta1.Ingress) error {
if !shouldSync(ing) {
glog.Infof("Not syncing ingress %s/%s as it does not contain necessary annotations", ing.Namespace, ing.Name)
return nil
}
newCrts, updateCrts, err := c.buildCertificates(ing)
if err != nil {
return err
}
for _, crt := range newCrts {
_, err := c.CMClient.CertmanagerV1alpha1().Certificates(crt.Namespace).Create(crt)
if err != nil {
return err
}
c.Recorder.Eventf(ing, corev1.EventTypeNormal, "CreateCertificate", "Successfully created Certificate %q", crt.Name)
}
for _, crt := range updateCrts {
_, err := c.CMClient.CertmanagerV1alpha1().Certificates(crt.Namespace).Update(crt)
if err != nil {
return err
}
c.Recorder.Eventf(ing, corev1.EventTypeNormal, "UpdateCertificate", "Successfully updated Certificate %q", crt.Name)
}
return nil
}
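// buildCertificates derives the desired Certificate for every TLS entry on the
// Ingress and returns the Certificates that need to be created and the ones
// that need to be updated.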
func (c *Controller) buildCertificates(ing *extv1beta1.Ingress) (new, update []*v1alpha1.Certificate, _ error) {
issuerName, issuerKind := c.issuerForIngress(ing)
issuer, err := c.getGenericIssuer(ing.Namespace, issuerName, issuerKind)
if err != nil {
return nil, nil, err
}
var newCrts []*v1alpha1.Certificate
var updateCrts []*v1alpha1.Certificate
for i, tls := range ing.Spec.TLS {
// validate the ingress TLS block
if len(tls.Hosts) == 0 {
return nil, nil, fmt.Errorf("secret %q for ingress %q has no hosts specified", tls.SecretName, ing.Name)
}
if tls.SecretName == "" {
return nil, nil, fmt.Errorf("TLS entry %d for ingress %q must specify a secretName", i, ing.Name)
}
existingCrt, err := c.certificateLister.Certificates(ing.Namespace).Get(tls.SecretName)
if !apierrors.IsNotFound(err) && err != nil {
return nil, nil, err
}
crt := &v1alpha1.Certificate{
ObjectMeta: metav1.ObjectMeta{
Name: tls.SecretName,
Namespace: ing.Namespace,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ing, ingressGVK)},
},
Spec: v1alpha1.CertificateSpec{
DNSNames: tls.Hosts,
SecretName: tls.SecretName,
IssuerRef: v1alpha1.ObjectReference{
Name: issuerName,
Kind: issuerKind,
},
},
}
err = c.setIssuerSpecificConfig(crt, issuer, ing, tls)
if err != nil {
return nil, nil, err
}
		// if a Certificate for this TLS entry already exists, update it when it
		// differs from the desired state, otherwise create a new one
if existingCrt != nil {
glog.Infof("Certificate %q for ingress %q already exists", tls.SecretName, ing.Name)
if !certNeedsUpdate(existingCrt, crt) {
glog.Infof("Certificate %q for ingress %q is up to date", tls.SecretName, ing.Name)
continue
}
updateCrt := existingCrt.DeepCopy()
updateCrt.Spec.DNSNames = tls.Hosts
updateCrt.Spec.SecretName = tls.SecretName
updateCrt.Spec.IssuerRef.Name = issuerName
updateCrt.Spec.IssuerRef.Kind = issuerKind
updateCrts = append(updateCrts, updateCrt)
} else {
newCrts = append(newCrts, crt)
}
}
return newCrts, updateCrts, nil
}
// certNeedsUpdate returns true if the existing Certificate differs from the desired one and therefore needs to be updated
func certNeedsUpdate(a, b *v1alpha1.Certificate) bool {
if a.Name != b.Name {
return true
}
if len(a.Spec.DNSNames) != len(b.Spec.DNSNames) {
return true
}
for i := range a.Spec.DNSNames {
if a.Spec.DNSNames[i] != b.Spec.DNSNames[i] {
return true
}
}
if a.Spec.SecretName != b.Spec.SecretName {
return true
}
if a.Spec.IssuerRef.Name != b.Spec.IssuerRef.Name {
return true
}
if a.Spec.IssuerRef.Kind != b.Spec.IssuerRef.Kind {
return true
}
return false
}
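// setIssuerSpecificConfig applies issuer-specific settings to the Certificate,
// currently the ACME challenge configuration (http01 or dns01) selected via
// the ingress annotations.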
func (c *Controller) setIssuerSpecificConfig(crt *v1alpha1.Certificate, issuer v1alpha1.GenericIssuer, ing *extv1beta1.Ingress, tls extv1beta1.IngressTLS) error {
ingAnnotations := ing.Annotations
if ingAnnotations == nil {
ingAnnotations = map[string]string{}
}
// for ACME issuers
if issuer.GetSpec().ACME != nil {
challengeType, ok := ingAnnotations[acmeIssuerChallengeTypeAnnotation]
if !ok {
challengeType = c.options.DefaultACMEIssuerChallengeType
}
domainCfg := v1alpha1.ACMECertificateDomainConfig{
Domains: tls.Hosts,
}
switch challengeType {
case "http01":
domainCfg.HTTP01 = &v1alpha1.ACMECertificateHTTP01Config{Ingress: ing.Name}
case "dns01":
dnsProvider, ok := ingAnnotations[acmeIssuerDNS01ProviderNameAnnotation]
if !ok {
dnsProvider = c.options.DefaultACMEIssuerDNS01ProviderName
}
if dnsProvider == "" {
return fmt.Errorf("no acme issuer dns01 challenge provider specified")
}
domainCfg.DNS01 = &v1alpha1.ACMECertificateDNS01Config{Provider: dnsProvider}
default:
return fmt.Errorf("invalid acme issuer challenge type specified %q", challengeType)
}
crt.Spec.ACME = &v1alpha1.ACMECertificateConfig{Config: []v1alpha1.ACMECertificateDomainConfig{domainCfg}}
}
return nil
}
// shouldSync returns true if this ingress should have a Certificate resource
// created for it
func shouldSync(ing *extv1beta1.Ingress) bool {
annotations := ing.Annotations
if annotations == nil {
annotations = map[string]string{}
}
if _, ok := annotations[issuerNameAnnotation]; ok {
return true
}
if _, ok := annotations[clusterIssuerNameAnnotation]; ok {
return true
}
if s, ok := annotations[tlsACMEAnnotation]; ok {
if b, _ := strconv.ParseBool(s); b {
return true
}
}
if _, ok := annotations[acmeIssuerChallengeTypeAnnotation]; ok {
return true
}
if _, ok := annotations[acmeIssuerDNS01ProviderNameAnnotation]; ok {
return true
}
return false
}
// issuerForIngress will determine the issuer that should be specified on a
// Certificate created for the given Ingress resource. If one is not set, the
// default issuer given to the controller will be used.
func (c *Controller) issuerForIngress(ing *extv1beta1.Ingress) (name string, kind string) {
name = c.options.DefaultIssuerName
kind = c.options.DefaultIssuerKind
annotations := ing.Annotations
if annotations == nil {
annotations = map[string]string{}
}
if issuerName, ok := annotations[issuerNameAnnotation]; ok {
name = issuerName
kind = v1alpha1.IssuerKind
}
if issuerName, ok := annotations[clusterIssuerNameAnnotation]; ok {
name = issuerName
kind = v1alpha1.ClusterIssuerKind
}
return name, kind
}
func (c *Controller) getGenericIssuer(namespace, name, kind string) (v1alpha1.GenericIssuer, error) {
switch kind {
case v1alpha1.IssuerKind:
return c.issuerLister.Issuers(namespace).Get(name)
case v1alpha1.ClusterIssuerKind:
if c.clusterIssuerLister == nil {
return nil, fmt.Errorf("cannot get ClusterIssuer for %q as ingress-shim is scoped to a single namespace", name)
}
return c.clusterIssuerLister.Get(name)
default:
return nil, fmt.Errorf(`invalid value %q for issuer kind. Must be empty, %q or %q`, kind, v1alpha1.IssuerKind, v1alpha1.ClusterIssuerKind)
}
}
| 1 | 12,156 | We should probably prefix this with `acme-http01` to bring it in-line with other annotations, e.g. `certmanager.k8s.io/acme-http01-edit-in-place`. It's quite wordy, but it *is* more specific (and I'd hope more obvious to the user what it is for?) | jetstack-cert-manager | go |
@@ -665,7 +665,7 @@ public class JobTypeManager {
/**
+ * Get the keystore load props
+ */
- public Props getCommonPluginLoadProps() {
+ public Props getCommonPluginLoadProps() {
return this.cachedCommonPluginLoadProps;
}
| 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.jobtype;
import azkaban.Constants;
import azkaban.Constants.PluginManager;
import azkaban.cluster.Cluster;
import azkaban.cluster.ClusterRouter;
import azkaban.cluster.DisabledClusterRouter;
import azkaban.flow.CommonJobProperties;
import azkaban.jobExecutor.JavaProcessJob;
import azkaban.jobExecutor.Job;
import azkaban.jobExecutor.JobClassLoader;
import azkaban.jobExecutor.NoopJob;
import azkaban.jobExecutor.ProcessJob;
import azkaban.jobExecutor.utils.JobExecutionException;
import azkaban.utils.Props;
import azkaban.utils.PropsUtils;
import azkaban.utils.Utils;
import com.google.common.annotations.VisibleForTesting;
import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
public class JobTypeManager {
private static final Logger LOGGER = Logger.getLogger(JobTypeManager.class);
private static final String[] NON_OVERRIDABLE_PROPS =
{ CommonJobProperties.TARGET_CLUSTER_CLASSPATH,
CommonJobProperties.TARGET_CLUSTER_NATIVE_LIB,
"env.HADOOP_HOME", "env.HADOOP_COMMON_HOME", "env.HADOOP_YARN_HOME", "env.HADOOP_HDFS_HOME",
"env.HADOOP_MAPRED_HOME", "env.HADOOP_CONF_DIR", "env.YARN_CONF_DIR" };
private final String jobTypePluginDir; // the dir for jobtype plugins
private final ClassLoader parentLoader;
private final Props globalProperties;
private final ClusterRouter clusterRouter;
private JobTypePluginSet pluginSet;
// Only used to load keyStore.
private Props cachedCommonPluginLoadProps;
// Overridable plugin load properties
private final String pluginLoadOverrideProps;
@VisibleForTesting
public JobTypeManager(final String jobtypePluginDir, final Props globalProperties,
final ClassLoader parentClassLoader) {
this(jobtypePluginDir, globalProperties, parentClassLoader, new DisabledClusterRouter(), null);
}
public JobTypeManager(final String jobtypePluginDir, final Props globalProperties,
final ClassLoader parentClassLoader, final ClusterRouter clusterRouter,
final String pluginLoadOverrideProps ) {
this.jobTypePluginDir = jobtypePluginDir;
this.parentLoader = parentClassLoader;
this.globalProperties = globalProperties;
this.clusterRouter = clusterRouter;
this.pluginLoadOverrideProps = pluginLoadOverrideProps;
loadPlugins();
}
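  /**
   * Loads the built-in job types plus any job type plugins found in the configured plugin
   * directory, and atomically swaps the resulting plugin set into place.
   */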
public void loadPlugins() throws JobTypeManagerException {
final JobTypePluginSet plugins = new JobTypePluginSet();
loadDefaultTypes(plugins);
if (this.jobTypePluginDir != null) {
final File pluginDir = new File(this.jobTypePluginDir);
if (pluginDir.exists()) {
LOGGER.info("Job type plugin directory set. Loading extra job types from " + pluginDir);
try {
loadPluginJobTypes(plugins);
} catch (final Exception e) {
LOGGER.info("Plugin jobtypes failed to load. " + e.getCause(), e);
throw new JobTypeManagerException(e);
}
}
}
// Swap the plugin set. If exception is thrown, then plugin isn't swapped.
synchronized (this) {
this.pluginSet = plugins;
}
}
private void loadDefaultTypes(final JobTypePluginSet plugins)
throws JobTypeManagerException {
LOGGER.info("Loading plugin default job types");
plugins.addPluginClassName("command", ProcessJob.class.getName());
plugins.addPluginClassName("javaprocess", JavaProcessJob.class.getName());
plugins.addPluginClassName("noop", NoopJob.class.getName());
}
// load Job Types from jobtype plugin dir
private void loadPluginJobTypes(final JobTypePluginSet plugins)
throws JobTypeManagerException {
final File jobPluginsDir = new File(this.jobTypePluginDir);
if (!jobPluginsDir.exists()) {
LOGGER.error("Job type plugin dir " + this.jobTypePluginDir
+ " doesn't exist. Will not load any external plugins.");
return;
} else if (!jobPluginsDir.isDirectory()) {
throw new JobTypeManagerException("Job type plugin dir "
+ this.jobTypePluginDir + " is not a directory!");
} else if (!jobPluginsDir.canRead()) {
throw new JobTypeManagerException("Job type plugin dir "
+ this.jobTypePluginDir + " is not readable!");
}
// Load the common properties used by all jobs that are run
Props commonPluginJobProps = null;
final File commonJobPropsFile = new File(jobPluginsDir, Constants.PluginManager.COMMONCONFFILE);
if (commonJobPropsFile.exists()) {
LOGGER.info("Common plugin job props file " + commonJobPropsFile
+ " found. Attempt to load.");
try {
commonPluginJobProps = new Props(this.globalProperties, commonJobPropsFile);
} catch (final IOException e) {
throw new JobTypeManagerException(
"Failed to load common plugin job properties" + e.getCause());
}
} else {
LOGGER.info("Common plugin job props file " + commonJobPropsFile
+ " not found. Using only globals props");
commonPluginJobProps = new Props(this.globalProperties);
}
// Loads the common properties used by all plugins when loading
Props commonPluginLoadProps = null;
final File commonLoadPropsFile = new File(jobPluginsDir, Constants.PluginManager.COMMONSYSCONFFILE);
if (commonLoadPropsFile.exists()) {
LOGGER.info("Common plugin load props file " + commonLoadPropsFile
+ " found. Attempt to load.");
try {
commonPluginLoadProps = new Props(null, commonLoadPropsFile);
this.cachedCommonPluginLoadProps = commonPluginLoadProps;
} catch (final IOException e) {
throw new JobTypeManagerException(
"Failed to load common plugin loader properties" + e.getCause());
}
} else {
LOGGER.info("Common plugin load props file " + commonLoadPropsFile
+ " not found. Using empty props.");
commonPluginLoadProps = new Props();
}
plugins.setCommonPluginJobProps(commonPluginJobProps);
plugins.setCommonPluginLoadProps(commonPluginLoadProps);
if (commonPluginLoadProps
.containsKey(Constants.PluginManager.DEFAULT_PROXY_USERS_JOBTYPE_CLASSES)) {
plugins.addDefaultProxyUsersJobTypeClasses(commonPluginLoadProps
.getStringList(Constants.PluginManager.DEFAULT_PROXY_USERS_JOBTYPE_CLASSES));
}
if (commonPluginLoadProps.containsKey(PluginManager.DEFAULT_PROXY_USERS_FILTER)) {
plugins.addDefaultProxyUsersFilter(
commonPluginLoadProps.getStringList(PluginManager.DEFAULT_PROXY_USERS_FILTER));
}
// Loads the default-proxy-users mappings for all job types.
final File defaultProxyUsersFile = new File(jobPluginsDir,
Constants.PluginManager.DEFAULT_PROXY_USERS_FILE);
if (!defaultProxyUsersFile.exists()) {
LOGGER.info("Default proxy users file " + defaultProxyUsersFile
+ " not found.");
} else {
LOGGER.info("Default proxy users file " + defaultProxyUsersFile
+ " found. Attempting to load.");
try {
final Props defaultProxyUsers = new Props(null, defaultProxyUsersFile);
for (String jobType : defaultProxyUsers.getKeySet()) {
plugins.addDefaultProxyUser(jobType, defaultProxyUsers.getString(jobType,
StringUtils.EMPTY));
}
} catch (final IOException e) {
throw new JobTypeManagerException(
"Failed to load common plugin loader properties" + e.getCause());
}
}
// Loading job types
for (final File dir : jobPluginsDir.listFiles()) {
if (dir.isDirectory() && dir.canRead()) {
try {
loadJobTypes(dir, plugins);
} catch (final Exception e) {
LOGGER.error("Failed to load jobtype " + dir.getName() + e.getMessage(), e);
throw new JobTypeManagerException(e);
}
}
}
}
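  /**
   * Loads a single job type plugin from the given directory: its job and load properties, an
   * optional job props processor and the classpath used to build its class loader.
   */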
private void loadJobTypes(final File pluginDir, final JobTypePluginSet plugins)
throws JobTypeManagerException {
// Directory is the jobtypeName
final String jobTypeName = pluginDir.getName();
LOGGER.info("Loading plugin " + jobTypeName);
Props pluginJobProps = null;
Props pluginLoadProps = null;
Props pluginPrivateProps = null;
final File pluginJobPropsFile = new File(pluginDir, Constants.PluginManager.CONFFILE);
final File pluginLoadPropsFile = new File(pluginDir, Constants.PluginManager.SYSCONFFILE);
if (!pluginLoadPropsFile.exists()) {
LOGGER.info("Plugin load props file " + pluginLoadPropsFile + " not found.");
return;
}
try {
final Props commonPluginJobProps = plugins.getCommonPluginJobProps();
final Props commonPluginLoadProps = plugins.getCommonPluginLoadProps();
if (pluginJobPropsFile.exists()) {
pluginJobProps = new Props(commonPluginJobProps, pluginJobPropsFile);
} else {
pluginJobProps = new Props(commonPluginJobProps);
}
// Set the private props.
pluginPrivateProps = new Props(null, pluginLoadPropsFile);
pluginPrivateProps.put("plugin.dir", pluginDir.getAbsolutePath());
plugins.addPluginPrivateProps(jobTypeName, pluginPrivateProps);
pluginLoadProps = new Props(commonPluginLoadProps, pluginPrivateProps);
// Adding "plugin.dir" to allow plugin.properties file could read this property. Also, user
// code could leverage this property as well.
pluginJobProps.put("plugin.dir", pluginDir.getAbsolutePath());
pluginLoadProps = PropsUtils.resolveProps(pluginLoadProps);
} catch (final Exception e) {
LOGGER.error("pluginLoadProps to help with debugging: " + pluginLoadProps);
throw new JobTypeManagerException("Failed to get jobtype properties"
+ e.getMessage(), e);
}
// Add properties into the plugin set
plugins.addPluginLoadProps(jobTypeName, pluginLoadProps);
if (pluginJobProps != null) {
plugins.addPluginJobProps(jobTypeName, pluginJobProps);
}
final URL[] urls = loadJobTypeClassLoaderURLs(pluginDir, jobTypeName, plugins);
final ClassLoader jobTypeLoader = new URLClassLoader(urls, parentLoader);
final String jobtypeClass = pluginLoadProps.get("jobtype.class");
if (jobtypeClass == null) {
throw new JobTypeManagerException("Failed to get jobtype property: jobtype.class");
}
// load an instance of JobPropsProcessor configured for this jobtype plugin,
// the JobPropsProcessor instance will be called for each job before it starts to run
final String jobPropsProcessorClass = pluginLoadProps.get("jobtype.job.props.processor.class");
if (jobPropsProcessorClass != null && !jobPropsProcessorClass.isEmpty()) {
Class<? extends JobPropsProcessor> processorClazz;
try {
processorClazz = (Class<? extends JobPropsProcessor>) jobTypeLoader.loadClass(jobPropsProcessorClass);
final JobPropsProcessor jobPropsProcessor = (JobPropsProcessor)
Utils.callConstructor(processorClazz, pluginLoadProps);
plugins.addPluginJobPropsProcessor(jobTypeName, jobPropsProcessor);
} catch (final ClassNotFoundException e) {
throw new JobTypeManagerException(e);
}
}
plugins.addPluginClassName(jobTypeName, jobtypeClass);
plugins.addPluginClassLoaderURLs(jobTypeName, urls);
LOGGER.info("Loaded jobtype " + jobTypeName + " " + jobtypeClass);
}
/**
* Creates and loads all plugin resources (jars) into a ClassLoader
*/
private URL[] loadJobTypeClassLoaderURLs(final File pluginDir,
final String jobTypeName, final JobTypePluginSet plugins) {
// sysconf says what jars/confs to load
final List<URL> resources = new ArrayList<>();
final Props pluginLoadProps = plugins.getPluginLoaderProps(jobTypeName);
try {
// first global classpath
LOGGER.info("Adding global resources for " + jobTypeName);
final List<String> typeGlobalClassPath =
pluginLoadProps.getStringList("jobtype.global.classpath", null, ",");
if (typeGlobalClassPath != null) {
for (final String jar : typeGlobalClassPath) {
final URL cpItem = new File(jar).toURI().toURL();
if (!resources.contains(cpItem)) {
LOGGER.info("adding to classpath " + cpItem);
resources.add(cpItem);
}
}
}
// type specific classpath
LOGGER.info("Adding type resources.");
final List<String> typeClassPath =
pluginLoadProps.getStringList("jobtype.classpath", null, ",");
if (typeClassPath != null) {
for (final String jar : typeClassPath) {
final URL cpItem = new File(jar).toURI().toURL();
if (!resources.contains(cpItem)) {
LOGGER.info("adding to classpath " + cpItem);
resources.add(cpItem);
}
}
}
final List<String> jobtypeLibDirs =
pluginLoadProps.getStringList("jobtype.lib.dir", null, ",");
if (jobtypeLibDirs != null) {
for (final String libDir : jobtypeLibDirs) {
for (final File f : new File(libDir).listFiles()) {
if (f.getName().endsWith(".jar")) {
resources.add(f.toURI().toURL());
LOGGER.info("adding to classpath " + f.toURI().toURL());
}
}
}
}
LOGGER.info("Adding type override resources.");
for (final File f : pluginDir.listFiles()) {
if (f.getName().endsWith(".jar")) {
resources.add(f.toURI().toURL());
LOGGER.info("adding to classpath " + f.toURI().toURL());
}
}
} catch (final MalformedURLException e) {
throw new JobTypeManagerException(e);
}
// each job type can have a different class loader
LOGGER.info(String.format("Classpath for plugin[dir: %s, JobType: %s]: %s", pluginDir, jobTypeName,
resources));
return resources.toArray(new URL[resources.size()]);
}
@VisibleForTesting
public Job buildJobExecutor(final String jobId, Props jobProps, final Logger logger)
throws JobTypeManagerException {
final JobParams jobParams = createJobParams(jobId, jobProps, logger);
return createJob(jobId, jobParams, logger);
}
/**
* Create job parameters that can be used to create a job instance.
* @param jobId job id
* @param jobProps job properties
* @param logger logger
* @return job parameters that can be used to create a job instance
*/
public JobParams createJobParams(final String jobId, Props jobProps, final Logger logger) {
// This is final because during build phase, you should never need to swap
// the pluginSet for safety reasons
final JobTypePluginSet pluginSet = getJobTypePluginSet();
try {
final Optional<String> jobTypeOptional = getJobType(jobProps);
if (!jobTypeOptional.isPresent()) {
throw new JobExecutionException(String.format(
"The 'type' parameter for job[%s] is missing or null or empty", jobProps));
}
final String jobType = jobTypeOptional.get();
logger.info("Building " + jobType + " job executor. ");
jobProps = getJobProps(jobProps, pluginSet, jobType);
final Props pluginLoadProps = getPluginLoadProps(pluginSet, jobType);
final List<URL> jobClassLoaderUrls = new ArrayList<>();
// collect jobtype declared dependencies for the job's classloader
final URL[] jobTypeURLs = pluginSet.getPluginClassLoaderURLs(jobType);
jobClassLoaderUrls.addAll(Arrays.asList(jobTypeURLs));
// collect cluster-specific dependencies for the job's classloader
Cluster targetCluster = null;
final Collection<String> components = getClusterComponents(jobProps,
pluginSet.getPluginLoaderProps(jobType), false);
ClassLoader jobContextClassLoader = this.parentLoader;
if (!components.isEmpty()) {
targetCluster = this.clusterRouter.getCluster(jobId, jobProps, logger,
components);
if (targetCluster != null && !Cluster.UNKNOWN.equals(targetCluster)) {
jobContextClassLoader = targetCluster.getSecurityManagerClassLoader();
jobProps.put(CommonJobProperties.TARGET_CLUSTER_ID, targetCluster.clusterId);
}
}
logger.info(String.format("JobClassLoader URLs: %s", jobClassLoaderUrls.stream()
.map(URL::toString).collect(Collectors.joining(", "))));
final ClassLoader jobClassLoader = new JobClassLoader(
jobClassLoaderUrls.toArray(new URL[jobClassLoaderUrls.size()]),
jobContextClassLoader, jobId);
// load the jobtype from JobClassLoader
final String jobTypeClassName = pluginSet.getPluginClassName(jobType);
final Class<? extends Object> jobTypeClass = jobClassLoader.loadClass(jobTypeClassName);
if (jobTypeClass == null) {
throw new JobExecutionException(String.format("Job type [%s] "
+ "is unrecognized. Could not construct job [%s] of type [%s].",
jobType, jobId, jobType));
}
// inject cluster jars and native libraries into jobs through properties
Props clusterSpecificProps = getClusterSpecificJobProps(targetCluster, jobProps, pluginLoadProps);
for (final String key : clusterSpecificProps.getKeySet()) {
// User's job props should take precedence over cluster props
if (!jobProps.containsKey(key)) {
jobProps.put(key, clusterSpecificProps.get(key));
}
}
// Override any plugin load props if specified.
// Make a clone of pluginLoadProps to ensure the original object is not corrupted.
// Use the cloned object from here on.
final Props pluginLoadPropsCopy = Props.clone(pluginLoadProps);
if (pluginLoadOverrideProps != null) {
final String[] propsList = pluginLoadOverrideProps.split(",");
for (final String prop : propsList) {
final String value = clusterSpecificProps.getString(prop, null);
if (value == null) {
// The property must be present in cluster specific props
logger.warn(String.format("Expected override property %s is not "
+ " present in ClusterSpecific Properties, ignoring it.", prop));
continue;
}
pluginLoadPropsCopy.put(prop, value);
}
}
Props nonOverriddableClusterProps = getClusterSpecificNonOverridableJobProps(clusterSpecificProps);
// CAUTION: ADD ROUTER-SPECIFIC PROPERTIES THAT ARE CRITICAL FOR JOB EXECUTION AS THE LAST
// STEP TO STOP THEM FROM BEING ACCIDENTALLY OVERRIDDEN BY JOB PROPERTIES
jobProps.putAll(nonOverriddableClusterProps);
jobProps = PropsUtils.resolveProps(jobProps);
return new JobParams(jobTypeClass, jobProps, pluginSet.getPluginPrivateProps(jobType),
pluginLoadPropsCopy, jobContextClassLoader);
} catch (final Exception e) {
logger.error("Failed to build job executor for job " + jobId
+ e.getMessage());
throw new JobTypeManagerException("Failed to build job executor for job "
+ jobId, e);
} catch (final Throwable t) {
logger.error(
"Failed to build job executor for job " + jobId + t.getMessage(), t);
throw new JobTypeManagerException("Failed to build job executor for job "
+ jobId, t);
}
}
/**
* @param jobProps Properties for an Azkaban Job
* @return The {@link Optional} jobType for the Azkaban Job.
*/
public static Optional<String> getJobType(Props jobProps) {
final String jobType = jobProps.getString("type", StringUtils.EMPTY);
return StringUtils.isNotBlank(jobType) ? Optional.of(jobType) : Optional.empty();
}
private static Props getClusterSpecificNonOverridableJobProps(final Props clusterSpecificJobProp) {
final Props props = new Props();
for (String prop : NON_OVERRIDABLE_PROPS) {
final String value = clusterSpecificJobProp.get(prop);
if (value != null) {
props.put(prop, value);
}
}
return props;
}
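  /**
   * Merges the plugin (or common) job properties into the job's own properties without overriding
   * keys the job already defines, then applies the job type's props processor if one is
   * configured.
   */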
private static Props getJobProps(Props jobProps, JobTypePluginSet pluginSet, String jobType) {
Props pluginJobProps = pluginSet.getPluginJobProps(jobType);
// For default jobtypes, even though they don't have pluginJobProps configured,
// they still need to load properties from common.properties file if it's present
// because common.properties file is global to all jobtypes.
if (pluginJobProps == null) {
pluginJobProps = pluginSet.getCommonPluginJobProps();
}
if (pluginJobProps != null) {
for (final String k : pluginJobProps.getKeySet()) {
if (!jobProps.containsKey(k)) {
jobProps.put(k, pluginJobProps.get(k));
}
}
}
final JobPropsProcessor propsProcessor = pluginSet.getPluginJobPropsProcessor(jobType);
if (propsProcessor != null) {
jobProps = propsProcessor.process(jobProps);
}
return jobProps;
}
static Props getPluginLoadProps(JobTypePluginSet pluginSet, String jobType) {
Props pluginLoadProps = pluginSet.getPluginLoaderProps(jobType);
if (pluginLoadProps != null) {
pluginLoadProps = PropsUtils.resolveProps(pluginLoadProps);
} else {
// pluginSet.getCommonPluginLoadProps() will return null if there is no plugins directory.
// hence assigning default Props() if that's the case
pluginLoadProps = pluginSet.getCommonPluginLoadProps();
if (pluginLoadProps == null) {
pluginLoadProps = new Props();
}
}
return pluginLoadProps;
}
/**
* Create an instance of Job with the given parameters, job id and job logger.
*/
public static Job createJob(final String jobId, final JobParams jobParams, final Logger logger) {
try {
return
(Job) Utils.callConstructor(jobParams.jobClass, jobId, jobParams.pluginLoadProps,
jobParams.jobProps, jobParams.pluginPrivateProps, logger);
} catch (final Throwable e) {
final String message = "Ctor with private properties %s, will try one without. e = ";
if (e instanceof IllegalStateException && e.getCause() instanceof NoSuchMethodException) {
// expected, message quietly, don't confuse users
logger.debug(String.format(message, "not defined") + e.getMessage());
} else {
// unexpected, message loudly
logger.warn(String.format(message, "failed"), e);
}
}
try {
return
(Job) Utils.callConstructor(jobParams.jobClass, jobId, jobParams.pluginLoadProps,
jobParams.jobProps, logger);
} catch (final Throwable e) {
final String message = String.format("Failed to build job: %s", jobId);
logger.error(message, e);
throw new JobTypeManagerException(message, e);
}
}
public static final class JobParams {
public final Class<? extends Object> jobClass;
public final ClassLoader contextClassLoader;
public final Props jobProps;
public final Props pluginLoadProps;
public final Props pluginPrivateProps;
public JobParams(final Class<? extends Object> jobClass, final Props jobProps,
final Props pluginPrivateProps, final Props pluginLoadProps,
final ClassLoader contextClassLoader) {
this.jobClass = jobClass;
this.contextClassLoader = contextClassLoader;
this.jobProps = jobProps;
this.pluginLoadProps = pluginLoadProps;
this.pluginPrivateProps = pluginPrivateProps;
}
}
/**
* Public for test reasons. Will need to move tests to the same package
*/
public synchronized JobTypePluginSet getJobTypePluginSet() {
return this.pluginSet;
}
/**
* Expose cluster-specific libraries and native libraries through job properties.
* if a router is configured, construct the properties based on cluster.properties
* otherwise, the cluster is implicitly configured, the properties will be based
* on plugins' private properties.
*/
private Props getClusterSpecificJobProps(final Cluster cluster, final Props jobProps,
final Props pluginProps) {
final Props clusterProps = new Props();
Props sourceProps;
if (cluster != null && !Cluster.UNKNOWN.equals(cluster)){
sourceProps = cluster.getProperties();
clusterProps.putAll(sourceProps);
} else {
// fall back to the existing mechanism if no cluster is found/configured
sourceProps = pluginProps;
}
final Collection<String> components = getClusterComponents(jobProps, pluginProps, true);
final String javaLibPath = Cluster.getJavaLibraryPath(sourceProps, components);
if (javaLibPath != null && !javaLibPath.isEmpty()) {
clusterProps.put(CommonJobProperties.TARGET_CLUSTER_CLASSPATH, javaLibPath);
}
final String nativeLibPath = Cluster.getNativeLibraryPath(sourceProps, components);
if (nativeLibPath != null && !nativeLibPath.isEmpty()) {
clusterProps.put(CommonJobProperties.TARGET_CLUSTER_NATIVE_LIB, nativeLibPath);
}
final String hadoopSecurityManagerClass =
sourceProps.get(Cluster.HADOOP_SECURITY_MANAGER_CLASS_PROP);
if (hadoopSecurityManagerClass != null) {
clusterProps.put(
Cluster.HADOOP_SECURITY_MANAGER_CLASS_PROP, hadoopSecurityManagerClass);
}
return clusterProps;
}
/**
* Get the components within a cluster that a job depends on. Note if
* jobtype.dependency.components.excluded is set to true, the libraries
* of the specified components are not exposed to the JVM process that
   * a job may spawn. This is to address jar conflicts between
   * HadoopSecurityManager dependencies (hive) and those of individual jobs.
*/
private static Collection<String> getClusterComponents(final Props jobProps,
final Props pluginProps, final boolean exclusionEnabled) {
// use ordered set to maintain the classpath order as much as possible
final Set<String> components = new LinkedHashSet<>();
if (jobProps != null) {
final List<String> jobTypeComponents = jobProps.getStringList(
CommonJobProperties.JOB_CLUSTER_COMPONENTS_DEPENDENCIES, Collections.emptyList(), ",");
components.addAll(jobTypeComponents);
}
if (pluginProps != null) {
final List<String> jobtypeComponents = pluginProps.getStringList(
CommonJobProperties.JOBTYPE_CLUSTER_COMPONENTS_DEPENDENCIES, Collections.emptyList(), ",");
components.addAll(jobtypeComponents);
}
if (exclusionEnabled && pluginProps != null) {
final List<String> excludedComponentsFromJobProcess = pluginProps.getStringList(
CommonJobProperties.JOBTYPE_CLUSTER_COMPONENTS_DEPENDENCIES_EXCLUDED, Collections.emptyList(), ",");
components.removeAll(excludedComponentsFromJobProcess);
}
return components;
}
/**
   * Get the keystore load props
   */
public Props getCommonPluginLoadProps() {
return this.cachedCommonPluginLoadProps;
}
}
| 1 | 22,684 | No need to add this file change for this PR. Please remove it. | azkaban-azkaban | java |
@@ -8,6 +8,7 @@ import net.sourceforge.pmd.lang.java.ast.ASTImportDeclaration;
import net.sourceforge.pmd.lang.java.rule.AbstractJavaRule;
public class DontImportJavaLangRule extends AbstractJavaRule {
+ private static final String IMPORT_JAVA_LANG = "java.lang.";
@Override
public Object visit(ASTImportDeclaration node, Object data) { | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.java.rule.imports;
import net.sourceforge.pmd.lang.java.ast.ASTImportDeclaration;
import net.sourceforge.pmd.lang.java.rule.AbstractJavaRule;
public class DontImportJavaLangRule extends AbstractJavaRule {
@Override
public Object visit(ASTImportDeclaration node, Object data) {
if (node.isStatic()) {
return data;
}
String img = node.jjtGetChild(0).getImage();
if (img.startsWith("java.lang")) {
if (img.startsWith("java.lang.ref") || img.startsWith("java.lang.reflect")
|| img.startsWith("java.lang.annotation") || img.startsWith("java.lang.instrument")
|| img.startsWith("java.lang.management") || img.startsWith("java.lang.Thread.")
|| img.startsWith("java.lang.ProcessBuilder.")) {
return data;
}
addViolation(data, node);
}
return data;
}
}
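// Illustrative behaviour sketch only (not part of the original rule source):
//   import java.lang.String;          -> reported (java.lang is imported implicitly)
//   import java.lang.reflect.Method;  -> not reported (sub-package is whitelisted above)
//   import static java.lang.Math.max; -> not reported (static imports return early)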
| 1 | 12,055 | this extra dot at the end is causing a couple tests to fail on Travis | pmd-pmd | java |
@@ -853,7 +853,11 @@ Arguments:
@conf.commands.register
def tshark(*args,**kargs):
- """Sniff packets and print them calling pkt.show(), a bit like text wireshark"""
- sniff(prn=lambda x: x.display(),*args,**kargs)
-
-
+ """Sniff packets and print them calling pkt.summary(), a bit like text wireshark"""
+ print("Capturing on '" + str(kargs.get('iface') if 'iface' in kargs else conf.iface) + "'")
+ i = [0] # This should be a nonlocal variable, using a mutable object for Python 2 compatibility
+ def _cb(pkt):
+ print("%5d\t%s" % (i[0], pkt.summary()))
+ i[0] += 1
+ sniff(prn=_cb, store=False, *args, **kargs)
+ print("\n%d packet%s captured" % (i[0], 's' if i[0] > 1 else '')) | 1 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
Functions to send and receive packets.
"""
from __future__ import absolute_import, print_function
import errno
import itertools
import threading
import os
from select import select, error as select_error
import subprocess
import time
from scapy.consts import DARWIN, FREEBSD, OPENBSD, WINDOWS
from scapy.data import ETH_P_ALL, MTU
from scapy.config import conf
from scapy.packet import Gen
from scapy.utils import get_temp_file, PcapReader, tcpdump, wrpcap
from scapy import plist
from scapy.error import log_runtime, log_interactive
from scapy.base_classes import SetGen
from scapy.supersocket import StreamSocket, L3RawSocket, L2ListenTcpdump
from scapy.modules import six
from scapy.modules.six.moves import map
if conf.route is None:
# unused import, only to initialize conf.route
import scapy.route
from scapy.supersocket import SuperSocket
#################
## Debug class ##
#################
class debug:
recv=[]
sent=[]
match=[]
####################
## Send / Receive ##
####################
def _sndrcv_snd(pks, timeout, inter, verbose, tobesent, stopevent):
"""Function used in the sending thread of sndrcv()"""
try:
i = 0
if verbose:
print("Begin emission:")
for p in tobesent:
pks.send(p)
i += 1
time.sleep(inter)
if verbose:
print("Finished to send %i packets." % i)
except SystemExit:
pass
except KeyboardInterrupt:
pass
except:
log_runtime.info("--- Error sending packets", exc_info=True)
if timeout is not None:
stopevent.wait(timeout)
stopevent.set()
class _BreakException(Exception):
"""A dummy exception used in _get_pkt() to get out of the infinite
loop
"""
pass
def _sndrcv_rcv(pks, tobesent, stopevent, nbrecv, notans, verbose, chainCC,
multi):
"""Function used to recieve packets and check their hashret"""
ans = []
hsent = {}
for i in tobesent:
h = i.hashret()
hsent.setdefault(i.hashret(), []).append(i)
if WINDOWS:
def _get_pkt():
return pks.recv(MTU)
elif conf.use_bpf:
from scapy.arch.bpf.supersocket import bpf_select
def _get_pkt():
if bpf_select([pks]):
return pks.recv()
elif (conf.use_pcap and not isinstance(pks, (StreamSocket, L3RawSocket, L2ListenTcpdump))) or \
(not isinstance(pks, (StreamSocket, L2ListenTcpdump)) and (DARWIN or FREEBSD or OPENBSD)):
def _get_pkt():
res = pks.nonblock_recv()
if res is None:
time.sleep(0.05)
return res
else:
def _get_pkt():
try:
inp, _, _ = select([pks], [], [], 0.05)
except (IOError, select_error) as exc:
# select.error has no .errno attribute
if exc.args[0] != errno.EINTR:
raise
else:
if inp:
return pks.recv(MTU)
if stopevent.is_set():
raise _BreakException()
try:
try:
while True:
r = _get_pkt()
if r is None:
if stopevent.is_set():
break
continue
ok = False
h = r.hashret()
if h in hsent:
hlst = hsent[h]
for i, sentpkt in enumerate(hlst):
if r.answers(sentpkt):
ans.append((sentpkt, r))
if verbose > 1:
os.write(1, b"*")
ok = True
if not multi:
del hlst[i]
notans -= 1
else:
if not hasattr(sentpkt, '_answered'):
notans -= 1
sentpkt._answered = 1
break
if notans == 0 and not multi:
break
if not ok:
if verbose > 1:
os.write(1, b".")
nbrecv += 1
if conf.debug_match:
debug.recv.append(r)
except KeyboardInterrupt:
if chainCC:
raise
except _BreakException:
pass
finally:
stopevent.set()
return (hsent, ans, nbrecv, notans)
def sndrcv(pks, pkt, timeout=None, inter=0, verbose=None, chainCC=False,
retry=0, multi=False, rcv_pks=None):
"""Scapy raw function to send a packet and recieve its answer.
WARNING: This is an internal function. Using sr/srp/sr1/srp is
more appropriate in many cases.
    pks: SuperSocket instance to send/receive packets
    pkt: the packet to send
    rcv_pks: if set, will be used instead of pks to receive packets. packets will still
be sent through pks
nofilter: put 1 to avoid use of BPF filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus"""
if not isinstance(pkt, Gen):
pkt = SetGen(pkt)
if verbose is None:
verbose = conf.verb
debug.recv = plist.PacketList([],"Unanswered")
debug.sent = plist.PacketList([],"Sent")
debug.match = plist.SndRcvList([])
nbrecv = 0
ans = []
# do it here to fix random fields, so that parent and child have the same
tobesent = [p for p in pkt]
notans = len(tobesent)
if retry < 0:
retry = -retry
autostop = retry
else:
autostop = 0
while retry >= 0:
if timeout is not None and timeout < 0:
timeout = None
stopevent = threading.Event()
thread = threading.Thread(
target=_sndrcv_snd,
args=(pks, timeout, inter, verbose, tobesent, stopevent),
)
thread.start()
hsent, newans, nbrecv, notans = _sndrcv_rcv(
(rcv_pks or pks), tobesent, stopevent, nbrecv, notans, verbose, chainCC, multi,
)
thread.join()
ans.extend(newans)
remain = list(itertools.chain(*six.itervalues(hsent)))
if multi:
remain = [p for p in remain if not hasattr(p, '_answered')]
if autostop and len(remain) > 0 and len(remain) != len(tobesent):
retry = autostop
tobesent = remain
if len(tobesent) == 0:
break
retry -= 1
if conf.debug_match:
debug.sent=plist.PacketList(remain[:], "Sent")
debug.match=plist.SndRcvList(ans[:])
# Clean the ans list to delete the field _answered
if multi:
for snd, _ in ans:
if hasattr(snd, '_answered'):
del snd._answered
if verbose:
print("\nReceived %i packets, got %i answers, remaining %i packets" % (nbrecv+len(ans), len(ans), notans))
return plist.SndRcvList(ans), plist.PacketList(remain, "Unanswered")
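# Illustrative sketch, not part of the original module: sndrcv() is the low-level
# engine; the sr*/srp* helpers below just open the right socket, delegate to it
# and close the socket again, roughly:
#
#   s = conf.L3socket()
#   ans, unans = sndrcv(s, IP(dst="192.0.2.1")/ICMP(), timeout=2)
#   s.close()
#
# which is what a single sr() call does for you.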
def __gen_send(s, x, inter=0, loop=0, count=None, verbose=None, realtime=None, return_packets=False, *args, **kargs):
if isinstance(x, str):
x = conf.raw_layer(load=x)
if not isinstance(x, Gen):
x = SetGen(x)
if verbose is None:
verbose = conf.verb
n = 0
if count is not None:
loop = -count
elif not loop:
loop = -1
if return_packets:
sent_packets = plist.PacketList()
try:
while loop:
dt0 = None
for p in x:
if realtime:
ct = time.time()
if dt0:
st = dt0+p.time-ct
if st > 0:
time.sleep(st)
else:
dt0 = ct-p.time
s.send(p)
if return_packets:
sent_packets.append(p)
n += 1
if verbose:
os.write(1,b".")
time.sleep(inter)
if loop < 0:
loop += 1
except KeyboardInterrupt:
pass
s.close()
if verbose:
print("\nSent %i packets." % n)
if return_packets:
return sent_packets
@conf.commands.register
def send(x, inter=0, loop=0, count=None, verbose=None, realtime=None, return_packets=False, socket=None,
*args, **kargs):
"""Send packets at layer 3
send(packets, [inter=0], [loop=0], [count=None], [verbose=conf.verb], [realtime=None], [return_packets=False],
[socket=None]) -> None"""
if socket is None:
socket = conf.L3socket(*args, **kargs)
return __gen_send(socket, x, inter=inter, loop=loop, count=count,verbose=verbose,
realtime=realtime, return_packets=return_packets)
@conf.commands.register
def sendp(x, inter=0, loop=0, iface=None, iface_hint=None, count=None, verbose=None, realtime=None,
return_packets=False, socket=None, *args, **kargs):
"""Send packets at layer 2
sendp(packets, [inter=0], [loop=0], [iface=None], [iface_hint=None], [count=None], [verbose=conf.verb],
[realtime=None], [return_packets=False], [socket=None]) -> None"""
if iface is None and iface_hint is not None and socket is None:
iface = conf.route.route(iface_hint)[0]
if socket is None:
socket = conf.L2socket(iface=iface, *args, **kargs)
return __gen_send(socket, x, inter=inter, loop=loop, count=count,
verbose=verbose, realtime=realtime, return_packets=return_packets)
@conf.commands.register
def sendpfast(x, pps=None, mbps=None, realtime=None, loop=0, file_cache=False, iface=None):
"""Send packets at layer 2 using tcpreplay for performance
pps: packets per second
    mbps: MBits per second
realtime: use packet's timestamp, bending time with real-time value
loop: number of times to process the packet list
file_cache: cache packets in RAM instead of reading from disk at each iteration
iface: output interface """
if iface is None:
iface = conf.iface
argv = [conf.prog.tcpreplay, "--intf1=%s" % iface ]
if pps is not None:
argv.append("--pps=%i" % pps)
elif mbps is not None:
argv.append("--mbps=%f" % mbps)
elif realtime is not None:
argv.append("--multiplier=%f" % realtime)
else:
argv.append("--topspeed")
if loop:
argv.append("--loop=%i" % loop)
if file_cache:
argv.append("--preload-pcap")
f = get_temp_file()
argv.append(f)
wrpcap(f, x)
try:
subprocess.check_call(argv)
except KeyboardInterrupt:
log_interactive.info("Interrupted by user")
except Exception:
if conf.interactive:
log_interactive.error("Cannot execute [%s]", argv[0], exc_info=True)
else:
raise
finally:
os.unlink(f)
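# Illustrative usage sketch, not part of the original module: replaying a saved
# capture at 1000 packets per second; tcpreplay must be installed and reachable
# through conf.prog.tcpreplay, and the file and interface names are placeholders.
#
#   pkts = rdpcap("trace.pcap")
#   sendpfast(pkts, pps=1000, iface="eth0")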
@conf.commands.register
def sr(x, promisc=None, filter=None, iface=None, nofilter=0, *args,**kargs):
"""Send and receive packets at layer 3
nofilter: put 1 to avoid use of BPF filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: listen answers only on the given interface"""
if "timeout" not in kargs:
kargs["timeout"] = -1
s = conf.L3socket(promisc=promisc, filter=filter, iface=iface, nofilter=nofilter)
result = sndrcv(s, x, *args, **kargs)
s.close()
return result
@conf.commands.register
def sr1(x, promisc=None, filter=None, iface=None, nofilter=0, *args,**kargs):
"""Send packets at layer 3 and return only the first answer
nofilter: put 1 to avoid use of BPF filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: listen answers only on the given interface"""
if "timeout" not in kargs:
kargs["timeout"] = -1
s=conf.L3socket(promisc=promisc, filter=filter, nofilter=nofilter, iface=iface)
ans, _ = sndrcv(s, x, *args, **kargs)
s.close()
if len(ans) > 0:
return ans[0][1]
else:
return None
@conf.commands.register
def srp(x, promisc=None, iface=None, iface_hint=None, filter=None, nofilter=0, type=ETH_P_ALL, *args,**kargs):
"""Send and receive packets at layer 2
nofilter: put 1 to avoid use of BPF filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: work only on the given interface"""
if "timeout" not in kargs:
kargs["timeout"] = -1
if iface is None and iface_hint is not None:
iface = conf.route.route(iface_hint)[0]
s = conf.L2socket(promisc=promisc, iface=iface, filter=filter, nofilter=nofilter, type=type)
result = sndrcv(s, x, *args, **kargs)
s.close()
return result
@conf.commands.register
def srp1(*args,**kargs):
"""Send and receive packets at layer 2 and return only the first answer
nofilter: put 1 to avoid use of BPF filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: work only on the given interface"""
if "timeout" not in kargs:
kargs["timeout"] = -1
ans, _ = srp(*args, **kargs)
if len(ans) > 0:
return ans[0][1]
else:
return None
# SEND/RECV LOOP METHODS
def __sr_loop(srfunc, pkts, prn=lambda x:x[1].summary(), prnfail=lambda x:x.summary(), inter=1, timeout=None, count=None, verbose=None, store=1, *args, **kargs):
n = 0
r = 0
ct = conf.color_theme
if verbose is None:
verbose = conf.verb
parity = 0
ans=[]
unans=[]
if timeout is None:
timeout = min(2*inter, 5)
try:
while True:
parity ^= 1
col = [ct.even,ct.odd][parity]
if count is not None:
if count == 0:
break
count -= 1
start = time.time()
if verbose > 1:
print("\rsend...\r", end=' ')
res = srfunc(pkts, timeout=timeout, verbose=0, chainCC=True, *args, **kargs)
n += len(res[0])+len(res[1])
r += len(res[0])
if verbose > 1 and prn and len(res[0]) > 0:
msg = "RECV %i:" % len(res[0])
print("\r"+ct.success(msg), end=' ')
for p in res[0]:
print(col(prn(p)))
print(" "*len(msg), end=' ')
if verbose > 1 and prnfail and len(res[1]) > 0:
msg = "fail %i:" % len(res[1])
print("\r"+ct.fail(msg), end=' ')
for p in res[1]:
print(col(prnfail(p)))
print(" "*len(msg), end=' ')
if verbose > 1 and not (prn or prnfail):
print("recv:%i fail:%i" % tuple(map(len, res[:2])))
if store:
ans += res[0]
unans += res[1]
end=time.time()
if end-start < inter:
time.sleep(inter+start-end)
except KeyboardInterrupt:
pass
if verbose and n>0:
print(ct.normal("\nSent %i packets, received %i packets. %3.1f%% hits." % (n,r,100.0*r/n)))
return plist.SndRcvList(ans),plist.PacketList(unans)
@conf.commands.register
def srloop(pkts, *args, **kargs):
"""Send a packet at layer 3 in loop and print the answer each time
srloop(pkts, [prn], [inter], [count], ...) --> None"""
return __sr_loop(sr, pkts, *args, **kargs)
@conf.commands.register
def srploop(pkts, *args, **kargs):
"""Send a packet at layer 2 in loop and print the answer each time
srloop(pkts, [prn], [inter], [count], ...) --> None"""
return __sr_loop(srp, pkts, *args, **kargs)
# SEND/RECV FLOOD METHODS
def sndrcvflood(pks, pkt, inter=0, verbose=None, chainCC=False, prn=lambda x: x):
if not verbose:
verbose = conf.verb
if not isinstance(pkt, Gen):
pkt = SetGen(pkt)
tobesent = [p for p in pkt]
stopevent = threading.Event()
count_packets = six.moves.queue.Queue()
def send_in_loop(tobesent, stopevent, count_packets=count_packets):
"""Infinite generator that produces the same packet until stopevent is triggered."""
while True:
for p in tobesent:
if stopevent.is_set():
raise StopIteration()
count_packets.put(0)
yield p
infinite_gen = send_in_loop(tobesent, stopevent)
    # We don't use _sndrcv_snd verbose (it messes the logs up as in a thread that ends after receiving)
thread = threading.Thread(
target=_sndrcv_snd,
args=(pks, None, inter, False, infinite_gen, stopevent),
)
thread.start()
hsent, ans, nbrecv, notans = _sndrcv_rcv(pks, tobesent, stopevent, 0, len(tobesent), verbose, chainCC, False)
thread.join()
remain = list(itertools.chain(*six.itervalues(hsent)))
# Apply prn
ans = [(x, prn(y)) for (x, y) in ans]
if verbose:
print("\nReceived %i packets, got %i answers, remaining %i packets. Sent a total of %i packets." % (nbrecv+len(ans), len(ans), notans, count_packets.qsize()))
count_packets.empty()
del count_packets
return plist.SndRcvList(ans), plist.PacketList(remain, "Unanswered")
@conf.commands.register
def srflood(x, promisc=None, filter=None, iface=None, nofilter=None, *args,**kargs):
"""Flood and receive packets at layer 3
prn: function applied to packets received
unique: only consider packets whose print
nofilter: put 1 to avoid use of BPF filters
filter: provide a BPF filter
iface: listen answers only on the given interface"""
s = conf.L3socket(promisc=promisc, filter=filter, iface=iface, nofilter=nofilter)
r=sndrcvflood(s,x,*args,**kargs)
s.close()
return r
@conf.commands.register
def sr1flood(x, promisc=None, filter=None, iface=None, nofilter=0, *args,**kargs):
"""Flood and receive packets at layer 3 and return only the first answer
prn: function applied to packets received
verbose: set verbosity level
nofilter: put 1 to avoid use of BPF filters
filter: provide a BPF filter
iface: listen answers only on the given interface"""
s=conf.L3socket(promisc=promisc, filter=filter, nofilter=nofilter, iface=iface)
ans, _ = sndrcvflood(s, x, *args, **kargs)
s.close()
if len(ans) > 0:
return ans[0][1]
else:
return None
@conf.commands.register
def srpflood(x, promisc=None, filter=None, iface=None, iface_hint=None, nofilter=None, *args,**kargs):
"""Flood and receive packets at layer 2
prn: function applied to packets received
unique: only consider packets whose print
nofilter: put 1 to avoid use of BPF filters
filter: provide a BPF filter
iface: listen answers only on the given interface"""
if iface is None and iface_hint is not None:
iface = conf.route.route(iface_hint)[0]
s = conf.L2socket(promisc=promisc, filter=filter, iface=iface, nofilter=nofilter)
r=sndrcvflood(s,x,*args,**kargs)
s.close()
return r
@conf.commands.register
def srp1flood(x, promisc=None, filter=None, iface=None, nofilter=0, *args,**kargs):
"""Flood and receive packets at layer 2 and return only the first answer
prn: function applied to packets received
verbose: set verbosity level
nofilter: put 1 to avoid use of BPF filters
filter: provide a BPF filter
iface: listen answers only on the given interface"""
s=conf.L2socket(promisc=promisc, filter=filter, nofilter=nofilter, iface=iface)
ans, _ = sndrcvflood(s, x, *args, **kargs)
s.close()
if len(ans) > 0:
return ans[0][1]
else:
return None
# SNIFF METHODS
@conf.commands.register
def sniff(count=0, store=True, offline=None, prn=None, lfilter=None,
L2socket=None, timeout=None, opened_socket=None,
stop_filter=None, iface=None, *arg, **karg):
"""
Sniff packets and return a list of packets.
Arguments:
count: number of packets to capture. 0 means infinity.
store: whether to store sniffed packets or discard them
prn: function to apply to each packet. If something is returned, it
is displayed.
Ex: prn = lambda x: x.summary()
filter: BPF filter to apply.
lfilter: Python function applied to each packet to determine if
further action may be done.
Ex: lfilter = lambda x: x.haslayer(Padding)
offline: PCAP file (or list of PCAP files) to read packets from,
instead of sniffing them
timeout: stop sniffing after a given time (default: None).
L2socket: use the provided L2socket (default: use conf.L2listen).
opened_socket: provide an object (or a list of objects) ready to use
.recv() on.
stop_filter: Python function applied to each packet to determine if
we have to stop the capture after this packet.
Ex: stop_filter = lambda x: x.haslayer(TCP)
iface: interface or list of interfaces (default: None for sniffing
on all interfaces).
The iface, offline and opened_socket parameters can be either an
element, a list of elements, or a dict object mapping an element to a
label (see examples below).
Examples:
>>> sniff(filter="arp")
>>> sniff(lfilter=lambda pkt: ARP in pkt)
>>> sniff(iface="eth0", prn=Packet.summary)
>>> sniff(iface=["eth0", "mon0"],
... prn=lambda pkt: "%s: %s" % (pkt.sniffed_on,
... pkt.summary()))
>>> sniff(iface={"eth0": "Ethernet", "mon0": "Wifi"},
... prn=lambda pkt: "%s: %s" % (pkt.sniffed_on,
... pkt.summary()))
"""
c = 0
sniff_sockets = {} # socket: label dict
if opened_socket is not None:
if isinstance(opened_socket, list):
sniff_sockets.update((s, "socket%d" % i)
for i, s in enumerate(opened_socket))
elif isinstance(opened_socket, dict):
sniff_sockets.update((s, label)
for s, label in six.iteritems(opened_socket))
else:
sniff_sockets[opened_socket] = "socket0"
if offline is not None:
flt = karg.get('filter')
if isinstance(offline, list):
sniff_sockets.update((PcapReader(
fname if flt is None else
tcpdump(fname, args=["-w", "-", flt], getfd=True)
), fname) for fname in offline)
elif isinstance(offline, dict):
sniff_sockets.update((PcapReader(
fname if flt is None else
tcpdump(fname, args=["-w", "-", flt], getfd=True)
), label) for fname, label in six.iteritems(offline))
else:
sniff_sockets[PcapReader(
offline if flt is None else
tcpdump(offline, args=["-w", "-", flt], getfd=True)
)] = offline
if not sniff_sockets or iface is not None:
if L2socket is None:
L2socket = conf.L2listen
if isinstance(iface, list):
sniff_sockets.update(
(L2socket(type=ETH_P_ALL, iface=ifname, *arg, **karg), ifname)
for ifname in iface
)
elif isinstance(iface, dict):
sniff_sockets.update(
(L2socket(type=ETH_P_ALL, iface=ifname, *arg, **karg), iflabel)
for ifname, iflabel in six.iteritems(iface)
)
else:
sniff_sockets[L2socket(type=ETH_P_ALL, iface=iface,
*arg, **karg)] = iface
lst = []
if timeout is not None:
stoptime = time.time()+timeout
remain = None
read_allowed_exceptions = ()
if conf.use_bpf:
from scapy.arch.bpf.supersocket import bpf_select
def _select(sockets):
return bpf_select(sockets, remain)
elif WINDOWS:
from scapy.arch.pcapdnet import PcapTimeoutElapsed
read_allowed_exceptions = (PcapTimeoutElapsed,)
def _select(sockets):
try:
return sockets
except PcapTimeoutElapsed:
return []
else:
def _select(sockets):
try:
return select(sockets, [], [], remain)[0]
except select_error as exc:
# Catch 'Interrupted system call' errors
if exc[0] == errno.EINTR:
return []
raise
try:
while sniff_sockets:
if timeout is not None:
remain = stoptime-time.time()
if remain <= 0:
break
ins = _select(sniff_sockets)
for s in ins:
try:
p = s.recv()
except read_allowed_exceptions:
continue
if p is None:
del sniff_sockets[s]
break
if lfilter and not lfilter(p):
continue
p.sniffed_on = sniff_sockets[s]
if store:
lst.append(p)
c += 1
if prn:
r = prn(p)
if r is not None:
print(r)
if stop_filter and stop_filter(p):
sniff_sockets = []
break
if 0 < count <= c:
sniff_sockets = []
break
except KeyboardInterrupt:
pass
if opened_socket is None:
for s in sniff_sockets:
s.close()
return plist.PacketList(lst,"Sniffed")
@conf.commands.register
def bridge_and_sniff(if1, if2, xfrm12=None, xfrm21=None, prn=None, L2socket=None,
*args, **kargs):
"""Forward traffic between interfaces if1 and if2, sniff and return
the exchanged packets.
Arguments:
if1, if2: the interfaces to use (interface names or opened sockets).
xfrm12: a function to call when forwarding a packet from if1 to
    if2. If it returns True, the packet is forwarded as is. If it
    returns False or None, the packet is discarded. If it returns a
    packet, this packet is forwarded instead of the original
    one.
xfrm21: same as xfrm12 for packets forwarded from if2 to if1.
    The other arguments are the same as for the function sniff(),
except for offline, opened_socket and iface that are ignored.
See help(sniff) for more.
"""
for arg in ['opened_socket', 'offline', 'iface']:
if arg in kargs:
log_runtime.warning("Argument %s cannot be used in "
"bridge_and_sniff() -- ignoring it.", arg)
del kargs[arg]
def _init_socket(iface, count):
if isinstance(iface, SuperSocket):
return iface, "iface%d" % count
else:
return (L2socket or conf.L2socket)(iface=iface), iface
sckt1, if1 = _init_socket(if1, 1)
sckt2, if2 = _init_socket(if2, 2)
peers = {if1: sckt2, if2: sckt1}
xfrms = {}
if xfrm12 is not None:
xfrms[if1] = xfrm12
if xfrm21 is not None:
xfrms[if2] = xfrm21
def prn_send(pkt):
try:
sendsock = peers[pkt.sniffed_on]
except KeyError:
return
if pkt.sniffed_on in xfrms:
try:
newpkt = xfrms[pkt.sniffed_on](pkt)
except:
log_runtime.warning(
'Exception in transformation function for packet [%s] '
'received on %s -- dropping',
pkt.summary(), pkt.sniffed_on, exc_info=True
)
return
else:
if newpkt is True:
newpkt = pkt.original
elif not newpkt:
return
else:
newpkt = pkt.original
try:
sendsock.send(newpkt)
except:
log_runtime.warning('Cannot forward packet [%s] received on %s',
pkt.summary(), pkt.sniffed_on, exc_info=True)
if prn is None:
prn = prn_send
else:
prn_orig = prn
def prn(pkt):
prn_send(pkt)
return prn_orig(pkt)
return sniff(opened_socket={sckt1: if1, sckt2: if2}, prn=prn,
*args, **kargs)
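# Illustrative sketch, not part of the original module: bridging two interfaces
# and rewriting the destination of IP packets travelling from if1 to if2. The
# interface names and address are placeholders.
#
#   def nat12(pkt):
#       if IP in pkt:
#           pkt[IP].dst = "192.0.2.2"
#           return pkt      # forward the modified packet
#       return True         # forward the packet unchanged
#
#   bridge_and_sniff("eth0", "eth1", xfrm12=nat12)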
@conf.commands.register
def tshark(*args,**kargs):
"""Sniff packets and print them calling pkt.show(), a bit like text wireshark"""
sniff(prn=lambda x: x.display(),*args,**kargs)
| 1 | 11,694 | Did you try using `global` with a regular integer ? | secdev-scapy | py |
@@ -76,7 +76,7 @@ int Contractor::Run()
#ifdef WIN32
#pragma message("Memory consumption on Windows can be higher due to different bit packing")
#else
- static_assert(sizeof(extractor::NodeBasedEdge) == 20,
+ static_assert(sizeof(extractor::NodeBasedEdge) == 24,
"changing extractor::NodeBasedEdge type has influence on memory consumption!");
static_assert(sizeof(extractor::EdgeBasedEdge) == 16,
"changing EdgeBasedEdge type has influence on memory consumption!"); | 1 | #include "contractor/contractor.hpp"
#include "contractor/crc32_processor.hpp"
#include "contractor/graph_contractor.hpp"
#include "extractor/compressed_edge_container.hpp"
#include "extractor/node_based_edge.hpp"
#include "util/exception.hpp"
#include "util/graph_loader.hpp"
#include "util/integer_range.hpp"
#include "util/io.hpp"
#include "util/simple_logger.hpp"
#include "util/static_graph.hpp"
#include "util/static_rtree.hpp"
#include "util/string_util.hpp"
#include "util/timing_util.hpp"
#include "util/typedefs.hpp"
#include <boost/assert.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/functional/hash.hpp>
#include <boost/interprocess/file_mapping.hpp>
#include <boost/interprocess/mapped_region.hpp>
#include <boost/spirit/include/qi.hpp>
#include <tbb/blocked_range.h>
#include <tbb/concurrent_unordered_map.h>
#include <tbb/enumerable_thread_specific.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_for_each.h>
#include <tbb/parallel_invoke.h>
#include <tbb/parallel_sort.h>
#include <tbb/spin_mutex.h>
#include <algorithm>
#include <bitset>
#include <cstdint>
#include <fstream>
#include <iterator>
#include <memory>
#include <thread>
#include <tuple>
#include <vector>
namespace std
{
template <> struct hash<std::pair<OSMNodeID, OSMNodeID>>
{
std::size_t operator()(const std::pair<OSMNodeID, OSMNodeID> &k) const noexcept
{
return static_cast<uint64_t>(k.first) ^ (static_cast<uint64_t>(k.second) << 12);
}
};
template <> struct hash<std::tuple<OSMNodeID, OSMNodeID, OSMNodeID>>
{
std::size_t operator()(const std::tuple<OSMNodeID, OSMNodeID, OSMNodeID> &k) const noexcept
{
std::size_t seed = 0;
boost::hash_combine(seed, static_cast<uint64_t>(std::get<0>(k)));
boost::hash_combine(seed, static_cast<uint64_t>(std::get<1>(k)));
boost::hash_combine(seed, static_cast<uint64_t>(std::get<2>(k)));
return seed;
}
};
}
namespace osrm
{
namespace contractor
{
int Contractor::Run()
{
#ifdef WIN32
#pragma message("Memory consumption on Windows can be higher due to different bit packing")
#else
static_assert(sizeof(extractor::NodeBasedEdge) == 20,
"changing extractor::NodeBasedEdge type has influence on memory consumption!");
static_assert(sizeof(extractor::EdgeBasedEdge) == 16,
"changing EdgeBasedEdge type has influence on memory consumption!");
#endif
if (config.core_factor > 1.0 || config.core_factor < 0)
{
throw util::exception("Core factor must be between 0.0 to 1.0 (inclusive)");
}
TIMER_START(preparing);
util::SimpleLogger().Write() << "Loading edge-expanded graph representation";
util::DeallocatingVector<extractor::EdgeBasedEdge> edge_based_edge_list;
EdgeID max_edge_id = LoadEdgeExpandedGraph(config.edge_based_graph_path,
edge_based_edge_list,
config.edge_segment_lookup_path,
config.edge_penalty_path,
config.segment_speed_lookup_paths,
config.turn_penalty_lookup_paths,
config.node_based_graph_path,
config.geometry_path,
config.datasource_names_path,
config.datasource_indexes_path,
config.rtree_leaf_path);
// Contracting the edge-expanded graph
TIMER_START(contraction);
std::vector<bool> is_core_node;
std::vector<float> node_levels;
if (config.use_cached_priority)
{
ReadNodeLevels(node_levels);
}
util::SimpleLogger().Write() << "Reading node weights.";
std::vector<EdgeWeight> node_weights;
std::string node_file_name = config.osrm_input_path.string() + ".enw";
if (util::deserializeVector(node_file_name, node_weights))
{
util::SimpleLogger().Write() << "Done reading node weights.";
}
else
{
throw util::exception("Failed reading node weights.");
}
util::DeallocatingVector<QueryEdge> contracted_edge_list;
ContractGraph(max_edge_id,
edge_based_edge_list,
contracted_edge_list,
std::move(node_weights),
is_core_node,
node_levels);
TIMER_STOP(contraction);
util::SimpleLogger().Write() << "Contraction took " << TIMER_SEC(contraction) << " sec";
std::size_t number_of_used_edges = WriteContractedGraph(max_edge_id, contracted_edge_list);
WriteCoreNodeMarker(std::move(is_core_node));
if (!config.use_cached_priority)
{
WriteNodeLevels(std::move(node_levels));
}
TIMER_STOP(preparing);
util::SimpleLogger().Write() << "Preprocessing : " << TIMER_SEC(preparing) << " seconds";
util::SimpleLogger().Write() << "Contraction: " << ((max_edge_id + 1) / TIMER_SEC(contraction))
<< " nodes/sec and "
<< number_of_used_edges / TIMER_SEC(contraction) << " edges/sec";
util::SimpleLogger().Write() << "finished preprocessing";
return 0;
}
// Utilities for LoadEdgeExpandedGraph to restore my sanity
namespace
{
struct Segment final
{
OSMNodeID from, to;
};
struct SpeedSource final
{
unsigned speed;
std::uint8_t source;
};
struct SegmentSpeedSource final
{
Segment segment;
SpeedSource speed_source;
};
using SegmentSpeedSourceFlatMap = std::vector<SegmentSpeedSource>;
// Binary Search over a flattened key,val Segment storage
SegmentSpeedSourceFlatMap::iterator find(SegmentSpeedSourceFlatMap &map, const Segment &key)
{
const auto last = end(map);
const auto by_segment = [](const SegmentSpeedSource &lhs, const SegmentSpeedSource &rhs) {
return std::tie(lhs.segment.from, lhs.segment.to) >
std::tie(rhs.segment.from, rhs.segment.to);
};
auto it = std::lower_bound(begin(map), last, SegmentSpeedSource{key, {0, 0}}, by_segment);
if (it != last && (std::tie(it->segment.from, it->segment.to) == std::tie(key.from, key.to)))
return it;
return last;
}
// Convenience aliases. TODO: make actual types at some point in time.
// TODO: turn penalties need flat map + binary search optimization, take a look at segment speeds
using Turn = std::tuple<OSMNodeID, OSMNodeID, OSMNodeID>;
using TurnHasher = std::hash<Turn>;
using PenaltySource = std::pair<double, std::uint8_t>;
using TurnPenaltySourceMap = tbb::concurrent_unordered_map<Turn, PenaltySource, TurnHasher>;
// Functions for parsing files and creating lookup tables
SegmentSpeedSourceFlatMap
parse_segment_lookup_from_csv_files(const std::vector<std::string> &segment_speed_filenames)
{
// TODO: shares code with turn penalty lookup parse function
using Mutex = tbb::spin_mutex;
// Loaded and parsed in parallel, at the end we combine results in a flattened map-ish view
SegmentSpeedSourceFlatMap flatten;
Mutex flatten_mutex;
const auto parse_segment_speed_file = [&](const std::size_t idx) {
const auto file_id = idx + 1; // starts at one, zero means we assigned the weight
const auto filename = segment_speed_filenames[idx];
std::ifstream segment_speed_file{filename, std::ios::binary};
if (!segment_speed_file)
throw util::exception{"Unable to open segment speed file " + filename};
SegmentSpeedSourceFlatMap local;
std::uint64_t from_node_id{};
std::uint64_t to_node_id{};
unsigned speed{};
for (std::string line; std::getline(segment_speed_file, line);)
{
using namespace boost::spirit::qi;
auto it = begin(line);
const auto last = end(line);
// The ulong_long -> uint64_t will likely break on 32bit platforms
const auto ok = parse(it,
last, //
(ulong_long >> ',' >> ulong_long >> ',' >> uint_), //
from_node_id,
to_node_id,
speed); //
if (!ok || it != last)
throw util::exception{"Segment speed file " + filename + " malformed"};
SegmentSpeedSource val{
{static_cast<OSMNodeID>(from_node_id), static_cast<OSMNodeID>(to_node_id)},
{speed, static_cast<std::uint8_t>(file_id)}};
local.push_back(std::move(val));
}
util::SimpleLogger().Write() << "Loaded speed file " << filename << " with " << local.size()
<< " speeds";
{
Mutex::scoped_lock _{flatten_mutex};
flatten.insert(end(flatten),
std::make_move_iterator(begin(local)),
std::make_move_iterator(end(local)));
}
};
tbb::parallel_for(std::size_t{0}, segment_speed_filenames.size(), parse_segment_speed_file);
// With flattened map-ish view of all the files, sort and unique them on from,to,source
    // The greater '>' is used here since we want to give files listed later higher precedence
const auto sort_by = [](const SegmentSpeedSource &lhs, const SegmentSpeedSource &rhs) {
return std::tie(lhs.segment.from, lhs.segment.to, lhs.speed_source.source) >
std::tie(rhs.segment.from, rhs.segment.to, rhs.speed_source.source);
};
std::stable_sort(begin(flatten), end(flatten), sort_by);
// Unique only on from,to to take the source precedence into account and remove duplicates
const auto unique_by = [](const SegmentSpeedSource &lhs, const SegmentSpeedSource &rhs) {
return std::tie(lhs.segment.from, lhs.segment.to) ==
std::tie(rhs.segment.from, rhs.segment.to);
};
const auto it = std::unique(begin(flatten), end(flatten), unique_by);
flatten.erase(it, end(flatten));
util::SimpleLogger().Write() << "In total loaded " << segment_speed_filenames.size()
<< " speed file(s) with a total of " << flatten.size()
<< " unique values";
return flatten;
}
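// Illustrative note, not part of the original file: the parser above expects plain
// CSV rows of the form "<from OSM node id>,<to OSM node id>,<speed in km/h>", one
// segment per line, e.g.
//
//   8024919,1745461,27
//   1745461,8024919,27
//
// and when the same from/to pair occurs in several files, the stable_sort/unique
// pass keeps the value from the file listed last (higher file_id wins).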
TurnPenaltySourceMap
parse_turn_penalty_lookup_from_csv_files(const std::vector<std::string> &turn_penalty_filenames)
{
    // TODO: shares code with the segment speed lookup parse function
TurnPenaltySourceMap map;
const auto parse_turn_penalty_file = [&](const std::size_t idx) {
const auto file_id = idx + 1; // starts at one, zero means we assigned the weight
const auto filename = turn_penalty_filenames[idx];
std::ifstream turn_penalty_file{filename, std::ios::binary};
if (!turn_penalty_file)
throw util::exception{"Unable to open turn penalty file " + filename};
std::uint64_t from_node_id{};
std::uint64_t via_node_id{};
std::uint64_t to_node_id{};
double penalty{};
for (std::string line; std::getline(turn_penalty_file, line);)
{
using namespace boost::spirit::qi;
auto it = begin(line);
const auto last = end(line);
// The ulong_long -> uint64_t will likely break on 32bit platforms
const auto ok =
parse(it,
last, //
(ulong_long >> ',' >> ulong_long >> ',' >> ulong_long >> ',' >> double_), //
from_node_id,
via_node_id,
to_node_id,
penalty); //
if (!ok || it != last)
throw util::exception{"Turn penalty file " + filename + " malformed"};
map[std::make_tuple(
OSMNodeID(from_node_id), OSMNodeID(via_node_id), OSMNodeID(to_node_id))] =
std::make_pair(penalty, file_id);
}
};
tbb::parallel_for(std::size_t{0}, turn_penalty_filenames.size(), parse_turn_penalty_file);
return map;
}
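// Illustrative note, not part of the original file: turn penalty rows are parsed as
// "<from OSM node>,<via OSM node>,<to OSM node>,<penalty in seconds>", e.g.
//
//   1745461,8024919,4520759,2.5
//
// and the double-valued penalty is later multiplied by 10 when it is folded into
// the edge weight (see LoadEdgeExpandedGraph below).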
} // anon ns
EdgeID Contractor::LoadEdgeExpandedGraph(
std::string const &edge_based_graph_filename,
util::DeallocatingVector<extractor::EdgeBasedEdge> &edge_based_edge_list,
const std::string &edge_segment_lookup_filename,
const std::string &edge_penalty_filename,
const std::vector<std::string> &segment_speed_filenames,
const std::vector<std::string> &turn_penalty_filenames,
const std::string &nodes_filename,
const std::string &geometry_filename,
const std::string &datasource_names_filename,
const std::string &datasource_indexes_filename,
const std::string &rtree_leaf_filename)
{
if (segment_speed_filenames.size() > 255 || turn_penalty_filenames.size() > 255)
throw util::exception("Limit of 255 segment speed and turn penalty files each reached");
util::SimpleLogger().Write() << "Opening " << edge_based_graph_filename;
boost::filesystem::ifstream input_stream(edge_based_graph_filename, std::ios::binary);
if (!input_stream)
throw util::exception("Could not load edge based graph file");
const bool update_edge_weights = !segment_speed_filenames.empty();
const bool update_turn_penalties = !turn_penalty_filenames.empty();
boost::filesystem::ifstream edge_segment_input_stream;
boost::filesystem::ifstream edge_fixed_penalties_input_stream;
if (update_edge_weights || update_turn_penalties)
{
edge_segment_input_stream.open(edge_segment_lookup_filename, std::ios::binary);
edge_fixed_penalties_input_stream.open(edge_penalty_filename, std::ios::binary);
if (!edge_segment_input_stream || !edge_fixed_penalties_input_stream)
{
throw util::exception("Could not load .edge_segment_lookup or .edge_penalties, did you "
"run osrm-extract with '--generate-edge-lookup'?");
}
}
const util::FingerPrint fingerprint_valid = util::FingerPrint::GetValid();
util::FingerPrint fingerprint_loaded;
input_stream.read((char *)&fingerprint_loaded, sizeof(util::FingerPrint));
fingerprint_loaded.TestContractor(fingerprint_valid);
std::uint64_t number_of_edges = 0;
EdgeID max_edge_id = SPECIAL_EDGEID;
input_stream.read((char *)&number_of_edges, sizeof(number_of_edges));
input_stream.read((char *)&max_edge_id, sizeof(max_edge_id));
edge_based_edge_list.resize(number_of_edges);
util::SimpleLogger().Write() << "Reading " << number_of_edges
<< " edges from the edge based graph";
SegmentSpeedSourceFlatMap segment_speed_lookup;
TurnPenaltySourceMap turn_penalty_lookup;
const auto parse_segment_speeds = [&] {
if (update_edge_weights)
segment_speed_lookup = parse_segment_lookup_from_csv_files(segment_speed_filenames);
};
const auto parse_turn_penalties = [&] {
if (update_turn_penalties)
turn_penalty_lookup = parse_turn_penalty_lookup_from_csv_files(turn_penalty_filenames);
};
// If we update the edge weights, this file will hold the datasource information for each
// segment; the other files will also be conditionally filled concurrently if we make an update
std::vector<uint8_t> m_geometry_datasource;
std::vector<extractor::QueryNode> internal_to_external_node_map;
std::vector<unsigned> m_geometry_indices;
std::vector<extractor::CompressedEdgeContainer::CompressedEdge> m_geometry_list;
const auto maybe_load_internal_to_external_node_map = [&] {
if (!(update_edge_weights || update_turn_penalties))
return;
boost::filesystem::ifstream nodes_input_stream(nodes_filename, std::ios::binary);
if (!nodes_input_stream)
{
throw util::exception("Failed to open " + nodes_filename);
}
unsigned number_of_nodes = 0;
nodes_input_stream.read((char *)&number_of_nodes, sizeof(unsigned));
internal_to_external_node_map.resize(number_of_nodes);
// Load all the query nodes into a vector
nodes_input_stream.read(reinterpret_cast<char *>(&(internal_to_external_node_map[0])),
number_of_nodes * sizeof(extractor::QueryNode));
};
const auto maybe_load_geometries = [&] {
if (!(update_edge_weights || update_turn_penalties))
return;
std::ifstream geometry_stream(geometry_filename, std::ios::binary);
if (!geometry_stream)
{
throw util::exception("Failed to open " + geometry_filename);
}
unsigned number_of_indices = 0;
unsigned number_of_compressed_geometries = 0;
geometry_stream.read((char *)&number_of_indices, sizeof(unsigned));
m_geometry_indices.resize(number_of_indices);
if (number_of_indices > 0)
{
geometry_stream.read((char *)&(m_geometry_indices[0]),
number_of_indices * sizeof(unsigned));
}
geometry_stream.read((char *)&number_of_compressed_geometries, sizeof(unsigned));
BOOST_ASSERT(m_geometry_indices.back() == number_of_compressed_geometries);
m_geometry_list.resize(number_of_compressed_geometries);
if (number_of_compressed_geometries > 0)
{
geometry_stream.read((char *)&(m_geometry_list[0]),
number_of_compressed_geometries *
sizeof(extractor::CompressedEdgeContainer::CompressedEdge));
}
};
// Folds all our actions into independently concurrently executing lambdas
tbb::parallel_invoke(parse_segment_speeds,
parse_turn_penalties, //
maybe_load_internal_to_external_node_map,
maybe_load_geometries);
if (update_edge_weights || update_turn_penalties)
{
// Here, we have to update the compressed geometry weights
// First, we need the external-to-internal node lookup table
// This is a list of the "data source id" for every segment in the compressed
// geometry container. We assume that everything so far has come from the
// profile (data source 0). Here, we replace the 0's with the index of the
// CSV file that supplied the value that gets used for that segment, then
// we write out this list so that it can be returned by the debugging
// vector tiles later on.
m_geometry_datasource.resize(m_geometry_list.size(), 0);
// Now, we iterate over all the segments stored in the StaticRTree, updating
// the packed geometry weights in the `.geometries` file (note: we do not
// update the RTree itself, we just use the leaf nodes to iterate over all segments)
using LeafNode = util::StaticRTree<extractor::EdgeBasedNode>::LeafNode;
using boost::interprocess::file_mapping;
using boost::interprocess::mapped_region;
using boost::interprocess::read_only;
const file_mapping mapping{rtree_leaf_filename.c_str(), read_only};
mapped_region region{mapping, read_only};
region.advise(mapped_region::advice_willneed);
const auto bytes = region.get_size();
const auto first = static_cast<const LeafNode *>(region.get_address());
const auto last = first + (bytes / sizeof(LeafNode));
// vector to count used speeds for logging
// size offset by one since index 0 is used for speeds not from external file
using counters_type = std::vector<std::size_t>;
std::size_t num_counters = segment_speed_filenames.size() + 1;
tbb::enumerable_thread_specific<counters_type> segment_speeds_counters(
counters_type(num_counters, 0));
const constexpr auto LUA_SOURCE = 0;
tbb::parallel_for_each(first, last, [&](const LeafNode ¤t_node) {
auto &counters = segment_speeds_counters.local();
for (size_t i = 0; i < current_node.object_count; i++)
{
const auto &leaf_object = current_node.objects[i];
extractor::QueryNode *u;
extractor::QueryNode *v;
if (leaf_object.forward_packed_geometry_id != SPECIAL_EDGEID)
{
const unsigned forward_begin =
m_geometry_indices.at(leaf_object.forward_packed_geometry_id);
if (leaf_object.fwd_segment_position == 0)
{
u = &(internal_to_external_node_map[leaf_object.u]);
v = &(
internal_to_external_node_map[m_geometry_list[forward_begin].node_id]);
}
else
{
u = &(internal_to_external_node_map
[m_geometry_list[forward_begin +
leaf_object.fwd_segment_position - 1]
.node_id]);
v = &(internal_to_external_node_map
[m_geometry_list[forward_begin + leaf_object.fwd_segment_position]
.node_id]);
}
const double segment_length = util::coordinate_calculation::greatCircleDistance(
util::Coordinate{u->lon, u->lat}, util::Coordinate{v->lon, v->lat});
auto forward_speed_iter =
find(segment_speed_lookup, Segment{u->node_id, v->node_id});
if (forward_speed_iter != segment_speed_lookup.end())
{
int new_segment_weight =
std::max(1,
static_cast<int>(std::floor(
(segment_length * 10.) /
(forward_speed_iter->speed_source.speed / 3.6) +
.5)));
m_geometry_list[forward_begin + leaf_object.fwd_segment_position].weight =
new_segment_weight;
m_geometry_datasource[forward_begin + leaf_object.fwd_segment_position] =
forward_speed_iter->speed_source.source;
// count statistics for logging
counters[forward_speed_iter->speed_source.source] += 1;
}
else
{
// count statistics for logging
counters[LUA_SOURCE] += 1;
}
}
if (leaf_object.reverse_packed_geometry_id != SPECIAL_EDGEID)
{
const unsigned reverse_begin =
m_geometry_indices.at(leaf_object.reverse_packed_geometry_id);
const unsigned reverse_end =
m_geometry_indices.at(leaf_object.reverse_packed_geometry_id + 1);
int rev_segment_position =
(reverse_end - reverse_begin) - leaf_object.fwd_segment_position - 1;
if (rev_segment_position == 0)
{
u = &(internal_to_external_node_map[leaf_object.v]);
v = &(
internal_to_external_node_map[m_geometry_list[reverse_begin].node_id]);
}
else
{
u = &(
internal_to_external_node_map[m_geometry_list[reverse_begin +
rev_segment_position - 1]
.node_id]);
v = &(internal_to_external_node_map
[m_geometry_list[reverse_begin + rev_segment_position].node_id]);
}
const double segment_length = util::coordinate_calculation::greatCircleDistance(
util::Coordinate{u->lon, u->lat}, util::Coordinate{v->lon, v->lat});
auto reverse_speed_iter =
find(segment_speed_lookup, Segment{u->node_id, v->node_id});
if (reverse_speed_iter != segment_speed_lookup.end())
{
int new_segment_weight =
std::max(1,
static_cast<int>(std::floor(
(segment_length * 10.) /
(reverse_speed_iter->speed_source.speed / 3.6) +
.5)));
m_geometry_list[reverse_begin + rev_segment_position].weight =
new_segment_weight;
m_geometry_datasource[reverse_begin + rev_segment_position] =
reverse_speed_iter->speed_source.source;
// count statistics for logging
counters[reverse_speed_iter->speed_source.source] += 1;
}
else
{
// count statistics for logging
counters[LUA_SOURCE] += 1;
}
}
}
}); // parallel_for_each
counters_type merged_counters(num_counters, 0);
for (const auto &counters : segment_speeds_counters)
{
for (std::size_t i = 0; i < counters.size(); i++)
{
merged_counters[i] += counters[i];
}
}
for (std::size_t i = 0; i < merged_counters.size(); i++)
{
if (i == LUA_SOURCE)
{
util::SimpleLogger().Write() << "Used " << merged_counters[LUA_SOURCE]
<< " speeds from LUA profile or input map";
}
else
{
                // segment_speeds_counters has 0 as LUA, segment_speed_filenames does not, thus we need
                // to subtract 1 to avoid an off-by-one error
util::SimpleLogger().Write() << "Used " << merged_counters[i] << " speeds from "
<< segment_speed_filenames[i - 1];
}
}
}
const auto maybe_save_geometries = [&] {
if (!(update_edge_weights || update_turn_penalties))
return;
// Now save out the updated compressed geometries
std::ofstream geometry_stream(geometry_filename, std::ios::binary);
if (!geometry_stream)
{
throw util::exception("Failed to open " + geometry_filename + " for writing");
}
const unsigned number_of_indices = m_geometry_indices.size();
const unsigned number_of_compressed_geometries = m_geometry_list.size();
geometry_stream.write(reinterpret_cast<const char *>(&number_of_indices), sizeof(unsigned));
geometry_stream.write(reinterpret_cast<char *>(&(m_geometry_indices[0])),
number_of_indices * sizeof(unsigned));
geometry_stream.write(reinterpret_cast<const char *>(&number_of_compressed_geometries),
sizeof(unsigned));
geometry_stream.write(reinterpret_cast<char *>(&(m_geometry_list[0])),
number_of_compressed_geometries *
sizeof(extractor::CompressedEdgeContainer::CompressedEdge));
};
const auto save_datasource_indexes = [&] {
std::ofstream datasource_stream(datasource_indexes_filename, std::ios::binary);
if (!datasource_stream)
{
throw util::exception("Failed to open " + datasource_indexes_filename + " for writing");
}
std::uint64_t number_of_datasource_entries = m_geometry_datasource.size();
datasource_stream.write(reinterpret_cast<const char *>(&number_of_datasource_entries),
sizeof(number_of_datasource_entries));
if (number_of_datasource_entries > 0)
{
datasource_stream.write(reinterpret_cast<char *>(&(m_geometry_datasource[0])),
number_of_datasource_entries * sizeof(uint8_t));
}
};
const auto save_datastore_names = [&] {
std::ofstream datasource_stream(datasource_names_filename, std::ios::binary);
if (!datasource_stream)
{
throw util::exception("Failed to open " + datasource_names_filename + " for writing");
}
datasource_stream << "lua profile" << std::endl;
for (auto const &name : segment_speed_filenames)
{
// Only write the filename, without path or extension.
// This prevents information leakage, and keeps names short
// for rendering in the debug tiles.
const boost::filesystem::path p(name);
datasource_stream << p.stem().string() << std::endl;
}
};
tbb::parallel_invoke(maybe_save_geometries, save_datasource_indexes, save_datastore_names);
// TODO: can we read this in bulk? util::DeallocatingVector isn't necessarily
// all stored contiguously
for (; number_of_edges > 0; --number_of_edges)
{
extractor::EdgeBasedEdge inbuffer;
input_stream.read((char *)&inbuffer, sizeof(extractor::EdgeBasedEdge));
if (update_edge_weights || update_turn_penalties)
{
// Processing-time edge updates
unsigned fixed_penalty;
edge_fixed_penalties_input_stream.read(reinterpret_cast<char *>(&fixed_penalty),
sizeof(fixed_penalty));
int new_weight = 0;
unsigned num_osm_nodes = 0;
edge_segment_input_stream.read(reinterpret_cast<char *>(&num_osm_nodes),
sizeof(num_osm_nodes));
OSMNodeID previous_osm_node_id;
edge_segment_input_stream.read(reinterpret_cast<char *>(&previous_osm_node_id),
sizeof(previous_osm_node_id));
OSMNodeID this_osm_node_id;
double segment_length;
int segment_weight;
int compressed_edge_nodes = static_cast<int>(num_osm_nodes);
--num_osm_nodes;
for (; num_osm_nodes != 0; --num_osm_nodes)
{
edge_segment_input_stream.read(reinterpret_cast<char *>(&this_osm_node_id),
sizeof(this_osm_node_id));
edge_segment_input_stream.read(reinterpret_cast<char *>(&segment_length),
sizeof(segment_length));
edge_segment_input_stream.read(reinterpret_cast<char *>(&segment_weight),
sizeof(segment_weight));
auto speed_iter =
find(segment_speed_lookup, Segment{previous_osm_node_id, this_osm_node_id});
if (speed_iter != segment_speed_lookup.end())
{
// This sets the segment weight using the same formula as the
// EdgeBasedGraphFactory for consistency. The *why* of this formula
// is lost in the annals of time.
int new_segment_weight = std::max(
1,
static_cast<int>(std::floor(
(segment_length * 10.) / (speed_iter->speed_source.speed / 3.6) + .5)));
new_weight += new_segment_weight;
}
else
{
// If no lookup found, use the original weight value for this segment
new_weight += segment_weight;
}
previous_osm_node_id = this_osm_node_id;
}
OSMNodeID from_id;
OSMNodeID via_id;
OSMNodeID to_id;
edge_fixed_penalties_input_stream.read(reinterpret_cast<char *>(&from_id),
sizeof(from_id));
edge_fixed_penalties_input_stream.read(reinterpret_cast<char *>(&via_id),
sizeof(via_id));
edge_fixed_penalties_input_stream.read(reinterpret_cast<char *>(&to_id), sizeof(to_id));
const auto turn_iter =
turn_penalty_lookup.find(std::make_tuple(from_id, via_id, to_id));
if (turn_iter != turn_penalty_lookup.end())
{
int new_turn_weight = static_cast<int>(turn_iter->second.first * 10);
if (new_turn_weight + new_weight < compressed_edge_nodes)
{
util::SimpleLogger().Write(logWARNING)
<< "turn penalty " << turn_iter->second.first << " for turn " << from_id
<< ", " << via_id << ", " << to_id
<< " is too negative: clamping turn weight to " << compressed_edge_nodes;
}
inbuffer.weight = std::max(new_turn_weight + new_weight, compressed_edge_nodes);
}
else
{
inbuffer.weight = fixed_penalty + new_weight;
}
}
edge_based_edge_list.emplace_back(std::move(inbuffer));
}
util::SimpleLogger().Write() << "Done reading edges";
return max_edge_id;
}
void Contractor::ReadNodeLevels(std::vector<float> &node_levels) const
{
boost::filesystem::ifstream order_input_stream(config.level_output_path, std::ios::binary);
unsigned level_size;
order_input_stream.read((char *)&level_size, sizeof(unsigned));
node_levels.resize(level_size);
order_input_stream.read((char *)node_levels.data(), sizeof(float) * node_levels.size());
}
void Contractor::WriteNodeLevels(std::vector<float> &&in_node_levels) const
{
std::vector<float> node_levels(std::move(in_node_levels));
boost::filesystem::ofstream order_output_stream(config.level_output_path, std::ios::binary);
unsigned level_size = node_levels.size();
order_output_stream.write((char *)&level_size, sizeof(unsigned));
order_output_stream.write((char *)node_levels.data(), sizeof(float) * node_levels.size());
}
void Contractor::WriteCoreNodeMarker(std::vector<bool> &&in_is_core_node) const
{
std::vector<bool> is_core_node(std::move(in_is_core_node));
std::vector<char> unpacked_bool_flags(std::move(is_core_node.size()));
for (auto i = 0u; i < is_core_node.size(); ++i)
{
unpacked_bool_flags[i] = is_core_node[i] ? 1 : 0;
}
boost::filesystem::ofstream core_marker_output_stream(config.core_output_path,
std::ios::binary);
unsigned size = unpacked_bool_flags.size();
core_marker_output_stream.write((char *)&size, sizeof(unsigned));
core_marker_output_stream.write((char *)unpacked_bool_flags.data(),
sizeof(char) * unpacked_bool_flags.size());
}
std::size_t
Contractor::WriteContractedGraph(unsigned max_node_id,
const util::DeallocatingVector<QueryEdge> &contracted_edge_list)
{
    // Sorting contracted edges in a way that the static query graph can read them in in-place.
tbb::parallel_sort(contracted_edge_list.begin(), contracted_edge_list.end());
const unsigned contracted_edge_count = contracted_edge_list.size();
util::SimpleLogger().Write() << "Serializing compacted graph of " << contracted_edge_count
<< " edges";
const util::FingerPrint fingerprint = util::FingerPrint::GetValid();
boost::filesystem::ofstream hsgr_output_stream(config.graph_output_path, std::ios::binary);
hsgr_output_stream.write((char *)&fingerprint, sizeof(util::FingerPrint));
const NodeID max_used_node_id = [&contracted_edge_list] {
NodeID tmp_max = 0;
for (const QueryEdge &edge : contracted_edge_list)
{
BOOST_ASSERT(SPECIAL_NODEID != edge.source);
BOOST_ASSERT(SPECIAL_NODEID != edge.target);
tmp_max = std::max(tmp_max, edge.source);
tmp_max = std::max(tmp_max, edge.target);
}
return tmp_max;
}();
util::SimpleLogger().Write(logDEBUG) << "input graph has " << (max_node_id + 1) << " nodes";
util::SimpleLogger().Write(logDEBUG) << "contracted graph has " << (max_used_node_id + 1)
<< " nodes";
std::vector<util::StaticGraph<EdgeData>::NodeArrayEntry> node_array;
// make sure we have at least one sentinel
node_array.resize(max_node_id + 2);
util::SimpleLogger().Write() << "Building node array";
util::StaticGraph<EdgeData>::EdgeIterator edge = 0;
util::StaticGraph<EdgeData>::EdgeIterator position = 0;
util::StaticGraph<EdgeData>::EdgeIterator last_edge;
// initializing 'first_edge'-field of nodes:
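    // first_edge is effectively a prefix sum of out-degrees: node_array[n].first_edge
    // marks where node n's adjacency range starts in the edge array written below
    // (CSR-style layout).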
for (const auto node : util::irange(0u, max_used_node_id + 1))
{
last_edge = edge;
while ((edge < contracted_edge_count) && (contracted_edge_list[edge].source == node))
{
++edge;
}
node_array[node].first_edge = position; //=edge
position += edge - last_edge; // remove
}
for (const auto sentinel_counter :
util::irange<unsigned>(max_used_node_id + 1, node_array.size()))
{
// sentinel element, guarded against underflow
node_array[sentinel_counter].first_edge = contracted_edge_count;
}
util::SimpleLogger().Write() << "Serializing node array";
RangebasedCRC32 crc32_calculator;
const unsigned edges_crc32 = crc32_calculator(contracted_edge_list);
util::SimpleLogger().Write() << "Writing CRC32: " << edges_crc32;
const unsigned node_array_size = node_array.size();
// serialize crc32, aka checksum
hsgr_output_stream.write((char *)&edges_crc32, sizeof(unsigned));
// serialize number of nodes
hsgr_output_stream.write((char *)&node_array_size, sizeof(unsigned));
// serialize number of edges
hsgr_output_stream.write((char *)&contracted_edge_count, sizeof(unsigned));
// serialize all nodes
if (node_array_size > 0)
{
hsgr_output_stream.write((char *)&node_array[0],
sizeof(util::StaticGraph<EdgeData>::NodeArrayEntry) *
node_array_size);
}
// serialize all edges
util::SimpleLogger().Write() << "Building edge array";
std::size_t number_of_used_edges = 0;
util::StaticGraph<EdgeData>::EdgeArrayEntry current_edge;
for (const auto edge : util::irange<std::size_t>(0UL, contracted_edge_list.size()))
{
        // some self-loops are required for oneway handling. Need to assert that we only keep these
// (TODO)
// no eigen loops
// BOOST_ASSERT(contracted_edge_list[edge].source != contracted_edge_list[edge].target ||
// node_represents_oneway[contracted_edge_list[edge].source]);
current_edge.target = contracted_edge_list[edge].target;
current_edge.data = contracted_edge_list[edge].data;
// every target needs to be valid
BOOST_ASSERT(current_edge.target <= max_used_node_id);
#ifndef NDEBUG
if (current_edge.data.distance <= 0)
{
util::SimpleLogger().Write(logWARNING)
<< "Edge: " << edge << ",source: " << contracted_edge_list[edge].source
<< ", target: " << contracted_edge_list[edge].target
<< ", dist: " << current_edge.data.distance;
util::SimpleLogger().Write(logWARNING) << "Failed at adjacency list of node "
<< contracted_edge_list[edge].source << "/"
<< node_array.size() - 1;
return 1;
}
#endif
        hsgr_output_stream.write((char *)&current_edge,
sizeof(util::StaticGraph<EdgeData>::EdgeArrayEntry));
++number_of_used_edges;
}
return number_of_used_edges;
}
/**
\brief Build contracted graph.
*/
void Contractor::ContractGraph(
const EdgeID max_edge_id,
util::DeallocatingVector<extractor::EdgeBasedEdge> &edge_based_edge_list,
util::DeallocatingVector<QueryEdge> &contracted_edge_list,
std::vector<EdgeWeight> &&node_weights,
std::vector<bool> &is_core_node,
std::vector<float> &inout_node_levels) const
{
std::vector<float> node_levels;
node_levels.swap(inout_node_levels);
GraphContractor graph_contractor(
max_edge_id + 1, edge_based_edge_list, std::move(node_levels), std::move(node_weights));
graph_contractor.Run(config.core_factor);
graph_contractor.GetEdges(contracted_edge_list);
graph_contractor.GetCoreMarker(is_core_node);
graph_contractor.GetNodeLevels(inout_node_levels);
}
}
}
| 1 | 16,420 | windows size might be off | Project-OSRM-osrm-backend | cpp |
@@ -25,10 +25,8 @@
#include "engine/bp/BPFileReader.h"
#include "engine/bp/BPFileWriter.h"
-#ifdef ADIOS_HAVE_DATAMAN // external dependencies
#include "engine/dataman/DataManReader.h"
#include "engine/dataman/DataManWriter.h"
-#endif
#ifdef ADIOS_HAVE_ADIOS1 // external dependencies
#include "engine/adios1/ADIOS1Reader.h" | 1 | /*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* ADIOS.cpp
*
* Created on: Sep 29, 2016
* Author: William F Godoy
*/
/// \cond EXCLUDE_FROM_DOXYGEN
#include <fstream>
#include <ios> //std::ios_base::failure
#include <iostream>
#include <sstream>
#include <utility>
/// \endcond
#include "ADIOS.h"
#include "ADIOS.tcc"
#include "functions/adiosFunctions.h"
// Engines
#include "engine/bp/BPFileReader.h"
#include "engine/bp/BPFileWriter.h"
#ifdef ADIOS_HAVE_DATAMAN // external dependencies
#include "engine/dataman/DataManReader.h"
#include "engine/dataman/DataManWriter.h"
#endif
#ifdef ADIOS_HAVE_ADIOS1 // external dependencies
#include "engine/adios1/ADIOS1Reader.h"
#include "engine/adios1/ADIOS1Writer.h"
#endif
namespace adios
{
ADIOS::ADIOS(const Verbose verbose, const bool debugMode)
: m_DebugMode{debugMode}
{
InitMPI();
}
ADIOS::ADIOS(const std::string config, const Verbose verbose,
const bool debugMode)
: m_ConfigFile(config), m_DebugMode(debugMode)
{
InitMPI();
// InitXML( m_ConfigFile, m_MPIComm, m_DebugMode, m_Transforms );
}
ADIOS::ADIOS(const std::string configFile, MPI_Comm mpiComm,
const Verbose verbose, const bool debugMode)
: m_MPIComm(mpiComm), m_ConfigFile(configFile), m_DebugMode(debugMode)
{
InitMPI();
// InitXML( m_XMLConfigFile, m_MPIComm, m_DebugMode, m_HostLanguage,
// m_Transforms, m_Groups );
}
ADIOS::ADIOS(MPI_Comm mpiComm, const Verbose verbose, const bool debugMode)
: m_MPIComm(mpiComm), m_DebugMode(debugMode)
{
InitMPI();
}
// ADIOS::~ADIOS() {}
void ADIOS::InitMPI()
{
if (m_DebugMode == true)
{
if (m_MPIComm == MPI_COMM_NULL)
{
throw std::ios_base::failure(
"ERROR: engine communicator is MPI_COMM_NULL,"
" in call to ADIOS Open or Constructor\n");
}
}
MPI_Comm_rank(m_MPIComm, &m_RankMPI);
MPI_Comm_size(m_MPIComm, &m_SizeMPI);
}
Method &ADIOS::DeclareMethod(const std::string methodName)
{
if (m_DebugMode == true)
{
if (m_Methods.count(methodName) == 1)
{
throw std::invalid_argument(
"ERROR: method " + methodName +
" already declared, from DeclareMethod\n");
}
}
m_Methods.emplace(methodName, Method(methodName, m_DebugMode));
return m_Methods.at(methodName);
}
std::shared_ptr<Engine> ADIOS::Open(const std::string &name,
const std::string accessMode,
MPI_Comm mpiComm, const Method &method)
{
if (m_DebugMode == true)
{
if (m_EngineNames.count(name) == 1) // Check if Engine already exists
{
throw std::invalid_argument(
"ERROR: engine name " + name +
" already created by Open, in call from Open.\n");
}
}
m_EngineNames.insert(name);
const std::string type(method.m_Type);
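    // When the Method carries no engine type, fall back to the BP file engines:
    // write/append modes default to BPFileWriter, read mode to BPFileReader.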
const bool isDefaultWriter =
(accessMode == "w" || accessMode == "write" || accessMode == "a" ||
accessMode == "append") &&
type.empty()
? true
: false;
const bool isDefaultReader =
(accessMode == "r" || accessMode == "read") && type.empty() ? true
: false;
if (isDefaultWriter || type == "BPFileWriter" || type == "bpfilewriter")
{
return std::make_shared<BPFileWriter>(*this, name, accessMode, mpiComm,
method);
}
else if (isDefaultReader || type == "BPReader" || type == "bpreader")
{
return std::make_shared<BPFileReader>(*this, name, accessMode, mpiComm,
method);
}
else if (type == "SIRIUS" || type == "sirius" || type == "Sirius")
{
// not yet supported
// return std::make_shared<engine::DataMan>( *this, name, accessMode,
// mpiComm, method, iomode, timeout_sec, m_DebugMode, method.m_nThreads
// );
}
else if (type == "DataManWriter")
{
#ifdef ADIOS_HAVE_DATAMAN
return std::make_shared<DataManWriter>(*this, name, accessMode, mpiComm,
method);
#else
throw std::invalid_argument(
"ERROR: this version didn't compile with "
"Dataman library, can't Open DataManWriter\n");
#endif
}
else if (type == "DataManReader")
{
#ifdef ADIOS_HAVE_DATAMAN
return std::make_shared<DataManReader>(*this, name, accessMode, mpiComm,
method);
#else
throw std::invalid_argument(
"ERROR: this version didn't compile with "
"Dataman library, can't Open DataManReader\n");
#endif
}
else if (type == "ADIOS1Writer")
{
#ifdef ADIOS_HAVE_ADIOS1
return std::make_shared<ADIOS1Writer>(*this, name, accessMode, mpiComm,
method);
#else
throw std::invalid_argument(
"ERROR: this version didn't compile with ADIOS "
"1.x library, can't Open ADIOS1Writer\n");
#endif
}
else if (type == "Vis")
{
// return std::make_shared<Vis>( *this, name, accessMode, mpiComm,
// method,
// iomode, timeout_sec, m_DebugMode, method.m_nThreads );
}
else
{
if (m_DebugMode == true)
{
throw std::invalid_argument("ERROR: method type " + type +
" not supported for " + name +
", in call to Open\n");
}
}
return nullptr; // if debug mode is off
}
std::shared_ptr<Engine> ADIOS::Open(const std::string &name,
const std::string accessMode,
const Method &method)
{
return Open(name, accessMode, m_MPIComm, method);
}
std::shared_ptr<Engine> ADIOS::Open(const std::string &name,
const std::string accessMode,
MPI_Comm mpiComm,
const std::string methodName)
{
auto itMethod = m_Methods.find(methodName);
if (m_DebugMode == true)
{
CheckMethod(itMethod, methodName, " in call to Open\n");
}
return Open(name, accessMode, mpiComm, itMethod->second);
}
std::shared_ptr<Engine> ADIOS::Open(const std::string &name,
const std::string accessMode,
const std::string methodName)
{
return Open(name, accessMode, m_MPIComm, methodName);
}
std::shared_ptr<Engine> ADIOS::OpenFileReader(const std::string &fileName,
MPI_Comm mpiComm,
const Method &method)
{
return Open(fileName, "r", mpiComm, method);
}
std::shared_ptr<Engine> ADIOS::OpenFileReader(const std::string &name,
MPI_Comm mpiComm,
const std::string methodName)
{
auto itMethod = m_Methods.find(methodName);
if (m_DebugMode == true)
{
CheckMethod(itMethod, methodName, " in call to Open\n");
}
return Open(name, "r", m_MPIComm, itMethod->second);
}
VariableCompound &ADIOS::GetVariableCompound(const std::string &name)
{
return m_Compound.at(GetVariableIndex<void>(name));
}
void ADIOS::MonitorVariables(std::ostream &logStream)
{
logStream << "\tVariable \t Type\n";
for (auto &variablePair : m_Variables)
{
const std::string name(variablePair.first);
const std::string type(variablePair.second.first);
if (type == GetType<char>())
{
GetVariable<char>(name).Monitor(logStream);
}
else if (type == GetType<unsigned char>())
{
GetVariable<unsigned char>(name).Monitor(logStream);
}
else if (type == GetType<short>())
{
GetVariable<short>(name).Monitor(logStream);
}
else if (type == GetType<unsigned short>())
{
GetVariable<unsigned short>(name).Monitor(logStream);
}
else if (type == GetType<int>())
{
GetVariable<int>(name).Monitor(logStream);
}
else if (type == GetType<unsigned int>())
{
GetVariable<unsigned int>(name).Monitor(logStream);
}
else if (type == GetType<long int>())
{
GetVariable<long int>(name).Monitor(logStream);
}
else if (type == GetType<unsigned long int>())
{
GetVariable<unsigned long int>(name).Monitor(logStream);
}
else if (type == GetType<long long int>())
{
GetVariable<long long int>(name).Monitor(logStream);
}
else if (type == GetType<unsigned long long int>())
{
GetVariable<unsigned long long int>(name).Monitor(logStream);
}
else if (type == GetType<float>())
{
GetVariable<float>(name).Monitor(logStream);
}
else if (type == GetType<double>())
{
GetVariable<double>(name).Monitor(logStream);
}
else if (type == GetType<long double>())
{
GetVariable<long double>(name).Monitor(logStream);
}
else if (type == GetType<std::complex<float>>())
{
GetVariable<std::complex<float>>(name).Monitor(logStream);
}
else if (type == GetType<std::complex<double>>())
{
GetVariable<std::complex<double>>(name).Monitor(logStream);
}
else if (type == GetType<std::complex<long double>>())
{
GetVariable<std::complex<long double>>(name).Monitor(logStream);
}
}
}
// PRIVATE FUNCTIONS BELOW
void ADIOS::CheckVariableInput(const std::string &name,
const Dims &dimensions) const
{
if (m_DebugMode == true)
{
if (m_Variables.count(name) == 1)
{
throw std::invalid_argument(
"ERROR: variable " + name +
" already exists, in call to DefineVariable\n");
}
if (dimensions.empty() == true)
{
throw std::invalid_argument(
"ERROR: variable " + name +
" dimensions can't be empty, in call to DefineVariable\n");
}
}
}
void ADIOS::CheckVariableName(
std::map<std::string, std::pair<std::string, unsigned int>>::const_iterator
itVariable,
const std::string &name, const std::string hint) const
{
if (m_DebugMode == true)
{
if (itVariable == m_Variables.end())
{
throw std::invalid_argument("ERROR: variable " + name +
" does not exist " + hint + "\n");
}
}
}
void ADIOS::CheckMethod(std::map<std::string, Method>::const_iterator itMethod,
const std::string methodName,
const std::string hint) const
{
if (itMethod == m_Methods.end())
{
throw std::invalid_argument("ERROR: method " + methodName +
" not found " + hint + "\n");
}
}
} // end namespace adios
| 1 | 11,363 | This should still be guarded with an `#ifdef` since the DataMan code will only be supported for shared lib builds. | ornladios-ADIOS2 | cpp |
@@ -1,7 +1,7 @@
<%= "#{@plan.title}" %>
<%= "----------------------------------------------------------\n" %>
<% if @show_coversheet %>
-<%= @hash[:attribution].length > 1 ? _("Creators: ") : _('Creator:') %> <%= @hash[:attribution].join(', ') %>
+<%= @hash[:attribution].many? ? _("Creators: ") : _('Creator:') %> <%= @hash[:attribution].join(', ') %>
<%= _("Affiliation: ") + @hash[:affiliation] %>
<% if @hash[:funder].present? %>
<%= _("Template: ") + @hash[:funder] %> | 1 | <%= "#{@plan.title}" %>
<%= "----------------------------------------------------------\n" %>
<% if @show_coversheet %>
<%= @hash[:attribution].length > 1 ? _("Creators: ") : _('Creator:') %> <%= @hash[:attribution].join(', ') %>
<%= _("Affiliation: ") + @hash[:affiliation] %>
<% if @hash[:funder].present? %>
<%= _("Template: ") + @hash[:funder] %>
<% else %>
<%= _("Template: ") + @hash[:template] + @hash[:customizer] %>
<% end %>
<% if @plan.grant_number.present? %>
<%= _("Grant number: ") + @plan.grant_number %>
<% end %>
<% if @plan.description.present? %>
<%= _("Project abstract: ") %>
<%= "\t" + strip_tags(@plan.description) + "\n" %>
<% end %>
<%= _("Last modified: ") + l(@plan.updated_at.to_date, formats: :short) %>
<%= _("Copyright information:") %>
<%= "\t" + _(" The above plan creator(s) have agreed that others may use as much of the text of this plan as they would like in their own plans, and customise it as necessary. You do not need to credit the creator(s) as the source of the language used, but using any of the plan's text does not imply that the creator(s) endorse, or have any relationship to, your project or proposal") %>
<%= "----------------------------------------------------------\n" %>
<% end %>
<% @hash[:phases].each do |phase| %>
<%# Only render selected phase %>
<% if phase[:title] == @selected_phase.title %>
<%= (@hash[:phases].length > 1 ? "#{phase[:title]}" : "") %>
<% phase[:sections].each do |section| %>
<% if display_questions_and_section_headings(section, @show_sections_questions, @show_custom_sections) %>
<%= "#{section[:title]}\n" %>
<% section[:questions].each do |question| %>
<%# text in this case is an array to accommodate option_based %>
<% if question[:text].respond_to?(:each) %>
<% question[:text].each do |txt| %>
<%= "#{strip_tags(txt.gsub(/<br\/?>/, '\n'))}\n" %>
<% end %>
<% else %>
<%= "#{strip_tags(question[:text][0].gsub(/<tr>(\s|<td>|<\/td>| )*(<\/tr>|<tr>)/,""))}\n" if question[:text].present? && question[:text][0].present? %>
<% end %>
<% answer = @plan.answer(question[:id], false) %>
<% blank = (answer.present? && answer.is_valid?) ? answer.text.gsub(/<\/?p>/, '').gsub(/<br\s?\/?>/, '\n').chomp.blank? : true %>
<% if blank && @show_unanswered %>
<%= " #{_("Question not answered.")}\n\n" %>
<% elsif !blank %>
<% if answer.question_options.length > 0 %>
<% answer.question_options.each do |opt| %>
<%= " #{opt.text}\n" %>
<% end %>
<% end %>
<%= " #{strip_tags(answer.text.gsub(/<\/?p>/, '').gsub(/<br\s?\/?>/, '\n').chomp)}\n\n" if answer.text.present? %>
<% end %>
<% end %>
<% end %>
<% end %>
<% end %>
<% end %>
<%= "----------------------------------------------------------" %>
<%= _("A Data Management Plan created using %{application_name}") % { application_name: Rails.configuration.branding[:application][:name] } %> | 1 | 18,050 | thanks for cleaning up these old length checks | DMPRoadmap-roadmap | rb |
@@ -189,7 +189,7 @@ func (e *Executor) addBuiltinAnnontations(manifests []provider.Manifest, variant
}
func (e *Executor) applyManifests(ctx context.Context, manifests []provider.Manifest) error {
- e.LogPersister.Infof("Start applying %d manifests", len(manifests))
+ e.LogPersister.Infof("Start applying %d manifests to %q namespace", len(manifests), e.config.Input.Namespace)
for _, m := range manifests {
if err := e.provider.ApplyManifest(ctx, m); err != nil {
e.LogPersister.Errorf("Failed to apply manifest: %s (%v)", m.Key.ReadableString(), err) | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
provider "github.com/pipe-cd/pipe/pkg/app/piped/cloudprovider/kubernetes"
"github.com/pipe-cd/pipe/pkg/app/piped/executor"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
variantLabel = "pipecd.dev/variant" // Variant name: primary, stage, baseline
)
type Executor struct {
executor.Input
provider provider.Provider
config *config.KubernetesDeploymentSpec
}
type registerer interface {
Register(stage model.Stage, f executor.Factory) error
RegisterRollback(kind model.ApplicationKind, f executor.Factory) error
}
// Register registers this executor factory into a given registerer.
func Register(r registerer) {
f := func(in executor.Input) executor.Executor {
return &Executor{
Input: in,
}
}
r.Register(model.StageK8sSync, f)
r.Register(model.StageK8sPrimaryRollout, f)
r.Register(model.StageK8sCanaryRollout, f)
r.Register(model.StageK8sCanaryClean, f)
r.Register(model.StageK8sBaselineRollout, f)
r.Register(model.StageK8sBaselineClean, f)
r.Register(model.StageK8sTrafficRouting, f)
r.RegisterRollback(model.ApplicationKind_KUBERNETES, f)
}
func (e *Executor) Execute(sig executor.StopSignal) model.StageStatus {
e.config = e.DeploymentConfig.KubernetesDeploymentSpec
if e.config == nil {
e.LogPersister.Error("Malformed deployment configuration: missing KubernetesDeploymentSpec")
return model.StageStatus_STAGE_FAILURE
}
var (
ctx = sig.Context()
appDir = filepath.Join(e.RepoDir, e.Deployment.GitPath.Path)
)
e.provider = provider.NewProvider(e.Deployment.ApplicationName, appDir, e.RepoDir, e.Deployment.GitPath.ConfigFilename, e.config.Input, e.Logger)
e.Logger.Info("start executing kubernetes stage",
zap.String("stage-name", e.Stage.Name),
zap.String("app-dir", appDir),
)
var (
originalStatus = e.Stage.Status
status model.StageStatus
)
switch model.Stage(e.Stage.Name) {
case model.StageK8sSync:
status = e.ensureSync(ctx)
case model.StageK8sPrimaryRollout:
status = e.ensurePrimaryRollout(ctx)
case model.StageK8sCanaryRollout:
status = e.ensureCanaryRollout(ctx)
case model.StageK8sCanaryClean:
status = e.ensureCanaryClean(ctx)
case model.StageK8sBaselineRollout:
status = e.ensureBaselineRollout(ctx)
case model.StageK8sBaselineClean:
status = e.ensureBaselineClean(ctx)
case model.StageK8sTrafficRouting:
status = e.ensureTrafficRouting(ctx)
case model.StageRollback:
status = e.ensureRollback(ctx)
default:
e.LogPersister.Errorf("Unsupported stage %s for kubernetes application", e.Stage.Name)
return model.StageStatus_STAGE_FAILURE
}
return executor.DetermineStageStatus(sig.Signal(), originalStatus, status)
}
func (e *Executor) loadManifests(ctx context.Context) ([]provider.Manifest, error) {
cache := provider.AppManifestsCache{
AppID: e.Deployment.ApplicationId,
Cache: e.AppManifestsCache,
Logger: e.Logger,
}
manifests, ok := cache.Get(e.Deployment.Trigger.Commit.Hash)
if ok {
return manifests, nil
}
// When the manifests were not in the cache we have to load them.
manifests, err := e.provider.LoadManifests(ctx)
if err != nil {
return nil, err
}
cache.Put(e.Deployment.Trigger.Commit.Hash, manifests)
return manifests, nil
}
func (e *Executor) loadRunningManifests(ctx context.Context) (manifests []provider.Manifest, err error) {
runningCommit := e.Deployment.RunningCommitHash
if runningCommit == "" {
return nil, fmt.Errorf("unable to determine running commit")
}
cache := provider.AppManifestsCache{
AppID: e.Deployment.ApplicationId,
Cache: e.AppManifestsCache,
Logger: e.Logger,
}
manifests, ok := cache.Get(runningCommit)
if ok {
return manifests, nil
}
// When the manifests were not in the cache we have to load them.
var (
appDir = filepath.Join(e.RepoDir, e.Deployment.GitPath.Path)
p = provider.NewProvider(e.Deployment.ApplicationName, appDir, e.RunningRepoDir, e.Deployment.GitPath.ConfigFilename, e.config.Input, e.Logger)
)
manifests, err = p.LoadManifests(ctx)
if err != nil {
return nil, err
}
cache.Put(runningCommit, manifests)
return manifests, nil
}
func (e *Executor) addBuiltinAnnontations(manifests []provider.Manifest, variant, hash string) {
for i := range manifests {
manifests[i].AddAnnotations(map[string]string{
provider.LabelManagedBy: provider.ManagedByPiped,
provider.LabelPiped: e.PipedConfig.PipedID,
provider.LabelApplication: e.Deployment.ApplicationId,
variantLabel: variant,
provider.LabelOriginalAPIVersion: manifests[i].Key.APIVersion,
provider.LabelResourceKey: manifests[i].Key.String(),
provider.LabelCommitHash: hash,
})
}
}
func (e *Executor) applyManifests(ctx context.Context, manifests []provider.Manifest) error {
e.LogPersister.Infof("Start applying %d manifests", len(manifests))
for _, m := range manifests {
if err := e.provider.ApplyManifest(ctx, m); err != nil {
e.LogPersister.Errorf("Failed to apply manifest: %s (%v)", m.Key.ReadableString(), err)
return err
}
e.LogPersister.Successf("- applied manifest: %s", m.Key.ReadableString())
}
e.LogPersister.Successf("Successfully applied %d manifests", len(manifests))
return nil
}
func (e *Executor) deleteResources(ctx context.Context, resources []provider.ResourceKey) error {
resourcesLen := len(resources)
if resourcesLen == 0 {
e.LogPersister.Info("No resources to delete")
return nil
}
e.LogPersister.Infof("Start deleting %d resources", len(resources))
var deletedCount int
for _, k := range resources {
err := e.provider.Delete(ctx, k)
if err == nil {
e.LogPersister.Successf("- deleted resource: %s", k.ReadableString())
deletedCount++
continue
}
if errors.Is(err, provider.ErrNotFound) {
e.LogPersister.Infof("- no resource %s to delete", k.ReadableString())
deletedCount++
continue
}
e.LogPersister.Errorf("- unable to delete resource: %s (%v)", k.ReadableString(), err)
}
if deletedCount < resourcesLen {
e.LogPersister.Infof("Deleted %d/%d resources", deletedCount, resourcesLen)
return fmt.Errorf("unable to delete %d resources", resourcesLen-deletedCount)
}
e.LogPersister.Successf("Successfully deleted %d resources", len(resources))
return nil
}
func findManifests(kind, name string, manifests []provider.Manifest) []provider.Manifest {
var out []provider.Manifest
for _, m := range manifests {
if m.Key.Kind != kind {
continue
}
if name != "" && m.Key.Name != name {
continue
}
out = append(out, m)
}
return out
}
func findConfigMapManifests(manifests []provider.Manifest) []provider.Manifest {
var out []provider.Manifest
for _, m := range manifests {
if !m.Key.IsConfigMap() {
continue
}
out = append(out, m)
}
return out
}
func findSecretManifests(manifests []provider.Manifest) []provider.Manifest {
var out []provider.Manifest
for _, m := range manifests {
if !m.Key.IsSecret() {
continue
}
out = append(out, m)
}
return out
}
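// findWorkloadManifests returns the workload manifests referenced by refs;
// a reference without an explicit kind defaults to Deployment, and when no
// references are given all Deployment manifests are returned.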
func findWorkloadManifests(manifests []provider.Manifest, refs []config.K8sResourceReference) []provider.Manifest {
if len(refs) == 0 {
return findManifests(provider.KindDeployment, "", manifests)
}
workloads := make([]provider.Manifest, 0)
for _, ref := range refs {
kind := provider.KindDeployment
if ref.Kind != "" {
kind = ref.Kind
}
ms := findManifests(kind, ref.Name, manifests)
workloads = append(workloads, ms...)
}
return workloads
}
func duplicateManifests(manifests []provider.Manifest, nameSuffix string) []provider.Manifest {
out := make([]provider.Manifest, 0, len(manifests))
for _, m := range manifests {
out = append(out, duplicateManifest(m, nameSuffix))
}
return out
}
func duplicateManifest(m provider.Manifest, nameSuffix string) provider.Manifest {
name := makeSuffixedName(m.Key.Name, nameSuffix)
return m.Duplicate(name)
}
func generateVariantServiceManifests(services []provider.Manifest, variant, nameSuffix string) ([]provider.Manifest, error) {
manifests := make([]provider.Manifest, 0, len(services))
updateService := func(s *corev1.Service) {
s.Name = makeSuffixedName(s.Name, nameSuffix)
// Currently, we suppose that all generated services should be ClusterIP.
s.Spec.Type = corev1.ServiceTypeClusterIP
// Append the variant label to the selector
// to ensure that the generated service is using only workloads of this variant.
if s.Spec.Selector == nil {
s.Spec.Selector = map[string]string{}
}
s.Spec.Selector[variantLabel] = variant
// Empty all unneeded fields.
s.Spec.ExternalIPs = nil
s.Spec.LoadBalancerIP = ""
s.Spec.LoadBalancerSourceRanges = nil
}
for _, m := range services {
s := &corev1.Service{}
if err := m.ConvertToStructuredObject(s); err != nil {
return nil, err
}
updateService(s)
manifest, err := provider.ParseFromStructuredObject(s)
if err != nil {
return nil, fmt.Errorf("failed to parse Service object to Manifest: %w", err)
}
manifests = append(manifests, manifest)
}
return manifests, nil
}
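// generateVariantWorkloadManifests clones the given workloads for a variant
// (e.g. canary or baseline): it suffixes resource names, adds the variant
// label to the selector and pod template labels, rewires ConfigMap/Secret
// volume references to their suffixed copies, and optionally recalculates
// the replica count via replicasCalculator. Only Deployment workloads are
// supported.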
func generateVariantWorkloadManifests(workloads, configmaps, secrets []provider.Manifest, variant, nameSuffix string, replicasCalculator func(*int32) int32) ([]provider.Manifest, error) {
manifests := make([]provider.Manifest, 0, len(workloads))
cmNames := make(map[string]struct{}, len(configmaps))
for i := range configmaps {
cmNames[configmaps[i].Key.Name] = struct{}{}
}
secretNames := make(map[string]struct{}, len(secrets))
for i := range secrets {
secretNames[secrets[i].Key.Name] = struct{}{}
}
updatePod := func(pod *corev1.PodTemplateSpec) {
// Add variant labels.
if pod.Labels == nil {
pod.Labels = map[string]string{}
}
pod.Labels[variantLabel] = variant
// Update volumes to use canary's ConfigMaps and Secrets.
for i := range pod.Spec.Volumes {
if cm := pod.Spec.Volumes[i].ConfigMap; cm != nil {
if _, ok := cmNames[cm.Name]; ok {
cm.Name = makeSuffixedName(cm.Name, nameSuffix)
}
}
if s := pod.Spec.Volumes[i].Secret; s != nil {
if _, ok := secretNames[s.SecretName]; ok {
s.SecretName = makeSuffixedName(s.SecretName, nameSuffix)
}
}
}
}
updateDeployment := func(d *appsv1.Deployment) {
d.Name = makeSuffixedName(d.Name, nameSuffix)
if replicasCalculator != nil {
replicas := replicasCalculator(d.Spec.Replicas)
d.Spec.Replicas = &replicas
}
d.Spec.Selector = metav1.AddLabelToSelector(d.Spec.Selector, variantLabel, variant)
updatePod(&d.Spec.Template)
}
for _, m := range workloads {
switch m.Key.Kind {
case provider.KindDeployment:
d := &appsv1.Deployment{}
if err := m.ConvertToStructuredObject(d); err != nil {
return nil, err
}
updateDeployment(d)
manifest, err := provider.ParseFromStructuredObject(d)
if err != nil {
return nil, err
}
manifests = append(manifests, manifest)
default:
return nil, fmt.Errorf("unsupported workload kind %s", m.Key.Kind)
}
}
return manifests, nil
}
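// checkVariantSelectorInWorkload verifies that both spec.selector.matchLabels
// and spec.template.metadata.labels of the manifest carry the expected
// pipecd.dev/variant value.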
func checkVariantSelectorInWorkload(m provider.Manifest, variant string) error {
var (
matchLabelsFields = []string{"spec", "selector", "matchLabels"}
labelsFields = []string{"spec", "template", "metadata", "labels"}
)
matchLabels, err := m.GetNestedStringMap(matchLabelsFields...)
if err != nil {
return err
}
value, ok := matchLabels[variantLabel]
if !ok {
return fmt.Errorf("missing %s key in spec.selector.matchLabels", variantLabel)
}
if value != variant {
return fmt.Errorf("require %s but got %s for %s key in %s", variant, value, variantLabel, strings.Join(matchLabelsFields, "."))
}
labels, err := m.GetNestedStringMap(labelsFields...)
if err != nil {
return err
}
value, ok = labels[variantLabel]
if !ok {
return fmt.Errorf("missing %s key in spec.template.metadata.labels", variantLabel)
}
if value != variant {
return fmt.Errorf("require %s but got %s for %s key in %s", variant, value, variantLabel, strings.Join(labelsFields, "."))
}
return nil
}
func ensureVariantSelectorInWorkload(m provider.Manifest, variant string) error {
variantMap := map[string]string{
variantLabel: variant,
}
if err := m.AddStringMapValues(variantMap, "spec", "selector", "matchLabels"); err != nil {
return err
}
return m.AddStringMapValues(variantMap, "spec", "template", "metadata", "labels")
}
func makeSuffixedName(name, suffix string) string {
if suffix != "" {
return name + "-" + suffix
}
return name
}
| 1 | 10,720 | What will be logged when the `Input.Namespace` was not specified? | pipe-cd-pipe | go |
@@ -102,6 +102,9 @@ public class PMDParameters {
@Parameter(names = "-cache", description = "Specify the location of the cache file for incremental analysis.")
private String cacheLocation = null;
+ @Parameter(names = "-fix", description = "Attempt to automatically fix rule violations found on source files.")
+ private boolean autoFixes = false;
+
// this has to be a public static class, so that JCommander can use it!
public static class PropertyConverter implements IStringConverter<Properties> {
| 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.cli;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import net.sourceforge.pmd.PMDConfiguration;
import net.sourceforge.pmd.RulePriority;
import net.sourceforge.pmd.lang.LanguageRegistry;
import net.sourceforge.pmd.lang.LanguageVersion;
import com.beust.jcommander.IStringConverter;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
import com.beust.jcommander.validators.PositiveInteger;
public class PMDParameters {
@Parameter(names = { "-rulesets", "-R" }, description = "Comma separated list of ruleset names to use.",
required = true)
private String rulesets;
@Parameter(names = { "-uri", "-u" }, description = "Database URI for sources.", required = false)
private String uri;
@Parameter(names = { "-dir", "-d" }, description = "Root directory for sources.", required = false)
private String sourceDir;
@Parameter(names = { "-filelist" }, description = "Path to a file containing a list of files to analyze.",
required = false)
private String fileListPath;
@Parameter(names = { "-format", "-f" }, description = "Report format type.")
private String format = "text"; // Enhance to support other usage
@Parameter(names = { "-debug", "-verbose", "-D", "-V" }, description = "Debug mode.")
private boolean debug = false;
@Parameter(names = { "-help", "-h", "-H" }, description = "Display help on usage.", help = true)
private boolean help = false;
@Parameter(names = { "-encoding", "-e" },
description = "Specifies the character set encoding of the source code files PMD is reading (i.e., UTF-8).")
private String encoding = "UTF-8";
@Parameter(names = { "-threads", "-t" }, description = "Sets the number of threads used by PMD.",
validateWith = PositiveInteger.class)
private Integer threads = 1;
@Parameter(names = { "-benchmark", "-b" },
description = "Benchmark mode - output a benchmark report upon completion; default to System.err.")
private boolean benchmark = false;
@Parameter(names = { "-stress", "-S" }, description = "Performs a stress test.")
private boolean stress = false;
@Parameter(names = "-shortnames", description = "Prints shortened filenames in the report.")
private boolean shortnames = false;
@Parameter(names = "-showsuppressed", description = "Report should show suppressed rule violations.")
private boolean showsuppressed = false;
@Parameter(names = "-suppressmarker",
description = "Specifies the string that marks the a line which PMD should ignore; default is NOPMD.")
private String suppressmarker = "NOPMD";
@Parameter(names = { "-minimumpriority", "-min" },
description = "Rule priority threshold; rules with lower priority than configured here won't be used. Default is '5' which is the lowest priority.",
converter = RulePriorityConverter.class)
private RulePriority minimumPriority = RulePriority.LOW;
@Parameter(names = { "-property", "-P" }, description = "{name}={value}: Define a property for the report format.",
converter = PropertyConverter.class)
private List<Properties> properties = new ArrayList<>();
@Parameter(names = { "-reportfile", "-r" }, description = "Sends report output to a file; default to System.out.")
private String reportfile = null;
@Parameter(names = { "-version", "-v" }, description = "Specify version of a language PMD should use.")
private String version = null;
@Parameter(names = { "-language", "-l" }, description = "Specify a language PMD should use.")
private String language = null;
@Parameter(names = "-auxclasspath",
description = "Specifies the classpath for libraries used by the source code. This is used by the type resolution. Alternatively, a 'file://' URL to a text file containing path elements on consecutive lines can be specified.")
private String auxclasspath;
@Parameter(names = { "-failOnViolation", "--failOnViolation" }, arity = 1,
description = "By default PMD exits with status 4 if violations are found. Disable this option with '-failOnViolation false' to exit with 0 instead and just write the report.")
private boolean failOnViolation = true;
@Parameter(names = "-norulesetcompatibility",
description = "Disable the ruleset compatibility filter. The filter is active by default and tries automatically 'fix' old ruleset files with old rule names")
private boolean noRuleSetCompatibility = false;
@Parameter(names = "-cache", description = "Specify the location of the cache file for incremental analysis.")
private String cacheLocation = null;
// this has to be a public static class, so that JCommander can use it!
public static class PropertyConverter implements IStringConverter<Properties> {
private static final char SEPARATOR = '=';
@Override
public Properties convert(String value) {
int indexOfSeparator = value.indexOf(SEPARATOR);
if (indexOfSeparator < 0) {
throw new ParameterException(
"Property name must be separated with an = sign from it value: name=value.");
}
String propertyName = value.substring(0, indexOfSeparator);
String propertyValue = value.substring(indexOfSeparator + 1);
Properties properties = new Properties();
properties.put(propertyName, propertyValue);
return properties;
}
}
// this has to be a public static class, so that JCommander can use it!
public static class RulePriorityConverter implements IStringConverter<RulePriority> {
public int validate(String value) throws ParameterException {
int minPriorityValue = Integer.parseInt(value);
if (minPriorityValue < 1 || minPriorityValue > 5) {
throw new ParameterException(
"Priority values can only be integer value, between 1 and 5," + value + " is not valid");
}
return minPriorityValue;
}
@Override
public RulePriority convert(String value) {
return RulePriority.valueOf(validate(value));
}
}
public static PMDConfiguration transformParametersIntoConfiguration(PMDParameters params) {
if (null == params.getSourceDir() && null == params.getUri() && null == params.getFileListPath()) {
throw new IllegalArgumentException(
"Please provide a parameter for source root directory (-dir or -d), database URI (-uri or -u), or file list path (-filelist).");
}
PMDConfiguration configuration = new PMDConfiguration();
configuration.setInputPaths(params.getSourceDir());
configuration.setInputFilePath(params.getFileListPath());
configuration.setInputUri(params.getUri());
configuration.setReportFormat(params.getFormat());
configuration.setBenchmark(params.isBenchmark());
configuration.setDebug(params.isDebug());
configuration.setMinimumPriority(params.getMinimumPriority());
configuration.setReportFile(params.getReportfile());
configuration.setReportProperties(params.getProperties());
configuration.setReportShortNames(params.isShortnames());
configuration.setRuleSets(params.getRulesets());
configuration.setRuleSetFactoryCompatibilityEnabled(!params.noRuleSetCompatibility);
configuration.setShowSuppressedViolations(params.isShowsuppressed());
configuration.setSourceEncoding(params.getEncoding());
configuration.setStressTest(params.isStress());
configuration.setSuppressMarker(params.getSuppressmarker());
configuration.setThreads(params.getThreads());
configuration.setFailOnViolation(params.isFailOnViolation());
configuration.setAnalysisCacheLocation(params.cacheLocation);
LanguageVersion languageVersion = LanguageRegistry
.findLanguageVersionByTerseName(params.getLanguage() + " " + params.getVersion());
if (languageVersion != null) {
configuration.getLanguageVersionDiscoverer().setDefaultLanguageVersion(languageVersion);
}
try {
configuration.prependClasspath(params.getAuxclasspath());
} catch (IOException e) {
throw new IllegalArgumentException("Invalid auxiliary classpath: " + e.getMessage(), e);
}
return configuration;
}
public boolean isDebug() {
return debug;
}
public boolean isHelp() {
return help;
}
public String getEncoding() {
return encoding;
}
public Integer getThreads() {
return threads;
}
public boolean isBenchmark() {
return benchmark;
}
public boolean isStress() {
return stress;
}
public boolean isShortnames() {
return shortnames;
}
public boolean isShowsuppressed() {
return showsuppressed;
}
public String getSuppressmarker() {
return suppressmarker;
}
public RulePriority getMinimumPriority() {
return minimumPriority;
}
public Properties getProperties() {
Properties result = new Properties();
for (Properties p : properties) {
result.putAll(p);
}
return result;
}
public String getReportfile() {
return reportfile;
}
public String getVersion() {
if (version != null) {
return version;
}
return LanguageRegistry.findLanguageByTerseName(getLanguage()).getDefaultVersion().getVersion();
}
public String getLanguage() {
return language != null ? language : LanguageRegistry.getDefaultLanguage().getTerseName();
}
public String getAuxclasspath() {
return auxclasspath;
}
public String getRulesets() {
return rulesets;
}
public String getSourceDir() {
return sourceDir;
}
public String getFileListPath() {
return fileListPath;
}
public String getFormat() {
return format;
}
public boolean isFailOnViolation() {
return failOnViolation;
}
/**
* @return the uri alternative to source directory.
*/
public String getUri() {
return uri;
}
/**
* @param uri
* the uri specifying the source directory.
*/
public void setUri(String uri) {
this.uri = uri;
}
}
| 1 | 13,427 | I wouldn't expose this until the feature is ready. We are most likely to cut releases while still in development | pmd-pmd | java |
@@ -47,5 +47,6 @@ axe.imports = {
CssSelectorParser: require('css-selector-parser').CssSelectorParser,
doT: require('@deque/dot'),
emojiRegexText: require('emoji-regex'),
- memoize: require('memoizee')
+ memoize: require('memoizee'),
+ ariaQuery: require('aria-query')
}; | 1 | /* global axe */
/**
* Note:
* This file is run via browserify to pull in the required dependencies.
* See - './build/imports-generator'
*/
/**
* Polyfill `Promise`
* Reference: https://www.npmjs.com/package/es6-promise
*/
if (!('Promise' in window)) {
require('es6-promise').polyfill();
}
/**
* Polyfill required TypedArray and functions
* Reference https://github.com/zloirock/core-js/
*/
if (!('Uint32Array' in window)) {
require('core-js/features/typed-array/uint32-array');
}
if (window.Uint32Array) {
if (!('some' in window.Uint32Array.prototype)) {
require('core-js/features/typed-array/some');
}
if (!('reduce' in window.Uint32Array.prototype)) {
require('core-js/features/typed-array/reduce');
}
}
/**
* Polyfill `WeakMap`
* Reference: https://github.com/polygonplanet/weakmap-polyfill
*/
require('weakmap-polyfill');
/**
* Namespace `axe.imports` which holds required external dependencies
*
* @namespace imports
* @memberof axe
*/
axe.imports = {
axios: require('axios'),
CssSelectorParser: require('css-selector-parser').CssSelectorParser,
doT: require('@deque/dot'),
emojiRegexText: require('emoji-regex'),
memoize: require('memoizee')
};
| 1 | 15,304 | I don't think we should pull aria-query into axe-core at this point. There's a much simpler fix for this. Please don't overthink it. | dequelabs-axe-core | js |
@@ -86,18 +86,8 @@ namespace OpenTelemetry
return value;
}
- /// <inheritdoc/>
- public void Dispose()
- {
- if (!this.disposed)
- {
- Slot.Set(this.previousValue);
- this.disposed = true;
- }
- }
-
[MethodImpl(MethodImplOptions.AggressiveInlining)]
- internal static int IncrementIfTriggered()
+ public static int IncrementIfTriggered()
{
var value = Slot.Get();
| 1 | // <copyright file="SuppressInstrumentationScope.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Runtime.CompilerServices;
using OpenTelemetry.Context;
namespace OpenTelemetry
{
public sealed class SuppressInstrumentationScope : IDisposable
{
// An integer value which controls whether instrumentation should be suppressed (disabled).
// * 0: instrumentation is not suppressed
// * [int.MinValue, -1]: instrumentation is always suppressed
// * [1, int.MaxValue]: instrumentation is suppressed in a reference-counting mode
private static readonly RuntimeContextSlot<int> Slot = RuntimeContext.RegisterSlot<int>("otel.suppress_instrumentation");
private readonly int previousValue;
private bool disposed;
internal SuppressInstrumentationScope(bool value = true)
{
this.previousValue = Slot.Get();
Slot.Set(value ? -1 : 0);
}
internal static bool IsSuppressed => Slot.Get() != 0;
/// <summary>
/// Begins a new scope in which instrumentation is suppressed (disabled).
/// </summary>
/// <param name="value">Value indicating whether to suppress instrumentation.</param>
/// <returns>Object to dispose to end the scope.</returns>
/// <remarks>
/// This is typically used to prevent infinite loops created by
/// collection of internal operations, such as exporting traces over HTTP.
/// <code>
/// public override async Task<ExportResult> ExportAsync(
/// IEnumerable<Activity> batch,
/// CancellationToken cancellationToken)
/// {
/// using (SuppressInstrumentationScope.Begin())
/// {
/// // Instrumentation is suppressed (i.e., Sdk.SuppressInstrumentation == true)
/// }
///
/// // Instrumentation is not suppressed (i.e., Sdk.SuppressInstrumentation == false)
/// }
/// </code>
/// </remarks>
public static IDisposable Begin(bool value = true)
{
return new SuppressInstrumentationScope(value);
}
/// <summary>
/// Enters suppression mode.
/// If suppression mode is enabled (slot is a negative integer), do nothing.
/// If suppression mode is not enabled (slot is zero), enter reference-counting suppression mode.
/// If suppression mode is enabled (slot is a positive integer), increment the ref count.
/// </summary>
/// <returns>The updated suppression slot value.</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int Enter()
{
var value = Slot.Get();
if (value >= 0)
{
Slot.Set(++value);
}
return value;
}
/// <inheritdoc/>
public void Dispose()
{
if (!this.disposed)
{
Slot.Set(this.previousValue);
this.disposed = true;
}
}
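        // Bumps the suppression ref count only when reference-counting
        // suppression is already active (slot value > 0); otherwise this is a
        // no-op. Returns the (possibly updated) slot value.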
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static int IncrementIfTriggered()
{
var value = Slot.Get();
if (value > 0)
{
Slot.Set(++value);
}
return value;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static int DecrementIfTriggered()
{
var value = Slot.Get();
if (value > 0)
{
Slot.Set(--value);
}
return value;
}
}
}
| 1 | 19,377 | @ejsmith What if instead of making these two methods public we made it a partial class and moved them to a separate file so that file could be part of the include list? I bring it up just because I don't think it is a very nice API that was ever intended to be public? I think they are only called by a couple of other very internal-y spots. /cc @reyang | open-telemetry-opentelemetry-dotnet | .cs |
@@ -283,9 +283,12 @@ public class ExecuteFlowAction implements TriggerAction {
actions.add(killAct);
}
}
- Trigger slaTrigger =
- new Trigger("azkaban_sla", "azkaban", triggerCond, expireCond,
- actions);
+ Trigger slaTrigger = new Trigger.TriggerBuilder("azkaban_sla",
+ "azkaban",
+ triggerCond,
+ expireCond,
+ actions).build();
+
slaTrigger.getInfo().put("monitored.finished.execution",
String.valueOf(execId));
slaTrigger.setResetOnTrigger(false); | 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger.builtin;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutionOptions;
import azkaban.executor.ExecutorManagerAdapter;
import azkaban.executor.ExecutorManagerException;
import azkaban.flow.Flow;
import azkaban.project.Project;
import azkaban.project.ProjectManager;
import azkaban.sla.SlaOption;
import azkaban.trigger.Condition;
import azkaban.trigger.ConditionChecker;
import azkaban.trigger.Trigger;
import azkaban.trigger.TriggerAction;
import azkaban.trigger.TriggerManager;
public class ExecuteFlowAction implements TriggerAction {
public static final String type = "ExecuteFlowAction";
public static final String EXEC_ID = "ExecuteFlowAction.execid";
private static ExecutorManagerAdapter executorManager;
private static TriggerManager triggerManager;
private String actionId;
private int projectId;
private String projectName;
private String flowName;
private String submitUser;
private static ProjectManager projectManager;
private ExecutionOptions executionOptions = new ExecutionOptions();
private List<SlaOption> slaOptions;
private static Logger logger = Logger.getLogger(ExecuteFlowAction.class);
public ExecuteFlowAction(String actionId, int projectId, String projectName,
String flowName, String submitUser, ExecutionOptions executionOptions,
List<SlaOption> slaOptions) {
this.actionId = actionId;
this.projectId = projectId;
this.projectName = projectName;
this.flowName = flowName;
this.submitUser = submitUser;
this.executionOptions = executionOptions;
this.slaOptions = slaOptions;
}
public static void setLogger(Logger logger) {
ExecuteFlowAction.logger = logger;
}
public String getProjectName() {
return projectName;
}
public int getProjectId() {
return projectId;
}
protected void setProjectId(int projectId) {
this.projectId = projectId;
}
public String getFlowName() {
return flowName;
}
protected void setFlowName(String flowName) {
this.flowName = flowName;
}
public String getSubmitUser() {
return submitUser;
}
protected void setSubmitUser(String submitUser) {
this.submitUser = submitUser;
}
public ExecutionOptions getExecutionOptions() {
return executionOptions;
}
protected void setExecutionOptions(ExecutionOptions executionOptions) {
this.executionOptions = executionOptions;
}
public List<SlaOption> getSlaOptions() {
return slaOptions;
}
protected void setSlaOptions(List<SlaOption> slaOptions) {
this.slaOptions = slaOptions;
}
public static ExecutorManagerAdapter getExecutorManager() {
return executorManager;
}
public static void setExecutorManager(ExecutorManagerAdapter executorManager) {
ExecuteFlowAction.executorManager = executorManager;
}
public static TriggerManager getTriggerManager() {
return triggerManager;
}
public static void setTriggerManager(TriggerManager triggerManager) {
ExecuteFlowAction.triggerManager = triggerManager;
}
public static ProjectManager getProjectManager() {
return projectManager;
}
public static void setProjectManager(ProjectManager projectManager) {
ExecuteFlowAction.projectManager = projectManager;
}
@Override
public String getType() {
return type;
}
@SuppressWarnings("unchecked")
@Override
public TriggerAction fromJson(Object obj) {
return createFromJson((HashMap<String, Object>) obj);
}
@SuppressWarnings("unchecked")
public static TriggerAction createFromJson(HashMap<String, Object> obj) {
Map<String, Object> jsonObj = (HashMap<String, Object>) obj;
String objType = (String) jsonObj.get("type");
if (!objType.equals(type)) {
throw new RuntimeException("Cannot create action of " + type + " from "
+ objType);
}
String actionId = (String) jsonObj.get("actionId");
int projectId = Integer.valueOf((String) jsonObj.get("projectId"));
String projectName = (String) jsonObj.get("projectName");
String flowName = (String) jsonObj.get("flowName");
String submitUser = (String) jsonObj.get("submitUser");
ExecutionOptions executionOptions = null;
if (jsonObj.containsKey("executionOptions")) {
executionOptions =
ExecutionOptions.createFromObject(jsonObj.get("executionOptions"));
}
List<SlaOption> slaOptions = null;
if (jsonObj.containsKey("slaOptions")) {
slaOptions = new ArrayList<SlaOption>();
List<Object> slaOptionsObj = (List<Object>) jsonObj.get("slaOptions");
for (Object slaObj : slaOptionsObj) {
slaOptions.add(SlaOption.fromObject(slaObj));
}
}
return new ExecuteFlowAction(actionId, projectId, projectName, flowName,
submitUser, executionOptions, slaOptions);
}
@Override
public Object toJson() {
Map<String, Object> jsonObj = new HashMap<String, Object>();
jsonObj.put("actionId", actionId);
jsonObj.put("type", type);
jsonObj.put("projectId", String.valueOf(projectId));
jsonObj.put("projectName", projectName);
jsonObj.put("flowName", flowName);
jsonObj.put("submitUser", submitUser);
if (executionOptions != null) {
jsonObj.put("executionOptions", executionOptions.toObject());
}
if (slaOptions != null) {
List<Object> slaOptionsObj = new ArrayList<Object>();
for (SlaOption sla : slaOptions) {
slaOptionsObj.add(sla.toObject());
}
jsonObj.put("slaOptions", slaOptionsObj);
}
return jsonObj;
}
@Override
public void doAction() throws Exception {
if (projectManager == null || executorManager == null) {
throw new Exception("ExecuteFlowAction not properly initialized!");
}
Project project = projectManager.getProject(projectId);
if (project == null) {
logger.error("Project to execute " + projectId + " does not exist!");
throw new RuntimeException("Error finding the project to execute "
+ projectId);
}
Flow flow = project.getFlow(flowName);
if (flow == null) {
logger.error("Flow " + flowName + " cannot be found in project "
+ project.getName());
throw new RuntimeException("Error finding the flow to execute "
+ flowName);
}
ExecutableFlow exflow = new ExecutableFlow(project, flow);
exflow.setSubmitUser(submitUser);
exflow.addAllProxyUsers(project.getProxyUsers());
if (executionOptions == null) {
executionOptions = new ExecutionOptions();
}
if (!executionOptions.isFailureEmailsOverridden()) {
executionOptions.setFailureEmails(flow.getFailureEmails());
}
if (!executionOptions.isSuccessEmailsOverridden()) {
executionOptions.setSuccessEmails(flow.getSuccessEmails());
}
exflow.setExecutionOptions(executionOptions);
try {
logger.info("Invoking flow " + project.getName() + "." + flowName);
executorManager.submitExecutableFlow(exflow, submitUser);
logger.info("Invoked flow " + project.getName() + "." + flowName);
} catch (ExecutorManagerException e) {
throw new RuntimeException(e);
}
// deal with sla
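    // For each SLA option we register a trigger against the new execution:
    // the trigger condition fires when the SLA is violated (slaFailChecker),
    // the expire condition fires when the flow finishes within the SLA
    // (slaPassChecker), and the attached actions send an alert and/or kill
    // the execution, as configured on the SLA option.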
if (slaOptions != null && slaOptions.size() > 0) {
int execId = exflow.getExecutionId();
for (SlaOption sla : slaOptions) {
logger.info("Adding sla trigger " + sla.toString() + " to execution "
+ execId);
SlaChecker slaFailChecker =
new SlaChecker("slaFailChecker", sla, execId);
Map<String, ConditionChecker> slaCheckers =
new HashMap<String, ConditionChecker>();
slaCheckers.put(slaFailChecker.getId(), slaFailChecker);
Condition triggerCond =
new Condition(slaCheckers, slaFailChecker.getId()
+ ".isSlaFailed()");
        // if the whole flow finishes before violating the SLA, just expire
SlaChecker slaPassChecker =
new SlaChecker("slaPassChecker", sla, execId);
Map<String, ConditionChecker> expireCheckers =
new HashMap<String, ConditionChecker>();
expireCheckers.put(slaPassChecker.getId(), slaPassChecker);
Condition expireCond =
new Condition(expireCheckers, slaPassChecker.getId()
+ ".isSlaPassed()");
List<TriggerAction> actions = new ArrayList<TriggerAction>();
List<String> slaActions = sla.getActions();
for (String act : slaActions) {
if (act.equals(SlaOption.ACTION_ALERT)) {
SlaAlertAction slaAlert =
new SlaAlertAction("slaAlert", sla, execId);
actions.add(slaAlert);
} else if (act.equals(SlaOption.ACTION_CANCEL_FLOW)) {
KillExecutionAction killAct =
new KillExecutionAction("killExecution", execId);
actions.add(killAct);
}
}
Trigger slaTrigger =
new Trigger("azkaban_sla", "azkaban", triggerCond, expireCond,
actions);
slaTrigger.getInfo().put("monitored.finished.execution",
String.valueOf(execId));
slaTrigger.setResetOnTrigger(false);
slaTrigger.setResetOnExpire(false);
logger.info("Ready to put in the sla trigger");
triggerManager.insertTrigger(slaTrigger);
logger.info("Sla inserted.");
}
}
}
@Override
public String getDescription() {
return "Execute flow " + getFlowName() + " from project "
+ getProjectName();
}
@Override
public void setContext(Map<String, Object> context) {
}
@Override
public String getId() {
return actionId;
}
}
| 1 | 13,122 | would be good to turn these into constants. | azkaban-azkaban | java |
@@ -158,8 +158,8 @@ func (g *gen) frame() []byte {
return nil
}
var buf bytes.Buffer
- buf.WriteString("// Code generated by gowire. DO NOT EDIT.\n\n")
- buf.WriteString("//go:generate gowire\n")
+ buf.WriteString("// Code generated by wire. DO NOT EDIT.\n\n")
+ buf.WriteString("//go:generate wire\n")
buf.WriteString("//+build !wireinject\n\n")
buf.WriteString("package ")
buf.WriteString(g.prog.Package(g.currPackage).Pkg.Name()) | 1 | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package wire provides compile-time dependency injection logic as a
// Go library.
package wire
import (
"bytes"
"fmt"
"go/ast"
"go/build"
"go/format"
"go/printer"
"go/token"
"go/types"
"path/filepath"
"sort"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/loader"
)
// Generate performs dependency injection for a single package,
// returning the gofmt'd Go source code.
func Generate(bctx *build.Context, wd string, pkg string) ([]byte, []error) {
prog, errs := load(bctx, wd, []string{pkg})
if len(errs) > 0 {
return nil, errs
}
if len(prog.InitialPackages()) != 1 {
// This is more of a violated precondition than anything else.
return nil, []error{fmt.Errorf("load: got %d packages", len(prog.InitialPackages()))}
}
pkgInfo := prog.InitialPackages()[0]
g := newGen(prog, pkgInfo.Pkg.Path())
injectorFiles, errs := generateInjectors(g, pkgInfo)
if len(errs) > 0 {
return nil, errs
}
copyNonInjectorDecls(g, injectorFiles, &pkgInfo.Info)
goSrc := g.frame()
fmtSrc, err := format.Source(goSrc)
if err != nil {
// This is likely a bug from a poorly generated source file.
// Return an error and the unformatted source.
return goSrc, []error{err}
}
return fmtSrc, nil
}
// generateInjectors generates the injectors for a given package.
func generateInjectors(g *gen, pkgInfo *loader.PackageInfo) (injectorFiles []*ast.File, _ []error) {
oc := newObjectCache(g.prog)
injectorFiles = make([]*ast.File, 0, len(pkgInfo.Files))
ec := new(errorCollector)
for _, f := range pkgInfo.Files {
for _, decl := range f.Decls {
fn, ok := decl.(*ast.FuncDecl)
if !ok {
continue
}
buildCall := isInjector(&pkgInfo.Info, fn)
if buildCall == nil {
continue
}
if len(injectorFiles) == 0 || injectorFiles[len(injectorFiles)-1] != f {
// This is the first injector generated for this file.
// Write a file header.
name := filepath.Base(g.prog.Fset.File(f.Pos()).Name())
g.p("// Injectors from %s:\n\n", name)
injectorFiles = append(injectorFiles, f)
}
set, errs := oc.processNewSet(pkgInfo, buildCall)
if len(errs) > 0 {
ec.add(notePositionAll(g.prog.Fset.Position(fn.Pos()), errs)...)
continue
}
sig := pkgInfo.ObjectOf(fn.Name).Type().(*types.Signature)
if errs := g.inject(fn.Pos(), fn.Name.Name, sig, set); len(errs) > 0 {
ec.add(errs...)
continue
}
}
}
if len(ec.errors) > 0 {
return nil, ec.errors
}
return injectorFiles, nil
}
// copyNonInjectorDecls copies any non-injector declarations from the
// given files into the generated output.
func copyNonInjectorDecls(g *gen, files []*ast.File, info *types.Info) {
for _, f := range files {
name := filepath.Base(g.prog.Fset.File(f.Pos()).Name())
first := true
for _, decl := range f.Decls {
switch decl := decl.(type) {
case *ast.FuncDecl:
if isInjector(info, decl) != nil {
continue
}
case *ast.GenDecl:
if decl.Tok == token.IMPORT {
continue
}
default:
continue
}
if first {
g.p("// %s:\n\n", name)
first = false
}
// TODO(light): Add line number at top of each declaration.
g.writeAST(info, decl)
g.p("\n\n")
}
}
}
// gen is the file-wide generator state.
type gen struct {
currPackage string
buf bytes.Buffer
imports map[string]string
values map[ast.Expr]string
prog *loader.Program // for positions and determining package names
}
func newGen(prog *loader.Program, pkg string) *gen {
return &gen{
currPackage: pkg,
imports: make(map[string]string),
values: make(map[ast.Expr]string),
prog: prog,
}
}
// frame bakes the built up source body into an unformatted Go source file.
func (g *gen) frame() []byte {
if g.buf.Len() == 0 {
return nil
}
var buf bytes.Buffer
buf.WriteString("// Code generated by gowire. DO NOT EDIT.\n\n")
buf.WriteString("//go:generate gowire\n")
buf.WriteString("//+build !wireinject\n\n")
buf.WriteString("package ")
buf.WriteString(g.prog.Package(g.currPackage).Pkg.Name())
buf.WriteString("\n\n")
if len(g.imports) > 0 {
buf.WriteString("import (\n")
imps := make([]string, 0, len(g.imports))
for path := range g.imports {
imps = append(imps, path)
}
sort.Strings(imps)
for _, path := range imps {
// TODO(light): Omit the local package identifier if it matches
// the package name.
fmt.Fprintf(&buf, "\t%s %q\n", g.imports[path], path)
}
buf.WriteString(")\n\n")
}
buf.Write(g.buf.Bytes())
return buf.Bytes()
}
// inject emits the code for an injector.
func (g *gen) inject(pos token.Pos, name string, sig *types.Signature, set *ProviderSet) []error {
injectSig, err := funcOutput(sig)
if err != nil {
return []error{notePosition(g.prog.Fset.Position(pos),
fmt.Errorf("inject %s: %v", name, err))}
}
params := sig.Params()
given := make([]types.Type, params.Len())
for i := 0; i < params.Len(); i++ {
given[i] = params.At(i).Type()
}
calls, errs := solve(g.prog.Fset, injectSig.out, given, set)
if len(errs) > 0 {
return mapErrors(errs, func(e error) error {
if w, ok := e.(*wireErr); ok {
return notePosition(w.position, fmt.Errorf("inject %s: %v", name, w.error))
}
return notePosition(g.prog.Fset.Position(pos), fmt.Errorf("inject %s: %v", name, e))
})
}
type pendingVar struct {
name string
expr ast.Expr
typeInfo *types.Info
}
var pendingVars []pendingVar
ec := new(errorCollector)
for i := range calls {
c := &calls[i]
if c.hasCleanup && !injectSig.cleanup {
ts := types.TypeString(c.out, nil)
ec.add(notePosition(
g.prog.Fset.Position(pos),
fmt.Errorf("inject %s: provider for %s returns cleanup but injection does not return cleanup function", name, ts)))
}
if c.hasErr && !injectSig.err {
ts := types.TypeString(c.out, nil)
ec.add(notePosition(
g.prog.Fset.Position(pos),
fmt.Errorf("inject %s: provider for %s returns error but injection not allowed to fail", name, ts)))
}
if c.kind == valueExpr {
if err := accessibleFrom(c.valueTypeInfo, c.valueExpr, g.currPackage); err != nil {
// TODO(light): Display line number of value expression.
ts := types.TypeString(c.out, nil)
ec.add(notePosition(
g.prog.Fset.Position(pos),
fmt.Errorf("inject %s: value %s can't be used: %v", name, ts, err)))
}
if g.values[c.valueExpr] == "" {
t := c.valueTypeInfo.TypeOf(c.valueExpr)
name := disambiguate("_wire"+export(typeVariableName(t))+"Value", g.nameInFileScope)
g.values[c.valueExpr] = name
pendingVars = append(pendingVars, pendingVar{
name: name,
expr: c.valueExpr,
typeInfo: c.valueTypeInfo,
})
}
}
}
if len(ec.errors) > 0 {
return ec.errors
}
// Perform one pass to collect all imports, followed by the real pass.
injectPass(name, params, injectSig, calls, &injectorGen{
g: g,
errVar: disambiguate("err", g.nameInFileScope),
discard: true,
})
injectPass(name, params, injectSig, calls, &injectorGen{
g: g,
errVar: disambiguate("err", g.nameInFileScope),
discard: false,
})
if len(pendingVars) > 0 {
g.p("var (\n")
for _, pv := range pendingVars {
g.p("\t%s = ", pv.name)
g.writeAST(pv.typeInfo, pv.expr)
g.p("\n")
}
g.p(")\n\n")
}
return nil
}
// rewritePkgRefs rewrites any package references in an AST into references for the
// generated package.
func (g *gen) rewritePkgRefs(info *types.Info, node ast.Node) ast.Node {
start, end := node.Pos(), node.End()
node = copyAST(node)
// First, rewrite all package names. This lets us know all the
// potentially colliding identifiers.
node = astutil.Apply(node, func(c *astutil.Cursor) bool {
switch node := c.Node().(type) {
case *ast.Ident:
// This is an unqualified identifier (qualified identifiers are peeled off below).
obj := info.ObjectOf(node)
if obj == nil {
return false
}
if pkg := obj.Pkg(); pkg != nil && obj.Parent() == pkg.Scope() && pkg.Path() != g.currPackage {
// An identifier from either a dot import or read from a different package.
newPkgID := g.qualifyImport(pkg.Path())
c.Replace(&ast.SelectorExpr{
X: ast.NewIdent(newPkgID),
Sel: ast.NewIdent(node.Name),
})
return false
}
return true
case *ast.SelectorExpr:
pkgIdent, ok := node.X.(*ast.Ident)
if !ok {
return true
}
pkgName, ok := info.ObjectOf(pkgIdent).(*types.PkgName)
if !ok {
return true
}
// This is a qualified identifier. Rewrite and avoid visiting subexpressions.
newPkgID := g.qualifyImport(pkgName.Imported().Path())
c.Replace(&ast.SelectorExpr{
X: ast.NewIdent(newPkgID),
Sel: ast.NewIdent(node.Sel.Name),
})
return false
default:
return true
}
}, nil)
// Now that we have all the identifiers, rename any variables declared
// in this scope to not collide.
newNames := make(map[types.Object]string)
inNewNames := func(n string) bool {
for _, other := range newNames {
if other == n {
return true
}
}
return false
}
var scopeStack []*types.Scope
pkgScope := g.prog.Package(g.currPackage).Pkg.Scope()
node = astutil.Apply(node, func(c *astutil.Cursor) bool {
if scope := info.Scopes[c.Node()]; scope != nil {
scopeStack = append(scopeStack, scope)
}
id, ok := c.Node().(*ast.Ident)
if !ok {
return true
}
obj := info.ObjectOf(id)
if obj == nil {
// We rewrote this identifier earlier, so it does not need
// further rewriting.
return true
}
if n, ok := newNames[obj]; ok {
// We picked a new name for this symbol. Rewrite it.
c.Replace(ast.NewIdent(n))
return false
}
if par := obj.Parent(); par == nil || par == pkgScope {
// Don't rename methods, field names, or top-level identifiers.
return true
}
// Rename any symbols defined within rewritePkgRefs's node that conflict
// with any symbols in the generated file.
objName := obj.Name()
if pos := obj.Pos(); pos < start || end <= pos || !(g.nameInFileScope(objName) || inNewNames(objName)) {
return true
}
newName := disambiguate(objName, func(n string) bool {
if g.nameInFileScope(n) || inNewNames(n) {
return true
}
if len(scopeStack) > 0 {
// Avoid picking a name that conflicts with other names in the
// current scope.
_, obj := scopeStack[len(scopeStack)-1].LookupParent(n, 0)
if obj != nil {
return true
}
}
return false
})
newNames[obj] = newName
c.Replace(ast.NewIdent(newName))
return false
}, func(c *astutil.Cursor) bool {
if info.Scopes[c.Node()] != nil {
// Should be top of stack; pop it.
scopeStack = scopeStack[:len(scopeStack)-1]
}
return true
})
return node
}
// writeAST prints an AST node into the generated output, rewriting any
// package references it encounters.
func (g *gen) writeAST(info *types.Info, node ast.Node) {
node = g.rewritePkgRefs(info, node)
if err := printer.Fprint(&g.buf, g.prog.Fset, node); err != nil {
panic(err)
}
}
func (g *gen) qualifiedID(path, sym string) string {
name := g.qualifyImport(path)
if name == "" {
return sym
}
return name + "." + sym
}
func (g *gen) qualifyImport(path string) string {
if path == g.currPackage {
return ""
}
// TODO(light): This is depending on details of the current loader.
const vendorPart = "vendor/"
unvendored := path
if i := strings.LastIndex(path, vendorPart); i != -1 && (i == 0 || path[i-1] == '/') {
unvendored = path[i+len(vendorPart):]
}
if name := g.imports[unvendored]; name != "" {
return name
}
// TODO(light): Use parts of import path to disambiguate.
name := disambiguate(g.prog.Package(path).Pkg.Name(), func(n string) bool {
// Don't let an import take the "err" name. That's annoying.
return n == "err" || g.nameInFileScope(n)
})
g.imports[unvendored] = name
return name
}
func (g *gen) nameInFileScope(name string) bool {
for _, other := range g.imports {
if other == name {
return true
}
}
for _, other := range g.values {
if other == name {
return true
}
}
_, obj := g.prog.Package(g.currPackage).Pkg.Scope().LookupParent(name, 0)
return obj != nil
}
func (g *gen) qualifyPkg(pkg *types.Package) string {
return g.qualifyImport(pkg.Path())
}
func (g *gen) p(format string, args ...interface{}) {
fmt.Fprintf(&g.buf, format, args...)
}
// injectorGen is the per-injector pass generator state.
type injectorGen struct {
g *gen
paramNames []string
localNames []string
cleanupNames []string
errVar string
// discard causes ig.p and ig.writeAST to no-op. Useful to run
// generation for side-effects like filling in g.imports.
discard bool
}
// injectPass generates an injector given the output from analysis.
func injectPass(name string, params *types.Tuple, injectSig outputSignature, calls []call, ig *injectorGen) {
ig.p("func %s(", name)
for i := 0; i < params.Len(); i++ {
if i > 0 {
ig.p(", ")
}
pi := params.At(i)
a := pi.Name()
if a == "" || a == "_" {
a = unexport(typeVariableName(pi.Type()))
if a == "" {
a = "arg"
}
}
ig.paramNames = append(ig.paramNames, disambiguate(a, ig.nameInInjector))
ig.p("%s %s", ig.paramNames[i], types.TypeString(pi.Type(), ig.g.qualifyPkg))
}
outTypeString := types.TypeString(injectSig.out, ig.g.qualifyPkg)
if injectSig.cleanup && injectSig.err {
ig.p(") (%s, func(), error) {\n", outTypeString)
} else if injectSig.cleanup {
ig.p(") (%s, func()) {\n", outTypeString)
} else if injectSig.err {
ig.p(") (%s, error) {\n", outTypeString)
} else {
ig.p(") %s {\n", outTypeString)
}
for i := range calls {
c := &calls[i]
lname := unexport(typeVariableName(c.out))
if lname == "" {
lname = "v"
}
lname = disambiguate(lname, ig.nameInInjector)
ig.localNames = append(ig.localNames, lname)
switch c.kind {
case structProvider:
ig.structProviderCall(lname, c)
case funcProviderCall:
ig.funcProviderCall(lname, c, injectSig)
case valueExpr:
ig.valueExpr(lname, c)
default:
panic("unknown kind")
}
}
if len(calls) == 0 {
for i := 0; i < params.Len(); i++ {
if types.Identical(injectSig.out, params.At(i).Type()) {
ig.p("\treturn %s", ig.paramNames[i])
break
}
}
} else {
ig.p("\treturn %s", ig.localNames[len(calls)-1])
}
if injectSig.cleanup {
ig.p(", func() {\n")
for i := len(ig.cleanupNames) - 1; i >= 0; i-- {
ig.p("\t\t%s()\n", ig.cleanupNames[i])
}
ig.p("\t}")
}
if injectSig.err {
ig.p(", nil")
}
ig.p("\n}\n\n")
}
func (ig *injectorGen) funcProviderCall(lname string, c *call, injectSig outputSignature) {
ig.p("\t%s", lname)
prevCleanup := len(ig.cleanupNames)
if c.hasCleanup {
cname := disambiguate("cleanup", ig.nameInInjector)
ig.cleanupNames = append(ig.cleanupNames, cname)
ig.p(", %s", cname)
}
if c.hasErr {
ig.p(", %s", ig.errVar)
}
ig.p(" := ")
ig.p("%s(", ig.g.qualifiedID(c.importPath, c.name))
for i, a := range c.args {
if i > 0 {
ig.p(", ")
}
if a < len(ig.paramNames) {
ig.p("%s", ig.paramNames[a])
} else {
ig.p("%s", ig.localNames[a-len(ig.paramNames)])
}
}
ig.p(")\n")
if c.hasErr {
ig.p("\tif %s != nil {\n", ig.errVar)
for i := prevCleanup - 1; i >= 0; i-- {
ig.p("\t\t%s()\n", ig.cleanupNames[i])
}
ig.p("\t\treturn %s", zeroValue(injectSig.out, ig.g.qualifyPkg))
if injectSig.cleanup {
ig.p(", nil")
}
// TODO(light): Give information about failing provider.
ig.p(", err\n")
ig.p("\t}\n")
}
}
func (ig *injectorGen) structProviderCall(lname string, c *call) {
ig.p("\t%s", lname)
ig.p(" := ")
if _, ok := c.out.(*types.Pointer); ok {
ig.p("&")
}
ig.p("%s{\n", ig.g.qualifiedID(c.importPath, c.name))
for i, a := range c.args {
ig.p("\t\t%s: ", c.fieldNames[i])
if a < len(ig.paramNames) {
ig.p("%s", ig.paramNames[a])
} else {
ig.p("%s", ig.localNames[a-len(ig.paramNames)])
}
ig.p(",\n")
}
ig.p("\t}\n")
}
func (ig *injectorGen) valueExpr(lname string, c *call) {
ig.p("\t%s := %s\n", lname, ig.g.values[c.valueExpr])
}
// nameInInjector reports whether name collides with any other identifier
// in the current injector.
func (ig *injectorGen) nameInInjector(name string) bool {
if name == ig.errVar {
return true
}
for _, a := range ig.paramNames {
if a == name {
return true
}
}
for _, l := range ig.localNames {
if l == name {
return true
}
}
for _, l := range ig.cleanupNames {
if l == name {
return true
}
}
return ig.g.nameInFileScope(name)
}
func (ig *injectorGen) p(format string, args ...interface{}) {
if ig.discard {
return
}
ig.g.p(format, args...)
}
func (ig *injectorGen) writeAST(info *types.Info, node ast.Node) {
node = ig.g.rewritePkgRefs(info, node)
if ig.discard {
return
}
if err := printer.Fprint(&ig.g.buf, ig.g.prog.Fset, node); err != nil {
panic(err)
}
}
// zeroValue returns the shortest expression that evaluates to the zero
// value for the given type.
func zeroValue(t types.Type, qf types.Qualifier) string {
switch u := t.Underlying().(type) {
case *types.Array, *types.Struct:
return types.TypeString(t, qf) + "{}"
case *types.Basic:
info := u.Info()
switch {
case info&types.IsBoolean != 0:
return "false"
case info&(types.IsInteger|types.IsFloat|types.IsComplex) != 0:
return "0"
case info&types.IsString != 0:
return `""`
default:
panic("unreachable")
}
case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice:
return "nil"
default:
panic("unreachable")
}
}
// typeVariableName invents a variable name derived from the type name
// or returns the empty string if one could not be found. There are no
// guarantees about whether the name is exported or unexported: call
// export() or unexport() to convert.
func typeVariableName(t types.Type) string {
if p, ok := t.(*types.Pointer); ok {
t = p.Elem()
}
switch t := t.(type) {
case *types.Basic:
return t.Name()
case *types.Named:
// TODO(light): Include package name when appropriate.
return t.Obj().Name()
}
return ""
}
// unexport converts a name that is potentially exported to an unexported name.
func unexport(name string) string {
if name == "" {
return ""
}
r, sz := utf8.DecodeRuneInString(name)
if !unicode.IsUpper(r) {
// foo -> foo
return name
}
r2, sz2 := utf8.DecodeRuneInString(name[sz:])
if !unicode.IsUpper(r2) {
// Foo -> foo
return string(unicode.ToLower(r)) + name[sz:]
}
// UPPERWord -> upperWord
sbuf := new(strings.Builder)
sbuf.WriteRune(unicode.ToLower(r))
i := sz
r, sz = r2, sz2
for unicode.IsUpper(r) && sz > 0 {
r2, sz2 := utf8.DecodeRuneInString(name[i+sz:])
if sz2 > 0 && unicode.IsLower(r2) {
break
}
i += sz
sbuf.WriteRune(unicode.ToLower(r))
r, sz = r2, sz2
}
sbuf.WriteString(name[i:])
return sbuf.String()
}
// export converts a name that is potentially unexported to an exported name.
func export(name string) string {
if name == "" {
return ""
}
r, sz := utf8.DecodeRuneInString(name)
if unicode.IsUpper(r) {
// Foo -> Foo
return name
}
// fooBar -> FooBar
sbuf := new(strings.Builder)
sbuf.WriteRune(unicode.ToUpper(r))
sbuf.WriteString(name[sz:])
return sbuf.String()
}
// disambiguate picks a unique name, preferring name if it is already unique.
func disambiguate(name string, collides func(string) bool) string {
if !collides(name) {
return name
}
buf := []byte(name)
if len(buf) > 0 && buf[len(buf)-1] >= '0' && buf[len(buf)-1] <= '9' {
buf = append(buf, '_')
}
base := len(buf)
for n := 2; ; n++ {
buf = strconv.AppendInt(buf[:base], int64(n), 10)
sbuf := string(buf)
if !collides(sbuf) {
return sbuf
}
}
}
// accessibleFrom reports whether node can be copied to wantPkg without
// violating Go visibility rules.
func accessibleFrom(info *types.Info, node ast.Node, wantPkg string) error {
var unexportError error
ast.Inspect(node, func(node ast.Node) bool {
if unexportError != nil {
return false
}
ident, ok := node.(*ast.Ident)
if !ok {
return true
}
obj := info.ObjectOf(ident)
if _, ok := obj.(*types.PkgName); ok {
// Local package names are fine, since we can just reimport them.
return true
}
if pkg := obj.Pkg(); pkg != nil && !ast.IsExported(ident.Name) && pkg.Path() != wantPkg {
unexportError = fmt.Errorf("uses unexported identifier %s", obj.Name())
return false
}
return true
})
return unexportError
}
var (
errorType = types.Universe.Lookup("error").Type()
cleanupType = types.NewSignature(nil, nil, nil, false)
)
| 1 | 10,503 | Seems like it should be `generated by Wire` here and in the SUT? | google-go-cloud | go |
@@ -173,7 +173,7 @@ func TestStorageProtocolBasic(t *testing.T) {
}
}
- ref, err := c.ProposeDeal(ctx, mineraddr, protonode.Cid(), 1, 150)
+ ref, err := c.ProposeDeal(ctx, mineraddr, protonode.Cid(), 1, 150, false)
assert.NoError(err)
requireQueryDeal := func() (DealState, string) {
resp, err := c.QueryDeal(ctx, ref.ProposalCid) | 1 | package storage_test
import (
"context"
"sync"
"testing"
"time"
unixfs "gx/ipfs/QmQXze9tG878pa4Euya4rrDpyTNX3kQe4dhCaBzBozGgpe/go-unixfs"
"gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
cbor "gx/ipfs/QmRoARq3nkUb13HSKZGepCZSWe5GrVPwx7xURJGZ7KWv9V/go-ipld-cbor"
dag "gx/ipfs/QmTQdH4848iTVCJmKXYyRiK72HufWTLYQQ8iN3JaQ8K1Hq/go-merkledag"
mactor "github.com/filecoin-project/go-filecoin/actor/builtin/miner"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/api/impl"
"github.com/filecoin-project/go-filecoin/node"
"github.com/filecoin-project/go-filecoin/plumbing"
"github.com/filecoin-project/go-filecoin/plumbing/cfg"
"github.com/filecoin-project/go-filecoin/plumbing/msg"
"github.com/filecoin-project/go-filecoin/plumbing/mthdsig"
"github.com/filecoin-project/go-filecoin/proofs"
. "github.com/filecoin-project/go-filecoin/protocol/storage"
th "github.com/filecoin-project/go-filecoin/testhelpers"
"github.com/filecoin-project/go-filecoin/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSerializeProposal(t *testing.T) {
t.Parallel()
p := &DealProposal{}
p.Size = types.NewBytesAmount(5)
v, _ := cid.Decode("QmcrriCMhjb5ZWzmPNxmP53px47tSPcXBNaMtLdgcKFJYk")
p.PieceRef = v
_, err := cbor.DumpObject(p)
if err != nil {
t.Fatal(err)
}
}
// TODO: we need to really rethink how this sort of testing can be done
// more cleanly. The gengen stuff helps, but it's still difficult to make actor
// method invocations.
func TestStorageProtocolBasic(t *testing.T) {
t.Parallel()
assert := assert.New(t)
require := require.New(t)
ctx := context.Background()
seed := node.MakeChainSeed(t, node.TestGenCfg)
// make two nodes, one of which is the miner (and gets the miner peer key),
// and set up their syncers with fake verifiers that always mark a proof as
// valid.
configOpts := []node.ConfigOpt{node.VerifierConfigOption(proofs.NewFakeVerifier(true, nil))}
initOpts := []node.InitOpt{
node.AutoSealIntervalSecondsOpt(1),
node.PeerKeyOpt(node.PeerKeys[0]),
}
tno := node.TestNodeOptions{
Seed: seed,
InitOpts: initOpts,
ConfigOpts: configOpts,
OfflineMode: false,
}
minerNode := node.GenNode(t, &tno)
minerAPI := impl.New(minerNode)
clientVerifier := proofs.NewFakeVerifier(true, nil)
clientNode := node.MakeNodeWithChainSeed(t, seed, []node.ConfigOpt{node.VerifierConfigOption(clientVerifier)})
// TODO we need a principled way to construct an API that can be used both by node and by
// tests. It should enable selective replacement of dependencies.
sigGetter := mthdsig.NewGetter(minerNode.ChainReader)
msgSender := msg.NewSender(minerNode.Repo, minerNode.Wallet, minerNode.ChainReader, minerNode.MsgPool, minerNode.PubSub.Publish)
msgWaiter := msg.NewWaiter(minerNode.ChainReader, minerNode.Blockstore, minerNode.CborStore())
config := cfg.NewConfig(minerNode.Repo)
plumbingAPI := plumbing.New(sigGetter, msgSender, msgWaiter, config)
// Give the miner node the right private key, and set them up with
// the miner actor
seed.GiveKey(t, minerNode, 0)
mineraddr, minerOwnerAddr := seed.GiveMiner(t, minerNode, 0)
seed.GiveKey(t, clientNode, 1)
cni := NewClientNodeImpl(
dag.NewDAGService(clientNode.BlockService()),
clientNode.Host(),
clientNode.Lookup(),
func(_ context.Context, _ address.Address, _ string, _ []byte, _ *address.Address) ([][]byte, uint8, error) {
// This is only used for getting the price of an ask.
a := &mactor.Ask{
Price: types.NewAttoFILFromFIL(50),
}
enc, err := cbor.DumpObject(a)
if err != nil {
return nil, 0, err
}
return [][]byte{enc}, 0, nil
},
)
c, err := NewClient(cni, clientNode.Repo.ClientDealsDatastore())
require.NoError(err)
_, err = NewMiner(ctx, mineraddr, minerOwnerAddr, minerNode, minerNode.Repo.MinerDealsDatastore(), minerNode.Repo.DealsAwaitingSealDatastore(), plumbingAPI)
assert.NoError(err)
assert.NoError(minerNode.Start(ctx))
assert.NoError(clientNode.Start(ctx))
node.ConnectNodes(t, minerNode, clientNode)
err = minerAPI.Mining().Start(ctx)
assert.NoError(err)
defer minerAPI.Mining().Stop(ctx)
sectorSize, err := minerNode.SectorBuilder().GetMaxUserBytesPerStagedSector()
require.NoError(err)
data := unixfs.NewFSNode(unixfs.TFile)
bytes := make([]byte, sectorSize)
for i := 0; uint64(i) < sectorSize; i++ {
bytes[i] = byte(i)
}
data.SetData(bytes)
raw, err := data.GetBytes()
assert.NoError(err)
protonode := dag.NodeWithData(raw)
assert.NoError(clientNode.BlockService().AddBlock(protonode))
var foundCommit bool
var foundPoSt bool
var wg sync.WaitGroup
wg.Add(2)
old := minerNode.AddNewlyMinedBlock
var bCount, mCount int
minerNode.AddNewlyMinedBlock = func(ctx context.Context, blk *types.Block) {
bCount++
mCount += len(blk.Messages)
old(ctx, blk)
if !foundCommit {
for i, msg := range blk.Messages {
if msg.Message.Method == "commitSector" {
assert.False(foundCommit, "multiple commitSector submissions must not happen")
assert.Equal(uint8(0), blk.MessageReceipts[i].ExitCode, "seal submission failed")
foundCommit = true
wg.Done()
break
}
}
}
if !foundPoSt {
for i, msg := range blk.Messages {
if msg.Message.Method == "submitPoSt" {
assert.False(foundPoSt, "multiple post submissions must not happen")
assert.Equal(uint8(0), blk.MessageReceipts[i].ExitCode, "post submission failed")
foundPoSt = true
wg.Done()
break
}
}
}
}
ref, err := c.ProposeDeal(ctx, mineraddr, protonode.Cid(), 1, 150)
assert.NoError(err)
requireQueryDeal := func() (DealState, string) {
resp, err := c.QueryDeal(ctx, ref.ProposalCid)
require.NoError(err)
return resp.State, resp.Message
}
time.Sleep(time.Millisecond * 100) // Bad dignifiedquire, bad!
var done bool
for i := 0; i < 5; i++ {
state, message := requireQueryDeal()
assert.NotEqual(Failed, state, message)
if state == Staged {
done = true
break
}
time.Sleep(time.Millisecond * 500)
}
require.True(done)
if th.WaitTimeout(&wg, 120*time.Second) {
state, message := requireQueryDeal()
require.NotEqual(Failed, state, message)
require.Failf("TestStorageProtocolBasic failed", "waiting for submission timed out. Saw %d blocks with %d messages while waiting", bCount, mCount)
}
require.True(foundCommit, "no commitSector on chain")
require.True(foundPoSt, "no submitPoSt on chain")
// Now all things should be ready
done = false
for i := 0; i < 10; i++ {
resp, err := c.QueryDeal(ctx, ref.ProposalCid)
assert.NoError(err)
assert.NotEqual(Failed, resp.State, resp.Message)
if resp.State == Posted {
done = true
assert.True(resp.ProofInfo.SectorID > 0)
break
}
time.Sleep(time.Millisecond * 500)
}
assert.True(done, "failed to finish transfer")
}
| 1 | 15,953 | I know the main cases show up in the daemon test already but a little bit more unit test coverage would be nice and probably not too annoying to set up. Ideas: 1. Verify that the string "no duplicates" is in the LastDuplicate field when we propose a new deal. 2. Verify that proposing a duplicate deal fails when the duplicates boolean is set false. 3. Start out in a state with more than 1 duplicates. Verify that the LastDuplicate field is set to the last duplicate when we propose with a true duplicates bool. | filecoin-project-venus | go |
@@ -74,7 +74,7 @@ namespace OpenTelemetry
{
if (timeoutMilliseconds < 0 && timeoutMilliseconds != Timeout.Infinite)
{
- throw new ArgumentOutOfRangeException(nameof(timeoutMilliseconds));
+ throw new ArgumentOutOfRangeException(nameof(timeoutMilliseconds), timeoutMilliseconds, $"timeout milliseconds should not be below 0 or infinite, but was: {timeoutMilliseconds}");
}
if (Interlocked.Increment(ref this.shutdownCount) > 1) | 1 | // <copyright file="BaseExporter.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Threading;
using OpenTelemetry.Internal;
namespace OpenTelemetry
{
/// <summary>
/// Enumeration used to define the result of an export operation.
/// </summary>
public enum ExportResult
{
/// <summary>
/// Export succeeded.
/// </summary>
Success = 0,
/// <summary>
/// Export failed.
/// </summary>
Failure = 1,
}
/// <summary>
/// Exporter base class.
/// </summary>
/// <typeparam name="T">The type of object to be exported.</typeparam>
public abstract class BaseExporter<T> : IDisposable
where T : class
{
private int shutdownCount;
/// <summary>
/// Exports a batch of telemetry objects.
/// </summary>
/// <param name="batch">Batch of telemetry objects to export.</param>
/// <returns>Result of the export operation.</returns>
public abstract ExportResult Export(in Batch<T> batch);
/// <summary>
/// Attempts to shutdown the exporter, blocks the current thread until
/// shutdown completed or timed out.
/// </summary>
/// <param name="timeoutMilliseconds">
/// The number of milliseconds to wait, or <c>Timeout.Infinite</c> to
/// wait indefinitely.
/// </param>
/// <returns>
/// Returns <c>true</c> when shutdown succeeded; otherwise, <c>false</c>.
/// </returns>
/// <exception cref="System.ArgumentOutOfRangeException">
/// Thrown when the <c>timeoutMilliseconds</c> is smaller than -1.
/// </exception>
/// <remarks>
/// This function guarantees thread-safety. Only the first call will
/// win, subsequent calls will be no-op.
/// </remarks>
public bool Shutdown(int timeoutMilliseconds = Timeout.Infinite)
{
if (timeoutMilliseconds < 0 && timeoutMilliseconds != Timeout.Infinite)
{
throw new ArgumentOutOfRangeException(nameof(timeoutMilliseconds));
}
if (Interlocked.Increment(ref this.shutdownCount) > 1)
{
return false; // shutdown already called
}
try
{
return this.OnShutdown(timeoutMilliseconds);
}
catch (Exception ex)
{
OpenTelemetrySdkEventSource.Log.SpanProcessorException(nameof(this.Shutdown), ex);
return false;
}
}
/// <inheritdoc/>
public void Dispose()
{
this.Dispose(true);
GC.SuppressFinalize(this);
}
/// <summary>
/// Called by <c>Shutdown</c>. This function should block the current
/// thread until shutdown completed or timed out.
/// </summary>
/// <param name="timeoutMilliseconds">
/// The number of milliseconds to wait, or <c>Timeout.Infinite</c> to
/// wait indefinitely.
/// </param>
/// <returns>
/// Returns <c>true</c> when shutdown succeeded; otherwise, <c>false</c>.
/// </returns>
/// <remarks>
/// This function is called synchronously on the thread which made the
/// first call to <c>Shutdown</c>. This function should not throw
/// exceptions.
/// </remarks>
protected virtual bool OnShutdown(int timeoutMilliseconds)
{
return true;
}
/// <summary>
/// Releases the unmanaged resources used by this class and optionally
/// releases the managed resources.
/// </summary>
/// <param name="disposing">
/// <see langword="true"/> to release both managed and unmanaged resources;
/// <see langword="false"/> to release only unmanaged resources.
/// </param>
protected virtual void Dispose(bool disposing)
{
}
}
}
| 1 | 17,498 | `Timeout.Infinite` is actually `-1`. Here the correct message can be taken from Line:67. | open-telemetry-opentelemetry-dotnet | .cs |
@@ -38,6 +38,7 @@ using System.Linq;
using Xunit;
using NLog.Common;
using System.Text;
+using Xunit.Extensions;
namespace NLog.UnitTests.Common
{ | 1 | //
// Copyright (c) 2004-2011 Jaroslaw Kowalski <[email protected]>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
using System;
using System.Globalization;
using System.IO;
using System.Linq;
using Xunit;
using NLog.Common;
using System.Text;
namespace NLog.UnitTests.Common
{
public class InternalLoggerTests : NLogTestBase
{
/// <summary>
/// Test the return values of all Is[Level]Enabled() methods.
/// </summary>
[Fact]
public void IsEnabledTests()
{
// Setup LogLevel to minimum named level.
InternalLogger.LogLevel = LogLevel.Trace;
Assert.True(InternalLogger.IsTraceEnabled);
Assert.True(InternalLogger.IsDebugEnabled);
Assert.True(InternalLogger.IsInfoEnabled);
Assert.True(InternalLogger.IsWarnEnabled);
Assert.True(InternalLogger.IsErrorEnabled);
Assert.True(InternalLogger.IsFatalEnabled);
// Setup LogLevel to maximum named level.
InternalLogger.LogLevel = LogLevel.Fatal;
Assert.False(InternalLogger.IsTraceEnabled);
Assert.False(InternalLogger.IsDebugEnabled);
Assert.False(InternalLogger.IsInfoEnabled);
Assert.False(InternalLogger.IsWarnEnabled);
Assert.False(InternalLogger.IsErrorEnabled);
Assert.True(InternalLogger.IsFatalEnabled);
// Switch off the internal logging.
InternalLogger.LogLevel = LogLevel.Off;
Assert.False(InternalLogger.IsTraceEnabled);
Assert.False(InternalLogger.IsDebugEnabled);
Assert.False(InternalLogger.IsInfoEnabled);
Assert.False(InternalLogger.IsWarnEnabled);
Assert.False(InternalLogger.IsErrorEnabled);
Assert.False(InternalLogger.IsFatalEnabled);
}
[Fact]
public void WriteToStringWriterTests()
{
// Expected result is the same for both types of method invocation.
const string expected = "Warn WWW\nError EEE\nFatal FFF\nTrace TTT\nDebug DDD\nInfo III\n";
InternalLogger.LogLevel = LogLevel.Trace;
InternalLogger.IncludeTimestamp = false;
StringWriter writer1 = new StringWriter()
{
NewLine = "\n"
};
InternalLogger.LogWriter = writer1;
// Named (based on LogLevel) public methods.
InternalLogger.Warn("WWW");
InternalLogger.Error("EEE");
InternalLogger.Fatal("FFF");
InternalLogger.Trace("TTT");
InternalLogger.Debug("DDD");
InternalLogger.Info("III");
Assert.True(writer1.ToString() == expected);
//
// Reconfigure the LogWriter.
StringWriter writer2 = new StringWriter()
{
NewLine = "\n"
};
InternalLogger.LogWriter = writer2;
// Invoke Log(LogLevel, string) for every log level.
InternalLogger.Log(LogLevel.Warn, "WWW");
InternalLogger.Log(LogLevel.Error, "EEE");
InternalLogger.Log(LogLevel.Fatal, "FFF");
InternalLogger.Log(LogLevel.Trace, "TTT");
InternalLogger.Log(LogLevel.Debug, "DDD");
InternalLogger.Log(LogLevel.Info, "III");
Assert.True(writer2.ToString() == expected);
}
[Fact]
public void WriteToStringWriterWithArgsTests()
{
// Expected result is the same for both types of method invocation.
const string expected = "Warn WWW 0\nError EEE 0, 1\nFatal FFF 0, 1, 2\nTrace TTT 0, 1, 2\nDebug DDD 0, 1\nInfo III 0\n";
InternalLogger.LogLevel = LogLevel.Trace;
InternalLogger.IncludeTimestamp = false;
StringWriter writer1 = new StringWriter()
{
NewLine = "\n"
};
InternalLogger.LogWriter = writer1;
// Named (based on LogLevel) public methods.
InternalLogger.Warn("WWW {0}", 0);
InternalLogger.Error("EEE {0}, {1}", 0, 1);
InternalLogger.Fatal("FFF {0}, {1}, {2}", 0, 1, 2);
InternalLogger.Trace("TTT {0}, {1}, {2}", 0, 1, 2);
InternalLogger.Debug("DDD {0}, {1}", 0, 1);
InternalLogger.Info("III {0}", 0);
Assert.True(writer1.ToString() == expected);
//
// Reconfigure the LogWriter.
StringWriter writer2 = new StringWriter()
{
NewLine = "\n"
};
InternalLogger.LogWriter = writer2;
// Invoke Log(LogLevel, string) for every log level.
InternalLogger.Log(LogLevel.Warn, "WWW {0}", 0);
InternalLogger.Log(LogLevel.Error, "EEE {0}, {1}", 0, 1);
InternalLogger.Log(LogLevel.Fatal, "FFF {0}, {1}, {2}", 0, 1, 2);
InternalLogger.Log(LogLevel.Trace, "TTT {0}, {1}, {2}", 0, 1, 2);
InternalLogger.Log(LogLevel.Debug, "DDD {0}, {1}", 0, 1);
InternalLogger.Log(LogLevel.Info, "III {0}", 0);
Assert.True(writer2.ToString() == expected);
}
#if !SILVERLIGHT
[Fact]
public void WriteToConsoleOutTests()
{
// Expected result is the same for both types of method invocation.
const string expected = "Warn WWW\nError EEE\nFatal FFF\nTrace TTT\nDebug DDD\nInfo III\n";
InternalLogger.LogLevel = LogLevel.Trace;
InternalLogger.IncludeTimestamp = false;
InternalLogger.LogToConsole = true;
StringWriter consoleOutWriter1 = new StringWriter()
{
NewLine = "\n"
};
// Redirect the console output to a StringWriter.
Console.SetOut(consoleOutWriter1);
// Named (based on LogLevel) public methods.
InternalLogger.Warn("WWW");
InternalLogger.Error("EEE");
InternalLogger.Fatal("FFF");
InternalLogger.Trace("TTT");
InternalLogger.Debug("DDD");
InternalLogger.Info("III");
Assert.True(consoleOutWriter1.ToString() == expected);
//
// Redirect the console output to another StringWriter.
StringWriter consoleOutWriter2 = new StringWriter()
{
NewLine = "\n"
};
Console.SetOut(consoleOutWriter2);
// Invoke Log(LogLevel, string) for every log level.
InternalLogger.Log(LogLevel.Warn, "WWW");
InternalLogger.Log(LogLevel.Error, "EEE");
InternalLogger.Log(LogLevel.Fatal, "FFF");
InternalLogger.Log(LogLevel.Trace, "TTT");
InternalLogger.Log(LogLevel.Debug, "DDD");
InternalLogger.Log(LogLevel.Info, "III");
Assert.True(consoleOutWriter2.ToString() == expected);
}
[Fact]
public void WriteToConsoleErrorTests()
{
// Expected result is the same for both types of method invocation.
const string expected = "Warn WWW\nError EEE\nFatal FFF\nTrace TTT\nDebug DDD\nInfo III\n";
InternalLogger.LogLevel = LogLevel.Trace;
InternalLogger.IncludeTimestamp = false;
InternalLogger.LogToConsoleError = true;
StringWriter consoleWriter1 = new StringWriter()
{
NewLine = "\n"
};
// Redirect the console output to a StringWriter.
Console.SetError(consoleWriter1);
// Named (based on LogLevel) public methods.
InternalLogger.Warn("WWW");
InternalLogger.Error("EEE");
InternalLogger.Fatal("FFF");
InternalLogger.Trace("TTT");
InternalLogger.Debug("DDD");
InternalLogger.Info("III");
Assert.True(consoleWriter1.ToString() == expected);
//
// Redirect the console output to another StringWriter.
StringWriter consoleWriter2 = new StringWriter()
{
NewLine = "\n"
};
Console.SetError(consoleWriter2);
// Invoke Log(LogLevel, string) for every log level.
InternalLogger.Log(LogLevel.Warn, "WWW");
InternalLogger.Log(LogLevel.Error, "EEE");
InternalLogger.Log(LogLevel.Fatal, "FFF");
InternalLogger.Log(LogLevel.Trace, "TTT");
InternalLogger.Log(LogLevel.Debug, "DDD");
InternalLogger.Log(LogLevel.Info, "III");
Assert.True(consoleWriter2.ToString() == expected);
}
[Fact]
public void WriteToFileTests()
{
string expected =
"Warn WWW" + Environment.NewLine +
"Error EEE" + Environment.NewLine +
"Fatal FFF" + Environment.NewLine +
"Trace TTT" + Environment.NewLine +
"Debug DDD" + Environment.NewLine +
"Info III" + Environment.NewLine;
var tempFile = Path.GetTempFileName();
InternalLogger.LogLevel = LogLevel.Trace;
InternalLogger.IncludeTimestamp = false;
InternalLogger.LogFile = tempFile;
try
{
// Invoke Log(LogLevel, string) for every log level.
InternalLogger.Log(LogLevel.Warn, "WWW");
InternalLogger.Log(LogLevel.Error, "EEE");
InternalLogger.Log(LogLevel.Fatal, "FFF");
InternalLogger.Log(LogLevel.Trace, "TTT");
InternalLogger.Log(LogLevel.Debug, "DDD");
InternalLogger.Log(LogLevel.Info, "III");
AssertFileContents(tempFile, expected, Encoding.UTF8);
}
finally
{
if (File.Exists(tempFile))
{
File.Delete(tempFile);
}
}
}
[Fact]
public void TimestampTests()
{
InternalLogger.LogLevel = LogLevel.Trace;
InternalLogger.IncludeTimestamp = true;
InternalLogger.LogToConsole = true;
StringWriter consoleOutWriter = new StringWriter()
{
NewLine = "\n"
};
// Redirect the console output to a StringWriter.
Console.SetOut(consoleOutWriter);
// Named (based on LogLevel) public methods.
InternalLogger.Warn("WWW");
InternalLogger.Error("EEE");
InternalLogger.Fatal("FFF");
InternalLogger.Trace("TTT");
InternalLogger.Debug("DDD");
InternalLogger.Info("III");
string expectedDateTime = DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss", CultureInfo.InvariantCulture);
var strings = consoleOutWriter.ToString().Split(new[] { '\n' }, StringSplitOptions.RemoveEmptyEntries);
foreach (var str in strings)
{
Assert.Contains(expectedDateTime + ".", str);
}
}
/// <summary>
/// Test exception overloads
/// </summary>
[Fact]
public void ExceptionTests()
{
using (new InternalLoggerScope())
{
InternalLogger.LogLevel = LogLevel.Trace;
InternalLogger.LogToConsole = true;
InternalLogger.IncludeTimestamp = false;
var ex1 = new Exception("e1");
var ex2 = new Exception("e2", new Exception("inner"));
var ex3 = new NLogConfigurationException("config error");
var ex4 = new NLogConfigurationException("config error", ex2);
var ex5 = new PathTooLongException();
ex5.Data["key1"] = "value1";
Exception ex6 = null;
const string prefix = " Exception: ";
string expected =
"Warn WWW" + prefix + ex1 + Environment.NewLine +
"Error EEE" + prefix + ex2 + Environment.NewLine +
"Fatal FFF" + prefix + ex3 + Environment.NewLine +
"Trace TTT" + prefix + ex4 + Environment.NewLine +
"Debug DDD" + prefix + ex5 + Environment.NewLine +
"Info III" + Environment.NewLine;
StringWriter consoleOutWriter = new StringWriter()
{
NewLine = Environment.NewLine
};
// Redirect the console output to a StringWriter.
Console.SetOut(consoleOutWriter);
// Named (based on LogLevel) public methods.
InternalLogger.Warn(ex1, "WWW");
InternalLogger.Error(ex2, "EEE");
InternalLogger.Fatal(ex3, "FFF");
InternalLogger.Trace(ex4, "TTT");
InternalLogger.Debug(ex5, "DDD");
InternalLogger.Info(ex6, "III");
consoleOutWriter.Flush();
var strings = consoleOutWriter.ToString();
Assert.Equal(expected, strings);
}
}
[Fact]
public void CreateDirectoriesIfNeededTests()
{
string expected =
"Warn WWW" + Environment.NewLine +
"Error EEE" + Environment.NewLine +
"Fatal FFF" + Environment.NewLine +
"Trace TTT" + Environment.NewLine +
"Debug DDD" + Environment.NewLine +
"Info III" + Environment.NewLine;
var tempPath = Path.GetTempPath();
var tempFileName = Path.GetRandomFileName();
var randomSubDirectory = Path.Combine(tempPath, Path.GetRandomFileName());
string tempFile = Path.Combine(randomSubDirectory, tempFileName);
InternalLogger.LogLevel = LogLevel.Trace;
InternalLogger.IncludeTimestamp = false;
Assert.False(Directory.Exists(randomSubDirectory));
// Set the log file, which will only create the needed directories
InternalLogger.LogFile = tempFile;
Assert.True(Directory.Exists(randomSubDirectory));
try
{
Assert.False(File.Exists(tempFile));
// Invoke Log(LogLevel, string) for every log level.
InternalLogger.Log(LogLevel.Warn, "WWW");
InternalLogger.Log(LogLevel.Error, "EEE");
InternalLogger.Log(LogLevel.Fatal, "FFF");
InternalLogger.Log(LogLevel.Trace, "TTT");
InternalLogger.Log(LogLevel.Debug, "DDD");
InternalLogger.Log(LogLevel.Info, "III");
AssertFileContents(tempFile, expected, Encoding.UTF8);
Assert.True(File.Exists(tempFile));
}
finally
{
if (File.Exists(tempFile))
{
File.Delete(tempFile);
}
if (Directory.Exists(randomSubDirectory))
{
Directory.Delete(randomSubDirectory);
}
}
}
[Fact]
public void CreateFileInCurrentDirectoryTests()
{
string expected =
"Warn WWW" + Environment.NewLine +
"Error EEE" + Environment.NewLine +
"Fatal FFF" + Environment.NewLine +
"Trace TTT" + Environment.NewLine +
"Debug DDD" + Environment.NewLine +
"Info III" + Environment.NewLine;
// Store off the previous log file
string previousLogFile = InternalLogger.LogFile;
var tempFileName = Path.GetRandomFileName();
InternalLogger.LogLevel = LogLevel.Trace;
InternalLogger.IncludeTimestamp = false;
Assert.False(File.Exists(tempFileName));
// Set the log file, which only has a filename
InternalLogger.LogFile = tempFileName;
try
{
Assert.False(File.Exists(tempFileName));
// Invoke Log(LogLevel, string) for every log level.
InternalLogger.Log(LogLevel.Warn, "WWW");
InternalLogger.Log(LogLevel.Error, "EEE");
InternalLogger.Log(LogLevel.Fatal, "FFF");
InternalLogger.Log(LogLevel.Trace, "TTT");
InternalLogger.Log(LogLevel.Debug, "DDD");
InternalLogger.Log(LogLevel.Info, "III");
AssertFileContents(tempFileName, expected, Encoding.UTF8);
Assert.True(File.Exists(tempFileName));
}
finally
{
if (File.Exists(tempFileName))
{
File.Delete(tempFileName);
}
}
}
#endif
}
}
| 1 | 12,929 | I think we need `#if !SILVERLIGHT` here and at the test. | NLog-NLog | .cs |
@@ -26,6 +26,7 @@ import javafx.collections.transformation.SortedList;
import javafx.event.EventHandler;
import javafx.scene.input.MouseEvent;
import me.xdrop.fuzzywuzzy.FuzzySearch;
+import org.eclipse.jgit.util.StringUtils;
import org.phoenicis.javafx.settings.JavaFxSettingsManager;
import org.phoenicis.javafx.views.common.ExpandedList;
import org.phoenicis.javafx.views.common.ThemeManager; | 1 | /*
* Copyright (C) 2015-2017 PÂRIS Quentin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.phoenicis.javafx.views.mainwindow.apps;
import javafx.application.Platform;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.collections.transformation.FilteredList;
import javafx.collections.transformation.SortedList;
import javafx.event.EventHandler;
import javafx.scene.input.MouseEvent;
import me.xdrop.fuzzywuzzy.FuzzySearch;
import org.phoenicis.javafx.settings.JavaFxSettingsManager;
import org.phoenicis.javafx.views.common.ExpandedList;
import org.phoenicis.javafx.views.common.ThemeManager;
import org.phoenicis.javafx.views.common.widgets.lists.CombinedListWidget;
import org.phoenicis.javafx.views.common.widgets.lists.ListWidgetEntry;
import org.phoenicis.javafx.views.mainwindow.MainWindowView;
import org.phoenicis.repository.dto.ApplicationDTO;
import org.phoenicis.repository.dto.CategoryDTO;
import org.phoenicis.repository.dto.ScriptDTO;
import org.phoenicis.tools.ToolsConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Comparator;
import java.util.List;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import static org.phoenicis.configuration.localisation.Localisation.tr;
public class ViewApps extends MainWindowView<ApplicationSidebar> {
private final Logger LOGGER = LoggerFactory.getLogger(ViewApps.class);
private final CombinedListWidget<ApplicationDTO> availableApps;
private final ApplicationFilter filter;
private Consumer<ScriptDTO> onSelectScript;
private ObservableList<CategoryDTO> categories;
private FilteredList<CategoryDTO> installableCategories;
private SortedList<CategoryDTO> sortedCategories;
private ObservableList<ApplicationDTO> applications;
private FilteredList<ApplicationDTO> filteredApplications;
private SortedList<ApplicationDTO> sortedApplications;
public ViewApps(ThemeManager themeManager, JavaFxSettingsManager javaFxSettingsManager,
ToolsConfiguration toolsConfiguration) {
super(tr("Apps"), themeManager);
this.availableApps = new CombinedListWidget<ApplicationDTO>(ListWidgetEntry::create,
(element, event) -> showAppDetails(element, javaFxSettingsManager));
this.filter = new ApplicationFilter(toolsConfiguration.operatingSystemFetcher(),
(filterText, application) -> FuzzySearch.partialRatio(application.getName().toLowerCase(),
filterText) > javaFxSettingsManager.getFuzzySearchRatio());
// initialising the category lists
this.categories = FXCollections.observableArrayList();
this.installableCategories = this.categories
.filtered(category -> category.getType() == CategoryDTO.CategoryType.INSTALLERS);
this.sortedCategories = this.installableCategories.sorted(Comparator.comparing(CategoryDTO::getName));
// initialising the application lists
this.applications = new ExpandedList<ApplicationDTO, CategoryDTO>(this.installableCategories,
CategoryDTO::getApplications);
this.filteredApplications = new FilteredList<ApplicationDTO>(this.applications);
this.filteredApplications.predicateProperty().bind(filter.applicationFilterProperty());
this.sortedApplications = this.filteredApplications.sorted(Comparator.comparing(ApplicationDTO::getName));
this.sidebar = new ApplicationSidebar(availableApps, filter, javaFxSettingsManager);
// create the bindings between the visual components and the observable lists
this.sidebar.bindCategories(this.sortedCategories);
this.availableApps.bind(this.sortedApplications);
// set the category selection consumers
this.sidebar.setOnCategorySelection(category -> {
filter.setFilterCategory(category);
this.closeDetailsView();
});
this.sidebar.setOnAllCategorySelection(() -> {
filter.setFilterCategory(null);
this.closeDetailsView();
});
this.setSidebar(this.sidebar);
}
public void setOnSelectScript(Consumer<ScriptDTO> onSelectScript) {
this.onSelectScript = onSelectScript;
}
/**
* Populate with a list of categories containing apps, and then scripts
*
* @param categories CategoryDTO
*/
public void populate(List<CategoryDTO> categories) {
List<CategoryDTO> filteredCategories = categories.stream()
.filter(category -> !category.getApplications().isEmpty()).collect(Collectors.toList());
Platform.runLater(() -> {
this.categories.setAll(filteredCategories);
this.filter.clearAll();
this.sidebar.selectAllCategories();
this.closeDetailsView();
this.setCenter(availableApps);
});
}
public void setOnRetryButtonClicked(EventHandler<? super MouseEvent> event) {
getFailurePanel().getRetryButton().setOnMouseClicked(event);
}
private void showAppDetails(ApplicationDTO application, JavaFxSettingsManager javaFxSettingsManager) {
final AppPanel appPanel = new AppPanel(application, filter, themeManager, javaFxSettingsManager);
appPanel.setOnScriptInstall(this::installScript);
appPanel.setOnClose(this::closeDetailsView);
appPanel.setMaxWidth(400);
this.showDetailsView(appPanel);
}
private void installScript(ScriptDTO scriptDTO) {
this.onSelectScript.accept(scriptDTO);
}
}
| 1 | 11,230 | Can you change this import to the apache dependency? I didn't even know that jgit contains such a method... | PhoenicisOrg-phoenicis | java |
@@ -34,11 +34,11 @@ func GenerateServiceAccountName(gServiceAccount, clusterName string) string {
}
// MakeServiceAccount creates a K8s ServiceAccount object for the Namespace.
-func MakeServiceAccount(namespace string, gServiceAccount, clusterName string) *corev1.ServiceAccount {
+func MakeServiceAccount(namespace string, kServiceAccount, gServiceAccount, clusterName string) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
- Name: GenerateServiceAccountName(gServiceAccount, clusterName),
+ Name: kServiceAccount,
Annotations: map[string]string{
WorkloadIdentityKey: gServiceAccount,
}, | 1 | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"fmt"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
WorkloadIdentityKey = "iam.gke.io/gcp-service-account"
)
// GenerateServiceAccountName generates a k8s ServiceAccount name from the GCP ServiceAccount name and the cluster name.
func GenerateServiceAccountName(gServiceAccount, clusterName string) string {
return fmt.Sprintf("%s-%s", strings.Split(gServiceAccount, "@")[0], clusterName)
}
// MakeServiceAccount creates a K8s ServiceAccount object for the Namespace.
func MakeServiceAccount(namespace string, gServiceAccount, clusterName string) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: GenerateServiceAccountName(gServiceAccount, clusterName),
Annotations: map[string]string{
WorkloadIdentityKey: gServiceAccount,
},
},
}
}
| 1 | 14,788 | Four strings in a row makes me nervous, can we use an arguments struct instead? Or reuse the struct I recommend in the reconciler. | google-knative-gcp | go |
@@ -50,13 +50,15 @@ interpreted as encoded JSON and formatted accordingly. If '-r', value
is displayed without a newline. If '-t', the RFC 11 object is displayed.
'-a treeobj' causes the lookup to be relative to an RFC 11 snapshot reference.
-*put* [-j|-r|-t] [-n] 'key=value' ['key=value...']::
+*put* [-j|-r|-t] [-n] [-A] 'key=value' ['key=value...']::
Store 'value' under 'key' and commit it. If it already has a value,
overwrite it. If no options, value is stored directly. If '-j', it is
first encoded as JSON, then stored. If '-r', the value may be read from
standard input if specified as "-", and may include embedded NULL bytes.
If '-t', value is stored as a RFC 11 object. '-n' prevents the commit
-from being merged with with other contemporaneous commits.
+from being merged with with other contemporaneous commits. '-A' appends the
+value to a key instead of overwriting the value. Append is incompatible with
+the -j option.
*ls* [-R] [-d] [-F] [-w COLS] [-1] ['key' ...]::
Display directory referred to by _key_, or "." (root) if unspecified. | 1 | // flux-help-include: true
FLUX-KVS(1)
===========
:doctype: manpage
NAME
----
flux-kvs - Flux key-value store utility
SYNOPSIS
--------
*flux* *kvs* 'COMMAND' ['OPTIONS']
DESCRIPTION
-----------
The Flux key-value store (KVS) is a simple, distributed data storage
service used as a building block by other Flux components.
flux-kvs(1) is a command line utility that operates on the KVS.
It is a very thin layer on top of a C API.
The Flux KVS stores JSON values under string keys. The keys are
hierarchical, using "." as a path separator, analogous to "/" separated
UNIX file paths. A single "." represents the root directory of
the KVS.
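For example, a session along the following lines (the key names, values, and
output shown are illustrative only) stores and retrieves a value under a
hierarchical key:
  $ flux kvs put resource.ncores=4
  $ flux kvs get resource.ncores
  4
  $ flux kvs ls resource
  ncores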
The KVS is distributed among the ranks of a comms session. Rank 0
is the master, and other ranks are caching slaves. All writes are flushed
to the master during a commit operation. Data is stored in a hash tree
such that every commit results in a new root hash. Each new root hash
is multicast across the session. When slaves update their root hash,
they atomically update their view to match the master. There may be a
delay after a commit while old data is served on a slave that has not yet
updated its root hash, thus the Flux KVS consistency model is "eventually
consistent". Slaves cache data temporally and fault in new data through
their parent in the overlay network.
flux-kvs(1) runs a KVS 'COMMAND'. The possible commands and their
arguments are described below.
COMMANDS
--------
*get* [-j|-r|-t] [-a treeobj] 'key' ['key...']::
Retrieve the value stored under 'key'. If nothing has been stored under
'key', display an error message. If no options, value is displayed with
a newline appended (if value length is nonzero). If '-j', value is
interpreted as encoded JSON and formatted accordingly. If '-r', value
is displayed without a newline. If '-t', the RFC 11 object is displayed.
'-a treeobj' causes the lookup to be relative to an RFC 11 snapshot reference.
*put* [-j|-r|-t] [-n] 'key=value' ['key=value...']::
Store 'value' under 'key' and commit it. If it already has a value,
overwrite it. If no options, value is stored directly. If '-j', it is
first encoded as JSON, then stored. If '-r', the value may be read from
standard input if specified as "-", and may include embedded NULL bytes.
If '-t', value is stored as a RFC 11 object. '-n' prevents the commit
from being merged with other contemporaneous commits.
*ls* [-R] [-d] [-F] [-w COLS] [-1] ['key' ...]::
Display directory referred to by _key_, or "." (root) if unspecified.
Options are roughly equivalent to a subset of ls(1) options.
'-R' lists directory recursively. '-d' displays directory not its contents.
'-F' classifies files with one character suffix (. is directory, @ is symlink).
'-w COLS' sets the terminal width in characters. '-1' causes output to be
displayed in one column.
*dir* [-R] [-d] [-w COLS] [-a treeobj] ['key']::
Display all keys and their values under the directory 'key'.
If 'key' does not exist or is not a directory, display an error message.
If 'key' is not provided, "." (root of the namespace) is assumed. If '-R'
is specified, recursively display keys under subdirectories. If '-d' is
specified, do not output key values. Output is truncated to fit the
terminal width. '-w COLS' sets the terminal width (0=unlimited).
'-a treeobj' causes the lookup to be relative to an RFC 11 snapshot reference.
*unlink* [-R] [-f] 'key' ['key...']::
Remove 'key' from the KVS and commit the change. If 'key' represents
a directory, specify '-R' to remove all keys underneath it. If '-f' is
specified, ignore nonexistent files.
*link* 'target' 'linkname'::
Create a new name for 'target', similar to a symbolic link, and commit
the change. 'target' does not have to exist. If 'linkname' exists,
it is overwritten.
*readlink* [-a treeobj] 'key' ['key...']::
Retrieve the key a link refers to rather than its value, as would be
returned by *get*. '-a treeobj' causes the lookup to be relative to
an RFC 11 snapshot reference.
*mkdir* 'key' ['key...']::
Create an empty directory and commit the change. If 'key' exists,
it is overwritten.
*copy* 'source' 'destination'::
Copy 'source' key to 'destination' key. If a directory is copied, a new
reference is created; it is unnecessary for *copy* to recurse into 'source'.
*move* 'source' 'destination'::
Like *copy*, but 'source' is unlinked after the copy.
*dropcache* [--all]::
Tell the local KVS to drop any cache it is holding. If '--all' is
specified, send an event across the comms session instructing all KVS
instances to drop their caches.
*watch* [-R] [-d] [-o] [-c count] 'key'::
Watch 'key' and output changes. If 'key' is a single value, each
change will be displayed on a line of output. If 'key' is a
directory, changes within the directory will be
displayed. If '-R' is specified, recursively display keys under
subdirectories. If '-d' is specified, do not output key values. If
'count' is specified, display at most 'count' changes. Otherwise,
this command runs forever. If '-o' is specified, output the current
value before outputting changes.
*version*::
Display the current KVS version, an integer value. The version starts
at zero and is incremented on each KVS commit. Note that some commits
may be aggregated for performance and the version will be incremented
once for the aggregation, so it cannot be used as a direct count of
commit requests.
*wait* 'version'::
Block until the KVS version reaches 'version' or greater. A simple form
of synchronization between peers is: node A puts a value, commits it,
reads version, sends version to node B. Node B waits for version, gets
value.
AUTHOR
------
This page is maintained by the Flux community.
RESOURCES
---------
Github: <http://github.com/flux-framework>
COPYRIGHT
---------
include::COPYRIGHT.adoc[]
| 1 | 19,467 | should work with no options, if my suggestion is accepted. JSON is the one that gets weird since a JSON object appended to a JSON object isn't valid JSON. | flux-framework-flux-core | c |
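The reviewer's point above about append and '-j' can be illustrated with a short, editor-added Go sketch (not part of either project): two individually valid JSON objects concatenated together are not a valid JSON document, so an append operation on JSON-encoded values cannot simply glue the encodings end to end.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Two valid JSON objects appended back to back.
	appended := []byte(`{"a":1}` + `{"b":2}`)
	var v interface{}
	// Unmarshal rejects the concatenation with
	// "invalid character '{' after top-level value".
	err := json.Unmarshal(appended, &v)
	fmt.Println(err)
}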
@@ -49,7 +49,7 @@ func init() {
flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.")
flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.")
- flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploadxs with indeterminate file size.")
+ flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
}
var commandDefinition = &cobra.Command{
| 1 |
package info
// FIXME once translations are implemented will need a no-escape
// option for Put so we can make these tests work again
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"path"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/info/internal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/lib/random"
"github.com/spf13/cobra"
)
var (
writeJSON string
checkNormalization bool
checkControl bool
checkLength bool
checkStreaming bool
uploadWait time.Duration
positionLeftRe = regexp.MustCompile(`(?s)^(.*)-position-left-([[:xdigit:]]+)$`)
positionMiddleRe = regexp.MustCompile(`(?s)^position-middle-([[:xdigit:]]+)-(.*)-$`)
positionRightRe = regexp.MustCompile(`(?s)^position-right-([[:xdigit:]]+)-(.*)$`)
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file.")
flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.")
flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.")
flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploadxs with indeterminate file size.")
}
var commandDefinition = &cobra.Command{
Use: "info [remote:path]+",
Short: `Discovers file name or other limitations for paths.`,
Long: `rclone info discovers what filenames and upload methods are possible
to write to the paths passed in and how long they can be. It can take some
time. It will write test files into the remote:path passed in. It outputs
a bit of go code for each one.
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1e6, command, args)
for i := range args {
f := cmd.NewFsDir(args[i : i+1])
cmd.Run(false, false, command, func() error {
return readInfo(context.Background(), f)
})
}
},
}
type results struct {
ctx context.Context
f fs.Fs
mu sync.Mutex
stringNeedsEscaping map[string]internal.Position
controlResults map[string]internal.ControlResult
maxFileLength int
canWriteUnnormalized bool
canReadUnnormalized bool
canReadRenormalized bool
canStream bool
}
func newResults(ctx context.Context, f fs.Fs) *results {
return &results{
ctx: ctx,
f: f,
stringNeedsEscaping: make(map[string]internal.Position),
controlResults: make(map[string]internal.ControlResult),
}
}
// Print the results to stdout
func (r *results) Print() {
fmt.Printf("// %s\n", r.f.Name())
if checkControl {
escape := []string{}
for c, needsEscape := range r.stringNeedsEscaping {
if needsEscape != internal.PositionNone {
k := strconv.Quote(c)
k = k[1 : len(k)-1]
escape = append(escape, fmt.Sprintf("'%s'", k))
}
}
sort.Strings(escape)
fmt.Printf("stringNeedsEscaping = []rune{\n")
fmt.Printf("\t%s\n", strings.Join(escape, ", "))
fmt.Printf("}\n")
}
if checkLength {
fmt.Printf("maxFileLength = %d\n", r.maxFileLength)
}
if checkNormalization {
fmt.Printf("canWriteUnnormalized = %v\n", r.canWriteUnnormalized)
fmt.Printf("canReadUnnormalized = %v\n", r.canReadUnnormalized)
fmt.Printf("canReadRenormalized = %v\n", r.canReadRenormalized)
}
if checkStreaming {
fmt.Printf("canStream = %v\n", r.canStream)
}
}
// WriteJSON writes the results to a JSON file when requested
func (r *results) WriteJSON() {
if writeJSON == "" {
return
}
report := internal.InfoReport{
Remote: r.f.Name(),
}
if checkControl {
report.ControlCharacters = &r.controlResults
}
if checkLength {
report.MaxFileLength = &r.maxFileLength
}
if checkNormalization {
report.CanWriteUnnormalized = &r.canWriteUnnormalized
report.CanReadUnnormalized = &r.canReadUnnormalized
report.CanReadRenormalized = &r.canReadRenormalized
}
if checkStreaming {
report.CanStream = &r.canStream
}
if f, err := os.Create(writeJSON); err != nil {
fs.Errorf(r.f, "Creating JSON file failed: %s", err)
} else {
defer fs.CheckClose(f, &err)
enc := json.NewEncoder(f)
enc.SetIndent("", " ")
err := enc.Encode(report)
if err != nil {
fs.Errorf(r.f, "Writing JSON file failed: %s", err)
}
}
fs.Infof(r.f, "Wrote JSON file: %s", writeJSON)
}
// writeFile writes a file with some random contents
func (r *results) writeFile(path string) (fs.Object, error) {
contents := random.String(50)
src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
obj, err := r.f.Put(r.ctx, bytes.NewBufferString(contents), src)
if uploadWait > 0 {
time.Sleep(uploadWait)
}
return obj, err
}
// check whether normalization is enforced and check whether it is
// done on the files anyway
func (r *results) checkUTF8Normalization() {
unnormalized := "Héroique"
normalized := "Héroique"
_, err := r.writeFile(unnormalized)
if err != nil {
r.canWriteUnnormalized = false
return
}
r.canWriteUnnormalized = true
_, err = r.f.NewObject(r.ctx, unnormalized)
if err == nil {
r.canReadUnnormalized = true
}
_, err = r.f.NewObject(r.ctx, normalized)
if err == nil {
r.canReadRenormalized = true
}
}
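// checkStringPositions writes a 50 byte test file with the string s embedded
// at the left, middle and right of the file name, records any write/read
// errors per position, and stores the combined result under key k.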
func (r *results) checkStringPositions(k, s string) {
fs.Infof(r.f, "Writing position file 0x%0X", s)
positionError := internal.PositionNone
res := internal.ControlResult{
Text: s,
WriteError: make(map[internal.Position]string, 3),
GetError: make(map[internal.Position]string, 3),
InList: make(map[internal.Position]internal.Presence, 3),
}
for _, pos := range internal.PositionList {
path := ""
switch pos {
case internal.PositionMiddle:
path = fmt.Sprintf("position-middle-%0X-%s-", s, s)
case internal.PositionLeft:
path = fmt.Sprintf("%s-position-left-%0X", s, s)
case internal.PositionRight:
path = fmt.Sprintf("position-right-%0X-%s", s, s)
default:
panic("invalid position: " + pos.String())
}
_, writeError := r.writeFile(path)
if writeError != nil {
res.WriteError[pos] = writeError.Error()
fs.Infof(r.f, "Writing %s position file 0x%0X Error: %s", pos.String(), s, writeError)
} else {
fs.Infof(r.f, "Writing %s position file 0x%0X OK", pos.String(), s)
}
obj, getErr := r.f.NewObject(r.ctx, path)
if getErr != nil {
res.GetError[pos] = getErr.Error()
fs.Infof(r.f, "Getting %s position file 0x%0X Error: %s", pos.String(), s, getErr)
} else {
if obj.Size() != 50 {
res.GetError[pos] = fmt.Sprintf("invalid size %d", obj.Size())
fs.Infof(r.f, "Getting %s position file 0x%0X Invalid Size: %d", pos.String(), s, obj.Size())
} else {
fs.Infof(r.f, "Getting %s position file 0x%0X OK", pos.String(), s)
}
}
if writeError != nil || getErr != nil {
positionError += pos
}
}
r.mu.Lock()
r.stringNeedsEscaping[k] = positionError
r.controlResults[k] = res
r.mu.Unlock()
}
// check we can write a file with the control chars
func (r *results) checkControls() {
fs.Infof(r.f, "Trying to create control character file names")
// Concurrency control
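// The tokens channel acts as a counting semaphore so that at most
// fs.Config.Checkers position checks run concurrently.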
tokens := make(chan struct{}, fs.Config.Checkers)
for i := 0; i < fs.Config.Checkers; i++ {
tokens <- struct{}{}
}
var wg sync.WaitGroup
for i := rune(0); i < 128; i++ {
s := string(i)
if i == 0 || i == '/' {
// We're not even going to check NULL or /
r.stringNeedsEscaping[s] = internal.PositionAll
continue
}
wg.Add(1)
go func(s string) {
defer wg.Done()
token := <-tokens
k := s
r.checkStringPositions(k, s)
tokens <- token
}(s)
}
for _, s := range []string{"\\", "\u00A0", "\xBF", "\xFE"} {
wg.Add(1)
go func(s string) {
defer wg.Done()
token := <-tokens
k := s
r.checkStringPositions(k, s)
tokens <- token
}(s)
}
wg.Wait()
r.checkControlsList()
fs.Infof(r.f, "Done trying to create control character file names")
}
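// checkControlsList lists the remote and, for each position test file found,
// decodes the hex-encoded control string from its name and records whether the
// name came back unchanged (Present), altered (Renamed) or duplicated (Multiple).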
func (r *results) checkControlsList() {
l, err := r.f.List(context.TODO(), "")
if err != nil {
fs.Errorf(r.f, "Listing control character file names failed: %s", err)
return
}
namesMap := make(map[string]struct{}, len(l))
for _, s := range l {
namesMap[path.Base(s.Remote())] = struct{}{}
}
for path := range namesMap {
var pos internal.Position
var hex, value string
if g := positionLeftRe.FindStringSubmatch(path); g != nil {
pos, hex, value = internal.PositionLeft, g[2], g[1]
} else if g := positionMiddleRe.FindStringSubmatch(path); g != nil {
pos, hex, value = internal.PositionMiddle, g[1], g[2]
} else if g := positionRightRe.FindStringSubmatch(path); g != nil {
pos, hex, value = internal.PositionRight, g[1], g[2]
} else {
fs.Infof(r.f, "Unknown path %q", path)
continue
}
var hexValue []byte
for ; len(hex) >= 2; hex = hex[2:] {
if b, err := strconv.ParseUint(hex[:2], 16, 8); err != nil {
fs.Infof(r.f, "Invalid path %q: %s", path, err)
continue
} else {
hexValue = append(hexValue, byte(b))
}
}
if hex != "" {
fs.Infof(r.f, "Invalid path %q", path)
continue
}
hexStr := string(hexValue)
k := hexStr
switch r.controlResults[k].InList[pos] {
case internal.Absent:
if hexStr == value {
r.controlResults[k].InList[pos] = internal.Present
} else {
r.controlResults[k].InList[pos] = internal.Renamed
}
case internal.Present:
r.controlResults[k].InList[pos] = internal.Multiple
case internal.Renamed:
r.controlResults[k].InList[pos] = internal.Multiple
}
delete(namesMap, path)
}
if len(namesMap) > 0 {
fs.Infof(r.f, "Found additional control character file names:")
for name := range namesMap {
fs.Infof(r.f, "%q", name)
}
}
}
// find the max file name size we can use
func (r *results) findMaxLength() {
const maxLen = 16 * 1024
name := make([]byte, maxLen)
for i := range name {
name[i] = 'a'
}
// Find the first size of filename we can't write
i := sort.Search(len(name), func(i int) (fail bool) {
defer func() {
if err := recover(); err != nil {
fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err)
fail = true
}
}()
path := string(name[:i])
_, err := r.writeFile(path)
if err != nil {
fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err)
return true
}
fs.Infof(r.f, "Wrote file with name length %d", i)
return false
})
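// sort.Search returns the smallest name length that failed to write, so the
// longest name that could be written is one character shorter.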
r.maxFileLength = i - 1
fs.Infof(r.f, "Max file length is %d", r.maxFileLength)
}
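// checkStreaming uploads a small file with an indeterminate size (-1), using the
// remote's PutStream if available, then verifies its hashes and size to decide
// whether streamed uploads are supported.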
func (r *results) checkStreaming() {
putter := r.f.Put
if r.f.Features().PutStream != nil {
fs.Infof(r.f, "Given remote has specialized streaming function. Using that to test streaming.")
putter = r.f.Features().PutStream
}
contents := "thinking of test strings is hard"
buf := bytes.NewBufferString(contents)
hashIn := hash.NewMultiHasher()
in := io.TeeReader(buf, hashIn)
objIn := object.NewStaticObjectInfo("checkStreamingTest", time.Now(), -1, true, nil, r.f)
objR, err := putter(r.ctx, in, objIn)
if err != nil {
fs.Infof(r.f, "Streamed file failed to upload (%v)", err)
r.canStream = false
return
}
hashes := hashIn.Sums()
types := objR.Fs().Hashes().Array()
for _, Hash := range types {
sum, err := objR.Hash(r.ctx, Hash)
if err != nil {
fs.Infof(r.f, "Streamed file failed when getting hash %v (%v)", Hash, err)
r.canStream = false
return
}
if !hash.Equals(hashes[Hash], sum) {
fs.Infof(r.f, "Streamed file has incorrect hash %v: expecting %q got %q", Hash, hashes[Hash], sum)
r.canStream = false
return
}
}
if int64(len(contents)) != objR.Size() {
fs.Infof(r.f, "Streamed file has incorrect file size: expecting %d got %d", len(contents), objR.Size())
r.canStream = false
return
}
r.canStream = true
}
func readInfo(ctx context.Context, f fs.Fs) error {
err := f.Mkdir(ctx, "")
if err != nil {
return errors.Wrap(err, "couldn't mkdir")
}
r := newResults(ctx, f)
if checkControl {
r.checkControls()
}
if checkLength {
r.findMaxLength()
}
if checkNormalization {
r.checkUTF8Normalization()
}
if checkStreaming {
r.checkStreaming()
}
r.Print()
r.WriteJSON()
return nil
}
| 1 | 12,082 | I presume this isn't some special term... | rclone-rclone | go |
@@ -79,6 +79,18 @@ function pAsHeadingEvaluate(node, options, virtualNode) {
const nextStyle = nextSibling ? getStyleValues(nextSibling) : null;
const prevStyle = prevSibling ? getStyleValues(prevSibling) : null;
+ if (node && nextSibling) {
+ const headingLength = node.textContent.trim().length;
+ const paragraphLength = nextSibling.textContent.trim().length;
+ if (headingLength > paragraphLength) {
+ return true;
+ } else if (headingLength < paragraphLength / 2) {
+ return false;
+ } else {
+ undefined;
+ }
+ }
+
if (!nextStyle || !isHeaderStyle(currStyle, nextStyle, margins)) {
return true;
}
| 1 |
import { findUpVirtual } from '../../commons/dom';
function normalizeFontWeight(weight) {
switch (weight) {
case 'lighter':
return 100;
case 'normal':
return 400;
case 'bold':
return 700;
case 'bolder':
return 900;
}
weight = parseInt(weight);
return !isNaN(weight) ? weight : 400;
}
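// Walk down to the deepest element that still contains all of the node's
// trimmed text, so computed styles are read from the element that actually
// renders the text.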
function getTextContainer(elm) {
let nextNode = elm;
const outerText = elm.textContent.trim();
let innerText = outerText;
while (innerText === outerText && nextNode !== undefined) {
let i = -1;
elm = nextNode;
if (elm.children.length === 0) {
return elm;
}
do {
// find the first non-empty child
i++;
innerText = elm.children[i].textContent.trim();
} while (innerText === '' && i + 1 < elm.children.length);
nextNode = elm.children[i];
}
return elm;
}
function getStyleValues(node) {
const style = window.getComputedStyle(getTextContainer(node));
return {
fontWeight: normalizeFontWeight(style.getPropertyValue('font-weight')),
fontSize: parseInt(style.getPropertyValue('font-size')),
isItalic: style.getPropertyValue('font-style') === 'italic'
};
}
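// Returns true when styleA looks like a heading relative to styleB under at
// least one of the configured margins; every property a margin specifies
// (font-size ratio, font-weight difference, italic) must favour styleA.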
function isHeaderStyle(styleA, styleB, margins) {
return margins.reduce((out, margin) => {
return (
out ||
((!margin.size || styleA.fontSize / margin.size > styleB.fontSize) &&
(!margin.weight ||
styleA.fontWeight - margin.weight > styleB.fontWeight) &&
(!margin.italic || (styleA.isItalic && !styleB.isItalic)))
);
}, false);
}
function pAsHeadingEvaluate(node, options, virtualNode) {
const siblings = Array.from(node.parentNode.children);
const currentIndex = siblings.indexOf(node);
options = options || {};
const margins = options.margins || [];
const nextSibling = siblings
.slice(currentIndex + 1)
.find(elm => elm.nodeName.toUpperCase() === 'P');
const prevSibling = siblings
.slice(0, currentIndex)
.reverse()
.find(elm => elm.nodeName.toUpperCase() === 'P');
const currStyle = getStyleValues(node);
const nextStyle = nextSibling ? getStyleValues(nextSibling) : null;
const prevStyle = prevSibling ? getStyleValues(prevSibling) : null;
if (!nextStyle || !isHeaderStyle(currStyle, nextStyle, margins)) {
return true;
}
const blockquote = findUpVirtual(virtualNode, 'blockquote');
if (blockquote && blockquote.nodeName.toUpperCase() === 'BLOCKQUOTE') {
return undefined;
}
if (prevStyle && !isHeaderStyle(currStyle, prevStyle, margins)) {
return undefined;
}
return false;
}
export default pAsHeadingEvaluate;
| 1 | 17,021 | Did you mean to return here? ;) You'll also need to add an `incomplete` message to the rule metadata. | dequelabs-axe-core | js |
@@ -540,6 +540,14 @@ const (
// This is replaced at runtime
JivaClusterIPHolder JivaAnnotations = "__CLUSTER_IP__"
+ // JivaCloneIPHolder is used as a placeholder for sync controller IP address
+ // which will be used as cloneIP
+ //
+ // NOTE:
+ // This is replaced at runtime
+ JivaCloneIPHolder JivaAnnotations = "__CLONE_IP__"
+
+ JivaReplicaTypeHolder JivaAnnotations = "clone"
// JivaStorageSizeHolder is used as a placeholder for persistent volume's
// storage capacity
//
| 1 |
package v1
// NomadEnvironmentVariable is a typed label that defines environment variables
// that are understood by Nomad
type NomadEnvironmentVariable string
const (
// NomadAddressEnvKey is the environment variable that determines the
// Nomad server address where the Job request can be directed to.
NomadAddressEnvKey NomadEnvironmentVariable = "NOMAD_ADDR"
// NomadRegionEnvKey is the environment variable that determines the Nomad region
// where the Job request can be directed to.
NomadRegionEnvKey NomadEnvironmentVariable = "NOMAD_REGION"
)
// EnvironmentVariableLabel is a typed label that defines environment variable
// labels that are passed as request options during provisioning.
type EnvironmentVariableLabel string
const (
// EnvVariableContextLbl is the label that can be optionally set as one of the
// request option during VSM provisioning operations. Its value is used
// to set the context (/ prefix) against the environment variables for that
// particular request.
EnvVariableContextLbl EnvironmentVariableLabel = "env.mapi.openebs.io/env-var-ctx"
)
// EnvironmentVariableDefaults is a typed label that defines the environment variable
// defaults
type EnvironmentVariableDefaults string
const (
// Default value for environment variable context
EnvVariableContextDef EnvironmentVariableDefaults = "DEFAULT"
)
// EnvironmentVariableKey is a typed label that define the environment variables
type EnvironmentVariableKey string
const (
// PVPProfileNameEnvVarKey is the environment variable key for persistent
// volume provisioner's profile name
//
// Usage:
// <CTX>_PVP_PROFILE_NAME = <some value>
PVPProfileNameEnvVarKey EnvironmentVariableKey = "_PVP_PROFILE_NAME"
// PVPNameEnvVarKey is the environment variable key for persistent volume
// provisioner's name
//
// Usage:
// <CTX>_PVP_NAME = <some value>
PVPNameEnvVarKey EnvironmentVariableKey = "_PVP_NAME"
// PVPControllerImageEnvVarKey is the environment variable key for persistent
// volume provisioner's controller image
//
// Usage:
// <CTX>_CONTROLLER_IMAGE = <some value>
PVPControllerImageEnvVarKey EnvironmentVariableKey = "_CONTROLLER_IMAGE"
// PVPPersistentPathEnvVarKey is the environment variable key for persistent
// volume provisioner's replica persistent path
//
// Usage:
// <CTX>_PERSISTENT_PATH = <some value>
PVPPersistentPathEnvVarKey EnvironmentVariableKey = "_PERSISTENT_PATH"
// PVPStorageSizeEnvVarKey is the environment variable key for persistent
// volume provisioner's replica size
//
// Usage:
// <CTX>_STORAGE_SIZE = <some value>
PVPStorageSizeEnvVarKey EnvironmentVariableKey = "_STORAGE_SIZE"
// PVPReplicaCountEnvVarKey is the environment variable key for persistent
// volume provisioner's replica count
//
// Usage:
// <CTX>_REPLICA_COUNT = <some value>
PVPReplicaCountEnvVarKey EnvironmentVariableKey = "_REPLICA_COUNT"
// PVPReplicaImageEnvVarKey is the environment variable key for persistent
// volume provisioner's replica image
//
// Usage:
// <CTX>_REPLICA_IMAGE = <some value>
PVPReplicaImageEnvVarKey EnvironmentVariableKey = "_REPLICA_IMAGE"
// PVPControllerCountEnvVarKey is the environment variable key for persistent
// volume provisioner's controller count
//
// Usage:
// <CTX>_CONTROLLER_COUNT = <some value>
PVPControllerCountEnvVarKey EnvironmentVariableKey = "_CONTROLLER_COUNT"
// PVPReplicaTopologyKeyEnvVarKey is the environment variable key for persistent
// volume provisioner's replica topology key
//
// Usage:
// <CTX>_REPLICA_TOPOLOGY_KEY = <some value>
PVPReplicaTopologyKeyEnvVarKey EnvironmentVariableKey = "_REPLICA_TOPOLOGY_KEY"
// PVPControllerNodeTaintTolerationEnvVarKey is the environment variable key
// for persistent volume provisioner's node taint toleration
//
// Usage:
// <CTX>_CONTROLLER_NODE_TAINT_TOLERATION = <some value>
PVPControllerNodeTaintTolerationEnvVarKey EnvironmentVariableKey = "_CONTROLLER_NODE_TAINT_TOLERATION"
// PVPReplicaNodeTaintTolerationEnvVarKey is the environment variable key for
// persistent volume provisioner's node taint toleration
//
// Usage:
// <CTX>_REPLICA_NODE_TAINT_TOLERATION = <some value>
PVPReplicaNodeTaintTolerationEnvVarKey EnvironmentVariableKey = "_REPLICA_NODE_TAINT_TOLERATION"
// OrchestratorNameEnvVarKey is the environment variable key for
// orchestration provider's name
//
// Usage:
// <CTX>_ORCHESTRATOR_NAME = <some value>
OrchestratorNameEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_NAME"
// OrchestratorRegionEnvVarKey is the environment variable key for orchestration
// provider's region
//
// Usage:
// <CTX>_ORCHESTRATOR_REGION = <some value>
OrchestratorRegionEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_REGION"
// OrchestratorDCEnvVarKey is the environment variable key for orchestration
// provider's datacenter
//
// Usage:
// <CTX>_ORCHESTRATOR_DC = <some value>
OrchestratorDCEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_DC"
// OrchestratorAddressEnvVarKey is the environment variable key for orchestration
// provider's address
//
// Usage:
// <CTX>_<REGION>_<DC>_ORCHESTRATOR_ADDR = 10.20.1.1
OrchestratorAddressEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_ADDR"
// OrchestratorCNTypeEnvVarKey is the environment variable key for orchestration
// provider's network type
//
// Usage:
// <CTX>_ORCHESTRATOR_CN_TYPE = <some value>
OrchestratorCNTypeEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_CN_TYPE"
// OrchestratorCNInterfaceEnvVarKey is the environment variable key for orchestration
// provider's network interface
//
// Usage:
// <CTX>_ORCHESTRATOR_CN_INTERFACE = <some value>
OrchestratorCNInterfaceEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_CN_INTERFACE"
// OrchestratorCNAddrEnvVarKey is the environment variable key for orchestration
// provider's network address
//
// Usage:
// <CTX>_ORCHESTRATOR_CN_ADDRESS = <some value>
OrchestratorCNAddrEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_CN_ADDRESS"
// OrchestratorNSEnvVarKey is the environment variable key for orchestration
// provider's namespace
//
// Usage:
// <CTX>_ORCHESTRATOR_NS = <some value>
OrchestratorNSEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_NS"
// OrchestratorInClusterEnvVarKey is the environment variable key for orchestration
// provider's in-cluster flag
//
// Usage:
// <CTX>_ORCHESTRATOR_IN_CLUSTER = <some value>
OrchestratorInClusterEnvVarKey EnvironmentVariableKey = "_ORCHESTRATOR_IN_CLUSTER"
)
// OrchProviderProfileLabel is a typed label to determine orchestration provider
// profile's values.
type OrchProviderProfileLabel string
const (
// Label / Tag for an orchestrator profile name
OrchProfileNameLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/profile-name"
// Label / Tag for an orchestrator region
OrchRegionLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/region"
// Label / Tag for an orchestrator datacenter
OrchDCLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/dc"
// OrchAddrLbl is the Label / Tag for an orchestrator address
OrchAddrLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/address"
// Label / Tag for an orchestrator namespace
OrchNSLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/ns"
// OrchInClusterLbl is the label for setting the in cluster flag. This is used
// during provisioning operations. It sets if the provisioning is meant to be
// within cluster or outside the cluster.
OrchInClusterLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/in-cluster"
// OrchCNTypeLbl is the Label / Tag for an orchestrator's networking type
OrchCNTypeLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/cn-type"
// OrchCNNetworkAddrLbl is the Label / Tag for an orchestrator's network address
// in CIDR notation
OrchCNNetworkAddrLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/cn-addr"
// OrchCNSubnetLbl is the Label / Tag for an orchestrator's network subnet
OrchCNSubnetLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/cn-subnet"
// OrchCNInterfaceLbl is the Label / Tag for an orchestrator's network interface
OrchCNInterfaceLbl OrchProviderProfileLabel = "orchprovider.mapi.openebs.io/cn-interface"
)
// OrchProviderDefaults is a typed label to provide default values w.r.t
// orchestration provider properties.
type OrchProviderDefaults string
const (
// Default value for orchestrator's network address
// NOTE: Should be in valid CIDR notation
OrchNetworkAddrDef OrchProviderDefaults = "172.28.128.1/24"
// Default value for orchestrator's in-cluster flag
OrchInClusterDef OrchProviderDefaults = "true"
// Default value for orchestrator namespace
OrchNSDef OrchProviderDefaults = "default"
// OrchRegionDef is the default value of orchestrator region
OrchRegionDef OrchProviderDefaults = "global"
// OrchDCDef is the default value of orchestrator datacenter
OrchDCDef OrchProviderDefaults = "dc1"
// OrchAddressDef is the default value of orchestrator address
OrchAddressDef OrchProviderDefaults = "127.0.0.1"
// OrchCNTypeDef is the default value of orchestrator network type
OrchCNTypeDef OrchProviderDefaults = "host"
// OrchCNInterfaceDef is the default value of orchestrator network interface
OrchCNInterfaceDef OrchProviderDefaults = "enp0s8"
)
// VolumeProvisionerProfileLabel is a typed label to determine volume provisioner
// profile values.
type VolumeProvisionerProfileLabel string
const (
// Label / Tag for a persistent volume provisioner profile's name
PVPProfileNameLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/profile-name"
// Label / Tag for a persistent volume provisioner's replica support
PVPReqReplicaLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/req-replica"
// Label / Tag for a persistent volume provisioner's networking support
PVPReqNetworkingLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/req-networking"
// Deprecate
// Label / Tag for a persistent volume provisioner's replica count
PVPReplicaCountLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/replica-count"
// Label / Tag for a persistent volume provisioner's persistent path count
PVPPersistentPathCountLbl VolumeProvisionerProfileLabel = PVPReplicaCountLbl
// Label / Tag for a persistent volume provisioner's storage size
PVPStorageSizeLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/storage-size"
// Label / Tag for a persistent volume provisioner's replica IPs
PVPReplicaIPsLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/replica-ips"
// Label / Tag for a persistent volume provisioner's replica image
PVPReplicaImageLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/replica-image"
// Label / Tag for a persistent volume provisioner's controller count
PVPControllerCountLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/controller-count"
// Label / Tag for a persistent volume provisioner's controller image
PVPControllerImageLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/controller-image"
// Label / Tag for a persistent volume provisioner's controller IPs
PVPControllerIPsLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/controller-ips"
// Label / Tag for a persistent volume provisioner's persistent path
PVPPersistentPathLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/persistent-path"
// Label / Tag for a persistent volume provisioner's controller node taint toleration
PVPControllerNodeTaintTolerationLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/controller-node-taint-toleration"
// Label / Tag for a persistent volume provisioner's replica node taint toleration
PVPReplicaNodeTaintTolerationLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/replica-node-taint-toleration"
// PVPReplicaTopologyKeyLbl is the label for a persistent volume provisioner's
// VSM replica topology key
PVPReplicaTopologyKeyLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/replica-topology-key"
// PVPNodeAffinityExpressionsLbl is the label to determine the node affinity
// of the replica(s).
//
// NOTE:
// 1. These are comma separated key value pairs, where each
// key & value is separated by an operator e.g. In, NotIn, Exists, DoesNotExist
//
// 2. The key & value should have been labeled against a node or group of
// nodes belonging to the K8s cluster
//
// 3. The replica count should match the number of pairs provided
//
// Usage:
// For OpenEBS volume with 2 replicas:
// volumeprovisioner.mapi.openebs.io/node-affinity-expressions=
// "<replica-identifier>=kubernetes.io/hostname:In:node1,
// <another-replica-identifier>=kubernetes.io/hostname:In:node2"
//
// Usage:
// For OpenEBS volume with 3 replicas:
// volumeprovisioner.mapi.openebs.io/node-affinity-expressions=
// "<replica-identifier>=kubernetes.io/hostname:In:node1,
// <another-replica-identifier>=kubernetes.io/hostname:In:node2,
// <yet-another-replica-identifier>=kubernetes.io/hostname:In:node3"
//
// Usage:
// For OpenEBS volume with 3 replicas:
// volumeprovisioner.mapi.openebs.io/node-affinity-expressions=
// "<replica-identifier>=volumeprovisioner.mapi.openebs.io/replica-zone-1-ssd-1:In:zone-1-ssd-1,
// <another-replica-identifier>=openebs.io/replica-zone-1-ssd-2:In:zone-1-ssd-2,
// <yet-another-replica-identifier>=openebs.io/replica-zone-2-ssd-1:In:zone-2-ssd-1"
//
// Usage:
// For OpenEBS volume with 3 replicas:
// volumeprovisioner.mapi.openebs.io/node-affinity-expressions=
// "<replica-identifier>=openebs.io/replica-zone-1-grp-1:In:zone-1-grp-1,
// <another-replica-identifier>=openebs.io/replica-zone-1-grp-2:In:zone-1-grp-2,
// <yet-another-replica-identifier>=openebs.io/replica-zone-2-grp-1:In:zone-2-grp-1"
//PVPNodeAffinityExpressionsLbl VolumeProvisionerProfileLabel = "volumeprovisioner.mapi.openebs.io/node-affinity-expressions"
// PVPNodeSelectorKeyLbl is the label to build the node affinity
// of the replica based on the key & the replica identifier
//
// NOTE:
// PVPNodeAffinityExpressionsLbl is used here as key is a part of the expressions
//PVPNodeSelectorKeyLbl VolumeProvisionerProfileLabel = PVPNodeAffinityExpressionsLbl
// PVPNodeSelectorOpLbl is the label to build the node affinity
// of the replica based on the operator & the replica identifier
//
// NOTE:
// PVPNodeAffinityExpressionsLbl is used here as operator is a part of the expressions
//PVPNodeSelectorOpLbl VolumeProvisionerProfileLabel = PVPNodeAffinityExpressionsLbl
// PVPNodeSelectorValueLbl is the label to build the node affinity
// of the replica based on the operator & the replica identifier
//
// NOTE:
// PVPNodeAffinityExpressionsLbl is used here as value is a part of the expressions
//PVPNodeSelectorValueLbl VolumeProvisionerProfileLabel = PVPNodeAffinityExpressionsLbl
// PVPSCNameLbl is the key used to specify the name of storage class. This
// applies when OpenEBS volume is orchestrated by Maya using Kubernetes.
PVPSCNameLbl VolumeProvisionerProfileLabel = "sc/name"
// PVPSCNamespaceLbl is the key used to specify the namespace of storage
// class. This applies when OpenEBS volume is orchestrated by Maya using
// Kubernetes.
//PVPSCNamespaceLbl VolumeProvisionerProfileLabel = "sc/namespace"
)
// Deprecate
type MayaAPIServiceOutputLabel string
// Deprecate all these constants
const (
ReplicaStatusAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/replica-status"
ControllerStatusAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/controller-status"
TargetPortalsAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/targetportals"
ClusterIPsAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/cluster-ips"
ReplicaIPsAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/replica-ips"
ControllerIPsAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/controller-ips"
IQNAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/iqn"
VolumeSizeAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/volume-size"
// Deprecate
ReplicaCountAPILbl MayaAPIServiceOutputLabel = "vsm.openebs.io/replica-count"
)
// VolumeProvsionerDefaults is a typed label to provide default values w.r.t
// volume provisioner properties.
type VolumeProvisionerDefaults string
const (
// Default value for persistent volume provisioner's controller count
PVPControllerCountDef VolumeProvisionerDefaults = "1"
// Default value for persistent volume provisioner's replica count
PVPReplicaCountDef VolumeProvisionerDefaults = "2"
// Default value for persistent volume provisioner's persistent path count
// This should be equal to persistent volume provisioner's replica count
PVPPersistentPathCountDef VolumeProvisionerDefaults = PVPReplicaCountDef
// Default value for persistent volume provisioner's controller image
PVPControllerImageDef VolumeProvisionerDefaults = "openebs/jiva:latest"
// Default value for persistent volume provisioner's support for replica
PVPReqReplicaDef VolumeProvisionerDefaults = "true"
// Default value for persistent volume provisioner's replica image
PVPReplicaImageDef VolumeProvisionerDefaults = "openebs/jiva:latest"
// Default value for persistent volume provisioner's networking support
PVPReqNetworkingDef VolumeProvisionerDefaults = "false"
// PVPPersistentPathDef is the default value for persistent volume provisioner's
// replica persistent path
PVPPersistentPathDef VolumeProvisionerDefaults = "/var/openebs"
// PVPStorageSizeDef is the default value for persistent volume provisioner's
// replica size
PVPStorageSizeDef VolumeProvisionerDefaults = "1G"
// PVPNodeSelectorKeyDef is the default value for volume replica's node selector
// key
//PVPNodeSelectorKeyDef VolumeProvisionerDefaults = "kubernetes.io/hostname"
// PVPNodeSelectorOpDef is the default value for volume replica's node selector
// operator
//PVPNodeSelectorOpDef VolumeProvisionerDefaults = "In"
)
// NameLabel type will be used to identify various maya api service components
// via this typed label
type NameLabel string
const (
// Label / Tag for an orchestrator name
OrchestratorNameLbl NameLabel = "orchprovider.mapi.openebs.io/name"
// Label / Tag for a persistent volume provisioner name
VolumeProvisionerNameLbl NameLabel = "volumeprovisioner.mapi.openebs.io/name"
)
// OrchestratorRegistry type will be used to register various maya api service
// orchestrators.
type OrchProviderRegistry string
const (
// K8sOrchestrator states Kubernetes as orchestration provider plugin.
// This is used for registering Kubernetes as an orchestration provider in maya
// api server.
K8sOrchestrator OrchProviderRegistry = "kubernetes"
// NomadOrchestrator states Nomad as orchestration provider plugin.
// This is used for registering Nomad as an orchestration provider in maya api
// server.
NomadOrchestrator OrchProviderRegistry = "nomad"
// DefaultOrchestrator provides the default orchestration provider
DefaultOrchestrator = K8sOrchestrator
)
// VolumeProvisionerRegistry type will be used to register various maya api
// service volume provisioners.
type VolumeProvisionerRegistry string
const (
// JivaVolumeProvisioner states Jiva as persistent volume provisioner plugin.
// This is used for registering Jiva as a volume provisioner in maya api server.
JivaVolumeProvisioner VolumeProvisionerRegistry = "jiva"
// DefaultVolumeProvisioner provides the default persistent volume provisioner
// plugin.
DefaultVolumeProvisioner VolumeProvisionerRegistry = JivaVolumeProvisioner
)
// OrchProviderProfileRegistry type will be used to register various maya api
// service orchestrator profiles
type OrchProviderProfileRegistry string
const (
// This is the name of PVC as orchestration provider profile
// This is used for labelling PVC as a orchestration provider profile
PVCOrchestratorProfile OrchProviderProfileRegistry = "pvc"
)
// VolumeProvisionerProfileRegistry type will be used to register various maya api service
// persistent volume provisioner profiles
type VolumeProvisionerProfileRegistry string
const (
// This is the name of volume provisioner profile
VolumeProvisionerProfile VolumeProvisionerProfileRegistry = "vol"
)
type GenericAnnotations string
const (
// VolumeProvisionerSelectorKey is used to filter VSMs
VolumeProvisionerSelectorKey GenericAnnotations = "openebs/volume-provisioner"
// ControllerSelectorKey is used to filter controllers
ControllerSelectorKey GenericAnnotations = "openebs/controller"
// ControllerSelectorKeyEquals is used to filter controller when
// selector logic is used
ControllerSelectorKeyEquals GenericAnnotations = ControllerSelectorKey + "="
// ReplicaCountSelectorKey is used to filter replicas
//ReplicaCountSelectorKey GenericAnnotations = "openebs/replica-count"
// ReplicaSelectorKey is used to filter replicas
ReplicaSelectorKey GenericAnnotations = "openebs/replica"
// ReplicaSelectorKeyEquals is used to filter replica when
// selector logic is used
ReplicaSelectorKeyEquals GenericAnnotations = ReplicaSelectorKey + "="
// ServiceSelectorKey is used to filter services
ServiceSelectorKey GenericAnnotations = "openebs/controller-service"
// ServiceSelectorKeyEquals is used to filter services when selector logic is
// used
ServiceSelectorKeyEquals GenericAnnotations = ServiceSelectorKey + "="
// SelectorEquals is used to filter
SelectorEquals GenericAnnotations = "="
// VSMSelectorKey is used to filter vsm
VSMSelectorKey GenericAnnotations = "vsm"
// VSMSelectorKeyEquals is used to filter vsm when selector logic is used
VSMSelectorKeyEquals GenericAnnotations = VSMSelectorKey + "="
// ControllerSuffix is used as a suffix for controller related names
ControllerSuffix GenericAnnotations = "-ctrl"
// ReplicaSuffix is used as a suffix for replica related names
ReplicaSuffix GenericAnnotations = "-rep"
// ServiceSuffix is used as a suffix for service related names
ServiceSuffix GenericAnnotations = "-svc"
// ContainerSuffix is used as a suffix for container related names
ContainerSuffix GenericAnnotations = "-con"
)
// TODO
// Move these to jiva folder
//
// JivaAnnotations will be used to provide filtering options like
// named-labels, named-suffix, named-prefix, constants, etc.
//
// NOTE:
// These value(s) are generally used / remembered by the consumers of
// maya api service
type JivaAnnotations string
// TODO
// Rename these const s.t. they start with Jiva as Key Word
const (
// JivaVolumeProvisionerSelectorValue is used to filter jiva based objects
JivaVolumeProvisionerSelectorValue JivaAnnotations = "jiva"
// JivaControllerSelectorValue is used to filter jiva controller objects
JivaControllerSelectorValue JivaAnnotations = "jiva-controller"
// JivaReplicaSelectorValue is used to filter jiva replica objects
JivaReplicaSelectorValue JivaAnnotations = "jiva-replica"
// JivaServiceSelectorValue is used to filter jiva service objects
JivaServiceSelectorValue JivaAnnotations = "jiva-controller-service"
// PortNameISCSI is the name given to iscsi ports
PortNameISCSI JivaAnnotations = "iscsi"
// PortNameAPI is the name given to api ports
PortNameAPI JivaAnnotations = "api"
// JivaCtrlIPHolder is used as a placeholder for persistent volume controller's
// IP address
//
// NOTE:
// This is replaced at runtime
JivaClusterIPHolder JivaAnnotations = "__CLUSTER_IP__"
// JivaStorageSizeHolder is used as a placeholder for persistent volume's
// storage capacity
//
// NOTE:
// This is replaced at runtime
JivaStorageSizeHolder JivaAnnotations = "__STOR_SIZE__"
//
JivaVolumeNameHolder JivaAnnotations = "__VOLUME_NAME__"
)
// JivaDefaults is a typed label to provide DEFAULT values to Jiva based
// persistent volume properties
type JivaDefaults string
const (
// JivaControllerFrontendDef is used to provide default frontend for jiva
// persistent volume controller
JivaControllerFrontendDef JivaDefaults = "gotgt"
// JivaIqnFormatPrefix is the prefix of Jiva's iSCSI Qualified Name (IQN).
JivaIqnFormatPrefix JivaDefaults = "iqn.2016-09.com.openebs.jiva"
// JivaISCSIPortDef is used to provide default iscsi port value for jiva
// based persistent volumes
JivaISCSIPortDef JivaDefaults = "3260"
// JivaPersistentMountPathDef is the default mount path used by jiva based
// persistent volumes
JivaPersistentMountPathDef JivaDefaults = "/openebs"
// JivaPersistentMountNameDef is the default mount path name used by jiva based
// persistent volumes
JivaPersistentMountNameDef JivaDefaults = "openebs"
// JivaAPIPortDef is used to provide management port for persistent volume
// storage
JivaAPIPortDef JivaDefaults = "9501"
// JivaReplicaPortOneDef is used to provide port for jiva based persistent
// volume replica
JivaReplicaPortOneDef JivaDefaults = "9502"
// JivaReplicaPortTwoDef is used to provide port for jiva based persistent
// volume replica
JivaReplicaPortTwoDef JivaDefaults = "9503"
// JivaReplicaPortThreeDef is used to provide port for jiva based persistent
// volume replica
JivaReplicaPortThreeDef JivaDefaults = "9504"
// JivaBackEndIPPrefixLbl is used to provide the label for VSM replica IP on
// Nomad
JivaBackEndIPPrefixLbl JivaDefaults = "JIVA_REP_IP_"
)
// These will be used to provide array based constants that are
// related to jiva volume provisioner
var (
// JivaCtrlCmd is the command used to start jiva controller
JivaCtrlCmd = []string{"launch"}
// JivaCtrlArgs is the set of arguments provided to JivaCtrlCmd
//JivaCtrlArgs = []string{"controller", "--frontend", string(JivaControllerFrontendDef), string(JivaVolumeNameDef)}
JivaCtrlArgs = []string{"controller", "--frontend", string(JivaControllerFrontendDef), "--clusterIP", string(JivaClusterIPHolder), string(JivaVolumeNameHolder)}
// JivaReplicaCmd is the command used to start jiva replica
JivaReplicaCmd = []string{"launch"}
// JivaReplicaArgs is the set of arguments provided to JivaReplicaCmd
JivaReplicaArgs = []string{"replica", "--frontendIP", string(JivaClusterIPHolder), "--size", string(JivaStorageSizeHolder), string(JivaPersistentMountPathDef)}
)
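// The placeholders above (for example __CLUSTER_IP__ and __STOR_SIZE__) appear as
// whole elements of JivaCtrlArgs / JivaReplicaArgs and are, as their comments note,
// replaced at runtime. The helper below is a hypothetical, editor-added sketch
// (not part of this package) of what that substitution could look like.
func substituteJivaArgs(args []string, clusterIP, size string) []string {
	out := make([]string, len(args))
	for i, a := range args {
		switch a {
		case string(JivaClusterIPHolder):
			out[i] = clusterIP
		case string(JivaStorageSizeHolder):
			out[i] = size
		default:
			out[i] = a
		}
	}
	return out
}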
// TODO
// Move these to k8s folder
//
// K8sAnnotations will be used to provide string based constants that are
// related to kubernetes as orchestration provider
type K8sAnnotations string
const (
// K8sKindPod is used to state the k8s Pod
K8sKindPod K8sAnnotations = "Pod"
// K8sKindDeployment is used to state the k8s Deployment
K8sKindDeployment K8sAnnotations = "Deployment"
// K8sKindService is used to state the k8s Service
K8sKindService K8sAnnotations = "Service"
// K8sServiceVersion is used to state the k8s Service version
K8sServiceVersion K8sAnnotations = "v1"
// K8sPodVersion is used to state the k8s Pod version
K8sPodVersion K8sAnnotations = "v1"
// K8sDeploymentVersion is used to state the k8s Deployment version
K8sDeploymentVersion K8sAnnotations = "extensions/v1beta1"
// K8sHostnameTopologyKey is used to specify the hostname as topology key
K8sHostnameTopologyKey K8sAnnotations = "kubernetes.io/hostname"
)
| 1 | 7,536 | Why are we calling these annotations? I guess this is an old type that we are re-using. But let's make it a point to avoid this once we feel that this design/code is not good. | openebs-maya | go |
@@ -38,6 +38,8 @@ public class RestClientSenderFilter implements ConsumerFilter {
@Override
public CompletableFuture<Response> onFilter(Invocation invocation, FilterNode nextNode) {
+ invocation.onStartSendRequest();
+
CompletableFuture<Response> future = new RestClientSender(invocation)
.send();
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.transport.rest.client;
import java.util.concurrent.CompletableFuture;
import javax.annotation.Nonnull;
import org.apache.servicecomb.core.Invocation;
import org.apache.servicecomb.core.filter.ConsumerFilter;
import org.apache.servicecomb.core.filter.FilterNode;
import org.apache.servicecomb.swagger.invocation.Response;
import org.springframework.stereotype.Component;
@Component
public class RestClientSenderFilter implements ConsumerFilter {
public static final String NAME = "rest-client-sender";
@Nonnull
@Override
public String getName() {
return NAME;
}
@Override
public CompletableFuture<Response> onFilter(Invocation invocation, FilterNode nextNode) {
CompletableFuture<Response> future = new RestClientSender(invocation)
.send();
return invocation.optimizeSyncConsumerThread(future);
}
}
| 1 | 12,278 | `invocation.getInvocationStageTrace().startSend()` is inside `RestClientSender.send` | apache-servicecomb-java-chassis | java |
@@ -218,8 +218,8 @@ func (ap *actPool) Add(ctx context.Context, act action.SealedEnvelope) error {
if act.GasPrice().Cmp(ap.cfg.MinGasPrice()) < 0 {
actpoolMtc.WithLabelValues("gasPriceLower").Inc()
log.L().Info("action rejected due to low gas price",
- zap.String("act hash", hex.EncodeToString(hash[:])),
- zap.String("gas price", act.GasPrice().String()))
+ zap.String("actionHash", hex.EncodeToString(hash[:])),
+ zap.String("gasPrice", act.GasPrice().String()))
return action.ErrUnderpriced
}
if err := ap.validate(ctx, act); err != nil {
| 1 |
// Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package actpool
import (
"context"
"encoding/hex"
"sort"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/prometheustimer"
"github.com/iotexproject/iotex-core/pkg/tracer"
)
var (
actpoolMtc = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "iotex_actpool_rejection_metrics",
Help: "actpool metrics.",
}, []string{"type"})
)
func init() {
prometheus.MustRegister(actpoolMtc)
}
// ActPool is the interface of actpool
type ActPool interface {
action.SealedEnvelopeValidator
// Reset resets actpool state
Reset()
// PendingActionMap returns an action map with all accepted actions
PendingActionMap() map[string][]action.SealedEnvelope
// Add adds an action into the pool after passing validation
Add(ctx context.Context, act action.SealedEnvelope) error
// GetPendingNonce returns pending nonce in pool given an account address
GetPendingNonce(addr string) (uint64, error)
// GetUnconfirmedActs returns unconfirmed actions in pool given an account address
GetUnconfirmedActs(addr string) []action.SealedEnvelope
// GetActionByHash returns the pending action in pool given action's hash
GetActionByHash(hash hash.Hash256) (action.SealedEnvelope, error)
// GetSize returns the act pool size
GetSize() uint64
// GetCapacity returns the act pool capacity
GetCapacity() uint64
// GetGasSize returns the act pool gas size
GetGasSize() uint64
// GetGasCapacity returns the act pool gas capacity
GetGasCapacity() uint64
// DeleteAction deletes an invalid action from pool
DeleteAction(address.Address)
// ReceiveBlock will be called when a new block is committed
ReceiveBlock(*block.Block) error
AddActionEnvelopeValidators(...action.SealedEnvelopeValidator)
}
// SortedActions is a slice of actions that implements sort.Interface to sort by nonce.
type SortedActions []action.SealedEnvelope
func (p SortedActions) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p SortedActions) Len() int { return len(p) }
func (p SortedActions) Less(i, j int) bool { return p[i].Nonce() < p[j].Nonce() }
// Option sets action pool construction parameter
type Option func(pool *actPool) error
// EnableExperimentalActions enables the action pool to take experimental actions
func EnableExperimentalActions() Option {
return func(pool *actPool) error {
pool.enableExperimentalActions = true
return nil
}
}
// actPool implements ActPool interface
type actPool struct {
mutex sync.RWMutex
cfg config.ActPool
sf protocol.StateReader
accountActs map[string]ActQueue
accountDesActs map[string]map[hash.Hash256]action.SealedEnvelope
allActions map[hash.Hash256]action.SealedEnvelope
gasInPool uint64
actionEnvelopeValidators []action.SealedEnvelopeValidator
timerFactory *prometheustimer.TimerFactory
enableExperimentalActions bool
senderBlackList map[string]bool
}
// NewActPool constructs a new actpool
func NewActPool(sf protocol.StateReader, cfg config.ActPool, opts ...Option) (ActPool, error) {
if sf == nil {
return nil, errors.New("Try to attach a nil state reader")
}
senderBlackList := make(map[string]bool)
for _, bannedSender := range cfg.BlackList {
senderBlackList[bannedSender] = true
}
ap := &actPool{
cfg: cfg,
sf: sf,
senderBlackList: senderBlackList,
accountActs: make(map[string]ActQueue),
accountDesActs: make(map[string]map[hash.Hash256]action.SealedEnvelope),
allActions: make(map[hash.Hash256]action.SealedEnvelope),
}
for _, opt := range opts {
if err := opt(ap); err != nil {
return nil, err
}
}
timerFactory, err := prometheustimer.New(
"iotex_action_pool_perf",
"Performance of action pool",
[]string{"type"},
[]string{"default"},
)
if err != nil {
return nil, err
}
ap.timerFactory = timerFactory
return ap, nil
}
func (ap *actPool) AddActionEnvelopeValidators(fs ...action.SealedEnvelopeValidator) {
ap.actionEnvelopeValidators = append(ap.actionEnvelopeValidators, fs...)
}
// Reset resets actpool state
// Step I: remove all the actions in actpool that have already been committed to block
// Step II: update pending balance of each account if it still exists in pool
// Step III: update queue's status in each account and remove invalid actions following queue's update
// Specifically, first reset the pending nonce based on confirmed nonce in order to prevent omitting reevaluation of
// unconfirmed but pending actions in pool after update of pending balance
// Then starting from the current confirmed nonce, iteratively update pending nonce if nonces are consecutive and pending
// balance is sufficient, and remove all the subsequent actions once the pending balance becomes insufficient
func (ap *actPool) Reset() {
ap.mutex.Lock()
defer ap.mutex.Unlock()
ap.reset()
}
func (ap *actPool) ReceiveBlock(*block.Block) error {
ap.mutex.Lock()
defer ap.mutex.Unlock()
ap.reset()
return nil
}
// PendingActionMap returns an action map with all accepted actions
func (ap *actPool) PendingActionMap() map[string][]action.SealedEnvelope {
ap.mutex.Lock()
defer ap.mutex.Unlock()
// Remove the actions that are already timeout
ap.reset()
actionMap := make(map[string][]action.SealedEnvelope)
for from, queue := range ap.accountActs {
actionMap[from] = append(actionMap[from], queue.PendingActs()...)
}
return actionMap
}
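// Add validates an incoming action against the pool's capacity, gas limit,
// duplicate hashes, minimum gas price and the registered envelope validators,
// then queues it under the sender's account.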
func (ap *actPool) Add(ctx context.Context, act action.SealedEnvelope) error {
ap.mutex.Lock()
defer ap.mutex.Unlock()
ctx, span := tracer.NewSpan(ctx, "actPool.Add")
defer span.End()
// Reject action if pool space is full
if uint64(len(ap.allActions)) >= ap.cfg.MaxNumActsPerPool {
actpoolMtc.WithLabelValues("overMaxNumActsPerPool").Inc()
return action.ErrTxPoolOverflow
}
intrinsicGas, err := act.IntrinsicGas()
if err != nil {
actpoolMtc.WithLabelValues("failedGetIntrinsicGas").Inc()
return err
}
if ap.gasInPool+intrinsicGas > ap.cfg.MaxGasLimitPerPool {
actpoolMtc.WithLabelValues("overMaxGasLimitPerPool").Inc()
return action.ErrGasLimit
}
hash, err := act.Hash()
if err != nil {
return err
}
// Reject action if it already exists in pool
if _, exist := ap.allActions[hash]; exist {
actpoolMtc.WithLabelValues("existedAction").Inc()
return action.ErrExistedInPool
}
// Reject action if the gas price is lower than the threshold
if act.GasPrice().Cmp(ap.cfg.MinGasPrice()) < 0 {
actpoolMtc.WithLabelValues("gasPriceLower").Inc()
log.L().Info("action rejected due to low gas price",
zap.String("act hash", hex.EncodeToString(hash[:])),
zap.String("gas price", act.GasPrice().String()))
return action.ErrUnderpriced
}
if err := ap.validate(ctx, act); err != nil {
return err
}
caller := act.SrcPubkey().Address()
if caller == nil {
return action.ErrAddress
}
return ap.enqueueAction(caller, act, hash, act.Nonce())
}
// GetPendingNonce returns pending nonce in pool or confirmed nonce given an account address
func (ap *actPool) GetPendingNonce(addr string) (uint64, error) {
addrStr, err := address.FromString(addr)
if err != nil {
return 0, err
}
ap.mutex.RLock()
defer ap.mutex.RUnlock()
if queue, ok := ap.accountActs[addr]; ok {
return queue.PendingNonce(), nil
}
confirmedState, err := accountutil.AccountState(ap.sf, addrStr)
if err != nil {
return 0, err
}
return confirmedState.Nonce + 1, nil
}
// GetUnconfirmedActs returns unconfirmed actions in pool given an account address
func (ap *actPool) GetUnconfirmedActs(addr string) []action.SealedEnvelope {
ap.mutex.RLock()
defer ap.mutex.RUnlock()
var ret []action.SealedEnvelope
if queue, ok := ap.accountActs[addr]; ok {
ret = queue.AllActs()
}
if desMap, ok := ap.accountDesActs[addr]; ok {
if desMap != nil {
sortActions := make(SortedActions, 0)
for _, v := range desMap {
sortActions = append(sortActions, v)
}
sort.Stable(sortActions)
ret = append(ret, sortActions...)
}
}
return ret
}
// GetActionByHash returns the pending action in pool given action's hash
func (ap *actPool) GetActionByHash(hash hash.Hash256) (action.SealedEnvelope, error) {
ap.mutex.RLock()
defer ap.mutex.RUnlock()
act, ok := ap.allActions[hash]
if !ok {
return action.SealedEnvelope{}, errors.Wrapf(action.ErrNotFound, "action hash %x does not exist in pool", hash)
}
return act, nil
}
// GetSize returns the act pool size
func (ap *actPool) GetSize() uint64 {
ap.mutex.RLock()
defer ap.mutex.RUnlock()
return uint64(len(ap.allActions))
}
// GetCapacity returns the act pool capacity
func (ap *actPool) GetCapacity() uint64 {
return ap.cfg.MaxNumActsPerPool
}
// GetGasSize returns the act pool gas size
func (ap *actPool) GetGasSize() uint64 {
ap.mutex.RLock()
defer ap.mutex.RUnlock()
return ap.gasInPool
}
// GetGasCapacity returns the act pool gas capacity
func (ap *actPool) GetGasCapacity() uint64 {
return ap.cfg.MaxGasLimitPerPool
}
func (ap *actPool) Validate(ctx context.Context, selp action.SealedEnvelope) error {
ap.mutex.RLock()
defer ap.mutex.RUnlock()
return ap.validate(ctx, selp)
}
func (ap *actPool) DeleteAction(caller address.Address) {
// take the write lock since the account queue and action maps are mutated below
ap.mutex.Lock()
defer ap.mutex.Unlock()
queue, ok := ap.accountActs[caller.String()]
if !ok {
return
}
ap.removeInvalidActs(queue.AllActs())
delete(ap.accountActs, caller.String())
}
func (ap *actPool) validate(ctx context.Context, selp action.SealedEnvelope) error {
span := tracer.SpanFromContext(ctx)
span.AddEvent("actPool.validate")
defer span.End()
caller := selp.SrcPubkey().Address()
if caller == nil {
return errors.New("failed to get address")
}
if _, ok := ap.senderBlackList[caller.String()]; ok {
actpoolMtc.WithLabelValues("blacklisted").Inc()
return errors.Wrap(action.ErrAddress, "action source address is blacklisted")
}
// if already validated
selpHash, err := selp.Hash()
if err != nil {
return err
}
if _, ok := ap.allActions[selpHash]; ok {
return nil
}
for _, ev := range ap.actionEnvelopeValidators {
if err := ev.Validate(ctx, selp); err != nil {
return err
}
}
return nil
}
//======================================
// private functions
//======================================
func (ap *actPool) enqueueAction(addr address.Address, act action.SealedEnvelope, actHash hash.Hash256, actNonce uint64) error {
confirmedState, err := accountutil.AccountState(ap.sf, addr)
if err != nil {
actpoolMtc.WithLabelValues("failedToGetNonce").Inc()
return errors.Wrapf(err, "failed to get sender's nonce for action %x", actHash)
}
confirmedNonce := confirmedState.Nonce
if actNonce <= confirmedNonce {
return action.ErrNonceTooLow
}
sender := addr.String()
queue := ap.accountActs[sender]
if queue == nil {
queue = NewActQueue(ap, sender, WithTimeOut(ap.cfg.ActionExpiry))
ap.accountActs[sender] = queue
// Initialize pending nonce for new account
pendingNonce := confirmedNonce + 1
queue.SetPendingNonce(pendingNonce)
// Initialize balance for new account
state, err := accountutil.AccountState(ap.sf, addr)
if err != nil {
actpoolMtc.WithLabelValues("failedToGetBalance").Inc()
return errors.Wrapf(err, "failed to get sender's balance for action %x", actHash)
}
queue.SetPendingBalance(state.Balance)
}
if actNonce-confirmedNonce >= ap.cfg.MaxNumActsPerAcct+1 {
// Nonce exceeds current range
log.L().Debug("Rejecting action because nonce is too large.",
log.Hex("hash", actHash[:]),
zap.Uint64("startNonce", confirmedNonce+1),
zap.Uint64("actNonce", actNonce))
actpoolMtc.WithLabelValues("nonceTooLarge").Inc()
return action.ErrNonceTooHigh
}
cost, err := act.Cost()
if err != nil {
actpoolMtc.WithLabelValues("failedToGetCost").Inc()
return errors.Wrapf(err, "failed to get cost of action %x", actHash)
}
if queue.PendingBalance().Cmp(cost) < 0 {
// Pending balance is insufficient
actpoolMtc.WithLabelValues("insufficientBalance").Inc()
log.L().Info("insufficient balance for action",
zap.String("act hash", hex.EncodeToString(actHash[:])),
zap.String("cost", cost.String()),
zap.String("pending balance", queue.PendingBalance().String()),
zap.String("sender", sender),
)
return action.ErrInsufficientFunds
}
if err := queue.Put(act); err != nil {
actpoolMtc.WithLabelValues("failedPutActQueue").Inc()
log.L().Info("failed put action into ActQueue",
zap.String("act hash", hex.EncodeToString(actHash[:])))
return err
}
ap.allActions[actHash] = act
// add action to destination map
desAddress, ok := act.Destination()
if ok && !strings.EqualFold(sender, desAddress) {
desQueue := ap.accountDesActs[desAddress]
if desQueue == nil {
ap.accountDesActs[desAddress] = make(map[hash.Hash256]action.SealedEnvelope)
}
ap.accountDesActs[desAddress][actHash] = act
}
intrinsicGas, _ := act.IntrinsicGas()
ap.gasInPool += intrinsicGas
// If the pending nonce equals this nonce, update queue
nonce := queue.PendingNonce()
if actNonce == nonce {
ap.updateAccount(sender)
}
return nil
}
// removeConfirmedActs removes processed (committed to a block) actions from the pool
func (ap *actPool) removeConfirmedActs() {
for from, queue := range ap.accountActs {
addr, _ := address.FromString(from)
confirmedState, err := accountutil.AccountState(ap.sf, addr)
if err != nil {
log.L().Error("Error when removing confirmed actions", zap.Error(err))
return
}
pendingNonce := confirmedState.Nonce + 1
// Remove all actions that are committed to new block
acts := queue.FilterNonce(pendingNonce)
ap.removeInvalidActs(acts)
// delete actions from destination map
ap.deleteAccountDestinationActions(acts...)
// Delete the queue entry if it becomes empty
if queue.Empty() {
delete(ap.accountActs, from)
}
}
}
func (ap *actPool) removeInvalidActs(acts []action.SealedEnvelope) {
for _, act := range acts {
hash, err := act.Hash()
if err != nil {
log.L().Debug("Skipping action due to hash error", zap.Error(err))
continue
}
log.L().Debug("Removed invalidated action.", log.Hex("hash", hash[:]))
delete(ap.allActions, hash)
intrinsicGas, _ := act.IntrinsicGas()
ap.subGasFromPool(intrinsicGas)
// delete action from destination map
ap.deleteAccountDestinationActions(act)
}
}
// deleteAccountDestinationActions removes the given actions from the destination address map only
func (ap *actPool) deleteAccountDestinationActions(acts ...action.SealedEnvelope) {
for _, act := range acts {
hash, err := act.Hash()
if err != nil {
log.L().Debug("Skipping action due to hash error", zap.Error(err))
continue
}
desAddress, ok := act.Destination()
if ok {
dst := ap.accountDesActs[desAddress]
if dst != nil {
delete(dst, hash)
}
}
}
}
// updateAccount updates the queue's status and removes invalidated actions from the pool if necessary
func (ap *actPool) updateAccount(sender string) {
queue := ap.accountActs[sender]
acts := queue.UpdateQueue(queue.PendingNonce())
if len(acts) > 0 {
ap.removeInvalidActs(acts)
}
// Delete the queue entry if it becomes empty
if queue.Empty() {
delete(ap.accountActs, sender)
}
}
func (ap *actPool) reset() {
timer := ap.timerFactory.NewTimer("reset")
defer timer.End()
// Remove confirmed actions in actpool
ap.removeConfirmedActs()
for from, queue := range ap.accountActs {
// Reset pending balance for each account
addr, _ := address.FromString(from)
state, err := accountutil.AccountState(ap.sf, addr)
if err != nil {
log.L().Error("Error when resetting actpool state.", zap.Error(err))
return
}
queue.SetPendingBalance(state.Balance)
// Reset pending nonce and remove invalid actions for each account
confirmedNonce := state.Nonce
pendingNonce := confirmedNonce + 1
queue.SetPendingNonce(pendingNonce)
ap.updateAccount(from)
}
}
func (ap *actPool) subGasFromPool(gas uint64) {
if ap.gasInPool < gas {
ap.gasInPool = 0
return
}
ap.gasInPool -= gas
}
| 1 | 24,113 | use "actionHash" to be consistent with other places | iotexproject-iotex-core | go |
@@ -4,11 +4,14 @@
package net.sourceforge.pmd.lang.rule.properties;
+import java.util.Collections;
import java.util.Enumeration;
import java.util.Map;
+import net.sourceforge.pmd.EnumeratedPropertyDescriptor;
import net.sourceforge.pmd.PropertyDescriptorFactory;
-import net.sourceforge.pmd.lang.rule.properties.factories.BasicPropertyDescriptorFactory;
+import net.sourceforge.pmd.PropertyDescriptorField;
+import net.sourceforge.pmd.util.CollectionUtil;
/**
* Defines a datatype with a set of preset values of any type as held within a | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.rule.properties;
import java.util.Enumeration;
import java.util.Map;
import net.sourceforge.pmd.PropertyDescriptorFactory;
import net.sourceforge.pmd.lang.rule.properties.factories.BasicPropertyDescriptorFactory;
/**
* Defines a datatype with a set of preset values of any type as held within a
* pair of maps. While the values are not serialized out, the labels are and
* serve as keys to obtain the values. The choices() method provides the ordered
* selections to be used in an editor widget.
*
* @author Brian Remedios
* @param <E>
*/
public class EnumeratedProperty<E> extends AbstractEnumeratedProperty<E, Object> {
public static final PropertyDescriptorFactory FACTORY = new BasicPropertyDescriptorFactory<EnumeratedProperty>(
Enumeration.class) {
@Override
public EnumeratedProperty createWith(Map<String, String> valuesById) {
return new EnumeratedProperty(nameIn(valuesById), descriptionIn(valuesById), labelsIn(valuesById),
choicesIn(valuesById), indexIn(valuesById), 0f);
}
};
/**
* Constructor for EnumeratedProperty.
*
* @param theName        name of the property
* @param theDescription description of the property
* @param theLabels      labels of the choices; these are serialized and used as keys to look up the values
* @param theChoices     the selectable values, in the same order as the labels
* @param defaultIndex   index of the default choice
* @param theUIOrder     UI order
* @throws IllegalArgumentException if the arguments are inconsistent
*/
public EnumeratedProperty(String theName, String theDescription, String[] theLabels, E[] theChoices,
int defaultIndex, float theUIOrder) {
super(theName, theDescription, theLabels, theChoices, new int[] { defaultIndex }, theUIOrder, false);
}
/**
* @return Class
* @see net.sourceforge.pmd.PropertyDescriptor#type()
*/
@Override
public Class<Object> type() {
return Object.class;
}
/**
* @param value
* Object
* @return String
* @see net.sourceforge.pmd.PropertyDescriptor#errorFor(Object)
*/
@Override
public String errorFor(Object value) {
return labelsByChoice.containsKey(value) ? null : nonLegalValueMsgFor(value);
}
/**
* @param value
* String
* @return Object
* @throws IllegalArgumentException
* @see net.sourceforge.pmd.PropertyDescriptor#valueFrom(String)
*/
@Override
public Object valueFrom(String value) throws IllegalArgumentException {
return choiceFrom(value);
}
/**
*
* @param value
* Object
* @return String
* @see net.sourceforge.pmd.PropertyDescriptor#asDelimitedString(Object)
*/
@Override
public String asDelimitedString(Object value) {
return labelsByChoice.get(value);
}
}
| 1 | 12,320 | Can we derive `Class<E>` by looking at one of the label choices, if at least one is always provided? Given these are immutable, a real-world case for an empty enumeration eludes me. | pmd-pmd | java |
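The reviewer's question above can be made concrete with a small sketch. This is not part of the PMD sources or the patch; the class name EnumeratedPropertySketch is made up for illustration, and it assumes, as the reviewer does, that at least one non-null choice is always provided, so the value type can be derived from the first choice instead of hard-coding Object.class.

import java.util.Arrays;

// Illustrative sketch only: derives the runtime value type from the first provided choice.
public class EnumeratedPropertySketch<E> {

    private final E[] choices;

    public EnumeratedPropertySketch(E[] theChoices) {
        if (theChoices == null || theChoices.length == 0) {
            throw new IllegalArgumentException("at least one choice is required");
        }
        this.choices = Arrays.copyOf(theChoices, theChoices.length);
    }

    // Instead of returning a hard-coded Object.class, ask the first choice for its class.
    @SuppressWarnings("unchecked")
    public Class<E> type() {
        return (Class<E>) choices[0].getClass();
    }
}

One caveat with this approach: getClass() on the first choice returns its concrete runtime class, which may be a subclass of the declared E, so it works best when all choices share the same concrete type.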
@@ -5364,6 +5364,17 @@ TEST_F(VkLayerTest, AndroidHardwareBufferCreateYCbCrSampler) {
m_errorMonitor->SetUnexpectedError("VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651");
vk::CreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv);
m_errorMonitor->VerifyFound();
+
+ m_errorMonitor->ExpectSuccess();
+ efa.externalFormat = AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420;
+ sycci.format = VK_FORMAT_UNDEFINED;
+ sycci.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709;
+ sycci.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW;
+ // Spec says that if we use VkExternalFormatANDROID, the value of components is ignored.
+ sycci.components = {VK_COMPONENT_SWIZZLE_ZERO, VK_COMPONENT_SWIZZLE_ZERO, VK_COMPONENT_SWIZZLE_ZERO, VK_COMPONENT_SWIZZLE_ZERO};
+ vk::CreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv);
+ m_errorMonitor->VerifyNotFound();
+ vk::DestroySamplerYcbcrConversion(dev, ycbcr_conv, nullptr);
}
TEST_F(VkLayerTest, AndroidHardwareBufferPhysDevImageFormatProp2) { | 1 | /*
* Copyright (c) 2015-2020 The Khronos Group Inc.
* Copyright (c) 2015-2020 Valve Corporation
* Copyright (c) 2015-2020 LunarG, Inc.
* Copyright (c) 2015-2020 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Author: Chia-I Wu <[email protected]>
* Author: Chris Forbes <[email protected]>
* Author: Courtney Goeltzenleuchter <[email protected]>
* Author: Mark Lobodzinski <[email protected]>
* Author: Mike Stroyan <[email protected]>
* Author: Tobin Ehlis <[email protected]>
* Author: Tony Barbour <[email protected]>
* Author: Cody Northrop <[email protected]>
* Author: Dave Houlton <[email protected]>
* Author: Jeremy Kniager <[email protected]>
* Author: Shannon McPherson <[email protected]>
* Author: John Zulauf <[email protected]>
*/
#include "cast_utils.h"
#include "layer_validation_tests.h"
class MessageIdFilter {
public:
MessageIdFilter(const char *filter_string) {
local_string = filter_string;
filter_string_value.arrayString.pCharArray = local_string.data();
filter_string_value.arrayString.count = local_string.size();
strncpy(filter_setting_val.name, "message_id_filter", sizeof(filter_setting_val.name));
filter_setting_val.type = VK_LAYER_SETTING_VALUE_TYPE_STRING_ARRAY_EXT;
filter_setting_val.data = filter_string_value;
filter_setting = {static_cast<VkStructureType>(VK_STRUCTURE_TYPE_INSTANCE_LAYER_SETTINGS_EXT), nullptr, 1,
&filter_setting_val};
}
VkLayerSettingsEXT *pnext{&filter_setting};
private:
VkLayerSettingValueDataEXT filter_string_value{};
VkLayerSettingValueEXT filter_setting_val;
VkLayerSettingsEXT filter_setting;
std::string local_string;
};
class CustomStypeList {
public:
CustomStypeList(const char *stype_id_string) {
local_string = stype_id_string;
custom_stype_value.arrayString.pCharArray = local_string.data();
custom_stype_value.arrayString.count = local_string.size();
strncpy(custom_stype_setting_val.name, "custom_stype_list", sizeof(custom_stype_setting_val.name));
custom_stype_setting_val.type = VK_LAYER_SETTING_VALUE_TYPE_STRING_ARRAY_EXT;
custom_stype_setting_val.data = custom_stype_value;
custom_stype_setting = {static_cast<VkStructureType>(VK_STRUCTURE_TYPE_INSTANCE_LAYER_SETTINGS_EXT), nullptr, 1,
&custom_stype_setting_val};
}
CustomStypeList(const std::vector<uint32_t> &stype_id_array) {
local_vector = stype_id_array;
custom_stype_value.arrayInt32.pInt32Array = local_vector.data();
custom_stype_value.arrayInt32.count = local_vector.size();
strncpy(custom_stype_setting_val.name, "custom_stype_list", sizeof(custom_stype_setting_val.name));
custom_stype_setting_val.type = VK_LAYER_SETTING_VALUE_TYPE_UINT32_ARRAY_EXT;
custom_stype_setting_val.data = custom_stype_value;
custom_stype_setting = {static_cast<VkStructureType>(VK_STRUCTURE_TYPE_INSTANCE_LAYER_SETTINGS_EXT), nullptr, 1,
&custom_stype_setting_val};
}
VkLayerSettingsEXT *pnext{&custom_stype_setting};
private:
VkLayerSettingValueDataEXT custom_stype_value{};
VkLayerSettingValueEXT custom_stype_setting_val;
VkLayerSettingsEXT custom_stype_setting;
std::string local_string;
std::vector<uint32_t> local_vector;
};
class DuplicateMsgLimit {
public:
DuplicateMsgLimit(const uint32_t limit) {
limit_value.value32 = limit;
strncpy(limit_setting_val.name, "duplicate_message_limit", sizeof(limit_setting_val.name));
limit_setting_val.type = VK_LAYER_SETTING_VALUE_TYPE_UINT32_EXT;
limit_setting_val.data = limit_value;
limit_setting = {static_cast<VkStructureType>(VK_STRUCTURE_TYPE_INSTANCE_LAYER_SETTINGS_EXT), nullptr, 1,
&limit_setting_val};
}
VkLayerSettingsEXT *pnext{&limit_setting};
private:
VkLayerSettingValueDataEXT limit_value{};
VkLayerSettingValueEXT limit_setting_val;
VkLayerSettingsEXT limit_setting;
};
TEST_F(VkLayerTest, VersionCheckPromotedAPIs) {
TEST_DESCRIPTION("Validate that promoted APIs are not valid in old versions.");
SetTargetApiVersion(VK_API_VERSION_1_0);
ASSERT_NO_FATAL_FAILURE(Init());
PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2 =
(PFN_vkGetPhysicalDeviceProperties2)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2");
assert(vkGetPhysicalDeviceProperties2);
VkPhysicalDeviceProperties2 phys_dev_props_2{};
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-API-Version-Violation");
vkGetPhysicalDeviceProperties2(gpu(), &phys_dev_props_2);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnsupportedPnextApiVersion) {
TEST_DESCRIPTION("Validate that newer pnext structs are not valid for old Vulkan versions.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(Init());
if (IsPlatform(kNexusPlayer)) {
printf("%s This test should not run on Nexus Player\n", kSkipPrefix);
return;
}
auto phys_dev_props_2 = lvl_init_struct<VkPhysicalDeviceProperties2>();
auto bad_version_1_1_struct = lvl_init_struct<VkPhysicalDeviceVulkan12Properties>();
phys_dev_props_2.pNext = &bad_version_1_1_struct;
// VkPhysDevVulkan12Props was introduced in 1.2, so try adding it to a 1.1 pNext chain
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPhysicalDeviceProperties2-pNext-pNext");
vk::GetPhysicalDeviceProperties2(gpu(), &phys_dev_props_2);
m_errorMonitor->VerifyFound();
// 1.1 context, VK_KHR_depth_stencil_resolve is NOT enabled, but using its struct is valid
if (DeviceExtensionSupported(VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME)) {
auto unenabled_device_ext_struct = lvl_init_struct<VkPhysicalDeviceDepthStencilResolveProperties>();
phys_dev_props_2.pNext = &unenabled_device_ext_struct;
if (DeviceValidationVersion() >= VK_API_VERSION_1_1) {
m_errorMonitor->ExpectSuccess();
vk::GetPhysicalDeviceProperties2(gpu(), &phys_dev_props_2);
m_errorMonitor->VerifyNotFound();
} else {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-API-Version-Violation");
vk::GetPhysicalDeviceProperties2(gpu(), &phys_dev_props_2);
m_errorMonitor->VerifyFound();
}
}
}
TEST_F(VkLayerTest, PrivateDataExtTest) {
TEST_DESCRIPTION("Test private data extension use.");
ASSERT_NO_FATAL_FAILURE(InitFramework());
if (IsPlatform(kMockICD) || DeviceSimulation()) {
printf("%s Test not supported by MockICD, skipping.\n", kSkipPrefix);
return;
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_PRIVATE_DATA_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_PRIVATE_DATA_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_PRIVATE_DATA_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkDestroyPrivateDataSlotEXT pfn_vkDestroyPrivateDataSlotEXT =
(PFN_vkDestroyPrivateDataSlotEXT)vk::GetDeviceProcAddr(m_device->handle(), "vkDestroyPrivateDataSlotEXT");
PFN_vkCreatePrivateDataSlotEXT pfn_vkCreatePrivateDataSlotEXT =
(PFN_vkCreatePrivateDataSlotEXT)vk::GetDeviceProcAddr(m_device->handle(), "vkCreatePrivateDataSlotEXT");
PFN_vkGetPrivateDataEXT pfn_vkGetPrivateDataEXT =
(PFN_vkGetPrivateDataEXT)vk::GetDeviceProcAddr(m_device->handle(), "vkGetPrivateDataEXT");
PFN_vkSetPrivateDataEXT pfn_vkSetPrivateDataEXT =
(PFN_vkSetPrivateDataEXT)vk::GetDeviceProcAddr(m_device->handle(), "vkSetPrivateDataEXT");
VkPrivateDataSlotEXT data_slot;
VkPrivateDataSlotCreateInfoEXT data_create_info;
data_create_info.sType = VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT;
data_create_info.pNext = NULL;
data_create_info.flags = 0;
VkResult err = pfn_vkCreatePrivateDataSlotEXT(m_device->handle(), &data_create_info, NULL, &data_slot);
if (err != VK_SUCCESS) {
printf("%s Failed to create private data slot, VkResult %d.\n", kSkipPrefix, err);
}
VkSampler sampler;
VkSamplerCreateInfo sampler_info;
sampler_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
sampler_info.pNext = NULL;
sampler_info.flags = 0;
sampler_info.magFilter = VK_FILTER_LINEAR;
sampler_info.minFilter = VK_FILTER_LINEAR;
sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
sampler_info.mipLodBias = 0.0f;
sampler_info.anisotropyEnable = VK_FALSE;
sampler_info.maxAnisotropy = 16;
sampler_info.compareEnable = VK_FALSE;
sampler_info.compareOp = VK_COMPARE_OP_ALWAYS;
sampler_info.minLod = 0.0f;
sampler_info.maxLod = 0.0f;
sampler_info.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
sampler_info.unnormalizedCoordinates = VK_FALSE;
vk::CreateSampler(m_device->handle(), &sampler_info, NULL, &sampler);
static const uint64_t data_value = 0x70AD;
err = pfn_vkSetPrivateDataEXT(m_device->handle(), VK_OBJECT_TYPE_SAMPLER, (uint64_t)sampler, data_slot, data_value);
if (err != VK_SUCCESS) {
printf("%s Failed to set private data. VkResult = %d", kSkipPrefix, err);
}
m_errorMonitor->ExpectSuccess();
uint64_t data;
pfn_vkGetPrivateDataEXT(m_device->handle(), VK_OBJECT_TYPE_SAMPLER, (uint64_t)sampler, data_slot, &data);
if (data != data_value) {
m_errorMonitor->SetError("Got unexpected private data, %s.\n");
}
pfn_vkDestroyPrivateDataSlotEXT(m_device->handle(), data_slot, NULL);
vk::DestroySampler(m_device->handle(), sampler, NULL);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkLayerTest, CustomStypeStructString) {
TEST_DESCRIPTION("Positive Test for ability to specify custom pNext structs using a list (string)");
// Create a custom structure
typedef struct CustomStruct {
VkStructureType sType;
const void *pNext;
uint32_t custom_data;
} CustomStruct;
uint32_t custom_stype = 3000300000;
CustomStruct custom_struct;
custom_struct.pNext = nullptr;
custom_struct.sType = static_cast<VkStructureType>(custom_stype);
custom_struct.custom_data = 44;
// Communicate list of structinfo pairs to layers
auto stype_list = CustomStypeList("3000300000,24");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor, stype_list.pnext));
ASSERT_NO_FATAL_FAILURE(InitState());
uint32_t queue_family_index = 0;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = 1024;
buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
buffer_create_info.queueFamilyIndexCount = 1;
buffer_create_info.pQueueFamilyIndices = &queue_family_index;
VkBufferObj buffer;
buffer.init(*m_device, buffer_create_info);
VkBufferView buffer_view;
VkBufferViewCreateInfo bvci = {};
bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
bvci.pNext = &custom_struct; // Add custom struct through pNext
bvci.buffer = buffer.handle();
bvci.format = VK_FORMAT_R32_SFLOAT;
bvci.range = VK_WHOLE_SIZE;
m_errorMonitor->ExpectSuccess(kErrorBit);
vk::CreateBufferView(m_device->device(), &bvci, NULL, &buffer_view);
m_errorMonitor->VerifyNotFound();
vk::DestroyBufferView(m_device->device(), buffer_view, nullptr);
}
TEST_F(VkLayerTest, CustomStypeStructArray) {
TEST_DESCRIPTION("Positive Test for ability to specify custom pNext structs using a vector of integers");
// Create a custom structure
typedef struct CustomStruct {
VkStructureType sType;
const void *pNext;
uint32_t custom_data;
} CustomStruct;
const uint32_t custom_stype_a = 3000300000;
CustomStruct custom_struct_a;
custom_struct_a.pNext = nullptr;
custom_struct_a.sType = static_cast<VkStructureType>(custom_stype_a);
custom_struct_a.custom_data = 44;
const uint32_t custom_stype_b = 3000300001;
CustomStruct custom_struct_b;
custom_struct_b.pNext = &custom_struct_a;
custom_struct_b.sType = static_cast<VkStructureType>(custom_stype_b);
custom_struct_b.custom_data = 88;
// Communicate list of structinfo pairs to layers, including a duplicate which should get filtered out
std::vector<uint32_t> custom_struct_info = {custom_stype_a, sizeof(CustomStruct), custom_stype_b,
sizeof(CustomStruct), custom_stype_a, sizeof(CustomStruct)};
auto stype_list = CustomStypeList(custom_struct_info);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor, stype_list.pnext));
ASSERT_NO_FATAL_FAILURE(InitState());
uint32_t queue_family_index = 0;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = 1024;
buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
buffer_create_info.queueFamilyIndexCount = 1;
buffer_create_info.pQueueFamilyIndices = &queue_family_index;
VkBufferObj buffer;
buffer.init(*m_device, buffer_create_info);
VkBufferView buffer_view;
VkBufferViewCreateInfo bvci = {};
bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
bvci.pNext = &custom_struct_b; // Add custom struct through pNext
bvci.buffer = buffer.handle();
bvci.format = VK_FORMAT_R32_SFLOAT;
bvci.range = VK_WHOLE_SIZE;
m_errorMonitor->ExpectSuccess(kErrorBit);
vk::CreateBufferView(m_device->device(), &bvci, NULL, &buffer_view);
m_errorMonitor->VerifyNotFound();
vk::DestroyBufferView(m_device->device(), buffer_view, nullptr);
}
TEST_F(VkLayerTest, DuplicateMessageLimit) {
TEST_DESCRIPTION("Use the duplicate_message_id setting and verify correct operation");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
auto msg_limit = DuplicateMsgLimit(3);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor, msg_limit.pnext));
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
(PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr);
// Create an invalid pNext structure to trigger the stateless validation warning
VkBaseOutStructure bogus_struct{};
bogus_struct.sType = static_cast<VkStructureType>(0x33333333);
auto properties2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&bogus_struct);
// Should get the first three errors just fine
m_errorMonitor->SetDesiredFailureMsg((kErrorBit | kWarningBit), "VUID-VkPhysicalDeviceProperties2-pNext-pNext");
vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg((kErrorBit | kWarningBit), "VUID-VkPhysicalDeviceProperties2-pNext-pNext");
vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg((kErrorBit | kWarningBit), "VUID-VkPhysicalDeviceProperties2-pNext-pNext");
vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2);
m_errorMonitor->VerifyFound();
// Limit should prevent the message from coming through a fourth time
m_errorMonitor->ExpectSuccess(kErrorBit | kWarningBit);
vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkLayerTest, MessageIdFilterString) {
TEST_DESCRIPTION("Validate that message id string filtering is working");
// This test would normally produce an unexpected error or two. Use the message filter instead of
// the error_monitor's SetUnexpectedError to test the filtering.
auto filter_setting = MessageIdFilter("VUID-VkRenderPassCreateInfo-pNext-01963");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor, filter_setting.pnext));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_MAINTENANCE2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkAttachmentDescription attach = {0,
VK_FORMAT_R8G8B8A8_UNORM,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 0, nullptr, nullptr, nullptr, 0, nullptr};
VkInputAttachmentAspectReference iaar = {0, 0, VK_IMAGE_ASPECT_METADATA_BIT};
VkRenderPassInputAttachmentAspectCreateInfo rpiaaci = {VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO,
nullptr, 1, &iaar};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpiaaci, 0, 1, &attach, 1, &subpass, 0, nullptr};
m_errorMonitor->SetUnexpectedError("VUID-VkRenderPassCreateInfo2-attachment-02525");
TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkInputAttachmentAspectReference-aspectMask-01964",
nullptr);
}
TEST_F(VkLayerTest, MessageIdFilterHexInt) {
TEST_DESCRIPTION("Validate that message id hex int filtering is working");
// This test would normally produce an unexpected error or two. Use the message filter instead of
// the error_monitor's SetUnexpectedError to test the filtering.
auto filter_setting = MessageIdFilter("0xa19880e3");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor, filter_setting.pnext));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_MAINTENANCE2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkAttachmentDescription attach = {0,
VK_FORMAT_R8G8B8A8_UNORM,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 0, nullptr, nullptr, nullptr, 0, nullptr};
VkInputAttachmentAspectReference iaar = {0, 0, VK_IMAGE_ASPECT_METADATA_BIT};
VkRenderPassInputAttachmentAspectCreateInfo rpiaaci = {VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO,
nullptr, 1, &iaar};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpiaaci, 0, 1, &attach, 1, &subpass, 0, nullptr};
m_errorMonitor->SetUnexpectedError("VUID-VkRenderPassCreateInfo2-attachment-02525");
TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkInputAttachmentAspectReference-aspectMask-01964",
nullptr);
}
TEST_F(VkLayerTest, MessageIdFilterInt) {
TEST_DESCRIPTION("Validate that message id decimal int filtering is working");
// This test would normally produce an unexpected error or two. Use the message filter instead of
// the error_monitor's SetUnexpectedError to test the filtering.
auto filter_setting = MessageIdFilter("2711126243");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor, filter_setting.pnext));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_MAINTENANCE2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkAttachmentDescription attach = {0,
VK_FORMAT_R8G8B8A8_UNORM,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 0, nullptr, nullptr, nullptr, 0, nullptr};
VkInputAttachmentAspectReference iaar = {0, 0, VK_IMAGE_ASPECT_METADATA_BIT};
VkRenderPassInputAttachmentAspectCreateInfo rpiaaci = {VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO,
nullptr, 1, &iaar};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, &rpiaaci, 0, 1, &attach, 1, &subpass, 0, nullptr};
m_errorMonitor->SetUnexpectedError("VUID-VkRenderPassCreateInfo2-attachment-02525");
TestRenderPassCreate(m_errorMonitor, m_device->device(), &rpci, false, "VUID-VkInputAttachmentAspectReference-aspectMask-01964",
nullptr);
}
struct LayerStatusCheckData {
std::function<void(const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData, LayerStatusCheckData *)> callback;
ErrorMonitor *error_monitor;
};
TEST_F(VkLayerTest, LayerInfoMessages) {
TEST_DESCRIPTION("Ensure layer prints startup status messages.");
auto ici = GetInstanceCreateInfo();
LayerStatusCheckData callback_data;
auto local_callback = [](const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData, LayerStatusCheckData *data) {
std::string message(pCallbackData->pMessage);
if ((data->error_monitor->GetMessageFlags() & kInformationBit) &&
(message.find("UNASSIGNED-khronos-validation-createinstance-status-message") == std::string::npos)) {
data->error_monitor->SetError("UNASSIGNED-Khronos-validation-createinstance-status-message-not-found");
} else if ((data->error_monitor->GetMessageFlags() & kPerformanceWarningBit) &&
(message.find("UNASSIGNED-khronos-Validation-debug-build-warning-message") == std::string::npos)) {
data->error_monitor->SetError("UNASSIGNED-khronos-validation-createinstance-debug-warning-message-not-found");
}
};
callback_data.error_monitor = m_errorMonitor;
callback_data.callback = local_callback;
VkInstance local_instance;
auto callback_create_info = lvl_init_struct<VkDebugUtilsMessengerCreateInfoEXT>();
callback_create_info.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT;
callback_create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
callback_create_info.pfnUserCallback = DebugUtilsCallback;
callback_create_info.pUserData = &callback_data;
ici.pNext = &callback_create_info;
// Create an instance, error if layer status INFO message not found
m_errorMonitor->ExpectSuccess();
ASSERT_VK_SUCCESS(vk::CreateInstance(&ici, nullptr, &local_instance));
m_errorMonitor->VerifyNotFound();
vk::DestroyInstance(local_instance, nullptr);
#ifndef NDEBUG
// Create an instance, error if layer DEBUG_BUILD warning message not found
callback_create_info.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
callback_create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
m_errorMonitor->ExpectSuccess();
ASSERT_VK_SUCCESS(vk::CreateInstance(&ici, nullptr, &local_instance));
m_errorMonitor->VerifyNotFound();
vk::DestroyInstance(local_instance, nullptr);
#endif
}
TEST_F(VkLayerTest, RequiredParameter) {
TEST_DESCRIPTION("Specify VK_NULL_HANDLE, NULL, and 0 for required handle, pointer, array, and array count parameters");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "required parameter pFeatures specified as NULL");
// Specify NULL for a pointer to a handle
// Expected to trigger an error with
// parameter_validation::validate_required_pointer
vk::GetPhysicalDeviceFeatures(gpu(), NULL);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "required parameter pQueueFamilyPropertyCount specified as NULL");
// Specify NULL for pointer to array count
// Expected to trigger an error with parameter_validation::validate_array
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), NULL, NULL);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewport-viewportCount-arraylength");
// Specify 0 for a required array count
// Expected to trigger an error with parameter_validation::validate_array
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
m_commandBuffer->SetViewport(0, 0, &viewport);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCreateImage-pCreateInfo-parameter");
// Specify a null pImageCreateInfo struct pointer
VkImage test_image;
vk::CreateImage(device(), NULL, NULL, &test_image);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewport-pViewports-parameter");
// Specify NULL for a required array
// Expected to trigger an error with parameter_validation::validate_array
m_commandBuffer->SetViewport(0, 1, NULL);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "required parameter memory specified as VK_NULL_HANDLE");
// Specify VK_NULL_HANDLE for a required handle
// Expected to trigger an error with
// parameter_validation::validate_required_handle
vk::UnmapMemory(device(), VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "required parameter pFences[0] specified as VK_NULL_HANDLE");
// Specify VK_NULL_HANDLE for a required handle array entry
// Expected to trigger an error with
// parameter_validation::validate_required_handle_array
VkFence fence = VK_NULL_HANDLE;
vk::ResetFences(device(), 1, &fence);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "required parameter pAllocateInfo specified as NULL");
// Specify NULL for a required struct pointer
// Expected to trigger an error with
// parameter_validation::validate_struct_type
VkDeviceMemory memory = VK_NULL_HANDLE;
vk::AllocateMemory(device(), NULL, NULL, &memory);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "value of faceMask must not be 0");
// Specify 0 for a required VkFlags parameter
// Expected to trigger an error with parameter_validation::validate_flags
m_commandBuffer->SetStencilReference(0, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "value of pSubmits[0].pWaitDstStageMask[0] must not be 0");
// Specify 0 for a required VkFlags array entry
// Expected to trigger an error with
// parameter_validation::validate_flags_array
VkSemaphore semaphore = VK_NULL_HANDLE;
VkPipelineStageFlags stageFlags = 0;
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = &semaphore;
submitInfo.pWaitDstStageMask = &stageFlags;
vk::QueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-sType-sType");
stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
// Set a bogus sType and see what happens
submitInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = &semaphore;
submitInfo.pWaitDstStageMask = &stageFlags;
vk::QueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pWaitSemaphores-parameter");
stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.waitSemaphoreCount = 1;
// Set a null pointer for pWaitSemaphores
submitInfo.pWaitSemaphores = NULL;
submitInfo.pWaitDstStageMask = &stageFlags;
vk::QueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCreateRenderPass-pCreateInfo-parameter");
VkRenderPass render_pass;
vk::CreateRenderPass(device(), nullptr, nullptr, &render_pass);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, SpecLinks) {
TEST_DESCRIPTION("Test that spec links in a typical error message are well-formed");
ASSERT_NO_FATAL_FAILURE(InitFramework());
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitState());
#ifdef ANNOTATED_SPEC_LINK
bool test_annotated_spec_link = true;
#else // ANNOTATED_SPEC_LINK
bool test_annotated_spec_link = false;
#endif // ANNOTATED_SPEC_LINK
std::string spec_version;
if (test_annotated_spec_link) {
std::string major_version = std::to_string(VK_VERSION_MAJOR(VK_HEADER_VERSION_COMPLETE));
std::string minor_version = std::to_string(VK_VERSION_MINOR(VK_HEADER_VERSION_COMPLETE));
std::string patch_version = std::to_string(VK_VERSION_PATCH(VK_HEADER_VERSION_COMPLETE));
spec_version = "doc/view/" + major_version + "." + minor_version + "." + patch_version + ".0/windows";
} else {
spec_version = "registry/vulkan/specs";
}
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, spec_version);
vk::GetPhysicalDeviceFeatures(gpu(), NULL);
m_errorMonitor->VerifyFound();
// Now generate a 'default' message and check the link
bool ycbcr_support = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) ||
(DeviceValidationVersion() >= VK_API_VERSION_1_1));
bool maintenance2_support =
(DeviceExtensionEnabled(VK_KHR_MAINTENANCE2_EXTENSION_NAME) || (DeviceValidationVersion() >= VK_API_VERSION_1_1));
if (!((m_device->format_properties(VK_FORMAT_R8_UINT).optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) &&
(ycbcr_support ^ maintenance2_support))) {
printf("%s Device does not support format and extensions required, skipping test case", kSkipPrefix);
return;
}
VkImageCreateInfo imageInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
0,
VK_IMAGE_TYPE_2D,
VK_FORMAT_R8_UINT,
{128, 128, 1},
1,
1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
imageInfo.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
VkImageObj mutImage(m_device);
mutImage.init(&imageInfo);
ASSERT_TRUE(mutImage.initialized());
VkImageViewCreateInfo imgViewInfo = {};
imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
imgViewInfo.format = VK_FORMAT_B8G8R8A8_UNORM; // different than createImage
imgViewInfo.subresourceRange.layerCount = 1;
imgViewInfo.subresourceRange.baseMipLevel = 0;
imgViewInfo.subresourceRange.levelCount = 1;
imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imgViewInfo.image = mutImage.handle();
// VUIDs 01759 and 01760 should generate 'default' spec URLs, to search the registry
CreateImageViewTest(*this, &imgViewInfo, "Vulkan-Docs/search");
}
TEST_F(VkLayerTest, UsePnextOnlyStructWithoutExtensionEnabled) {
TEST_DESCRIPTION(
"Validate that using VkPipelineTessellationDomainOriginStateCreateInfo in VkPipelineTessellationStateCreateInfo.pNext "
"in a 1.0 context will generate an error message.");
SetTargetApiVersion(VK_API_VERSION_1_0);
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
if (!m_device->phy().features().tessellationShader) {
printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix);
return;
}
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj tcs(m_device, bindStateTscShaderText, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this);
VkShaderObj tes(m_device, bindStateTeshaderText, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0,
VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE};
VkPipelineTessellationDomainOriginStateCreateInfo tessellationDomainOriginStateInfo = {
VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, VK_NULL_HANDLE,
VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT};
VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
&tessellationDomainOriginStateInfo, 0, 3};
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.gp_ci_.pTessellationState = &tsci;
pipe.gp_ci_.pInputAssemblyState = &iasci;
pipe.shader_stages_ = {vs.GetStageCreateInfo(), tcs.GetStageCreateInfo(), tes.GetStageCreateInfo(), fs.GetStageCreateInfo()};
pipe.InitState();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pNext-pNext");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineTessellationStateCreateInfo-pNext-pNext");
pipe.CreateGraphicsPipeline();
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, PnextOnlyStructValidation) {
TEST_DESCRIPTION("See if checks occur on structs ONLY used in pnext chains.");
if (!(CheckDescriptorIndexingSupportAndInitFramework(this, m_instance_extension_names, m_device_extension_names, NULL,
m_errorMonitor))) {
printf("Descriptor indexing or one of its dependencies not supported, skipping tests\n");
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
// Create a device passing in a bad PdevFeatures2 value
auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexing_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
// Set one of the features values to an invalid boolean value
indexing_features.descriptorBindingUniformBufferUpdateAfterBind = 800;
uint32_t queue_node_count;
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_node_count, NULL);
VkQueueFamilyProperties *queue_props = new VkQueueFamilyProperties[queue_node_count];
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_node_count, queue_props);
float priorities[] = {1.0f};
VkDeviceQueueCreateInfo queue_info{};
queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info.pNext = NULL;
queue_info.flags = 0;
queue_info.queueFamilyIndex = 0;
queue_info.queueCount = 1;
queue_info.pQueuePriorities = &priorities[0];
VkDeviceCreateInfo dev_info = {};
dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
dev_info.pNext = NULL;
dev_info.queueCreateInfoCount = 1;
dev_info.pQueueCreateInfos = &queue_info;
dev_info.enabledLayerCount = 0;
dev_info.ppEnabledLayerNames = NULL;
dev_info.enabledExtensionCount = m_device_extension_names.size();
dev_info.ppEnabledExtensionNames = m_device_extension_names.data();
dev_info.pNext = &features2;
VkDevice dev;
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, "is neither VK_TRUE nor VK_FALSE");
m_errorMonitor->SetUnexpectedError("Failed to create");
vk::CreateDevice(gpu(), &dev_info, NULL, &dev);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ReservedParameter) {
TEST_DESCRIPTION("Specify a non-zero value for a reserved parameter");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, " must be 0");
// Specify 0 for a reserved VkFlags parameter
// Expected to trigger an error with
// parameter_validation::validate_reserved_flags
VkEvent event_handle = VK_NULL_HANDLE;
VkEventCreateInfo event_info = {};
event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
event_info.flags = 1;
vk::CreateEvent(device(), &event_info, NULL, &event_handle);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DebugMarkerNameTest) {
TEST_DESCRIPTION("Ensure debug marker object names are printed in debug report output");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), kValidationLayerName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_DEBUG_MARKER_EXTENSION_NAME);
} else {
printf("%s Debug Marker Extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkDebugMarkerSetObjectNameEXT fpvkDebugMarkerSetObjectNameEXT =
(PFN_vkDebugMarkerSetObjectNameEXT)vk::GetInstanceProcAddr(instance(), "vkDebugMarkerSetObjectNameEXT");
if (!(fpvkDebugMarkerSetObjectNameEXT)) {
printf("%s Can't find fpvkDebugMarkerSetObjectNameEXT; skipped.\n", kSkipPrefix);
return;
}
if (DeviceSimulation()) {
printf("%sSkipping object naming test.\n", kSkipPrefix);
return;
}
VkBuffer buffer;
VkDeviceMemory memory_1, memory_2;
std::string memory_name = "memory_name";
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buffer_create_info.size = 1;
vk::CreateBuffer(device(), &buffer_create_info, nullptr, &buffer);
VkMemoryRequirements memRequirements;
vk::GetBufferMemoryRequirements(device(), buffer, &memRequirements);
VkMemoryAllocateInfo memory_allocate_info = {};
memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_allocate_info.allocationSize = memRequirements.size;
memory_allocate_info.memoryTypeIndex = 0;
vk::AllocateMemory(device(), &memory_allocate_info, nullptr, &memory_1);
vk::AllocateMemory(device(), &memory_allocate_info, nullptr, &memory_2);
VkDebugMarkerObjectNameInfoEXT name_info = {};
name_info.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT;
name_info.pNext = nullptr;
name_info.object = (uint64_t)memory_2;
name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT;
name_info.pObjectName = memory_name.c_str();
fpvkDebugMarkerSetObjectNameEXT(device(), &name_info);
vk::BindBufferMemory(device(), buffer, memory_1, 0);
// Test core_validation layer
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, memory_name);
vk::BindBufferMemory(device(), buffer, memory_2, 0);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), memory_1, nullptr);
memory_1 = VK_NULL_HANDLE;
vk::FreeMemory(device(), memory_2, nullptr);
memory_2 = VK_NULL_HANDLE;
vk::DestroyBuffer(device(), buffer, nullptr);
buffer = VK_NULL_HANDLE;
VkCommandBuffer commandBuffer;
std::string commandBuffer_name = "command_buffer_name";
VkCommandPool commandpool_1;
VkCommandPool commandpool_2;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vk::CreateCommandPool(device(), &pool_create_info, nullptr, &commandpool_1);
vk::CreateCommandPool(device(), &pool_create_info, nullptr, &commandpool_2);
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = commandpool_1;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vk::AllocateCommandBuffers(device(), &command_buffer_allocate_info, &commandBuffer);
name_info.object = (uint64_t)commandBuffer;
name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT;
name_info.pObjectName = commandBuffer_name.c_str();
fpvkDebugMarkerSetObjectNameEXT(device(), &name_info);
VkCommandBufferBeginInfo cb_begin_Info = {};
cb_begin_Info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cb_begin_Info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vk::BeginCommandBuffer(commandBuffer, &cb_begin_Info);
const VkRect2D scissor = {{-1, 0}, {16, 16}};
const VkRect2D scissors[] = {scissor, scissor};
// Test parameter_validation layer
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, commandBuffer_name);
vk::CmdSetScissor(commandBuffer, 0, 1, scissors);
m_errorMonitor->VerifyFound();
// Test object_tracker layer
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, commandBuffer_name);
vk::FreeCommandBuffers(device(), commandpool_2, 1, &commandBuffer);
m_errorMonitor->VerifyFound();
vk::DestroyCommandPool(device(), commandpool_1, NULL);
vk::DestroyCommandPool(device(), commandpool_2, NULL);
}
TEST_F(VkLayerTest, DebugUtilsNameTest) {
TEST_DESCRIPTION("Ensure debug utils object names are printed in debug messenger output");
// Skip test if extension not supported
if (InstanceExtensionSupported(VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
} else {
printf("%s Debug Utils Extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkSetDebugUtilsObjectNameEXT fpvkSetDebugUtilsObjectNameEXT =
(PFN_vkSetDebugUtilsObjectNameEXT)vk::GetInstanceProcAddr(instance(), "vkSetDebugUtilsObjectNameEXT");
ASSERT_TRUE(fpvkSetDebugUtilsObjectNameEXT); // Must be extant if extension is enabled
PFN_vkCreateDebugUtilsMessengerEXT fpvkCreateDebugUtilsMessengerEXT =
(PFN_vkCreateDebugUtilsMessengerEXT)vk::GetInstanceProcAddr(instance(), "vkCreateDebugUtilsMessengerEXT");
ASSERT_TRUE(fpvkCreateDebugUtilsMessengerEXT); // Must be extant if extension is enabled
PFN_vkDestroyDebugUtilsMessengerEXT fpvkDestroyDebugUtilsMessengerEXT =
(PFN_vkDestroyDebugUtilsMessengerEXT)vk::GetInstanceProcAddr(instance(), "vkDestroyDebugUtilsMessengerEXT");
ASSERT_TRUE(fpvkDestroyDebugUtilsMessengerEXT); // Must be extant if extension is enabled
PFN_vkCmdInsertDebugUtilsLabelEXT fpvkCmdInsertDebugUtilsLabelEXT =
(PFN_vkCmdInsertDebugUtilsLabelEXT)vk::GetInstanceProcAddr(instance(), "vkCmdInsertDebugUtilsLabelEXT");
ASSERT_TRUE(fpvkCmdInsertDebugUtilsLabelEXT); // Must be extant if extension is enabled
if (DeviceSimulation()) {
printf("%sSkipping object naming test.\n", kSkipPrefix);
return;
}
DebugUtilsLabelCheckData callback_data;
auto empty_callback = [](const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData, DebugUtilsLabelCheckData *data) {
data->count++;
};
callback_data.count = 0;
callback_data.callback = empty_callback;
auto callback_create_info = lvl_init_struct<VkDebugUtilsMessengerCreateInfoEXT>();
callback_create_info.messageSeverity =
VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
callback_create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
callback_create_info.pfnUserCallback = DebugUtilsCallback;
callback_create_info.pUserData = &callback_data;
VkDebugUtilsMessengerEXT my_messenger = VK_NULL_HANDLE;
fpvkCreateDebugUtilsMessengerEXT(instance(), &callback_create_info, nullptr, &my_messenger);
VkBuffer buffer;
VkDeviceMemory memory_1, memory_2;
std::string memory_name = "memory_name";
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buffer_create_info.size = 1;
vk::CreateBuffer(device(), &buffer_create_info, nullptr, &buffer);
VkMemoryRequirements memRequirements;
vk::GetBufferMemoryRequirements(device(), buffer, &memRequirements);
VkMemoryAllocateInfo memory_allocate_info = {};
memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_allocate_info.allocationSize = memRequirements.size;
memory_allocate_info.memoryTypeIndex = 0;
vk::AllocateMemory(device(), &memory_allocate_info, nullptr, &memory_1);
vk::AllocateMemory(device(), &memory_allocate_info, nullptr, &memory_2);
VkDebugUtilsObjectNameInfoEXT name_info = {};
name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
name_info.pNext = nullptr;
name_info.objectType = VK_OBJECT_TYPE_DEVICE_MEMORY;
name_info.pObjectName = memory_name.c_str();
// Pass in bad handle make sure ObjectTracker catches it
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDebugUtilsObjectNameInfoEXT-objectType-02590");
name_info.objectHandle = (uint64_t)0xcadecade;
fpvkSetDebugUtilsObjectNameEXT(device(), &name_info);
m_errorMonitor->VerifyFound();
// Pass in 'unknown' object type and see if parameter validation catches it
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDebugUtilsObjectNameInfoEXT-objectType-02589");
name_info.objectHandle = (uint64_t)memory_2;
name_info.objectType = VK_OBJECT_TYPE_UNKNOWN;
fpvkSetDebugUtilsObjectNameEXT(device(), &name_info);
m_errorMonitor->VerifyFound();
name_info.objectType = VK_OBJECT_TYPE_DEVICE_MEMORY;
fpvkSetDebugUtilsObjectNameEXT(device(), &name_info);
vk::BindBufferMemory(device(), buffer, memory_1, 0);
// Test core_validation layer
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, memory_name);
vk::BindBufferMemory(device(), buffer, memory_2, 0);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), memory_1, nullptr);
memory_1 = VK_NULL_HANDLE;
vk::FreeMemory(device(), memory_2, nullptr);
memory_2 = VK_NULL_HANDLE;
vk::DestroyBuffer(device(), buffer, nullptr);
buffer = VK_NULL_HANDLE;
VkCommandBuffer commandBuffer;
std::string commandBuffer_name = "command_buffer_name";
VkCommandPool commandpool_1;
VkCommandPool commandpool_2;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vk::CreateCommandPool(device(), &pool_create_info, nullptr, &commandpool_1);
vk::CreateCommandPool(device(), &pool_create_info, nullptr, &commandpool_2);
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = commandpool_1;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vk::AllocateCommandBuffers(device(), &command_buffer_allocate_info, &commandBuffer);
name_info.objectHandle = (uint64_t)commandBuffer;
name_info.objectType = VK_OBJECT_TYPE_COMMAND_BUFFER;
name_info.pObjectName = commandBuffer_name.c_str();
fpvkSetDebugUtilsObjectNameEXT(device(), &name_info);
VkCommandBufferBeginInfo cb_begin_Info = {};
cb_begin_Info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cb_begin_Info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vk::BeginCommandBuffer(commandBuffer, &cb_begin_Info);
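    // The scissor offset below is intentionally negative (offset.x must be non-negative), so the
    // vkCmdSetScissor call should generate a validation error that carries the command buffer's debug
    // name, while the messenger callback should see the inserted debug label.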
const VkRect2D scissor = {{-1, 0}, {16, 16}};
const VkRect2D scissors[] = {scissor, scissor};
auto command_label = lvl_init_struct<VkDebugUtilsLabelEXT>();
command_label.pLabelName = "Command Label 0123";
    command_label.color[0] = 0.0f;
    command_label.color[1] = 1.0f;
    command_label.color[2] = 2.0f;
    command_label.color[3] = 3.0f;
bool command_label_test = false;
auto command_label_callback = [command_label, &command_label_test](const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData,
DebugUtilsLabelCheckData *data) {
data->count++;
command_label_test = false;
if (pCallbackData->cmdBufLabelCount == 1) {
command_label_test = pCallbackData->pCmdBufLabels[0] == command_label;
}
};
callback_data.callback = command_label_callback;
fpvkCmdInsertDebugUtilsLabelEXT(commandBuffer, &command_label);
// Test parameter_validation layer
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, commandBuffer_name);
vk::CmdSetScissor(commandBuffer, 0, 1, scissors);
m_errorMonitor->VerifyFound();
// Check the label test
if (!command_label_test) {
ADD_FAILURE() << "Command label '" << command_label.pLabelName << "' not passed to callback.";
}
// Test object_tracker layer
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, commandBuffer_name);
vk::FreeCommandBuffers(device(), commandpool_2, 1, &commandBuffer);
m_errorMonitor->VerifyFound();
vk::DestroyCommandPool(device(), commandpool_1, NULL);
vk::DestroyCommandPool(device(), commandpool_2, NULL);
fpvkDestroyDebugUtilsMessengerEXT(instance(), my_messenger, nullptr);
}
TEST_F(VkLayerTest, InvalidStructSType) {
TEST_DESCRIPTION("Specify an invalid VkStructureType for a Vulkan structure's sType field");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "parameter pAllocateInfo->sType must be");
// Zero struct memory, effectively setting sType to
// VK_STRUCTURE_TYPE_APPLICATION_INFO
// Expected to trigger an error with
// parameter_validation::validate_struct_type
VkMemoryAllocateInfo alloc_info = {};
VkDeviceMemory memory = VK_NULL_HANDLE;
vk::AllocateMemory(device(), &alloc_info, NULL, &memory);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "parameter pSubmits[0].sType must be");
// Zero struct memory, effectively setting sType to
// VK_STRUCTURE_TYPE_APPLICATION_INFO
// Expected to trigger an error with
// parameter_validation::validate_struct_type_array
VkSubmitInfo submit_info = {};
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidStructPNext) {
TEST_DESCRIPTION("Specify an invalid value for a Vulkan structure's pNext field");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(Init());
PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
(PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr);
m_errorMonitor->SetDesiredFailureMsg((kErrorBit | kWarningBit), "value of pCreateInfo->pNext must be NULL");
// Set VkMemoryAllocateInfo::pNext to a non-NULL value, when pNext must be NULL.
// Need to pick a function that has no allowed pNext structure types.
// Expected to trigger an error with parameter_validation::validate_struct_pnext
VkEvent event = VK_NULL_HANDLE;
VkEventCreateInfo event_alloc_info = {};
// Zero-initialization will provide the correct sType
VkApplicationInfo app_info = {};
event_alloc_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
event_alloc_info.pNext = &app_info;
vk::CreateEvent(device(), &event_alloc_info, NULL, &event);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg((kErrorBit | kWarningBit), " chain includes a structure with unexpected VkStructureType ");
// Set VkMemoryAllocateInfo::pNext to a non-NULL value, but use
// a function that has allowed pNext structure types and specify
// a structure type that is not allowed.
// Expected to trigger an error with parameter_validation::validate_struct_pnext
VkDeviceMemory memory = VK_NULL_HANDLE;
VkMemoryAllocateInfo memory_alloc_info = {};
memory_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_alloc_info.pNext = &app_info;
vk::AllocateMemory(device(), &memory_alloc_info, NULL, &memory);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg((kErrorBit | kWarningBit), " chain includes a structure with unexpected VkStructureType ");
    // Same concept as above, but unlike vkAllocateMemory, where VkMemoryAllocateInfo is a const parameter,
    // in vkGetPhysicalDeviceProperties2 the VkPhysicalDeviceProperties2 parameter is not const
VkPhysicalDeviceProperties2 physical_device_properties2 = {};
physical_device_properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
physical_device_properties2.pNext = &app_info;
vkGetPhysicalDeviceProperties2KHR(gpu(), &physical_device_properties2);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnrecognizedValueOutOfRange) {
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit,
"does not fall within the begin..end range of the core VkFormat enumeration tokens");
// Specify an invalid VkFormat value
// Expected to trigger an error with
// parameter_validation::validate_ranged_enum
VkFormatProperties format_properties;
vk::GetPhysicalDeviceFormatProperties(gpu(), static_cast<VkFormat>(8000), &format_properties);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnrecognizedValueBadMask) {
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "contains flag bits that are not recognized members of");
// Specify an invalid VkFlags bitmask value
// Expected to trigger an error with parameter_validation::validate_flags
VkImageFormatProperties image_format_properties;
vk::GetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
static_cast<VkImageUsageFlags>(1 << 25), 0, &image_format_properties);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnrecognizedValueBadFlag) {
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "contains flag bits that are not recognized members of");
// Specify an invalid VkFlags array entry
// Expected to trigger an error with parameter_validation::validate_flags_array
VkSemaphore semaphore;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore);
// `stage_flags` is set to a value which, currently, is not a defined stage flag
// `VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM` works well for this
VkPipelineStageFlags stage_flags = VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM;
// `waitSemaphoreCount` *must* be greater than 0 to perform this check
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &semaphore;
submit_info.pWaitDstStageMask = &stage_flags;
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnrecognizedValueBadBool) {
// Make sure using VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE doesn't trigger a false positive.
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
} else {
printf("%s VK_KHR_sampler_mirror_clamp_to_edge extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Specify an invalid VkBool32 value, expecting a warning with parameter_validation::validate_bool32
VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
// Not VK_TRUE or VK_FALSE
sampler_info.anisotropyEnable = 3;
CreateSamplerTest(*this, &sampler_info, "is neither VK_TRUE nor VK_FALSE");
}
TEST_F(VkLayerTest, UnrecognizedValueMaxEnum) {
ASSERT_NO_FATAL_FAILURE(Init());
// Specify MAX_ENUM
VkFormatProperties format_properties;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "does not fall within the begin..end range");
vk::GetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_MAX_ENUM, &format_properties);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, SubmitSignaledFence) {
vk_testing::Fence testFence;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "submitted in SIGNALED state. Fences must be reset before being submitted");
VkFenceCreateInfo fenceInfo = {};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.pNext = NULL;
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, nullptr, m_depth_clear_color, m_stencil_clear_color);
m_commandBuffer->end();
testFence.init(*m_device, fenceInfo);
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, testFence.handle());
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, LeakAnObject) {
TEST_DESCRIPTION("Create a fence and destroy its device without first destroying the fence.");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
// Workaround for overzealous layers checking even the guaranteed 0th queue family
const auto q_props = vk_testing::PhysicalDevice(gpu()).queue_properties();
ASSERT_TRUE(q_props.size() > 0);
ASSERT_TRUE(q_props[0].queueCount > 0);
const float q_priority[] = {1.0f};
VkDeviceQueueCreateInfo queue_ci = {};
queue_ci.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_ci.queueFamilyIndex = 0;
queue_ci.queueCount = 1;
queue_ci.pQueuePriorities = q_priority;
VkDeviceCreateInfo device_ci = {};
device_ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_ci.queueCreateInfoCount = 1;
device_ci.pQueueCreateInfos = &queue_ci;
VkDevice leaky_device;
ASSERT_VK_SUCCESS(vk::CreateDevice(gpu(), &device_ci, nullptr, &leaky_device));
const VkFenceCreateInfo fence_ci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO};
VkFence leaked_fence;
ASSERT_VK_SUCCESS(vk::CreateFence(leaky_device, &fence_ci, nullptr, &leaked_fence));
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyDevice-device-00378");
vk::DestroyDevice(leaky_device, nullptr);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UseObjectWithWrongDevice) {
TEST_DESCRIPTION(
"Try to destroy a render pass object using a device other than the one it was created on. This should generate a distinct "
"error from the invalid handle error.");
// Create first device and renderpass
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Create second device
float priorities[] = {1.0f};
VkDeviceQueueCreateInfo queue_info{};
queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info.pNext = NULL;
queue_info.flags = 0;
queue_info.queueFamilyIndex = 0;
queue_info.queueCount = 1;
queue_info.pQueuePriorities = &priorities[0];
VkDeviceCreateInfo device_create_info = {};
auto features = m_device->phy().features();
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = NULL;
device_create_info.queueCreateInfoCount = 1;
device_create_info.pQueueCreateInfos = &queue_info;
device_create_info.enabledLayerCount = 0;
device_create_info.ppEnabledLayerNames = NULL;
device_create_info.pEnabledFeatures = &features;
VkDevice second_device;
ASSERT_VK_SUCCESS(vk::CreateDevice(gpu(), &device_create_info, NULL, &second_device));
// Try to destroy the renderpass from the first device using the second device
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyRenderPass-renderPass-parent");
vk::DestroyRenderPass(second_device, m_renderPass, NULL);
m_errorMonitor->VerifyFound();
vk::DestroyDevice(second_device, NULL);
}
TEST_F(VkLayerTest, InvalidAllocationCallbacks) {
TEST_DESCRIPTION("Test with invalid VkAllocationCallbacks");
ASSERT_NO_FATAL_FAILURE(Init());
    // vk::CreateInstance and vk::CreateDevice tend to crash in the Loader Trampoline at the moment, so vk::CreateCommandPool is used instead
const VkCommandPoolCreateInfo cpci = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, nullptr, 0,
DeviceObj()->QueueFamilyMatching(0, 0, true)};
VkCommandPool cmdPool;
struct Alloc {
        static VKAPI_ATTR void *VKAPI_CALL alloc(void *, size_t, size_t, VkSystemAllocationScope) { return nullptr; }
        static VKAPI_ATTR void *VKAPI_CALL realloc(void *, void *, size_t, size_t, VkSystemAllocationScope) { return nullptr; }
        static VKAPI_ATTR void VKAPI_CALL free(void *, void *) {}
        static VKAPI_ATTR void VKAPI_CALL internalAlloc(void *, size_t, VkInternalAllocationType, VkSystemAllocationScope) {}
        static VKAPI_ATTR void VKAPI_CALL internalFree(void *, size_t, VkInternalAllocationType, VkSystemAllocationScope) {}
};
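    // The callbacks above are deliberately minimal: the VUIDs checked below require pfnAllocation,
    // pfnReallocation, and pfnFree to all be non-null, and pfnInternalAllocation/pfnInternalFree to be
    // provided either both or not at all. Each block below violates exactly one of these requirements.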
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAllocationCallbacks-pfnAllocation-00632");
const VkAllocationCallbacks allocator = {nullptr, nullptr, Alloc::realloc, Alloc::free, nullptr, nullptr};
vk::CreateCommandPool(device(), &cpci, &allocator, &cmdPool);
m_errorMonitor->VerifyFound();
}
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAllocationCallbacks-pfnReallocation-00633");
const VkAllocationCallbacks allocator = {nullptr, Alloc::alloc, nullptr, Alloc::free, nullptr, nullptr};
vk::CreateCommandPool(device(), &cpci, &allocator, &cmdPool);
m_errorMonitor->VerifyFound();
}
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAllocationCallbacks-pfnFree-00634");
const VkAllocationCallbacks allocator = {nullptr, Alloc::alloc, Alloc::realloc, nullptr, nullptr, nullptr};
vk::CreateCommandPool(device(), &cpci, &allocator, &cmdPool);
m_errorMonitor->VerifyFound();
}
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAllocationCallbacks-pfnInternalAllocation-00635");
const VkAllocationCallbacks allocator = {nullptr, Alloc::alloc, Alloc::realloc, Alloc::free, nullptr, Alloc::internalFree};
vk::CreateCommandPool(device(), &cpci, &allocator, &cmdPool);
m_errorMonitor->VerifyFound();
}
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAllocationCallbacks-pfnInternalAllocation-00635");
const VkAllocationCallbacks allocator = {nullptr, Alloc::alloc, Alloc::realloc, Alloc::free, Alloc::internalAlloc, nullptr};
vk::CreateCommandPool(device(), &cpci, &allocator, &cmdPool);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, MismatchedQueueFamiliesOnSubmit) {
TEST_DESCRIPTION(
"Submit command buffer created using one queue family and attempt to submit them on a queue created in a different queue "
"family.");
ASSERT_NO_FATAL_FAILURE(Init()); // assumes it initializes all queue families on vk::CreateDevice
// This test is meaningless unless we have multiple queue families
auto queue_family_properties = m_device->phy().queue_properties();
std::vector<uint32_t> queue_families;
for (uint32_t i = 0; i < queue_family_properties.size(); ++i)
if (queue_family_properties[i].queueCount > 0) queue_families.push_back(i);
if (queue_families.size() < 2) {
printf("%s Device only has one queue family; skipped.\n", kSkipPrefix);
return;
}
const uint32_t queue_family = queue_families[0];
const uint32_t other_queue_family = queue_families[1];
VkQueue other_queue;
vk::GetDeviceQueue(m_device->device(), other_queue_family, 0, &other_queue);
VkCommandPoolObj cmd_pool(m_device, queue_family);
VkCommandBufferObj cmd_buff(m_device, &cmd_pool);
cmd_buff.begin();
cmd_buff.end();
// Submit on the wrong queue
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &cmd_buff.handle();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkQueueSubmit-pCommandBuffers-00074");
vk::QueueSubmit(other_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, TemporaryExternalSemaphore) {
#ifdef _WIN32
const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR;
#else
const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
// Check for external semaphore instance extensions
if (InstanceExtensionSupported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME);
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
// Check for external semaphore device extensions
if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) {
m_device_extension_names.push_back(extension_name);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Check for external semaphore import and export capability
VkPhysicalDeviceExternalSemaphoreInfoKHR esi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR, nullptr,
handle_type};
VkExternalSemaphorePropertiesKHR esp = {VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR, nullptr};
auto vkGetPhysicalDeviceExternalSemaphorePropertiesKHR =
(PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)vk::GetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR");
vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(gpu(), &esi, &esp);
if (!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) ||
!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR)) {
printf("%s External semaphore does not support importing and exporting, skipping test\n", kSkipPrefix);
return;
}
VkResult err;
// Create a semaphore to export payload from
VkExportSemaphoreCreateInfoKHR esci = {VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR, nullptr, handle_type};
VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, &esci, 0};
VkSemaphore export_semaphore;
err = vk::CreateSemaphore(m_device->device(), &sci, nullptr, &export_semaphore);
ASSERT_VK_SUCCESS(err);
// Create a semaphore to import payload into
sci.pNext = nullptr;
VkSemaphore import_semaphore;
err = vk::CreateSemaphore(m_device->device(), &sci, nullptr, &import_semaphore);
ASSERT_VK_SUCCESS(err);
#ifdef _WIN32
// Export semaphore payload to an opaque handle
HANDLE handle = nullptr;
VkSemaphoreGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_semaphore,
handle_type};
auto vkGetSemaphoreWin32HandleKHR =
(PFN_vkGetSemaphoreWin32HandleKHR)vk::GetDeviceProcAddr(m_device->device(), "vkGetSemaphoreWin32HandleKHR");
err = vkGetSemaphoreWin32HandleKHR(m_device->device(), &ghi, &handle);
ASSERT_VK_SUCCESS(err);
// Import opaque handle exported above *temporarily*
VkImportSemaphoreWin32HandleInfoKHR ihi = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR,
nullptr,
import_semaphore,
VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR,
handle_type,
handle,
nullptr};
auto vkImportSemaphoreWin32HandleKHR =
(PFN_vkImportSemaphoreWin32HandleKHR)vk::GetDeviceProcAddr(m_device->device(), "vkImportSemaphoreWin32HandleKHR");
err = vkImportSemaphoreWin32HandleKHR(m_device->device(), &ihi);
ASSERT_VK_SUCCESS(err);
#else
// Export semaphore payload to an opaque handle
int fd = 0;
VkSemaphoreGetFdInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, nullptr, export_semaphore, handle_type};
auto vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vk::GetDeviceProcAddr(m_device->device(), "vkGetSemaphoreFdKHR");
err = vkGetSemaphoreFdKHR(m_device->device(), &ghi, &fd);
ASSERT_VK_SUCCESS(err);
// Import opaque handle exported above *temporarily*
VkImportSemaphoreFdInfoKHR ihi = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, nullptr, import_semaphore,
VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR, handle_type, fd};
auto vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)vk::GetDeviceProcAddr(m_device->device(), "vkImportSemaphoreFdKHR");
err = vkImportSemaphoreFdKHR(m_device->device(), &ihi);
ASSERT_VK_SUCCESS(err);
#endif
// Wait on the imported semaphore twice in vk::QueueSubmit, the second wait should be an error
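    // A temporarily imported payload is consumed by the first wait, after which the semaphore reverts to
    // its prior (unsignaled) state, so the second wait on import_semaphore has nothing to signal it.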
VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
VkSubmitInfo si[] = {
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr},
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr},
};
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "has no way to be signaled");
vk::QueueSubmit(m_device->m_queue, 4, si, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
auto index = m_device->graphics_queue_node_index_;
if (m_device->queue_props[index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) {
// Wait on the imported semaphore twice in vk::QueueBindSparse, the second wait should be an error
VkBindSparseInfo bi[] = {
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &import_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr},
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &import_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr},
};
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "has no way to be signaled");
vk::QueueBindSparse(m_device->m_queue, 4, bi, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
// Cleanup
err = vk::QueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
vk::DestroySemaphore(m_device->device(), export_semaphore, nullptr);
vk::DestroySemaphore(m_device->device(), import_semaphore, nullptr);
}
TEST_F(VkLayerTest, TemporaryExternalFence) {
#ifdef _WIN32
const auto extension_name = VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR;
#else
const auto extension_name = VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
// Check for external fence instance extensions
if (InstanceExtensionSupported(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME);
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s External fence extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
// Check for external fence device extensions
if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) {
m_device_extension_names.push_back(extension_name);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME);
} else {
printf("%s External fence extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Check for external fence import and export capability
VkPhysicalDeviceExternalFenceInfoKHR efi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR, nullptr, handle_type};
VkExternalFencePropertiesKHR efp = {VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR, nullptr};
auto vkGetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)vk::GetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceExternalFencePropertiesKHR");
vkGetPhysicalDeviceExternalFencePropertiesKHR(gpu(), &efi, &efp);
if (!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR) ||
!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR)) {
printf("%s External fence does not support importing and exporting, skipping test\n", kSkipPrefix);
return;
}
VkResult err;
// Create a fence to export payload from
VkFence export_fence;
{
VkExportFenceCreateInfoKHR efci = {VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR, nullptr, handle_type};
VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &efci, 0};
err = vk::CreateFence(m_device->device(), &fci, nullptr, &export_fence);
ASSERT_VK_SUCCESS(err);
}
// Create a fence to import payload into
VkFence import_fence;
{
VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0};
err = vk::CreateFence(m_device->device(), &fci, nullptr, &import_fence);
ASSERT_VK_SUCCESS(err);
}
#ifdef _WIN32
// Export fence payload to an opaque handle
HANDLE handle = nullptr;
{
VkFenceGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_fence, handle_type};
auto vkGetFenceWin32HandleKHR =
(PFN_vkGetFenceWin32HandleKHR)vk::GetDeviceProcAddr(m_device->device(), "vkGetFenceWin32HandleKHR");
err = vkGetFenceWin32HandleKHR(m_device->device(), &ghi, &handle);
ASSERT_VK_SUCCESS(err);
}
// Import opaque handle exported above
{
VkImportFenceWin32HandleInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR,
nullptr,
import_fence,
VK_FENCE_IMPORT_TEMPORARY_BIT_KHR,
handle_type,
handle,
nullptr};
auto vkImportFenceWin32HandleKHR =
(PFN_vkImportFenceWin32HandleKHR)vk::GetDeviceProcAddr(m_device->device(), "vkImportFenceWin32HandleKHR");
err = vkImportFenceWin32HandleKHR(m_device->device(), &ifi);
ASSERT_VK_SUCCESS(err);
}
#else
// Export fence payload to an opaque handle
int fd = 0;
{
VkFenceGetFdInfoKHR gfi = {VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, nullptr, export_fence, handle_type};
auto vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)vk::GetDeviceProcAddr(m_device->device(), "vkGetFenceFdKHR");
err = vkGetFenceFdKHR(m_device->device(), &gfi, &fd);
ASSERT_VK_SUCCESS(err);
}
// Import opaque handle exported above
{
VkImportFenceFdInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, nullptr, import_fence,
VK_FENCE_IMPORT_TEMPORARY_BIT_KHR, handle_type, fd};
auto vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)vk::GetDeviceProcAddr(m_device->device(), "vkImportFenceFdKHR");
err = vkImportFenceFdKHR(m_device->device(), &ifi);
ASSERT_VK_SUCCESS(err);
}
#endif
// Undo the temporary import
vk::ResetFences(m_device->device(), 1, &import_fence);
// Signal the previously imported fence twice, the second signal should produce a validation error
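    // A fence can only be associated with one queue submission at a time, so resubmitting import_fence
    // before the first submission has signaled it should be flagged as "already in use".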
vk::QueueSubmit(m_device->m_queue, 0, nullptr, import_fence);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "is already in use by another submission.");
vk::QueueSubmit(m_device->m_queue, 0, nullptr, import_fence);
m_errorMonitor->VerifyFound();
// Cleanup
err = vk::QueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
vk::DestroyFence(m_device->device(), export_fence, nullptr);
vk::DestroyFence(m_device->device(), import_fence, nullptr);
}
TEST_F(VkLayerTest, InvalidCmdBufferEventDestroyed) {
TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to an event dependency being destroyed.");
ASSERT_NO_FATAL_FAILURE(Init());
VkEvent event;
VkEventCreateInfo evci = {};
evci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
VkResult result = vk::CreateEvent(m_device->device(), &evci, NULL, &event);
ASSERT_VK_SUCCESS(result);
m_commandBuffer->begin();
vk::CmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
m_commandBuffer->end();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkEvent");
// Destroy event dependency prior to submit to cause ERROR
vk::DestroyEvent(m_device->device(), event, NULL);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidCmdBufferQueryPoolDestroyed) {
TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a query pool dependency being destroyed.");
ASSERT_NO_FATAL_FAILURE(Init());
VkQueryPool query_pool;
VkQueryPoolCreateInfo qpci{};
qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
qpci.queryType = VK_QUERY_TYPE_TIMESTAMP;
qpci.queryCount = 1;
VkResult result = vk::CreateQueryPool(m_device->device(), &qpci, nullptr, &query_pool);
ASSERT_VK_SUCCESS(result);
m_commandBuffer->begin();
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
m_commandBuffer->end();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkQueryPool");
// Destroy query pool dependency prior to submit to cause ERROR
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DeviceFeature2AndVertexAttributeDivisorExtensionUnenabled) {
TEST_DESCRIPTION(
"Test unenabled VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME & "
"VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME.");
VkPhysicalDeviceFeatures2 pd_features2 = {};
pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
pd_features2.pNext = nullptr;
ASSERT_NO_FATAL_FAILURE(Init());
vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = &pd_features2;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
VkDevice testDevice;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceCreateInfo-pNext-pNext");
m_errorMonitor->SetUnexpectedError("Failed to create device chain");
vk::CreateDevice(gpu(), &device_create_info, NULL, &testDevice);
m_errorMonitor->VerifyFound();
VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT vadf = {};
vadf.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT;
vadf.pNext = nullptr;
device_create_info.pNext = &vadf;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VK_EXT_vertex_attribute_divisor must be enabled when it creates a device");
m_errorMonitor->SetUnexpectedError("Failed to create device chain");
vk::CreateDevice(gpu(), &device_create_info, NULL, &testDevice);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, Features12AndpNext) {
TEST_DESCRIPTION("Test VkPhysicalDeviceVulkan12Features and illegal struct in pNext");
SetTargetApiVersion(VK_API_VERSION_1_2);
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
printf("%s Vulkan12Struct requires Vulkan 1.2+, skipping test\n", kSkipPrefix);
return;
}
if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME) ||
!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_8BIT_STORAGE_EXTENSION_NAME) ||
!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_16BIT_STORAGE_EXTENSION_NAME)) {
printf("%s Storage Extension(s) not supported, skipping tests\n", kSkipPrefix);
return;
}
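    // VkPhysicalDeviceVulkan11Features/Vulkan12Features must not be chained together with the individual
    // feature structs they subsume (here the 8-bit and 16-bit storage structs); the chain built below
    // deliberately violates this to trigger the two pNext VUIDs checked at device creation.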
VkPhysicalDevice16BitStorageFeatures sixteen_bit = {};
sixteen_bit.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES;
sixteen_bit.storageBuffer16BitAccess = true;
VkPhysicalDeviceVulkan11Features features11 = {};
features11.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
features11.pNext = &sixteen_bit;
features11.storageBuffer16BitAccess = true;
VkPhysicalDevice8BitStorageFeatures eight_bit = {};
eight_bit.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES;
eight_bit.pNext = &features11;
eight_bit.storageBuffer8BitAccess = true;
VkPhysicalDeviceVulkan12Features features12 = {};
features12.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
features12.pNext = &eight_bit;
features12.storageBuffer8BitAccess = true;
vk_testing::PhysicalDevice physical_device(gpu());
vk_testing::QueueCreateInfoArray queue_info(physical_device.queue_properties());
std::vector<VkDeviceQueueCreateInfo> create_queue_infos;
auto qci = queue_info.data();
for (uint32_t i = 0; i < queue_info.size(); ++i) {
if (qci[i].queueCount) {
create_queue_infos.push_back(qci[i]);
}
}
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = &features12;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
VkDevice testDevice;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceCreateInfo-pNext-02829");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceCreateInfo-pNext-02830");
m_errorMonitor->SetUnexpectedError("Failed to create device chain");
vk::CreateDevice(gpu(), &device_create_info, NULL, &testDevice);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, Features12Extensions) {
TEST_DESCRIPTION("Checks that 1.2 features are enabled if extension is passed in.");
SetTargetApiVersion(VK_API_VERSION_1_2);
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
printf("%s Vulkan12Struct requires Vulkan 1.2+, skipping test\n", kSkipPrefix);
return;
}
vk_testing::PhysicalDevice physical_device(gpu());
vk_testing::QueueCreateInfoArray queue_info(physical_device.queue_properties());
std::vector<VkDeviceQueueCreateInfo> create_queue_infos;
auto qci = queue_info.data();
for (uint32_t i = 0; i < queue_info.size(); ++i) {
if (qci[i].queueCount) {
create_queue_infos.push_back(qci[i]);
}
}
    // Explicitly set all tested features to false
VkPhysicalDeviceVulkan12Features features12 = {};
features12.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
features12.pNext = nullptr;
features12.drawIndirectCount = VK_FALSE;
features12.samplerMirrorClampToEdge = VK_FALSE;
features12.descriptorIndexing = VK_FALSE;
features12.samplerFilterMinmax = VK_FALSE;
features12.shaderOutputViewportIndex = VK_FALSE;
    features12.shaderOutputLayer = VK_TRUE;  // Set true since both shader_viewport features need to be true
std::vector<const char *> device_extensions;
    // For each extension below: if it is supported, add it to the list and register the corresponding expected VUID failure
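    // When one of these promoted extensions is enabled together with VkPhysicalDeviceVulkan12Features,
    // the matching 1.2 feature must also be enabled; since the features were left VK_FALSE above, one
    // VUID is expected per supported extension.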
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME)) {
device_extensions.push_back(VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceCreateInfo-ppEnabledExtensions-02831");
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME)) {
device_extensions.push_back(VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceCreateInfo-ppEnabledExtensions-02832");
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME)) {
device_extensions.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
device_extensions.push_back(VK_KHR_MAINTENANCE3_EXTENSION_NAME);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceCreateInfo-ppEnabledExtensions-02833");
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME)) {
device_extensions.push_back(VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceCreateInfo-ppEnabledExtensions-02834");
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME)) {
device_extensions.push_back(VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceCreateInfo-ppEnabledExtensions-02835");
}
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = &features12;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
device_create_info.ppEnabledExtensionNames = device_extensions.data();
device_create_info.enabledExtensionCount = device_extensions.size();
VkDevice testDevice;
m_errorMonitor->SetUnexpectedError("Failed to create device chain");
vk::CreateDevice(gpu(), &device_create_info, NULL, &testDevice);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, FeaturesVariablePointer) {
TEST_DESCRIPTION("Checks VK_KHR_variable_pointers features.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
std::vector<const char *> device_extensions;
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME) &&
DeviceExtensionSupported(gpu(), nullptr, VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME)) {
device_extensions.push_back(VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME);
device_extensions.push_back(VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME);
} else {
printf("%s VariablePointer Extension not supported, skipping tests\n", kSkipPrefix);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
// Create a device that enables variablePointers but not variablePointersStorageBuffer
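    // The spec requires variablePointersStorageBuffer to be enabled whenever variablePointers is, which
    // is exactly the condition the VUID below checks.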
auto variable_features = lvl_init_struct<VkPhysicalDeviceVariablePointersFeatures>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&variable_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (variable_features.variablePointers == VK_FALSE) {
printf("%s variablePointer feature not supported, skipping tests\n", kSkipPrefix);
return;
}
variable_features.variablePointersStorageBuffer = VK_FALSE;
vk_testing::PhysicalDevice physical_device(gpu());
vk_testing::QueueCreateInfoArray queue_info(physical_device.queue_properties());
std::vector<VkDeviceQueueCreateInfo> create_queue_infos;
auto qci = queue_info.data();
for (uint32_t i = 0; i < queue_info.size(); ++i) {
if (qci[i].queueCount) {
create_queue_infos.push_back(qci[i]);
}
}
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = &features2;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
device_create_info.ppEnabledExtensionNames = device_extensions.data();
device_create_info.enabledExtensionCount = device_extensions.size();
VkDevice testDevice;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPhysicalDeviceVariablePointersFeatures-variablePointers-01431");
vk::CreateDevice(gpu(), &device_create_info, NULL, &testDevice);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, FeaturesMultiview) {
TEST_DESCRIPTION("Checks VK_KHR_multiview features.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
std::vector<const char *> device_extensions;
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME)) {
device_extensions.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
} else {
printf("%s Multiview Extension not supported, skipping tests\n", kSkipPrefix);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
auto multiview_features = lvl_init_struct<VkPhysicalDeviceMultiviewFeatures>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&multiview_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
// Set false to trigger VUs
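    // multiviewGeometryShader and multiviewTessellationShader may only be enabled when multiview itself
    // is enabled, so clearing multiview should trigger the corresponding VUIDs below.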
multiview_features.multiview = VK_FALSE;
vk_testing::PhysicalDevice physical_device(gpu());
vk_testing::QueueCreateInfoArray queue_info(physical_device.queue_properties());
std::vector<VkDeviceQueueCreateInfo> create_queue_infos;
auto qci = queue_info.data();
for (uint32_t i = 0; i < queue_info.size(); ++i) {
if (qci[i].queueCount) {
create_queue_infos.push_back(qci[i]);
}
}
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = &features2;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
device_create_info.ppEnabledExtensionNames = device_extensions.data();
device_create_info.enabledExtensionCount = device_extensions.size();
VkDevice testDevice;
if ((multiview_features.multiviewGeometryShader == VK_FALSE) && (multiview_features.multiviewTessellationShader == VK_FALSE)) {
printf("%s multiviewGeometryShader and multiviewTessellationShader feature not supported, skipping tests\n", kSkipPrefix);
return;
}
if (multiview_features.multiviewGeometryShader == VK_TRUE) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPhysicalDeviceMultiviewFeatures-multiviewGeometryShader-00580");
}
if (multiview_features.multiviewTessellationShader == VK_TRUE) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPhysicalDeviceMultiviewFeatures-multiviewTessellationShader-00581");
}
vk::CreateDevice(gpu(), &device_create_info, NULL, &testDevice);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, BeginQueryOnTimestampPool) {
TEST_DESCRIPTION("Call CmdBeginQuery on a TIMESTAMP query pool.");
ASSERT_NO_FATAL_FAILURE(Init());
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryType-02804");
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vk::BeginCommandBuffer(m_commandBuffer->handle(), &begin_info);
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
vk::EndCommandBuffer(m_commandBuffer->handle());
m_errorMonitor->VerifyFound();
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
}
TEST_F(VkLayerTest, SwapchainAcquireImageNoSync) {
TEST_DESCRIPTION("Test vkAcquireNextImageKHR with VK_NULL_HANDLE semaphore and fence");
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_TRUE(InitSwapchain());
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkAcquireNextImageKHR-semaphore-01780");
uint32_t dummy;
vk::AcquireNextImageKHR(device(), m_swapchain, UINT64_MAX, VK_NULL_HANDLE, VK_NULL_HANDLE, &dummy);
m_errorMonitor->VerifyFound();
}
DestroySwapchain();
}
TEST_F(VkLayerTest, SwapchainAcquireImageNoSync2KHR) {
TEST_DESCRIPTION("Test vkAcquireNextImage2KHR with VK_NULL_HANDLE semaphore and fence");
SetTargetApiVersion(VK_API_VERSION_1_1);
bool extension_dependency_satisfied = false;
if (InstanceExtensionSupported(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME);
extension_dependency_satisfied = true;
} else if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s vkAcquireNextImage2KHR not supported, skipping test\n", kSkipPrefix);
return;
}
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (extension_dependency_satisfied && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_DEVICE_GROUP_EXTENSION_NAME);
} else if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s vkAcquireNextImage2KHR not supported, skipping test\n", kSkipPrefix);
return;
}
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_TRUE(InitSwapchain());
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAcquireNextImageInfoKHR-semaphore-01782");
VkAcquireNextImageInfoKHR acquire_info = {VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR};
acquire_info.swapchain = m_swapchain;
acquire_info.timeout = UINT64_MAX;
acquire_info.semaphore = VK_NULL_HANDLE;
acquire_info.fence = VK_NULL_HANDLE;
acquire_info.deviceMask = 0x1;
uint32_t dummy;
vk::AcquireNextImage2KHR(device(), &acquire_info, &dummy);
m_errorMonitor->VerifyFound();
}
DestroySwapchain();
}
TEST_F(VkLayerTest, SwapchainAcquireImageNoBinarySemaphore) {
TEST_DESCRIPTION("Test vkAcquireNextImageKHR with non-binary semaphore");
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
return;
}
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
return;
}
if (!CheckTimelineSemaphoreSupportAndInitState(this)) {
printf("%s Timeline semaphore not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_TRUE(InitSwapchain());
VkSemaphoreTypeCreateInfoKHR semaphore_type_create_info{};
semaphore_type_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR;
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &semaphore_type_create_info;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkAcquireNextImageKHR-semaphore-03265");
uint32_t image_i;
vk::AcquireNextImageKHR(device(), m_swapchain, UINT64_MAX, semaphore, VK_NULL_HANDLE, &image_i);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
DestroySwapchain();
}
TEST_F(VkLayerTest, SwapchainAcquireImageNoBinarySemaphore2KHR) {
TEST_DESCRIPTION("Test vkAcquireNextImage2KHR with non-binary semaphore");
TEST_DESCRIPTION("Test vkAcquireNextImage2KHR with VK_NULL_HANDLE semaphore and fence");
SetTargetApiVersion(VK_API_VERSION_1_1);
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
bool extension_dependency_satisfied = false;
if (InstanceExtensionSupported(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME);
extension_dependency_satisfied = true;
} else if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s vkAcquireNextImage2KHR not supported, skipping test\n", kSkipPrefix);
return;
}
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (extension_dependency_satisfied && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_DEVICE_GROUP_EXTENSION_NAME);
} else if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s vkAcquireNextImage2KHR not supported, skipping test\n", kSkipPrefix);
return;
}
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
return;
}
if (!CheckTimelineSemaphoreSupportAndInitState(this)) {
printf("%s Timeline semaphore not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_TRUE(InitSwapchain());
VkSemaphoreTypeCreateInfoKHR semaphore_type_create_info{};
semaphore_type_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR;
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &semaphore_type_create_info;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkAcquireNextImageInfoKHR acquire_info = {};
acquire_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR;
acquire_info.swapchain = m_swapchain;
acquire_info.timeout = UINT64_MAX;
acquire_info.semaphore = semaphore;
acquire_info.deviceMask = 0x1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAcquireNextImageInfoKHR-semaphore-03266");
uint32_t image_i;
vk::AcquireNextImage2KHR(device(), &acquire_info, &image_i);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
DestroySwapchain();
}
TEST_F(VkLayerTest, SwapchainAcquireTooManyImages) {
TEST_DESCRIPTION("Acquiring invalid amount of images from the swapchain.");
if (!AddSurfaceInstanceExtension()) return;
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!AddSwapchainDeviceExtension()) return;
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_TRUE(InitSwapchain());
uint32_t image_count;
ASSERT_VK_SUCCESS(vk::GetSwapchainImagesKHR(device(), m_swapchain, &image_count, nullptr));
VkSurfaceCapabilitiesKHR caps;
ASSERT_VK_SUCCESS(vk::GetPhysicalDeviceSurfaceCapabilitiesKHR(gpu(), m_surface, &caps));
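    // An application may acquire up to (imageCount - minImageCount + 1) swapchain images without
    // presenting any; acquiring one image beyond that limit is what the VUID below flags.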
const uint32_t acquirable_count = image_count - caps.minImageCount + 1;
std::vector<VkFenceObj> fences(acquirable_count);
for (uint32_t i = 0; i < acquirable_count; ++i) {
fences[i].init(*m_device, VkFenceObj::create_info());
uint32_t image_i;
const auto res = vk::AcquireNextImageKHR(device(), m_swapchain, UINT64_MAX, VK_NULL_HANDLE, fences[i].handle(), &image_i);
ASSERT_TRUE(res == VK_SUCCESS || res == VK_SUBOPTIMAL_KHR);
}
VkFenceObj error_fence;
error_fence.init(*m_device, VkFenceObj::create_info());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkAcquireNextImageKHR-swapchain-01802");
uint32_t image_i;
vk::AcquireNextImageKHR(device(), m_swapchain, UINT64_MAX, VK_NULL_HANDLE, error_fence.handle(), &image_i);
m_errorMonitor->VerifyFound();
// Cleanup
vk::WaitForFences(device(), fences.size(), MakeVkHandles<VkFence>(fences).data(), VK_TRUE, UINT64_MAX);
DestroySwapchain();
}
TEST_F(VkLayerTest, SwapchainAcquireTooManyImages2KHR) {
TEST_DESCRIPTION("Acquiring invalid amount of images from the swapchain via vkAcquireNextImage2KHR.");
SetTargetApiVersion(VK_API_VERSION_1_1);
bool extension_dependency_satisfied = false;
if (InstanceExtensionSupported(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME);
extension_dependency_satisfied = true;
} else if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s vkAcquireNextImage2KHR not supported, skipping test\n", kSkipPrefix);
return;
}
if (!AddSurfaceInstanceExtension()) return;
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (extension_dependency_satisfied && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_DEVICE_GROUP_EXTENSION_NAME);
} else if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s vkAcquireNextImage2KHR not supported, skipping test\n", kSkipPrefix);
return;
}
if (!AddSwapchainDeviceExtension()) return;
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_TRUE(InitSwapchain());
uint32_t image_count;
ASSERT_VK_SUCCESS(vk::GetSwapchainImagesKHR(device(), m_swapchain, &image_count, nullptr));
VkSurfaceCapabilitiesKHR caps;
ASSERT_VK_SUCCESS(vk::GetPhysicalDeviceSurfaceCapabilitiesKHR(gpu(), m_surface, &caps));
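    // Same acquirable-image limit as in the vkAcquireNextImageKHR variant of this test above.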
const uint32_t acquirable_count = image_count - caps.minImageCount + 1;
std::vector<VkFenceObj> fences(acquirable_count);
for (uint32_t i = 0; i < acquirable_count; ++i) {
fences[i].init(*m_device, VkFenceObj::create_info());
uint32_t image_i;
const auto res = vk::AcquireNextImageKHR(device(), m_swapchain, UINT64_MAX, VK_NULL_HANDLE, fences[i].handle(), &image_i);
ASSERT_TRUE(res == VK_SUCCESS || res == VK_SUBOPTIMAL_KHR);
}
VkFenceObj error_fence;
error_fence.init(*m_device, VkFenceObj::create_info());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkAcquireNextImage2KHR-swapchain-01803");
VkAcquireNextImageInfoKHR acquire_info = {VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR};
acquire_info.swapchain = m_swapchain;
acquire_info.timeout = UINT64_MAX;
acquire_info.fence = error_fence.handle();
acquire_info.deviceMask = 0x1;
uint32_t image_i;
vk::AcquireNextImage2KHR(device(), &acquire_info, &image_i);
m_errorMonitor->VerifyFound();
// Cleanup
vk::WaitForFences(device(), fences.size(), MakeVkHandles<VkFence>(fences).data(), VK_TRUE, UINT64_MAX);
DestroySwapchain();
}
TEST_F(VkLayerTest, InvalidDeviceMask) {
TEST_DESCRIPTION("Invalid deviceMask.");
SetTargetApiVersion(VK_API_VERSION_1_1);
bool support_surface = true;
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping VkAcquireNextImageInfoKHR test\n", kSkipPrefix);
support_surface = false;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (support_surface) {
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping BindSwapchainImageMemory test\n", kSkipPrefix);
support_surface = false;
}
}
if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s Device Groups requires Vulkan 1.1+, skipping test\n", kSkipPrefix);
return;
}
uint32_t physical_device_group_count = 0;
vk::EnumeratePhysicalDeviceGroups(instance(), &physical_device_group_count, nullptr);
if (physical_device_group_count == 0) {
printf("%s physical_device_group_count is 0, skipping test\n", kSkipPrefix);
return;
}
std::vector<VkPhysicalDeviceGroupProperties> physical_device_group(physical_device_group_count,
{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES});
vk::EnumeratePhysicalDeviceGroups(instance(), &physical_device_group_count, physical_device_group.data());
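// Chain the first physical-device group into device creation so the deviceMask rules below are checked
// against this device group.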
VkDeviceGroupDeviceCreateInfo create_device_pnext = {};
create_device_pnext.sType = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO;
create_device_pnext.physicalDeviceCount = physical_device_group[0].physicalDeviceCount;
create_device_pnext.pPhysicalDevices = physical_device_group[0].physicalDevices;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &create_device_pnext, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
if (!InitSwapchain()) {
printf("%s Cannot create surface or swapchain, skipping VkAcquireNextImageInfoKHR test\n", kSkipPrefix);
support_surface = false;
}
// Test VkMemoryAllocateFlagsInfo
VkMemoryAllocateFlagsInfo alloc_flags_info = {};
alloc_flags_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
alloc_flags_info.flags = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT;
alloc_flags_info.deviceMask = 0xFFFFFFFF;
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = &alloc_flags_info;
alloc_info.memoryTypeIndex = 0;
alloc_info.allocationSize = 1024;
VkDeviceMemory mem;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675");
vk::AllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
m_errorMonitor->VerifyFound();
alloc_flags_info.deviceMask = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676");
vk::AllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
m_errorMonitor->VerifyFound();
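// If this GPU belongs to a group with more than one physical device, allocate memory replicated across two
// devices (deviceMask = 3) and verify that mapping multi-instance memory is rejected (VUID-vkMapMemory-memory-00683).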
uint32_t pdev_group_count = 0;
std::vector<VkPhysicalDeviceGroupProperties> group_props;
VkResult err = vk::EnumeratePhysicalDeviceGroups(instance(), &pdev_group_count, nullptr);
group_props.resize(pdev_group_count);
err = vk::EnumeratePhysicalDeviceGroups(instance(), &pdev_group_count, &group_props[0]);
auto tgt = gpu();
bool test_run = false;
for (uint32_t i = 0; i < pdev_group_count; i++) {
if ((group_props[i].physicalDeviceCount > 1) && !test_run) {
for (uint32_t j = 0; j < group_props[i].physicalDeviceCount; j++) {
if (tgt == group_props[i].physicalDevices[j]) {
void *data;
VkDeviceMemory mi_mem;
alloc_flags_info.deviceMask = 3;
err = vk::AllocateMemory(m_device->device(), &alloc_info, NULL, &mi_mem);
if (VK_SUCCESS == err) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkMapMemory-memory-00683");
vk::MapMemory(m_device->device(), mi_mem, 0, 1024, 0, &data);
m_errorMonitor->VerifyFound();
vk::FreeMemory(m_device->device(), mi_mem, nullptr);
}
test_run = true;
break;
}
}
}
}
// Test VkDeviceGroupCommandBufferBeginInfo
VkDeviceGroupCommandBufferBeginInfo dev_grp_cmd_buf_info = {};
dev_grp_cmd_buf_info.sType = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO;
dev_grp_cmd_buf_info.deviceMask = 0xFFFFFFFF;
VkCommandBufferBeginInfo cmd_buf_info = {};
cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cmd_buf_info.pNext = &dev_grp_cmd_buf_info;
m_commandBuffer->reset();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106");
vk::BeginCommandBuffer(m_commandBuffer->handle(), &cmd_buf_info);
m_errorMonitor->VerifyFound();
dev_grp_cmd_buf_info.deviceMask = 0;
m_commandBuffer->reset();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107");
vk::BeginCommandBuffer(m_commandBuffer->handle(), &cmd_buf_info);
m_errorMonitor->VerifyFound();
// Test VkDeviceGroupRenderPassBeginInfo
dev_grp_cmd_buf_info.deviceMask = 0x00000001;
m_commandBuffer->reset();
vk::BeginCommandBuffer(m_commandBuffer->handle(), &cmd_buf_info);
VkDeviceGroupRenderPassBeginInfo dev_grp_rp_info = {};
dev_grp_rp_info.sType = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO;
dev_grp_rp_info.deviceMask = 0xFFFFFFFF;
m_renderPassBeginInfo.pNext = &dev_grp_rp_info;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907");
vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyFound();
dev_grp_rp_info.deviceMask = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906");
vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyFound();
dev_grp_rp_info.deviceMask = 0x00000001;
dev_grp_rp_info.deviceRenderAreaCount = physical_device_group[0].physicalDeviceCount + 1;
std::vector<VkRect2D> device_render_areas(dev_grp_rp_info.deviceRenderAreaCount, m_renderPassBeginInfo.renderArea);
dev_grp_rp_info.pDeviceRenderAreas = device_render_areas.data();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908");
vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyFound();
// Test vk::CmdSetDeviceMask()
vk::CmdSetDeviceMask(m_commandBuffer->handle(), 0x00000001);
dev_grp_rp_info.deviceRenderAreaCount = physical_device_group[0].physicalDeviceCount;
vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDeviceMask-deviceMask-00108");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDeviceMask-deviceMask-00110");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDeviceMask-deviceMask-00111");
vk::CmdSetDeviceMask(m_commandBuffer->handle(), 0xFFFFFFFF);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDeviceMask-deviceMask-00109");
vk::CmdSetDeviceMask(m_commandBuffer->handle(), 0);
m_errorMonitor->VerifyFound();
VkSemaphoreCreateInfo semaphore_create_info = {};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkSemaphore semaphore2;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore2));
VkFenceCreateInfo fence_create_info = {};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
VkFence fence;
ASSERT_VK_SUCCESS(vk::CreateFence(m_device->device(), &fence_create_info, nullptr, &fence));
if (support_surface) {
// Test VkAcquireNextImageInfoKHR
uint32_t imageIndex;
VkAcquireNextImageInfoKHR acquire_next_image_info = {};
acquire_next_image_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR;
acquire_next_image_info.semaphore = semaphore;
acquire_next_image_info.swapchain = m_swapchain;
acquire_next_image_info.fence = fence;
acquire_next_image_info.deviceMask = 0xFFFFFFFF;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAcquireNextImageInfoKHR-deviceMask-01290");
vk::AcquireNextImage2KHR(m_device->device(), &acquire_next_image_info, &imageIndex);
m_errorMonitor->VerifyFound();
vk::WaitForFences(m_device->device(), 1, &fence, VK_TRUE, std::numeric_limits<int>::max());
vk::ResetFences(m_device->device(), 1, &fence);
acquire_next_image_info.semaphore = semaphore2;
acquire_next_image_info.deviceMask = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAcquireNextImageInfoKHR-deviceMask-01291");
vk::AcquireNextImage2KHR(m_device->device(), &acquire_next_image_info, &imageIndex);
m_errorMonitor->VerifyFound();
DestroySwapchain();
}
// Test VkDeviceGroupSubmitInfo
VkDeviceGroupSubmitInfo device_group_submit_info = {};
device_group_submit_info.sType = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO;
device_group_submit_info.commandBufferCount = 1;
std::array<uint32_t, 1> command_buffer_device_masks = {{0xFFFFFFFF}};
device_group_submit_info.pCommandBufferDeviceMasks = command_buffer_device_masks.data();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = &device_group_submit_info;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
m_commandBuffer->reset();
vk::BeginCommandBuffer(m_commandBuffer->handle(), &cmd_buf_info);
vk::EndCommandBuffer(m_commandBuffer->handle());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086");
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
vk::WaitForFences(m_device->device(), 1, &fence, VK_TRUE, std::numeric_limits<int>::max());
vk::DestroyFence(m_device->device(), fence, nullptr);
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
vk::DestroySemaphore(m_device->device(), semaphore2, nullptr);
}
TEST_F(VkLayerTest, ValidationCacheTestBadMerge) {
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), kValidationLayerName, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_VALIDATION_CACHE_EXTENSION_NAME);
} else {
printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Load extension functions
auto fpCreateValidationCache =
(PFN_vkCreateValidationCacheEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCreateValidationCacheEXT");
auto fpDestroyValidationCache =
(PFN_vkDestroyValidationCacheEXT)vk::GetDeviceProcAddr(m_device->device(), "vkDestroyValidationCacheEXT");
auto fpMergeValidationCaches =
(PFN_vkMergeValidationCachesEXT)vk::GetDeviceProcAddr(m_device->device(), "vkMergeValidationCachesEXT");
if (!fpCreateValidationCache || !fpDestroyValidationCache || !fpMergeValidationCaches) {
printf("%s Failed to load function pointers for %s\n", kSkipPrefix, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME);
return;
}
VkValidationCacheCreateInfoEXT validationCacheCreateInfo;
validationCacheCreateInfo.sType = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT;
validationCacheCreateInfo.pNext = NULL;
validationCacheCreateInfo.initialDataSize = 0;
validationCacheCreateInfo.pInitialData = NULL;
validationCacheCreateInfo.flags = 0;
VkValidationCacheEXT validationCache = VK_NULL_HANDLE;
VkResult res = fpCreateValidationCache(m_device->device(), &validationCacheCreateInfo, nullptr, &validationCache);
ASSERT_VK_SUCCESS(res);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkMergeValidationCachesEXT-dstCache-01536");
res = fpMergeValidationCaches(m_device->device(), validationCache, 1, &validationCache);
m_errorMonitor->VerifyFound();
fpDestroyValidationCache(m_device->device(), validationCache, nullptr);
}
TEST_F(VkLayerTest, InvalidQueueFamilyIndex) {
// Miscellaneous queueFamilyIndex validation tests
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkBufferCreateInfo buffCI = {};
buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffCI.size = 1024;
buffCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buffCI.queueFamilyIndexCount = 2;
// Introduce failure by specifying invalid queue_family_index
uint32_t qfi[2];
qfi[0] = 777;
qfi[1] = 0;
buffCI.pQueueFamilyIndices = qfi;
buffCI.sharingMode = VK_SHARING_MODE_CONCURRENT; // qfi only matters in CONCURRENT mode
// Test for queue family index out of range
CreateBufferTest(*this, &buffCI, "VUID-VkBufferCreateInfo-sharingMode-01419");
// Test for non-unique QFI in array
qfi[0] = 0;
CreateBufferTest(*this, &buffCI, "VUID-VkBufferCreateInfo-sharingMode-01419");
if (m_device->queue_props.size() > 2) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "which was not created allowing concurrent");
// Create buffer shared to queue families 1 and 2, but submitted on queue family 0
buffCI.queueFamilyIndexCount = 2;
qfi[0] = 1;
qfi[1] = 2;
VkBufferObj ib;
ib.init(*m_device, buffCI);
m_commandBuffer->begin();
vk::CmdFillBuffer(m_commandBuffer->handle(), ib.handle(), 0, 16, 5);
m_commandBuffer->end();
m_commandBuffer->QueueCommandBuffer(false);
m_errorMonitor->VerifyFound();
}
// If there is more than one queue family, create a device with a single queue family, then create a buffer
// with SHARING_MODE_CONCURRENT that uses a non-device PDEV queue family.
uint32_t queue_count;
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_count, NULL);
std::vector<VkQueueFamilyProperties> queue_props(queue_count);
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_count, queue_props.data());
if (queue_count < 3) {
printf("%s Multiple queue families are required to run this test.\n", kSkipPrefix);
return;
}
// pQueuePriorities must provide one priority per requested queue
std::vector<float> priorities(queue_props[0].queueCount, 1.0f);
VkDeviceQueueCreateInfo queue_info = {};
queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info.queueFamilyIndex = 0;
queue_info.queueCount = queue_props[0].queueCount;
queue_info.pQueuePriorities = priorities.data();
VkDeviceCreateInfo dev_info{};
dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
dev_info.queueCreateInfoCount = 1;
dev_info.pQueueCreateInfos = &queue_info;
dev_info.enabledLayerCount = 0;
dev_info.enabledExtensionCount = m_device_extension_names.size();
dev_info.ppEnabledExtensionNames = m_device_extension_names.data();
// Create a device with a single queue family
VkDevice second_device;
ASSERT_VK_SUCCESS(vk::CreateDevice(gpu(), &dev_info, nullptr, &second_device));
// Select Queue family for CONCURRENT buffer that is not owned by device
buffCI.queueFamilyIndexCount = 2;
qfi[1] = 2;
VkBuffer buffer = VK_NULL_HANDLE;
m_errorMonitor->ExpectSuccess();
vk::CreateBuffer(second_device, &buffCI, NULL, &buffer);
m_errorMonitor->VerifyNotFound();
vk::DestroyBuffer(second_device, buffer, nullptr);
vk::DestroyDevice(second_device, nullptr);
}
TEST_F(VkLayerTest, InvalidQueryPoolCreate) {
TEST_DESCRIPTION("Attempt to create a query pool for PIPELINE_STATISTICS without enabling pipeline stats for the device.");
ASSERT_NO_FATAL_FAILURE(Init());
vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
VkDevice local_device;
VkDeviceCreateInfo device_create_info = {};
auto features = m_device->phy().features();
// Intentionally disable pipeline stats
features.pipelineStatisticsQuery = VK_FALSE;
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = NULL;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
device_create_info.enabledLayerCount = 0;
device_create_info.ppEnabledLayerNames = NULL;
device_create_info.pEnabledFeatures = &features;
VkResult err = vk::CreateDevice(gpu(), &device_create_info, nullptr, &local_device);
ASSERT_VK_SUCCESS(err);
VkQueryPoolCreateInfo qpci{};
qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS;
qpci.queryCount = 1;
VkQueryPool query_pool;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkQueryPoolCreateInfo-queryType-00791");
vk::CreateQueryPool(local_device, &qpci, nullptr, &query_pool);
m_errorMonitor->VerifyFound();
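// A queryCount of zero is also invalid, regardless of the query type.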
qpci.queryType = VK_QUERY_TYPE_OCCLUSION;
qpci.queryCount = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkQueryPoolCreateInfo-queryCount-02763");
vk::CreateQueryPool(local_device, &qpci, nullptr, &query_pool);
m_errorMonitor->VerifyFound();
vk::DestroyDevice(local_device, nullptr);
}
TEST_F(VkLayerTest, InvalidQuerySizes) {
TEST_DESCRIPTION("Invalid size of using queries commands.");
ASSERT_NO_FATAL_FAILURE(Init());
if (IsPlatform(kPixel2XL)) {
printf("%s This test should not run on Pixel 2 XL\n", kSkipPrefix);
return;
}
uint32_t queue_count;
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_count, NULL);
std::vector<VkQueueFamilyProperties> queue_props(queue_count);
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_count, queue_props.data());
const uint32_t timestampValidBits = queue_props[m_device->graphics_queue_node_index_].timestampValidBits;
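// A timestampValidBits of 0 means this queue family cannot write timestamps, which adds
// VUID-vkCmdWriteTimestamp-timestampValidBits-00829 to the timestamp check further down.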
VkBufferObj buffer;
buffer.init(*m_device, 128, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
VkMemoryRequirements mem_reqs = {};
vk::GetBufferMemoryRequirements(m_device->device(), buffer.handle(), &mem_reqs);
const VkDeviceSize buffer_size = mem_reqs.size;
const uint32_t query_pool_size = 4;
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_OCCLUSION;
query_pool_create_info.queryCount = query_pool_size;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
m_commandBuffer->begin();
// firstQuery is too large
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdResetQueryPool-firstQuery-00796");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdResetQueryPool-firstQuery-00797");
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, query_pool_size, 1);
m_errorMonitor->VerifyFound();
// sum of firstQuery and queryCount is too large
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdResetQueryPool-firstQuery-00797");
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 1, query_pool_size);
m_errorMonitor->VerifyFound();
// Actually reset all queries so they can be used
m_errorMonitor->ExpectSuccess();
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, query_pool_size);
m_errorMonitor->VerifyNotFound();
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
// query index too large
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdEndQuery-query-00810");
vk::CmdEndQuery(m_commandBuffer->handle(), query_pool, query_pool_size);
m_errorMonitor->VerifyFound();
vk::CmdEndQuery(m_commandBuffer->handle(), query_pool, 0);
// firstQuery is too large
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyQueryPoolResults-firstQuery-00820");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyQueryPoolResults-firstQuery-00821");
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, query_pool_size, 1, buffer.handle(), 0, 0, 0);
m_errorMonitor->VerifyFound();
// sum of firstQuery and queryCount is too large
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyQueryPoolResults-firstQuery-00821");
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 1, query_pool_size, buffer.handle(), 0, 0, 0);
m_errorMonitor->VerifyFound();
// offset larger than buffer size
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyQueryPoolResults-dstOffset-00819");
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 0, 1, buffer.handle(), buffer_size + 4, 0, 0);
m_errorMonitor->VerifyFound();
// buffer does not have enough storage from offset to contain result of each query
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00824");
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 0, 2, buffer.handle(), buffer_size - 4, 4, 0);
m_errorMonitor->VerifyFound();
// Query is not a timestamp type
if (timestampValidBits == 0) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdWriteTimestamp-timestampValidBits-00829");
}
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdWriteTimestamp-queryPool-01416");
vk::CmdWriteTimestamp(m_commandBuffer->handle(), VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, query_pool, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
const size_t out_data_size = 128;
uint8_t data[out_data_size];
// firstQuery is too large
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-firstQuery-00813");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-firstQuery-00816");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-DrawState-InvalidQuery");
vk::GetQueryPoolResults(m_device->device(), query_pool, query_pool_size, 1, out_data_size, &data, 0, 0);
m_errorMonitor->VerifyFound();
// sum of firstQuery and queryCount is too large
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-firstQuery-00816");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-DrawState-InvalidQuery");
vk::GetQueryPoolResults(m_device->device(), query_pool, 1, query_pool_size, out_data_size, &data, 0, 0);
m_errorMonitor->VerifyFound();
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
}
TEST_F(VkLayerTest, UnclosedAndDuplicateQueries) {
TEST_DESCRIPTION("End a command buffer with a query still in progress, create nested queries.");
ASSERT_NO_FATAL_FAILURE(Init());
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue);
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info = {};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_OCCLUSION;
query_pool_create_info.queryCount = 5;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
m_commandBuffer->begin();
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 5);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryPool-01922");
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 1, 0);
// Attempt to begin a query that has the same type as an active query
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 3, 0);
vk::CmdEndQuery(m_commandBuffer->handle(), query_pool, 1);
m_errorMonitor->VerifyFound();
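// Beginning another query and then ending the command buffer while it is still active must trigger 00061.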
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkEndCommandBuffer-commandBuffer-00061");
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
vk::EndCommandBuffer(m_commandBuffer->handle());
m_errorMonitor->VerifyFound();
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
}
TEST_F(VkLayerTest, QueryPreciseBit) {
TEST_DESCRIPTION("Check for correct Query Precise Bit circumstances.");
ASSERT_NO_FATAL_FAILURE(Init());
// These tests require that the device support pipeline statistics query
VkPhysicalDeviceFeatures device_features = {};
ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
if (VK_TRUE != device_features.pipelineStatisticsQuery) {
printf("%s Test requires unsupported pipelineStatisticsQuery feature. Skipped.\n", kSkipPrefix);
return;
}
std::vector<const char *> device_extension_names;
auto features = m_device->phy().features();
// Test for precise bit when query type is not OCCLUSION
if (features.occlusionQueryPrecise) {
VkEvent event;
VkEventCreateInfo event_create_info{};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->handle(), &event_create_info, nullptr, &event);
m_commandBuffer->begin();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryType-00800");
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info = {};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->handle(), &query_pool_create_info, nullptr, &query_pool);
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, VK_QUERY_CONTROL_PRECISE_BIT);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
vk::DestroyQueryPool(m_device->handle(), query_pool, nullptr);
vk::DestroyEvent(m_device->handle(), event, nullptr);
}
// Test for precise bit when precise feature is not available
features.occlusionQueryPrecise = false;
VkDeviceObj test_device(0, gpu(), device_extension_names, &features);
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = test_device.graphics_queue_node_index_;
VkCommandPool command_pool;
vk::CreateCommandPool(test_device.handle(), &pool_create_info, nullptr, &command_pool);
VkCommandBufferAllocateInfo cmd = {};
cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
cmd.pNext = NULL;
cmd.commandPool = command_pool;
cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cmd.commandBufferCount = 1;
VkCommandBuffer cmd_buffer;
VkResult err = vk::AllocateCommandBuffers(test_device.handle(), &cmd, &cmd_buffer);
ASSERT_VK_SUCCESS(err);
VkEvent event;
VkEventCreateInfo event_create_info{};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(test_device.handle(), &event_create_info, nullptr, &event);
VkCommandBufferBeginInfo begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, nullptr};
vk::BeginCommandBuffer(cmd_buffer, &begin_info);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryType-00800");
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info = {};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_OCCLUSION;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(test_device.handle(), &query_pool_create_info, nullptr, &query_pool);
vk::CmdResetQueryPool(cmd_buffer, query_pool, 0, 1);
vk::CmdBeginQuery(cmd_buffer, query_pool, 0, VK_QUERY_CONTROL_PRECISE_BIT);
m_errorMonitor->VerifyFound();
vk::EndCommandBuffer(cmd_buffer);
vk::DestroyQueryPool(test_device.handle(), query_pool, nullptr);
vk::DestroyEvent(test_device.handle(), event, nullptr);
vk::DestroyCommandPool(test_device.handle(), command_pool, nullptr);
}
TEST_F(VkLayerTest, StageMaskGsTsEnabled) {
TEST_DESCRIPTION(
"Attempt to use a stageMask w/ geometry shader and tesselation shader bits enabled when those features are disabled on the "
"device.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
std::vector<const char *> device_extension_names;
auto features = m_device->phy().features();
// Make sure gs & ts are disabled
features.geometryShader = false;
features.tessellationShader = false;
// The sacrificial device object
VkDeviceObj test_device(0, gpu(), device_extension_names, &features);
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = test_device.graphics_queue_node_index_;
VkCommandPool command_pool;
vk::CreateCommandPool(test_device.handle(), &pool_create_info, nullptr, &command_pool);
VkCommandBufferAllocateInfo cmd = {};
cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
cmd.pNext = NULL;
cmd.commandPool = command_pool;
cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cmd.commandBufferCount = 1;
VkCommandBuffer cmd_buffer;
VkResult err = vk::AllocateCommandBuffers(test_device.handle(), &cmd, &cmd_buffer);
ASSERT_VK_SUCCESS(err);
VkEvent event;
VkEventCreateInfo evci = {};
evci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
VkResult result = vk::CreateEvent(test_device.handle(), &evci, NULL, &event);
ASSERT_VK_SUCCESS(result);
VkCommandBufferBeginInfo cbbi = {};
cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vk::BeginCommandBuffer(cmd_buffer, &cbbi);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetEvent-stageMask-04090");
vk::CmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetEvent-stageMask-04091");
vk::CmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT);
m_errorMonitor->VerifyFound();
vk::DestroyEvent(test_device.handle(), event, NULL);
vk::DestroyCommandPool(test_device.handle(), command_pool, NULL);
}
TEST_F(VkLayerTest, StageMaskHost) {
TEST_DESCRIPTION("Test invalid usage of VK_PIPELINE_STAGE_HOST_BIT.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkEvent event;
VkEventCreateInfo event_create_info{};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
m_commandBuffer->begin();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetEvent-stageMask-01149");
vk::CmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_HOST_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdResetEvent-stageMask-01153");
vk::CmdResetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_HOST_BIT);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
VkSemaphoreCreateInfo semaphore_create_info = {};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkPipelineStageFlags stage_flags = VK_PIPELINE_STAGE_HOST_BIT;
VkSubmitInfo submit_info = {};
// Signal the semaphore so the wait submission below has something to wait on.
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyNotFound();
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = nullptr;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &semaphore;
submit_info.pWaitDstStageMask = &stage_flags;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pWaitDstStageMask-00078");
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::DestroyEvent(m_device->device(), event, nullptr);
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
}
TEST_F(VkLayerTest, DescriptorPoolInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete a DescriptorPool with a DescriptorSet that is in use.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Create image to update the descriptor with
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);
// Create Sampler
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
VkResult err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
// Create PSO to be used for draw-time errors below
VkShaderObj fs(m_device, bindStateFragSamplerShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
pipe.dsl_bindings_ = {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
};
const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
pipe.dyn_state_ci_ = dyn_state_ci;
pipe.InitState();
pipe.CreateGraphicsPipeline();
// Update descriptor with image and sampler
pipe.descriptor_set_->WriteDescriptorImageInfo(0, view, sampler, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
pipe.descriptor_set_->UpdateDescriptorSets();
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_layout_.handle(), 0, 1,
&pipe.descriptor_set_->set_, 0, NULL);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer to put pool in-flight
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
// Destroy pool while in-flight, causing error
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyDescriptorPool-descriptorPool-00303");
vk::DestroyDescriptorPool(m_device->device(), pipe.descriptor_set_->pool_, NULL);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
// Cleanup
vk::DestroySampler(m_device->device(), sampler, NULL);
m_errorMonitor->SetUnexpectedError(
"If descriptorPool is not VK_NULL_HANDLE, descriptorPool must be a valid VkDescriptorPool handle");
m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorPool obj");
// TODO : It seems Validation layers think ds_pool was already destroyed, even though it wasn't?
}
TEST_F(VkLayerTest, FramebufferInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use framebuffer.");
ASSERT_NO_FATAL_FAILURE(Init());
VkFormatProperties format_properties;
VkResult err = VK_SUCCESS;
vk::GetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties);
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkImageObj image(m_device);
image.Init(256, 256, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 256, 256, 1};
VkFramebuffer fb;
err = vk::CreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
// Just use default renderpass with our framebuffer
m_renderPassBeginInfo.framebuffer = fb;
// Create Null cmd buffer for submit
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer to put it in-flight
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
// Destroy framebuffer while in-flight
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyFramebuffer-framebuffer-00892");
vk::DestroyFramebuffer(m_device->device(), fb, NULL);
m_errorMonitor->VerifyFound();
// Wait for queue to complete so we can safely destroy everything
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If framebuffer is not VK_NULL_HANDLE, framebuffer must be a valid VkFramebuffer handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Framebuffer obj");
vk::DestroyFramebuffer(m_device->device(), fb, nullptr);
}
TEST_F(VkLayerTest, FramebufferImageInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use image that's child of framebuffer.");
ASSERT_NO_FATAL_FAILURE(Init());
VkFormatProperties format_properties;
VkResult err = VK_SUCCESS;
vk::GetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties);
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkImageCreateInfo image_ci = {};
image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_ci.pNext = NULL;
image_ci.imageType = VK_IMAGE_TYPE_2D;
image_ci.format = VK_FORMAT_B8G8R8A8_UNORM;
image_ci.extent.width = 256;
image_ci.extent.height = 256;
image_ci.extent.depth = 1;
image_ci.mipLevels = 1;
image_ci.arrayLayers = 1;
image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_ci.flags = 0;
VkImageObj image(m_device);
image.init(&image_ci);
VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 256, 256, 1};
VkFramebuffer fb;
err = vk::CreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
// Just use default renderpass with our framebuffer
m_renderPassBeginInfo.framebuffer = fb;
// Create Null cmd buffer for submit
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer to put it (and attached imageView) in-flight
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer to put framebuffer and children in-flight
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
// Destroy image attached to framebuffer while in-flight
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyImage-image-01000");
vk::DestroyImage(m_device->device(), image.handle(), NULL);
m_errorMonitor->VerifyFound();
// Wait for queue to complete so we can safely destroy image and other objects
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If image is not VK_NULL_HANDLE, image must be a valid VkImage handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Image obj");
vk::DestroyFramebuffer(m_device->device(), fb, nullptr);
}
TEST_F(VkLayerTest, EventInUseDestroyedSignaled) {
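// Record a command buffer that sets an event, destroy the event before submitting, and verify that the
// submit is rejected because the command buffer references a destroyed object.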
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
vk::CmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
m_commandBuffer->end();
vk::DestroyEvent(m_device->device(), event, nullptr);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "that is invalid because bound");
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InUseDestroyedSignaled) {
TEST_DESCRIPTION(
"Use vkCmdExecuteCommands with invalid state in primary and secondary command buffers. Delete objects that are in use. "
"Call VkQueueSubmit with an event that has been deleted.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_errorMonitor->ExpectSuccess();
VkSemaphoreCreateInfo semaphore_create_info = {};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkFenceCreateInfo fence_create_info = {};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
VkFence fence;
ASSERT_VK_SUCCESS(vk::CreateFence(m_device->device(), &fence_create_info, nullptr, &fence));
VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.InitState();
pipe.CreateGraphicsPipeline();
pipe.descriptor_set_->WriteDescriptorBufferInfo(0, buffer_test.GetBuffer(), 1024, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
pipe.descriptor_set_->UpdateDescriptorSets();
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
m_commandBuffer->begin();
vk::CmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_layout_.handle(), 0, 1,
&pipe.descriptor_set_->set_, 0, NULL);
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, fence);
m_errorMonitor->Reset(); // resume logmsg processing
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyEvent-event-01145");
vk::DestroyEvent(m_device->device(), event, nullptr);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroySemaphore-semaphore-01137");
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyFence-fence-01120");
vk::DestroyFence(m_device->device(), fence, nullptr);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If semaphore is not VK_NULL_HANDLE, semaphore must be a valid VkSemaphore handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Semaphore obj");
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
m_errorMonitor->SetUnexpectedError("If fence is not VK_NULL_HANDLE, fence must be a valid VkFence handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Fence obj");
vk::DestroyFence(m_device->device(), fence, nullptr);
m_errorMonitor->SetUnexpectedError("If event is not VK_NULL_HANDLE, event must be a valid VkEvent handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Event obj");
vk::DestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, EventStageMaskOneCommandBufferPass) {
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkCommandBufferObj commandBuffer1(m_device, m_commandPool);
VkCommandBufferObj commandBuffer2(m_device, m_commandPool);
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
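// Set and then wait on the event inside a single command buffer with matching stage masks; this submission
// should generate no errors.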
commandBuffer1.begin();
vk::CmdSetEvent(commandBuffer1.handle(), event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
vk::CmdWaitEvents(commandBuffer1.handle(), 1, &event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, nullptr, 0, nullptr, 0, nullptr);
commandBuffer1.end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &commandBuffer1.handle();
m_errorMonitor->ExpectSuccess();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyNotFound();
vk::QueueWaitIdle(m_device->m_queue);
vk::DestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, EventStageMaskOneCommandBufferFail) {
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkCommandBufferObj commandBuffer1(m_device, m_commandPool);
VkCommandBufferObj commandBuffer2(m_device, m_commandPool);
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
commandBuffer1.begin();
vk::CmdSetEvent(commandBuffer1.handle(), event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
// wrong srcStageMask
vk::CmdWaitEvents(commandBuffer1.handle(), 1, &event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
0, nullptr, 0, nullptr, 0, nullptr);
commandBuffer1.end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &commandBuffer1.handle();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdWaitEvents-srcStageMask-parameter");
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
vk::DestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, EventStageMaskTwoCommandBufferPass) {
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkCommandBufferObj commandBuffer1(m_device, m_commandPool);
VkCommandBufferObj commandBuffer2(m_device, m_commandPool);
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
commandBuffer1.begin();
vk::CmdSetEvent(commandBuffer1.handle(), event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
commandBuffer1.end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &commandBuffer1.handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
commandBuffer2.begin();
vk::CmdWaitEvents(commandBuffer2.handle(), 1, &event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, nullptr, 0, nullptr, 0, nullptr);
commandBuffer2.end();
submit_info.pCommandBuffers = &commandBuffer2.handle();
m_errorMonitor->ExpectSuccess();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyNotFound();
vk::QueueWaitIdle(m_device->m_queue);
vk::DestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, EventStageMaskTwoCommandBufferFail) {
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkCommandBufferObj commandBuffer1(m_device, m_commandPool);
VkCommandBufferObj commandBuffer2(m_device, m_commandPool);
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
commandBuffer1.begin();
vk::CmdSetEvent(commandBuffer1.handle(), event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
commandBuffer1.end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &commandBuffer1.handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
commandBuffer2.begin();
// wrong srcStageMask
vk::CmdWaitEvents(commandBuffer2.handle(), 1, &event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
0, nullptr, 0, nullptr, 0, nullptr);
commandBuffer2.end();
submit_info.pCommandBuffers = &commandBuffer2.handle();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdWaitEvents-srcStageMask-parameter");
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
vk::DestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, QueryPoolPartialTimestamp) {
TEST_DESCRIPTION("Request partial result on timestamp query.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
uint32_t queue_count;
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_count, NULL);
std::vector<VkQueueFamilyProperties> queue_props(queue_count);
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_count, queue_props.data());
if (queue_props[m_device->graphics_queue_node_index_].timestampValidBits == 0) {
printf("%s Device graphic queue has timestampValidBits of 0, skipping.\n", kSkipPrefix);
return;
}
VkBufferObj buffer;
buffer.init(*m_device, 128, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_ci.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
// Use setup as a positive test...
m_errorMonitor->ExpectSuccess();
m_commandBuffer->begin();
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
vk::CmdWriteTimestamp(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, query_pool, 0);
m_errorMonitor->VerifyNotFound();
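// VK_QUERY_RESULT_PARTIAL_BIT is not allowed for timestamp query pools, either when copying results on the
// GPU or when fetching them with vkGetQueryPoolResults.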
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyQueryPoolResults-queryType-00827");
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 0, 1, buffer.handle(), 0, 8, VK_QUERY_RESULT_PARTIAL_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->ExpectSuccess();
m_commandBuffer->end();
// Submit cmd buffer and wait for it.
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->VerifyNotFound();
// Attempt to obtain partial results.
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-queryType-00818");
uint32_t data_space[16];
m_errorMonitor->SetUnexpectedError("Cannot get query results on queryPool");
vk::GetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space), &data_space, sizeof(uint32_t),
VK_QUERY_RESULT_PARTIAL_BIT);
m_errorMonitor->VerifyFound();
// Destroy query pool.
vk::DestroyQueryPool(m_device->handle(), query_pool, NULL);
}
TEST_F(VkLayerTest, QueryPoolInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use query pool.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_ci.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
m_commandBuffer->begin();
// Use query pool to create binding with cmd buffer
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
vk::CmdWriteTimestamp(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, query_pool, 0);
m_commandBuffer->end();
// Submit cmd buffer and then destroy query pool while in-flight
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyQueryPool-queryPool-00793");
vk::DestroyQueryPool(m_device->handle(), query_pool, NULL);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
// Now that the cmd buffer is done we can safely destroy query_pool
m_errorMonitor->SetUnexpectedError("If queryPool is not VK_NULL_HANDLE, queryPool must be a valid VkQueryPool handle");
m_errorMonitor->SetUnexpectedError("Unable to remove QueryPool obj");
vk::DestroyQueryPool(m_device->handle(), query_pool, NULL);
}
TEST_F(VkLayerTest, PipelineInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use pipeline.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const VkPipelineLayoutObj pipeline_layout(m_device);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyPipeline-pipeline-00765");
// Create PSO and bind it so it is in use when deleted below
// Store pipeline handle so we can actually delete it before test finishes
VkPipeline delete_this_pipeline;
{ // Scope pipeline so it will be auto-deleted
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.InitState();
pipe.CreateGraphicsPipeline();
delete_this_pipeline = pipe.pipeline_;
m_commandBuffer->begin();
// Bind pipeline to cmd buffer
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer and then pipeline destroyed while in-flight
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
} // Pipeline deletion triggered here
m_errorMonitor->VerifyFound();
// Make sure queue finished and then actually delete pipeline
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If pipeline is not VK_NULL_HANDLE, pipeline must be a valid VkPipeline handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Pipeline obj");
vk::DestroyPipeline(m_device->handle(), delete_this_pipeline, nullptr);
}
TEST_F(VkLayerTest, ImageViewInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use imageView.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
VkResult err;
err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
// Create PSO to use the sampler
VkShaderObj fs(m_device, bindStateFragSamplerShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
pipe.dsl_bindings_ = {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
};
const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
pipe.dyn_state_ci_ = dyn_state_ci;
pipe.InitState();
pipe.CreateGraphicsPipeline();
pipe.descriptor_set_->WriteDescriptorImageInfo(0, view, sampler, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
pipe.descriptor_set_->UpdateDescriptorSets();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyImageView-imageView-01026");
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// Bind pipeline to cmd buffer
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_layout_.handle(), 0, 1,
&pipe.descriptor_set_->set_, 0, nullptr);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Prepare submission that puts the imageView in use
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer and then destroy imageView while in-flight
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::DestroyImageView(m_device->device(), view, nullptr);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
// Now we can actually destroy the imageView and the sampler
m_errorMonitor->SetUnexpectedError("If imageView is not VK_NULL_HANDLE, imageView must be a valid VkImageView handle");
m_errorMonitor->SetUnexpectedError("Unable to remove ImageView obj");
vk::DestroyImageView(m_device->device(), view, NULL);
vk::DestroySampler(m_device->device(), sampler, nullptr);
}
TEST_F(VkLayerTest, BufferViewInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use bufferView.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
uint32_t queue_family_index = 0;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = 1024;
buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
buffer_create_info.queueFamilyIndexCount = 1;
buffer_create_info.pQueueFamilyIndices = &queue_family_index;
VkBufferObj buffer;
buffer.init(*m_device, buffer_create_info);
VkBufferView view;
VkBufferViewCreateInfo bvci = {};
bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
bvci.buffer = buffer.handle();
bvci.format = VK_FORMAT_R32_SFLOAT;
bvci.range = VK_WHOLE_SIZE;
VkResult err = vk::CreateBufferView(m_device->device(), &bvci, NULL, &view);
ASSERT_VK_SUCCESS(err);
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0, r32f) uniform readonly imageBuffer s;\n"
"layout(location=0) out vec4 x;\n"
"void main(){\n"
" x = imageLoad(s, 0);\n"
"}\n";
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
pipe.dsl_bindings_ = {
{0, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
};
const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
pipe.dyn_state_ci_ = dyn_state_ci;
pipe.InitState();
err = pipe.CreateGraphicsPipeline();
if (err != VK_SUCCESS) {
printf("%s Unable to compile shader, skipping.\n", kSkipPrefix);
return;
}
pipe.descriptor_set_->WriteDescriptorBufferView(0, view, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
pipe.descriptor_set_->UpdateDescriptorSets();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyBufferView-bufferView-00936");
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
VkRect2D scissor = {{0, 0}, {16, 16}};
vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
// Bind pipeline to cmd buffer
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_layout_.handle(), 0, 1,
&pipe.descriptor_set_->set_, 0, nullptr);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer and then destroy bufferView while in-flight
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::DestroyBufferView(m_device->device(), view, nullptr);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
// Now we can actually destroy bufferView
m_errorMonitor->SetUnexpectedError("If bufferView is not VK_NULL_HANDLE, bufferView must be a valid VkBufferView handle");
m_errorMonitor->SetUnexpectedError("Unable to remove BufferView obj");
vk::DestroyBufferView(m_device->device(), view, NULL);
}
TEST_F(VkLayerTest, SamplerInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use sampler.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
VkResult err;
err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
// Create PSO to use the sampler
VkShaderObj fs(m_device, bindStateFragSamplerShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
pipe.dsl_bindings_ = {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
};
const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
pipe.dyn_state_ci_ = dyn_state_ci;
pipe.InitState();
pipe.CreateGraphicsPipeline();
pipe.descriptor_set_->WriteDescriptorImageInfo(0, view, sampler, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
pipe.descriptor_set_->UpdateDescriptorSets();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroySampler-sampler-01082");
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// Bind pipeline to cmd buffer
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_layout_.handle(), 0, 1,
&pipe.descriptor_set_->set_, 0, nullptr);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer then destroy sampler
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer and then destroy sampler while in-flight
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::DestroySampler(m_device->device(), sampler, nullptr); // Destroyed too soon
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
// Now we can actually destroy sampler
m_errorMonitor->SetUnexpectedError("If sampler is not VK_NULL_HANDLE, sampler must be a valid VkSampler handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Sampler obj");
vk::DestroySampler(m_device->device(), sampler, NULL); // Destroyed for real
}
TEST_F(VkLayerTest, QueueForwardProgressFenceWait) {
TEST_DESCRIPTION("Call VkQueueSubmit with a semaphore that is already signaled but not waited on by the queue.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const char *queue_forward_progress_message = "UNASSIGNED-CoreValidation-DrawState-QueueForwardProgress";
VkCommandBufferObj cb1(m_device, m_commandPool);
cb1.begin();
cb1.end();
VkSemaphoreCreateInfo semaphore_create_info = {};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &cb1.handle();
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_commandBuffer->begin();
m_commandBuffer->end();
submit_info.pCommandBuffers = &m_commandBuffer->handle();
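    // submit_info still signals the same semaphore; since nothing waited on the first signal, re-signaling it here
    // violates queue forward progress.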
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, queue_forward_progress_message);
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::DeviceWaitIdle(m_device->device());
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
}
#if GTEST_IS_THREADSAFE
TEST_F(VkLayerTest, ThreadCommandBufferCollision) {
test_platform_thread thread;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "THREADING ERROR");
m_errorMonitor->SetAllowedFailureMsg("THREADING ERROR"); // Ignore any extra threading errors found beyond the first one
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Calls AllocateCommandBuffers
VkCommandBufferObj commandBuffer(m_device, m_commandPool);
commandBuffer.begin();
VkEventCreateInfo event_info;
VkEvent event;
VkResult err;
memset(&event_info, 0, sizeof(event_info));
event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
err = vk::CreateEvent(device(), &event_info, NULL, &event);
ASSERT_VK_SUCCESS(err);
err = vk::ResetEvent(device(), event);
ASSERT_VK_SUCCESS(err);
struct thread_data_struct data;
data.commandBuffer = commandBuffer.handle();
data.event = event;
bool bailout = false;
data.bailout = &bailout;
m_errorMonitor->SetBailout(data.bailout);
// First do some correct operations using multiple threads.
// Add many entries to command buffer from another thread.
test_platform_thread_create(&thread, AddToCommandBuffer, (void *)&data);
// Make non-conflicting calls from this thread at the same time.
for (int i = 0; i < 80000; i++) {
uint32_t count;
vk::EnumeratePhysicalDevices(instance(), &count, NULL);
}
test_platform_thread_join(thread, NULL);
// Then do some incorrect operations using multiple threads.
// Add many entries to command buffer from another thread.
test_platform_thread_create(&thread, AddToCommandBuffer, (void *)&data);
// Add many entries to command buffer from this thread at the same time.
AddToCommandBuffer(&data);
test_platform_thread_join(thread, NULL);
commandBuffer.end();
m_errorMonitor->SetBailout(NULL);
m_errorMonitor->VerifyFound();
vk::DestroyEvent(device(), event, NULL);
}
TEST_F(VkLayerTest, ThreadUpdateDescriptorCollision) {
TEST_DESCRIPTION("Two threads updating the same descriptor set, expected to generate a threading error");
test_platform_thread thread;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "THREADING ERROR : vkUpdateDescriptorSets");
m_errorMonitor->SetAllowedFailureMsg("THREADING ERROR"); // Ignore any extra threading errors found beyond the first one
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet normal_descriptor_set(m_device,
{
{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
{1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
},
0);
VkBufferObj buffer;
buffer.init(*m_device, 256, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
struct thread_data_struct data;
data.device = device();
data.descriptorSet = normal_descriptor_set.set_;
data.binding = 0;
data.buffer = buffer.handle();
bool bailout = false;
data.bailout = &bailout;
m_errorMonitor->SetBailout(data.bailout);
// Update descriptors from another thread.
test_platform_thread_create(&thread, UpdateDescriptor, (void *)&data);
// Update descriptors from this thread at the same time.
struct thread_data_struct data2;
data2.device = device();
data2.descriptorSet = normal_descriptor_set.set_;
data2.binding = 1;
data2.buffer = buffer.handle();
data2.bailout = &bailout;
UpdateDescriptor(&data2);
test_platform_thread_join(thread, NULL);
m_errorMonitor->SetBailout(NULL);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ThreadUpdateDescriptorUpdateAfterBindNoCollision) {
TEST_DESCRIPTION("Two threads updating the same UAB descriptor set, expected not to generate a threading error");
test_platform_thread thread;
m_errorMonitor->ExpectSuccess();
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME) &&
DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE3_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE3_EXTENSION_NAME);
} else {
printf("%s Descriptor Indexing or Maintenance3 Extension not supported, skipping tests\n", kSkipPrefix);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
// Create a device that enables descriptorBindingStorageBufferUpdateAfterBind
auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexing_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (VK_FALSE == indexing_features.descriptorBindingStorageBufferUpdateAfterBind) {
printf("%s Test requires (unsupported) descriptorBindingStorageBufferUpdateAfterBind, skipping\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
std::array<VkDescriptorBindingFlagsEXT, 2> flags = {
{VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT, VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT}};
auto flags_create_info = lvl_init_struct<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>();
flags_create_info.bindingCount = (uint32_t)flags.size();
flags_create_info.pBindingFlags = flags.data();
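    // UPDATE_AFTER_BIND bindings do not require external synchronization for vkUpdateDescriptorSets, so the concurrent
    // updates below should not be reported as a threading collision.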
OneOffDescriptorSet normal_descriptor_set(m_device,
{
{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
{1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
},
VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT, &flags_create_info,
VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT);
VkBufferObj buffer;
buffer.init(*m_device, 256, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
struct thread_data_struct data;
data.device = device();
data.descriptorSet = normal_descriptor_set.set_;
data.binding = 0;
data.buffer = buffer.handle();
bool bailout = false;
data.bailout = &bailout;
m_errorMonitor->SetBailout(data.bailout);
// Update descriptors from another thread.
test_platform_thread_create(&thread, UpdateDescriptor, (void *)&data);
// Update descriptors from this thread at the same time.
struct thread_data_struct data2;
data2.device = device();
data2.descriptorSet = normal_descriptor_set.set_;
data2.binding = 1;
data2.buffer = buffer.handle();
data2.bailout = &bailout;
UpdateDescriptor(&data2);
test_platform_thread_join(thread, NULL);
m_errorMonitor->SetBailout(NULL);
m_errorMonitor->VerifyNotFound();
}
#endif // GTEST_IS_THREADSAFE
TEST_F(VkLayerTest, ExecuteUnrecordedPrimaryCB) {
TEST_DESCRIPTION("Attempt vkQueueSubmit with a CB in the initial state");
ASSERT_NO_FATAL_FAILURE(Init());
// never record m_commandBuffer
VkSubmitInfo si = {};
si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
si.commandBufferCount = 1;
si.pCommandBuffers = &m_commandBuffer->handle();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkQueueSubmit-pCommandBuffers-00072");
vk::QueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, Maintenance1AndNegativeViewport) {
TEST_DESCRIPTION("Attempt to enable AMD_negative_viewport_height and Maintenance1_KHR extension simultaneously");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!((DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) &&
(DeviceExtensionSupported(gpu(), nullptr, VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME)))) {
printf("%s Maintenance1 and AMD_negative viewport height extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
const char *extension_names[2] = {"VK_KHR_maintenance1", "VK_AMD_negative_viewport_height"};
VkDevice testDevice;
VkDeviceCreateInfo device_create_info = {};
auto features = m_device->phy().features();
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = NULL;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
device_create_info.enabledLayerCount = 0;
device_create_info.ppEnabledLayerNames = NULL;
device_create_info.enabledExtensionCount = 2;
device_create_info.ppEnabledExtensionNames = (const char *const *)extension_names;
device_create_info.pEnabledFeatures = &features;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceCreateInfo-ppEnabledExtensionNames-00374");
// The following unexpected error is coming from the LunarG loader. Do not make it a desired message because platforms that do
// not use the LunarG loader (e.g. Android) will not see the message and the test will fail.
m_errorMonitor->SetUnexpectedError("Failed to create device chain.");
vk::CreateDevice(gpu(), &device_create_info, NULL, &testDevice);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, HostQueryResetNotEnabled) {
TEST_DESCRIPTION("Use vkResetQueryPoolEXT without enabling the feature");
if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitState());
auto fpvkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPoolEXT");
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkResetQueryPool-None-02665");
fpvkResetQueryPoolEXT(m_device->device(), query_pool, 0, 1);
m_errorMonitor->VerifyFound();
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
}
TEST_F(VkLayerTest, HostQueryResetBadFirstQuery) {
TEST_DESCRIPTION("Bad firstQuery in vkResetQueryPoolEXT");
if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
SetTargetApiVersion(VK_API_VERSION_1_2);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset_features{};
host_query_reset_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
host_query_reset_features.hostQueryReset = VK_TRUE;
VkPhysicalDeviceFeatures2 pd_features2{};
pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
pd_features2.pNext = &host_query_reset_features;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2));
auto fpvkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPoolEXT");
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkResetQueryPool-firstQuery-02666");
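    // The pool was created with queryCount = 1, so firstQuery = 1 is out of range.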
fpvkResetQueryPoolEXT(m_device->device(), query_pool, 1, 0);
m_errorMonitor->VerifyFound();
if (DeviceValidationVersion() >= VK_API_VERSION_1_2) {
auto fpvkResetQueryPool = (PFN_vkResetQueryPool)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPool");
if (nullptr == fpvkResetQueryPool) {
m_errorMonitor->ExpectSuccess();
m_errorMonitor->SetError("No ProcAddr for 1.2 core vkResetQueryPool");
m_errorMonitor->VerifyNotFound();
} else {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkResetQueryPool-firstQuery-02666");
fpvkResetQueryPool(m_device->device(), query_pool, 1, 0);
m_errorMonitor->VerifyFound();
}
}
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
}
TEST_F(VkLayerTest, HostQueryResetBadRange) {
TEST_DESCRIPTION("Bad range in vkResetQueryPoolEXT");
if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset_features{};
host_query_reset_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
host_query_reset_features.hostQueryReset = VK_TRUE;
VkPhysicalDeviceFeatures2 pd_features2{};
pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
pd_features2.pNext = &host_query_reset_features;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2));
auto fpvkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPoolEXT");
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkResetQueryPool-firstQuery-02667");
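    // firstQuery (0) + queryCount (2) exceeds the pool's single query.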
fpvkResetQueryPoolEXT(m_device->device(), query_pool, 0, 2);
m_errorMonitor->VerifyFound();
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
}
TEST_F(VkLayerTest, HostQueryResetInvalidQueryPool) {
TEST_DESCRIPTION("Invalid queryPool in vkResetQueryPoolEXT");
if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset_features{};
host_query_reset_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
host_query_reset_features.hostQueryReset = VK_TRUE;
VkPhysicalDeviceFeatures2 pd_features2{};
pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
pd_features2.pNext = &host_query_reset_features;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2));
auto fpvkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPoolEXT");
// Create and destroy a query pool.
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
// Attempt to reuse the query pool handle.
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkResetQueryPool-queryPool-parameter");
fpvkResetQueryPoolEXT(m_device->device(), query_pool, 0, 1);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, HostQueryResetWrongDevice) {
TEST_DESCRIPTION("Device not matching queryPool in vkResetQueryPoolEXT");
if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset_features{};
host_query_reset_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
host_query_reset_features.hostQueryReset = VK_TRUE;
VkPhysicalDeviceFeatures2 pd_features2{};
pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
pd_features2.pNext = &host_query_reset_features;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2));
auto fpvkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPoolEXT");
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
// Create a second device with the feature enabled.
vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
auto features = m_device->phy().features();
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = &host_query_reset_features;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
device_create_info.pEnabledFeatures = &features;
device_create_info.enabledExtensionCount = m_device_extension_names.size();
device_create_info.ppEnabledExtensionNames = m_device_extension_names.data();
VkDevice second_device;
ASSERT_VK_SUCCESS(vk::CreateDevice(gpu(), &device_create_info, nullptr, &second_device));
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkResetQueryPool-queryPool-parent");
    // Call vkResetQueryPoolEXT on the wrong device.
fpvkResetQueryPoolEXT(second_device, query_pool, 0, 1);
m_errorMonitor->VerifyFound();
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
vk::DestroyDevice(second_device, nullptr);
}
TEST_F(VkLayerTest, ResetEventThenSet) {
TEST_DESCRIPTION("Reset an event then set it after the reset has been submitted.");
ASSERT_NO_FATAL_FAILURE(Init());
VkEvent event;
VkEventCreateInfo event_create_info{};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vk::CreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer;
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vk::AllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer);
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue);
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vk::BeginCommandBuffer(command_buffer, &begin_info);
vk::CmdResetEvent(command_buffer, event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
vk::EndCommandBuffer(command_buffer);
}
{
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer;
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = nullptr;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
{
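        // The pending vkCmdResetEvent still references the event, so a host-side vkSetEvent is reported as use of an
        // in-flight object.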
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "that is already in use by a command buffer.");
vk::SetEvent(m_device->device(), event);
m_errorMonitor->VerifyFound();
}
vk::QueueWaitIdle(queue);
vk::DestroyEvent(m_device->device(), event, nullptr);
vk::FreeCommandBuffers(m_device->device(), command_pool, 1, &command_buffer);
vk::DestroyCommandPool(m_device->device(), command_pool, NULL);
}
TEST_F(VkLayerTest, ShadingRateImageNV) {
TEST_DESCRIPTION("Test VK_NV_shading_rate_image.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
std::array<const char *, 1> required_device_extensions = {{VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME}};
for (auto device_extension : required_device_extensions) {
if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) {
m_device_extension_names.push_back(device_extension);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension);
return;
}
}
if (IsPlatform(kMockICD) || DeviceSimulation()) {
printf("%s Test not supported by MockICD, skipping tests\n", kSkipPrefix);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
// Create a device that enables shading_rate_image but disables multiViewport
auto shading_rate_image_features = lvl_init_struct<VkPhysicalDeviceShadingRateImageFeaturesNV>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&shading_rate_image_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
features2.features.multiViewport = VK_FALSE;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Test shading rate image creation
VkResult result = VK_RESULT_MAX_ENUM;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_R8_UINT;
image_create_info.extent.width = 4;
image_create_info.extent.height = 4;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV;
image_create_info.queueFamilyIndexCount = 0;
image_create_info.pQueueFamilyIndices = NULL;
image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
image_create_info.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
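    // The create info above is valid for a shading rate image; individual fields are broken below to trigger each
    // creation VUID in turn.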
// image type must be 2D
image_create_info.imageType = VK_IMAGE_TYPE_3D;
CreateImageTest(*this, &image_create_info, "VUID-VkImageCreateInfo-imageType-02082");
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.arrayLayers = 6;
// must be single sample
image_create_info.samples = VK_SAMPLE_COUNT_2_BIT;
CreateImageTest(*this, &image_create_info, "VUID-VkImageCreateInfo-samples-02083");
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
// tiling must be optimal
image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
CreateImageTest(*this, &image_create_info, "VUID-VkImageCreateInfo-tiling-02084");
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
// Should succeed.
VkImageObj image(m_device);
image.init(&image_create_info);
// Test image view creation
VkImageView view;
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_R8_UINT;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// view type must be 2D or 2D_ARRAY
ivci.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
ivci.subresourceRange.layerCount = 6;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-02086");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-01003");
result = vk::CreateImageView(m_device->device(), &ivci, nullptr, &view);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == result) {
vk::DestroyImageView(m_device->device(), view, NULL);
view = VK_NULL_HANDLE;
}
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.subresourceRange.layerCount = 1;
// format must be R8_UINT
ivci.format = VK_FORMAT_R8_UNORM;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-02087");
result = vk::CreateImageView(m_device->device(), &ivci, nullptr, &view);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == result) {
vk::DestroyImageView(m_device->device(), view, NULL);
view = VK_NULL_HANDLE;
}
ivci.format = VK_FORMAT_R8_UINT;
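    // With a 2D view type and R8_UINT format, view creation should now succeed.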
vk::CreateImageView(m_device->device(), &ivci, nullptr, &view);
m_errorMonitor->VerifyNotFound();
// Test pipeline creation
VkPipelineViewportShadingRateImageStateCreateInfoNV vsrisci = {
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV};
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
VkViewport viewports[20] = {viewport, viewport};
VkRect2D scissor = {{0, 0}, {64, 64}};
VkRect2D scissors[20] = {scissor, scissor};
VkDynamicState dynPalette = VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV;
VkPipelineDynamicStateCreateInfo dyn = {VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, nullptr, 0, 1, &dynPalette};
// viewportCount must be 0 or 1 when multiViewport is disabled
{
const auto break_vp = [&](CreatePipelineHelper &helper) {
helper.vp_state_ci_.viewportCount = 2;
helper.vp_state_ci_.pViewports = viewports;
helper.vp_state_ci_.scissorCount = 2;
helper.vp_state_ci_.pScissors = scissors;
helper.vp_state_ci_.pNext = &vsrisci;
helper.dyn_state_ci_ = dyn;
vsrisci.shadingRateImageEnable = VK_TRUE;
vsrisci.viewportCount = 2;
};
CreatePipelineHelper::OneshotTest(
*this, break_vp, kErrorBit,
vector<std::string>({"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-viewportCount-02054",
"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}));
}
// viewportCounts must match
{
const auto break_vp = [&](CreatePipelineHelper &helper) {
helper.vp_state_ci_.viewportCount = 1;
helper.vp_state_ci_.pViewports = viewports;
helper.vp_state_ci_.scissorCount = 1;
helper.vp_state_ci_.pScissors = scissors;
helper.vp_state_ci_.pNext = &vsrisci;
helper.dyn_state_ci_ = dyn;
vsrisci.shadingRateImageEnable = VK_TRUE;
vsrisci.viewportCount = 0;
};
CreatePipelineHelper::OneshotTest(
*this, break_vp, kErrorBit,
vector<std::string>({"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-shadingRateImageEnable-02056"}));
}
// pShadingRatePalettes must not be NULL.
{
const auto break_vp = [&](CreatePipelineHelper &helper) {
helper.vp_state_ci_.viewportCount = 1;
helper.vp_state_ci_.pViewports = viewports;
helper.vp_state_ci_.scissorCount = 1;
helper.vp_state_ci_.pScissors = scissors;
helper.vp_state_ci_.pNext = &vsrisci;
vsrisci.shadingRateImageEnable = VK_TRUE;
vsrisci.viewportCount = 1;
};
CreatePipelineHelper::OneshotTest(*this, break_vp, kErrorBit,
vector<std::string>({"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04057"}));
}
// Create an image without the SRI bit
VkImageObj nonSRIimage(m_device);
nonSRIimage.Init(256, 256, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(nonSRIimage.initialized());
VkImageView nonSRIview = nonSRIimage.targetView(VK_FORMAT_B8G8R8A8_UNORM);
// Test SRI layout on non-SRI image
VkImageMemoryBarrier img_barrier = {};
img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier.pNext = nullptr;
img_barrier.srcAccessMask = 0;
img_barrier.dstAccessMask = 0;
img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV;
img_barrier.image = nonSRIimage.handle();
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
m_commandBuffer->begin();
// Error trying to convert it to SRI layout
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageMemoryBarrier-oldLayout-02088");
vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
// succeed converting it to GENERAL
img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyNotFound();
// Test vk::CmdBindShadingRateImageNV errors
auto vkCmdBindShadingRateImageNV =
(PFN_vkCmdBindShadingRateImageNV)vk::GetDeviceProcAddr(m_device->device(), "vkCmdBindShadingRateImageNV");
// if the view is non-NULL, it must be R8_UINT, USAGE_SRI, image layout must match, layout must be valid
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindShadingRateImageNV-imageView-02060");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindShadingRateImageNV-imageView-02061");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindShadingRateImageNV-imageView-02062");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063");
vkCmdBindShadingRateImageNV(m_commandBuffer->handle(), nonSRIview, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
m_errorMonitor->VerifyFound();
// Test vk::CmdSetViewportShadingRatePaletteNV errors
auto vkCmdSetViewportShadingRatePaletteNV =
(PFN_vkCmdSetViewportShadingRatePaletteNV)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetViewportShadingRatePaletteNV");
VkShadingRatePaletteEntryNV paletteEntries[100] = {};
VkShadingRatePaletteNV palette = {100, paletteEntries};
VkShadingRatePaletteNV palettes[] = {palette, palette};
// errors on firstViewport/viewportCount
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02066");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02067");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02068");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewportShadingRatePaletteNV-viewportCount-02069");
vkCmdSetViewportShadingRatePaletteNV(m_commandBuffer->handle(), 20, 2, palettes);
m_errorMonitor->VerifyFound();
// shadingRatePaletteEntryCount must be in range
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071");
vkCmdSetViewportShadingRatePaletteNV(m_commandBuffer->handle(), 0, 1, palettes);
m_errorMonitor->VerifyFound();
VkCoarseSampleLocationNV locations[100] = {
{0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {0, 1, 1}, {0, 1, 1}, // duplicate
{1000, 0, 0}, // pixelX too large
{0, 1000, 0}, // pixelY too large
{0, 0, 1000}, // sample too large
};
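    // The custom sample order structs below index into this array at different offsets to build both valid and
    // deliberately invalid orders.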
// Test custom sample orders, both via pipeline state and via dynamic state
{
VkCoarseSampleOrderCustomNV sampOrdBadShadingRate = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV, 1, 1,
locations};
VkCoarseSampleOrderCustomNV sampOrdBadSampleCount = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 3, 1,
locations};
VkCoarseSampleOrderCustomNV sampOrdBadSampleLocationCount = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV,
2, 2, locations};
VkCoarseSampleOrderCustomNV sampOrdDuplicateLocations = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2,
1 * 2 * 2, &locations[1]};
VkCoarseSampleOrderCustomNV sampOrdOutOfRangeLocations = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2,
1 * 2 * 2, &locations[4]};
VkCoarseSampleOrderCustomNV sampOrdTooLargeSampleLocationCount = {
VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV, 4, 64, &locations[8]};
VkCoarseSampleOrderCustomNV sampOrdGood = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2, 1 * 2 * 2,
&locations[0]};
VkPipelineViewportCoarseSampleOrderStateCreateInfoNV csosci = {
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV};
csosci.sampleOrderType = VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV;
csosci.customSampleOrderCount = 1;
using std::vector;
struct TestCase {
const VkCoarseSampleOrderCustomNV *order;
vector<std::string> vuids;
};
vector<TestCase> test_cases = {
{&sampOrdBadShadingRate, {"VUID-VkCoarseSampleOrderCustomNV-shadingRate-02073"}},
{&sampOrdBadSampleCount,
{"VUID-VkCoarseSampleOrderCustomNV-sampleCount-02074", "VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02075"}},
{&sampOrdBadSampleLocationCount, {"VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02075"}},
{&sampOrdDuplicateLocations, {"VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077"}},
{&sampOrdOutOfRangeLocations,
{"VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077", "VUID-VkCoarseSampleLocationNV-pixelX-02078",
"VUID-VkCoarseSampleLocationNV-pixelY-02079", "VUID-VkCoarseSampleLocationNV-sample-02080"}},
{&sampOrdTooLargeSampleLocationCount,
{"VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02076",
"VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077"}},
{&sampOrdGood, {}},
};
for (const auto &test_case : test_cases) {
const auto break_vp = [&](CreatePipelineHelper &helper) {
helper.vp_state_ci_.pNext = &csosci;
csosci.pCustomSampleOrders = test_case.order;
};
CreatePipelineHelper::OneshotTest(*this, break_vp, kErrorBit, test_case.vuids);
}
// Test vk::CmdSetCoarseSampleOrderNV errors
auto vkCmdSetCoarseSampleOrderNV =
(PFN_vkCmdSetCoarseSampleOrderNV)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetCoarseSampleOrderNV");
for (const auto &test_case : test_cases) {
for (uint32_t i = 0; i < test_case.vuids.size(); ++i) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, test_case.vuids[i]);
}
vkCmdSetCoarseSampleOrderNV(m_commandBuffer->handle(), VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV, 1, test_case.order);
if (test_case.vuids.size()) {
m_errorMonitor->VerifyFound();
} else {
m_errorMonitor->VerifyNotFound();
}
}
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetCoarseSampleOrderNV-sampleOrderType-02081");
vkCmdSetCoarseSampleOrderNV(m_commandBuffer->handle(), VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV, 1, &sampOrdGood);
m_errorMonitor->VerifyFound();
}
m_commandBuffer->end();
vk::DestroyImageView(m_device->device(), view, NULL);
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "android_ndk_types.h"
TEST_F(VkLayerTest, AndroidHardwareBufferImageCreate) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer image create info.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
VkImage img = VK_NULL_HANDLE;
auto reset_img = [&img, dev]() {
if (VK_NULL_HANDLE != img) vk::DestroyImage(dev, img, NULL);
img = VK_NULL_HANDLE;
};
VkImageCreateInfo ici = {};
ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ici.pNext = nullptr;
ici.imageType = VK_IMAGE_TYPE_2D;
ici.arrayLayers = 1;
ici.extent = {64, 64, 1};
ici.format = VK_FORMAT_UNDEFINED;
ici.mipLevels = 1;
ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ici.samples = VK_SAMPLE_COUNT_1_BIT;
ici.tiling = VK_IMAGE_TILING_OPTIMAL;
ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
// undefined format
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-01975");
// Various extra errors for having VK_FORMAT_UNDEFINED without VkExternalFormatANDROID
m_errorMonitor->SetUnexpectedError("VUID_Undefined");
m_errorMonitor->SetUnexpectedError("UNASSIGNED-CoreValidation-Image-FormatNotSupported");
m_errorMonitor->SetUnexpectedError("VUID-VkImageCreateInfo-imageCreateMaxMipLevels-02251");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
// also undefined format
VkExternalFormatANDROID efa = {};
efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
efa.externalFormat = 0;
ici.pNext = &efa;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-01975");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
// undefined format with an unknown external format
efa.externalFormat = 0xBADC0DE;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkExternalFormatANDROID-externalFormat-01894");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
AHardwareBuffer *ahb;
AHardwareBuffer_Desc ahb_desc = {};
ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.width = 64;
ahb_desc.height = 64;
ahb_desc.layers = 1;
// Allocate an AHardwareBuffer
AHardwareBuffer_allocate(&ahb_desc, &ahb);
    // Retrieve its properties to make its external format 'known' (AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM)
VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props = {};
ahb_fmt_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props.pNext = &ahb_fmt_props;
PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps =
(PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vk::GetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID");
ASSERT_TRUE(pfn_GetAHBProps != nullptr);
pfn_GetAHBProps(dev, ahb, &ahb_props);
// a defined image format with a non-zero external format
ici.format = VK_FORMAT_R8G8B8A8_UNORM;
efa.externalFormat = ahb_fmt_props.externalFormat;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-01974");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
ici.format = VK_FORMAT_UNDEFINED;
// external format while MUTABLE
ici.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-02396");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
ici.flags = 0;
// external format while usage other than SAMPLED
ici.usage |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-02397");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
    // external format while tiling other than OPTIMAL
ici.tiling = VK_IMAGE_TILING_LINEAR;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-02398");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
ici.tiling = VK_IMAGE_TILING_OPTIMAL;
// imageType
VkExternalMemoryImageCreateInfo emici = {};
emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
emici.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
ici.pNext = &emici; // remove efa from chain, insert emici
ici.format = VK_FORMAT_R8G8B8A8_UNORM;
ici.imageType = VK_IMAGE_TYPE_3D;
ici.extent = {64, 64, 64};
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-02393");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
// wrong mipLevels
ici.imageType = VK_IMAGE_TYPE_2D;
ici.extent = {64, 64, 1};
    ici.mipLevels = 6;  // should be 7 (full mip chain for a 64x64 image is log2(64) + 1)
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-02394");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
}
TEST_F(VkLayerTest, AndroidHardwareBufferFetchUnboundImageInfo) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer retreive image properties while memory unbound.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
VkImage img = VK_NULL_HANDLE;
auto reset_img = [&img, dev]() {
if (VK_NULL_HANDLE != img) vk::DestroyImage(dev, img, NULL);
img = VK_NULL_HANDLE;
};
VkImageCreateInfo ici = {};
ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ici.pNext = nullptr;
ici.imageType = VK_IMAGE_TYPE_2D;
ici.arrayLayers = 1;
ici.extent = {64, 64, 1};
ici.format = VK_FORMAT_R8G8B8A8_UNORM;
ici.mipLevels = 1;
ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ici.samples = VK_SAMPLE_COUNT_1_BIT;
ici.tiling = VK_IMAGE_TILING_LINEAR;
ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
VkExternalMemoryImageCreateInfo emici = {};
emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
emici.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
ici.pNext = &emici;
m_errorMonitor->ExpectSuccess();
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyNotFound();
// attempt to fetch layout from unbound image
VkImageSubresource sub_rsrc = {};
sub_rsrc.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
VkSubresourceLayout sub_layout = {};
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetImageSubresourceLayout-image-01895");
vk::GetImageSubresourceLayout(dev, img, &sub_rsrc, &sub_layout);
m_errorMonitor->VerifyFound();
// attempt to get memory reqs from unbound image
VkImageMemoryRequirementsInfo2 imri = {};
imri.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
imri.image = img;
VkMemoryRequirements2 mem_reqs = {};
mem_reqs.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageMemoryRequirementsInfo2-image-01897");
vk::GetImageMemoryRequirements2(dev, &imri, &mem_reqs);
m_errorMonitor->VerifyFound();
reset_img();
}
TEST_F(VkLayerTest, AndroidHardwareBufferMemoryAllocation) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer memory allocation.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (IsPlatform(kGalaxyS10)) {
printf("%s This test should not run on Galaxy S10\n", kSkipPrefix);
return;
}
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
VkImage img = VK_NULL_HANDLE;
auto reset_img = [&img, dev]() {
if (VK_NULL_HANDLE != img) vk::DestroyImage(dev, img, NULL);
img = VK_NULL_HANDLE;
};
VkDeviceMemory mem_handle = VK_NULL_HANDLE;
auto reset_mem = [&mem_handle, dev]() {
if (VK_NULL_HANDLE != mem_handle) vk::FreeMemory(dev, mem_handle, NULL);
mem_handle = VK_NULL_HANDLE;
};
PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps =
(PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vk::GetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID");
ASSERT_TRUE(pfn_GetAHBProps != nullptr);
// AHB structs
AHardwareBuffer *ahb = nullptr;
AHardwareBuffer_Desc ahb_desc = {};
VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props = {};
ahb_fmt_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props.pNext = &ahb_fmt_props;
VkImportAndroidHardwareBufferInfoANDROID iahbi = {};
iahbi.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
// destroy and re-acquire an AHB, and fetch its properties
auto recreate_ahb = [&ahb, &iahbi, &ahb_desc, &ahb_props, dev, pfn_GetAHBProps]() {
if (ahb) AHardwareBuffer_release(ahb);
ahb = nullptr;
AHardwareBuffer_allocate(&ahb_desc, &ahb);
if (ahb) {
pfn_GetAHBProps(dev, ahb, &ahb_props);
iahbi.buffer = ahb;
}
};
// Allocate an AHardwareBuffer
ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.width = 64;
ahb_desc.height = 64;
ahb_desc.layers = 1;
recreate_ahb();
// Create an image w/ external format
VkExternalFormatANDROID efa = {};
efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
efa.externalFormat = ahb_fmt_props.externalFormat;
VkImageCreateInfo ici = {};
ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ici.pNext = &efa;
ici.imageType = VK_IMAGE_TYPE_2D;
ici.arrayLayers = 1;
ici.extent = {64, 64, 1};
ici.format = VK_FORMAT_UNDEFINED;
ici.mipLevels = 1;
ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ici.samples = VK_SAMPLE_COUNT_1_BIT;
ici.tiling = VK_IMAGE_TILING_OPTIMAL;
ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
VkResult res = vk::CreateImage(dev, &ici, NULL, &img);
ASSERT_VK_SUCCESS(res);
VkMemoryAllocateInfo mai = {};
mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mai.pNext = &iahbi; // Chained import struct
mai.allocationSize = ahb_props.allocationSize;
mai.memoryTypeIndex = 32;
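// 32 is a deliberately out-of-range sentinel; the loop below must replace it with an index advertised in ahb_props.memoryTypeBits (checked by the ASSERT_NE that follows)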
// Set index to match one of the bits in ahb_props
for (int i = 0; i < 32; i++) {
if (ahb_props.memoryTypeBits & (1 << i)) {
mai.memoryTypeIndex = i;
break;
}
}
ASSERT_NE(32, mai.memoryTypeIndex);
// Import w/ non-dedicated memory allocation
// Import requires format AHB_FMT_BLOB and usage AHB_USAGE_GPU_DATA_BUFFER
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02384");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
// Allocation size mismatch
ahb_desc.format = AHARDWAREBUFFER_FORMAT_BLOB;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
ahb_desc.height = 1;
recreate_ahb();
mai.allocationSize = ahb_props.allocationSize + 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-allocationSize-02383");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
mai.allocationSize = ahb_props.allocationSize;
reset_mem();
// memoryTypeIndex mismatch
mai.memoryTypeIndex++;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
mai.memoryTypeIndex--;
reset_mem();
// Insert dedicated image memory allocation to mai chain
VkMemoryDedicatedAllocateInfo mdai = {};
mdai.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
mdai.image = img;
mdai.buffer = VK_NULL_HANDLE;
mdai.pNext = mai.pNext;
mai.pNext = &mdai;
// Dedicated allocation with unmatched usage bits for Color
ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER;
ahb_desc.height = 64;
recreate_ahb();
mai.allocationSize = ahb_props.allocationSize;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02390");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
// Dedicated allocation with unmatched usage bits for Depth/Stencil
ahb_desc.format = AHARDWAREBUFFER_FORMAT_S8_UINT;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER;
ahb_desc.height = 64;
recreate_ahb();
mai.allocationSize = ahb_props.allocationSize;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02390");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
// Dedicated allocation with incomplete mip chain
reset_img();
ici.mipLevels = 2;
vk::CreateImage(dev, &ici, NULL, &img);
mdai.image = img;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE;
recreate_ahb();
if (ahb) {
mai.allocationSize = ahb_props.allocationSize;
for (int i = 0; i < 32; i++) {
if (ahb_props.memoryTypeBits & (1 << i)) {
mai.memoryTypeIndex = i;
break;
}
}
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02389");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
} else {
// ERROR: AHardwareBuffer_allocate() with MIPMAP_COMPLETE fails. It returns -12, NO_MEMORY.
// The problem seems to happen on Pixel 2, not Pixel 3.
printf("%s AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE not supported, skipping tests\n", kSkipPrefix);
return;
}
// Dedicated allocation with mis-matched dimension
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.height = 32;
ahb_desc.width = 128;
recreate_ahb();
mai.allocationSize = ahb_props.allocationSize;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02388");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
// Dedicated allocation with mis-matched VkFormat
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.height = 64;
ahb_desc.width = 64;
recreate_ahb();
mai.allocationSize = ahb_props.allocationSize;
ici.mipLevels = 1;
ici.format = VK_FORMAT_B8G8R8A8_UNORM;
ici.pNext = NULL;
VkImage img2;
vk::CreateImage(dev, &ici, NULL, &img2);
mdai.image = img2;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02387");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
vk::DestroyImage(dev, img2, NULL);
mdai.image = img;
reset_mem();
// Missing required ahb usage
ahb_desc.usage = AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
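// PROTECTED_CONTENT carries no AHARDWAREBUFFER_USAGE_GPU_* bit, so querying the buffer's properties is expected to be rejected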
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884");
recreate_ahb();
m_errorMonitor->VerifyFound();
// Dedicated allocation with missing usage bits
// Setting up this test also triggers a slew of others
mai.allocationSize = ahb_props.allocationSize + 1;
mai.memoryTypeIndex = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02390");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-allocationSize-02383");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02386");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
// Non-import allocation - replace import struct in chain with export struct
VkExportMemoryAllocateInfo emai = {};
emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
emai.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
mai.pNext = &emai;
emai.pNext = &mdai; // still dedicated
mdai.pNext = nullptr;
// Export with allocation size non-zero
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
recreate_ahb();
mai.allocationSize = ahb_props.allocationSize;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryDedicatedAllocateInfo-image-02964");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-01874");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
AHardwareBuffer_release(ahb);
reset_mem();
reset_img();
}
TEST_F(VkLayerTest, AndroidHardwareBufferCreateYCbCrSampler) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer YCbCr sampler creation.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
// Enable Ycbcr Conversion Features
VkPhysicalDeviceSamplerYcbcrConversionFeatures ycbcr_features = {};
ycbcr_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
ycbcr_features.samplerYcbcrConversion = VK_TRUE;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &ycbcr_features));
VkDevice dev = m_device->device();
VkSamplerYcbcrConversion ycbcr_conv = VK_NULL_HANDLE;
VkSamplerYcbcrConversionCreateInfo sycci = {};
sycci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO;
sycci.format = VK_FORMAT_UNDEFINED;
sycci.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
sycci.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
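// With no VkExternalFormatANDROID chained, VK_FORMAT_UNDEFINED is not an acceptable conversion format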
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSamplerYcbcrConversionCreateInfo-format-04061");
m_errorMonitor->SetUnexpectedError("VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651");
vk::CreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv);
m_errorMonitor->VerifyFound();
VkExternalFormatANDROID efa = {};
efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
efa.externalFormat = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM;
sycci.format = VK_FORMAT_R8G8B8A8_UNORM;
sycci.pNext = &efa;
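// With an external format chained, format must be VK_FORMAT_UNDEFINED, so a defined format should be rejected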
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904");
m_errorMonitor->SetUnexpectedError("VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651");
vk::CreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, AndroidHardwareBufferPhysDevImageFormatProp2) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer GetPhysicalDeviceImageFormatProperties.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping test\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
if ((DeviceValidationVersion() < VK_API_VERSION_1_1) &&
!InstanceExtensionEnabled(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
printf("%s %s extension not supported, skipping test\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
VkImageFormatProperties2 ifp = {};
ifp.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
VkPhysicalDeviceImageFormatInfo2 pdifi = {};
pdifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
pdifi.format = VK_FORMAT_R8G8B8A8_UNORM;
pdifi.tiling = VK_IMAGE_TILING_OPTIMAL;
pdifi.type = VK_IMAGE_TYPE_2D;
pdifi.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
VkAndroidHardwareBufferUsageANDROID ahbu = {};
ahbu.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID;
ahbu.androidHardwareBufferUsage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ifp.pNext = &ahbu;
// AHB usage struct chained to the output without a matching external image format struct chained to the input
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868");
vk::GetPhysicalDeviceImageFormatProperties2(m_device->phy().handle(), &pdifi, &ifp);
m_errorMonitor->VerifyFound();
// external image format info chained to the input, but its handleType is not VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID
VkPhysicalDeviceExternalImageFormatInfo pdeifi = {};
pdeifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
pdifi.pNext = &pdeifi;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868");
vk::GetPhysicalDeviceImageFormatProperties2(m_device->phy().handle(), &pdifi, &ifp);
m_errorMonitor->VerifyFound();
}
#if DISABLEUNTILAHBWORKS
TEST_F(VkLayerTest, AndroidHardwareBufferCreateImageView) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer image view creation.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (IsPlatform(kGalaxyS10)) {
printf("%s This test should not run on Galaxy S10\n", kSkipPrefix);
return;
}
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
// Allocate an AHB and fetch its properties
AHardwareBuffer *ahb = nullptr;
AHardwareBuffer_Desc ahb_desc = {};
ahb_desc.format = AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.width = 64;
ahb_desc.height = 64;
ahb_desc.layers = 1;
AHardwareBuffer_allocate(&ahb_desc, &ahb);
// Retrieve AHB properties to make its external format 'known'
VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props = {};
ahb_fmt_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props.pNext = &ahb_fmt_props;
PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps =
(PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vk::GetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID");
ASSERT_TRUE(pfn_GetAHBProps != nullptr);
pfn_GetAHBProps(dev, ahb, &ahb_props);
AHardwareBuffer_release(ahb);
VkExternalMemoryImageCreateInfo emici = {};
emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
emici.pNext = nullptr;
emici.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
// Give image an external format
VkExternalFormatANDROID efa = {};
efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
efa.pNext = (void *)&emici;
efa.externalFormat = ahb_fmt_props.externalFormat;
ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.width = 64;
ahb_desc.height = 1;
ahb_desc.layers = 1;
AHardwareBuffer_allocate(&ahb_desc, &ahb);
// Create another VkExternalFormatANDROID for test VUID-VkImageViewCreateInfo-image-02400
VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props_Ycbcr = {};
ahb_fmt_props_Ycbcr.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
VkAndroidHardwareBufferPropertiesANDROID ahb_props_Ycbcr = {};
ahb_props_Ycbcr.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props_Ycbcr.pNext = &ahb_fmt_props_Ycbcr;
pfn_GetAHBProps(dev, ahb, &ahb_props_Ycbcr);
AHardwareBuffer_release(ahb);
VkExternalFormatANDROID efa_Ycbcr = {};
efa_Ycbcr.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
efa_Ycbcr.externalFormat = ahb_fmt_props_Ycbcr.externalFormat;
// Need to make sure format has sample bit needed for image usage
if ((ahb_fmt_props_Ycbcr.formatFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) == 0) {
printf("%s VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT feature bit not supported for format %" PRIu64 ".", kSkipPrefix,
ahb_fmt_props_Ycbcr.externalFormat);
return;
}
// Create the image
VkImage img = VK_NULL_HANDLE;
VkImageCreateInfo ici = {};
ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ici.pNext = &efa;
ici.imageType = VK_IMAGE_TYPE_2D;
ici.arrayLayers = 1;
ici.extent = {64, 64, 1};
ici.format = VK_FORMAT_UNDEFINED;
ici.mipLevels = 1;
ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ici.samples = VK_SAMPLE_COUNT_1_BIT;
ici.tiling = VK_IMAGE_TILING_OPTIMAL;
ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
vk::CreateImage(dev, &ici, NULL, &img);
// Set up memory allocation
VkDeviceMemory img_mem = VK_NULL_HANDLE;
VkMemoryAllocateInfo mai = {};
mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mai.allocationSize = 64 * 64 * 4;
mai.memoryTypeIndex = 0;
vk::AllocateMemory(dev, &mai, NULL, &img_mem);
// vk::GetImageMemoryRequirements must not be used on an imported AndroidHardwareBuffer image before memory is bound
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetImageMemoryRequirements-image-04004");
VkMemoryRequirements img_mem_reqs = {};
vk::GetImageMemoryRequirements(m_device->device(), img, &img_mem_reqs);
m_errorMonitor->VerifyFound();
vk::BindImageMemory(dev, img, img_mem, 0);
// Bind image to memory
vk::DestroyImage(dev, img, NULL);
vk::FreeMemory(dev, img_mem, NULL);
vk::CreateImage(dev, &ici, NULL, &img);
vk::AllocateMemory(dev, &mai, NULL, &img_mem);
vk::BindImageMemory(dev, img, img_mem, 0);
// Create a YCbCr conversion, with different external format, chain to view
VkSamplerYcbcrConversion ycbcr_conv = VK_NULL_HANDLE;
VkSamplerYcbcrConversionCreateInfo sycci = {};
sycci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO;
sycci.pNext = &efa_Ycbcr;
sycci.format = VK_FORMAT_UNDEFINED;
sycci.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
sycci.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
vk::CreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv);
VkSamplerYcbcrConversionInfo syci = {};
syci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO;
syci.conversion = ycbcr_conv;
// Create a view
VkImageView image_view = VK_NULL_HANDLE;
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.pNext = &syci;
ivci.image = img;
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_UNDEFINED;
ivci.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
auto reset_view = [&image_view, dev]() {
if (VK_NULL_HANDLE != image_view) vk::DestroyImageView(dev, image_view, NULL);
image_view = VK_NULL_HANDLE;
};
// Up to this point, no errors expected
m_errorMonitor->VerifyNotFound();
// Chained ycbcr conversion has different (external) format than image
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-02400");
// Also causes "unsupported format" - should be removed in future spec update
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-None-02273");
vk::CreateImageView(dev, &ivci, NULL, &image_view);
m_errorMonitor->VerifyFound();
reset_view();
vk::DestroySamplerYcbcrConversion(dev, ycbcr_conv, NULL);
sycci.pNext = &efa;
vk::CreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv);
syci.conversion = ycbcr_conv;
// View component swizzle not IDENTITY
ivci.components.r = VK_COMPONENT_SWIZZLE_B;
ivci.components.b = VK_COMPONENT_SWIZZLE_R;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-02401");
// Also causes "unsupported format" - should be removed in future spec update
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-None-02273");
vk::CreateImageView(dev, &ivci, NULL, &image_view);
m_errorMonitor->VerifyFound();
reset_view();
ivci.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
ivci.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
// View with external format, when format is not UNDEFINED
ivci.format = VK_FORMAT_R5G6B5_UNORM_PACK16;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-02399");
// Also causes "view format different from image format"
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-01762");
vk::CreateImageView(dev, &ivci, NULL, &image_view);
m_errorMonitor->VerifyFound();
reset_view();
vk::DestroySamplerYcbcrConversion(dev, ycbcr_conv, NULL);
vk::DestroyImageView(dev, image_view, NULL);
vk::DestroyImage(dev, img, NULL);
vk::FreeMemory(dev, img_mem, NULL);
}
#endif // DISABLEUNTILAHBWORKS
TEST_F(VkLayerTest, AndroidHardwareBufferImportBuffer) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer import as buffer.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (IsPlatform(kGalaxyS10)) {
printf("%s This test should not run on Galaxy S10\n", kSkipPrefix);
return;
}
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
VkDeviceMemory mem_handle = VK_NULL_HANDLE;
auto reset_mem = [&mem_handle, dev]() {
if (VK_NULL_HANDLE != mem_handle) vk::FreeMemory(dev, mem_handle, NULL);
mem_handle = VK_NULL_HANDLE;
};
PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps =
(PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vk::GetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID");
ASSERT_TRUE(pfn_GetAHBProps != nullptr);
// AHB structs
AHardwareBuffer *ahb = nullptr;
AHardwareBuffer_Desc ahb_desc = {};
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
VkImportAndroidHardwareBufferInfoANDROID iahbi = {};
iahbi.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
// Allocate an AHardwareBuffer
ahb_desc.format = AHARDWAREBUFFER_FORMAT_BLOB;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_SENSOR_DIRECT_DATA; // non USAGE_GPU_*
ahb_desc.width = 512;
ahb_desc.height = 1;
ahb_desc.layers = 1;
AHardwareBuffer_allocate(&ahb_desc, &ahb);
m_errorMonitor->SetUnexpectedError("VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884");
pfn_GetAHBProps(dev, ahb, &ahb_props);
iahbi.buffer = ahb;
// Create a buffer to import the AHB memory into
VkExternalMemoryBufferCreateInfo ext_buf_info = {};
ext_buf_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR;
ext_buf_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
VkBufferCreateInfo bci = {};
bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bci.pNext = &ext_buf_info;
bci.size = ahb_props.allocationSize;
bci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
VkBuffer buf = VK_NULL_HANDLE;
vk::CreateBuffer(dev, &bci, NULL, &buf);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(dev, buf, &mem_reqs);
// Allocation info
VkMemoryAllocateInfo mai = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, mem_reqs, 0);
mai.pNext = &iahbi; // Chained import struct
VkPhysicalDeviceMemoryProperties memory_info;
vk::GetPhysicalDeviceMemoryProperties(gpu(), &memory_info);
unsigned int i;
for (i = 0; i < memory_info.memoryTypeCount; i++) {
if ((ahb_props.memoryTypeBits & (1 << i))) {
mai.memoryTypeIndex = i;
break;
}
}
if (i >= memory_info.memoryTypeCount) {
printf("%s No invalid memory type index could be found; skipped.\n", kSkipPrefix);
AHardwareBuffer_release(ahb);
reset_mem();
vk::DestroyBuffer(dev, buf, NULL);
return;
}
// Import as buffer requires usage AHB_USAGE_GPU_DATA_BUFFER
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881");
// Also causes "non-dedicated allocation format/usage" error
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02384");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
AHardwareBuffer_release(ahb);
reset_mem();
vk::DestroyBuffer(dev, buf, NULL);
}
TEST_F(VkLayerTest, AndroidHardwareBufferExporttBuffer) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer export memory as AHB.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (IsPlatform(kGalaxyS10)) {
printf("%s This test should not run on Galaxy S10\n", kSkipPrefix);
return;
}
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
VkDeviceMemory mem_handle = VK_NULL_HANDLE;
// Allocate device memory, no linked export struct indicating AHB handle type
VkMemoryAllocateInfo mai = {};
mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mai.allocationSize = 65536;
mai.memoryTypeIndex = 0;
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
PFN_vkGetMemoryAndroidHardwareBufferANDROID pfn_GetMemAHB =
(PFN_vkGetMemoryAndroidHardwareBufferANDROID)vk::GetDeviceProcAddr(dev, "vkGetMemoryAndroidHardwareBufferANDROID");
ASSERT_TRUE(pfn_GetMemAHB != nullptr);
VkMemoryGetAndroidHardwareBufferInfoANDROID mgahbi = {};
mgahbi.sType = VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
mgahbi.memory = mem_handle;
AHardwareBuffer *ahb = nullptr;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882");
pfn_GetMemAHB(dev, &mgahbi, &ahb);
m_errorMonitor->VerifyFound();
if (ahb) AHardwareBuffer_release(ahb);
ahb = nullptr;
if (VK_NULL_HANDLE != mem_handle) vk::FreeMemory(dev, mem_handle, NULL);
mem_handle = VK_NULL_HANDLE;
// Add an export struct with AHB handle type to allocation info
VkExportMemoryAllocateInfo emai = {};
emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
emai.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
mai.pNext = &emai;
// Create an image, do not bind memory
VkImage img = VK_NULL_HANDLE;
VkImageCreateInfo ici = {};
ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ici.imageType = VK_IMAGE_TYPE_2D;
ici.arrayLayers = 1;
ici.extent = {128, 128, 1};
ici.format = VK_FORMAT_R8G8B8A8_UNORM;
ici.mipLevels = 1;
ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ici.samples = VK_SAMPLE_COUNT_1_BIT;
ici.tiling = VK_IMAGE_TILING_OPTIMAL;
ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
vk::CreateImage(dev, &ici, NULL, &img);
ASSERT_TRUE(VK_NULL_HANDLE != img);
// Add image to allocation chain as dedicated info, re-allocate
VkMemoryDedicatedAllocateInfo mdai = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO};
mdai.image = img;
emai.pNext = &mdai;
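// allocationSize is left as zero because a dedicated AHB export takes its size from the dedicated image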
mai.allocationSize = 0;
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
mgahbi.memory = mem_handle;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883");
pfn_GetMemAHB(dev, &mgahbi, &ahb);
m_errorMonitor->VerifyFound();
if (ahb) AHardwareBuffer_release(ahb);
if (VK_NULL_HANDLE != mem_handle) vk::FreeMemory(dev, mem_handle, NULL);
vk::DestroyImage(dev, img, NULL);
}
TEST_F(VkLayerTest, AndroidHardwareBufferInvalidBindBufferMemory) {
TEST_DESCRIPTION("Validate binding AndroidHardwareBuffer VkBuffer act same as non-AHB buffers.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (IsPlatform(kGalaxyS10)) {
printf("%s This test should not run on Galaxy S10\n", kSkipPrefix);
return;
}
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Allocate an AHardwareBuffer
AHardwareBuffer *ahb;
AHardwareBuffer_Desc ahb_desc = {};
ahb_desc.format = AHARDWAREBUFFER_FORMAT_BLOB;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
ahb_desc.width = 64;
ahb_desc.height = 1;
ahb_desc.layers = 1;
ahb_desc.stride = 1;
AHardwareBuffer_allocate(&ahb_desc, &ahb);
VkExternalMemoryBufferCreateInfo ext_buf_info = {};
ext_buf_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR;
ext_buf_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.pNext = &ext_buf_info;
buffer_create_info.size = 1 << 20; // 1 MB
buffer_create_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
VkBuffer buffer = VK_NULL_HANDLE;
vk::CreateBuffer(m_device->device(), &buffer_create_info, nullptr, &buffer);
// Try to get memory requirements prior to binding memory
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);
VkImportAndroidHardwareBufferInfoANDROID import_ahb_Info = {};
import_ahb_Info.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
import_ahb_Info.pNext = nullptr;
import_ahb_Info.buffer = ahb;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.pNext = &import_ahb_Info;
memory_info.allocationSize = mem_reqs.size + mem_reqs.alignment; // save room for offset
bool has_memtype = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &memory_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
if (!has_memtype) {
printf("%s No invalid memory type index could be found; skipped.\n", kSkipPrefix);
AHardwareBuffer_release(ahb);
vk::DestroyBuffer(m_device->device(), buffer, nullptr);
return;
}
VkDeviceMemory memory = VK_NULL_HANDLE;
VkResult result = vk::AllocateMemory(m_device->device(), &memory_info, NULL, &memory);
if ((memory == VK_NULL_HANDLE) || (result != VK_SUCCESS)) {
printf("%s This test failed to allocate memory for importing\n", kSkipPrefix);
return;
}
if (mem_reqs.alignment > 1) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkBindBufferMemory-memoryOffset-01036");
vk::BindBufferMemory(device(), buffer, memory, 1);
m_errorMonitor->VerifyFound();
}
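// Compute the largest offset that is a multiple of the required alignment but still below mem_reqs.size; binding there should leave too little memory for the buffer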
VkDeviceSize buffer_offset = (mem_reqs.size - 1) & ~(mem_reqs.alignment - 1);
if (buffer_offset > 0) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkBindBufferMemory-size-01037");
vk::BindBufferMemory(device(), buffer, memory, buffer_offset);
m_errorMonitor->VerifyFound();
}
vk::DestroyBuffer(m_device->device(), buffer, nullptr);
vk::FreeMemory(m_device->device(), memory, nullptr);
}
TEST_F(VkLayerTest, AndroidHardwareBufferImportBufferHandleType) {
TEST_DESCRIPTION("Don't use proper resource handleType for import buffer");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (IsPlatform(kGalaxyS10)) {
printf("%s This test should not run on Galaxy S10\n", kSkipPrefix);
return;
}
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps =
(PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vk::GetDeviceProcAddr(m_device->device(),
"vkGetAndroidHardwareBufferPropertiesANDROID");
PFN_vkBindBufferMemory2KHR vkBindBufferMemory2Function =
(PFN_vkBindBufferMemory2KHR)vk::GetDeviceProcAddr(m_device->handle(), "vkBindBufferMemory2KHR");
m_errorMonitor->ExpectSuccess();
AHardwareBuffer *ahb;
AHardwareBuffer_Desc ahb_desc = {};
ahb_desc.format = AHARDWAREBUFFER_FORMAT_BLOB;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
ahb_desc.width = 64;
ahb_desc.height = 1;
ahb_desc.layers = 1;
ahb_desc.stride = 1;
AHardwareBuffer_allocate(&ahb_desc, &ahb);
// Create buffer without VkExternalMemoryBufferCreateInfo
VkBuffer buffer = VK_NULL_HANDLE;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.pNext = nullptr;
buffer_create_info.size = 512;
buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
vk::CreateBuffer(m_device->device(), &buffer_create_info, nullptr, &buffer);
VkImportAndroidHardwareBufferInfoANDROID import_ahb_Info = {};
import_ahb_Info.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
import_ahb_Info.pNext = nullptr;
import_ahb_Info.buffer = ahb;
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props.pNext = nullptr;
pfn_GetAHBProps(m_device->device(), ahb, &ahb_props);
VkMemoryAllocateInfo memory_allocate_info = {};
memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_allocate_info.pNext = &import_ahb_Info;
memory_allocate_info.allocationSize = ahb_props.allocationSize;
// The driver won't expose the correct memoryType since the resource was not created as an import operation,
// so any valid memory type returned from vkGetAndroidHardwareBufferPropertiesANDROID will do
for (int i = 0; i < 32; i++) {
if (ahb_props.memoryTypeBits & (1 << i)) {
memory_allocate_info.memoryTypeIndex = i;
break;
}
}
VkDeviceMemory memory;
vk::AllocateMemory(m_device->device(), &memory_allocate_info, nullptr, &memory);
m_errorMonitor->VerifyNotFound();
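// memory was imported from an AHB, but the buffer was not created with the AHB external-memory handle type, so both bind paths below should fail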
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkBindBufferMemory-memory-02986");
m_errorMonitor->SetUnexpectedError("VUID-vkBindBufferMemory-memory-01035");
vk::BindBufferMemory(m_device->device(), buffer, memory, 0);
m_errorMonitor->VerifyFound();
VkBindBufferMemoryInfo bind_buffer_info = {};
bind_buffer_info.sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO;
bind_buffer_info.pNext = nullptr;
bind_buffer_info.buffer = buffer;
bind_buffer_info.memory = memory;
bind_buffer_info.memoryOffset = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindBufferMemoryInfo-memory-02986");
m_errorMonitor->SetUnexpectedError("VUID-VkBindBufferMemoryInfo-memory-01035");
vkBindBufferMemory2Function(m_device->device(), 1, &bind_buffer_info);
m_errorMonitor->VerifyFound();
vk::DestroyBuffer(m_device->device(), buffer, nullptr);
vk::FreeMemory(m_device->device(), memory, nullptr);
}
TEST_F(VkLayerTest, AndroidHardwareBufferImportImageHandleType) {
TEST_DESCRIPTION("Don't use proper resource handleType for import image");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (IsPlatform(kGalaxyS10)) {
printf("%s This test should not run on Galaxy S10\n", kSkipPrefix);
return;
}
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps =
(PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vk::GetDeviceProcAddr(m_device->device(),
"vkGetAndroidHardwareBufferPropertiesANDROID");
PFN_vkBindImageMemory2KHR vkBindImageMemory2Function =
(PFN_vkBindImageMemory2KHR)vk::GetDeviceProcAddr(m_device->handle(), "vkBindImageMemory2KHR");
m_errorMonitor->ExpectSuccess();
AHardwareBuffer *ahb;
AHardwareBuffer_Desc ahb_desc = {};
ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.width = 64;
ahb_desc.height = 64;
ahb_desc.layers = 1;
ahb_desc.stride = 1;
AHardwareBuffer_allocate(&ahb_desc, &ahb);
// Create image without VkExternalMemoryImageCreateInfo
VkImage image = VK_NULL_HANDLE;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = nullptr;
image_create_info.flags = 0;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_create_info.extent = {64, 64, 1};
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
vk::CreateImage(m_device->device(), &image_create_info, nullptr, &image);
VkMemoryDedicatedAllocateInfo memory_dedicated_info = {};
memory_dedicated_info.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
memory_dedicated_info.pNext = nullptr;
memory_dedicated_info.image = image;
memory_dedicated_info.buffer = VK_NULL_HANDLE;
VkImportAndroidHardwareBufferInfoANDROID import_ahb_Info = {};
import_ahb_Info.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
import_ahb_Info.pNext = &memory_dedicated_info;
import_ahb_Info.buffer = ahb;
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props.pNext = nullptr;
pfn_GetAHBProps(m_device->device(), ahb, &ahb_props);
VkMemoryAllocateInfo memory_allocate_info = {};
memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_allocate_info.pNext = &import_ahb_Info;
memory_allocate_info.allocationSize = ahb_props.allocationSize;
// The driver won't expose the correct memoryType since the resource was not created as an import operation,
// so any valid memory type returned from vkGetAndroidHardwareBufferPropertiesANDROID will do
for (int i = 0; i < 32; i++) {
if (ahb_props.memoryTypeBits & (1 << i)) {
memory_allocate_info.memoryTypeIndex = i;
break;
}
}
VkDeviceMemory memory;
vk::AllocateMemory(m_device->device(), &memory_allocate_info, nullptr, &memory);
m_errorMonitor->VerifyNotFound();
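// memory was imported from an AHB, but the image was not created with the AHB external-memory handle type, so both bind paths below should fail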
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkBindImageMemory-memory-02990");
m_errorMonitor->SetUnexpectedError("VUID-vkBindImageMemory-memory-01047");
m_errorMonitor->SetUnexpectedError("VUID-vkBindImageMemory-size-01049");
vk::BindImageMemory(m_device->device(), image, memory, 0);
m_errorMonitor->VerifyFound();
VkBindImageMemoryInfo bind_image_info = {};
bind_image_info.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
bind_image_info.pNext = nullptr;
bind_image_info.image = image;
bind_image_info.memory = memory;
bind_image_info.memoryOffset = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindImageMemoryInfo-memory-02990");
m_errorMonitor->SetUnexpectedError("VUID-VkBindImageMemoryInfo-pNext-01617");
m_errorMonitor->SetUnexpectedError("VUID-VkBindImageMemoryInfo-pNext-01615");
vkBindImageMemory2Function(m_device->device(), 1, &bind_image_info);
m_errorMonitor->VerifyFound();
vk::DestroyImage(m_device->device(), image, nullptr);
vk::FreeMemory(m_device->device(), memory, nullptr);
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
TEST_F(VkLayerTest, ValidateStride) {
TEST_DESCRIPTION("Validate Stride.");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
if (IsPlatform(kPixelC)) {
printf("%s This test should not run on Pixel C\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_ci.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
m_commandBuffer->begin();
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
vk::CmdWriteTimestamp(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, query_pool, 0);
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(m_device->m_queue);
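// A stride of 1 byte is invalid: stride must be a multiple of 4 for 32-bit results and a multiple of 8 with VK_QUERY_RESULT_64_BIT,
// so both calls below should be rejected, while the 4- and 8-byte strides that follow are valid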
char data_space;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-flags-02827");
vk::GetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space), &data_space, 1, VK_QUERY_RESULT_WAIT_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-flags-00815");
vk::GetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space), &data_space, 1,
(VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_64_BIT));
m_errorMonitor->VerifyFound();
char data_space4[4] = "";
m_errorMonitor->ExpectSuccess();
vk::GetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space4), &data_space4, 4, VK_QUERY_RESULT_WAIT_BIT);
m_errorMonitor->VerifyNotFound();
char data_space8[8] = "";
m_errorMonitor->ExpectSuccess();
vk::GetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space8), &data_space8, 8,
(VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_64_BIT));
m_errorMonitor->VerifyNotFound();
uint32_t qfi = 0;
VkBufferCreateInfo buff_create_info = {};
buff_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buff_create_info.size = 128;
buff_create_info.usage =
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
buff_create_info.queueFamilyIndexCount = 1;
buff_create_info.pQueueFamilyIndices = &qfi;
VkBufferObj buffer;
buffer.init(*m_device, buff_create_info);
m_commandBuffer->reset();
m_commandBuffer->begin();
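// The same stride rules apply to vkCmdCopyQueryPoolResults: dstOffset and stride must be multiples of 4, or of 8 with VK_QUERY_RESULT_64_BIT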
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyQueryPoolResults-flags-00822");
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 0, 1, buffer.handle(), 1, 1, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyQueryPoolResults-flags-00823");
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 0, 1, buffer.handle(), 1, 1, VK_QUERY_RESULT_64_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->ExpectSuccess();
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 0, 1, buffer.handle(), 4, 4, 0);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->ExpectSuccess();
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 0, 1, buffer.handle(), 8, 8, VK_QUERY_RESULT_64_BIT);
m_errorMonitor->VerifyNotFound();
if (m_device->phy().features().multiDrawIndirect) {
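// Make the transfer writes from the query copy visible before the buffer is reused as an indirect and index buffer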
auto buffer_memory_barrier = buffer.buffer_memory_barrier(
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT, 0, VK_WHOLE_SIZE);
m_commandBuffer->PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0, nullptr, 1,
&buffer_memory_barrier, 0, nullptr);
CreatePipelineHelper helper(*this);
helper.InitInfo();
helper.InitState();
helper.CreateGraphicsPipeline();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, helper.pipeline_);
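// A stride of 2 is neither a multiple of 4 nor large enough for VkDrawIndirectCommand, and 100 draws at that stride overrun the 128-byte buffer, so two errors are expected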
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndirect-drawCount-00476");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndirect-drawCount-00488");
vk::CmdDrawIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 100, 2);
m_errorMonitor->VerifyFound();
m_errorMonitor->ExpectSuccess();
vk::CmdDrawIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 2, 24);
m_errorMonitor->VerifyNotFound();
vk::CmdBindIndexBuffer(m_commandBuffer->handle(), buffer.handle(), 0, VK_INDEX_TYPE_UINT16);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndexedIndirect-drawCount-00528");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndexedIndirect-drawCount-00540");
vk::CmdDrawIndexedIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 100, 2);
m_errorMonitor->VerifyFound();
m_errorMonitor->ExpectSuccess();
vk::CmdDrawIndexedIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 2, 24);
m_errorMonitor->VerifyNotFound();
vk::CmdEndRenderPass(m_commandBuffer->handle());
m_commandBuffer->end();
} else {
printf("%s Test requires unsupported multiDrawIndirect feature. Skipped.\n", kSkipPrefix);
}
vk::DestroyQueryPool(m_device->handle(), query_pool, NULL);
}
TEST_F(VkLayerTest, WarningSwapchainCreateInfoPreTransform) {
TEST_DESCRIPTION("Print warning when preTransform doesn't match curretTransform");
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_errorMonitor->SetDesiredFailureMsg(kPerformanceWarningBit, "UNASSIGNED-CoreValidation-SwapchainPreTransform");
m_errorMonitor->SetUnexpectedError("VUID-VkSwapchainCreateInfoKHR-preTransform-01279");
InitSwapchain(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR);
m_errorMonitor->VerifyFound();
DestroySwapchain();
}
TEST_F(VkLayerTest, ValidateGeometryNV) {
TEST_DESCRIPTION("Validate acceleration structure geometries.");
if (!InitFrameworkForRayTracingTest(this, false, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
VkBufferObj vbo;
vbo.init(*m_device, 1024, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
VkBufferObj ibo;
ibo.init(*m_device, 1024, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
VkBufferObj tbo;
tbo.init(*m_device, 1024, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
VkBufferObj aabbbo;
aabbbo.init(*m_device, 1024, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
VkBufferCreateInfo unbound_buffer_ci = {};
unbound_buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
unbound_buffer_ci.size = 1024;
unbound_buffer_ci.usage = VK_BUFFER_USAGE_RAY_TRACING_BIT_NV;
VkBufferObj unbound_buffer;
unbound_buffer.init_no_mem(*m_device, unbound_buffer_ci);
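// Host-side geometry data: one triangle, its indices, a single AABB, and a 3x4 row-major identity transform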
const std::vector<float> vertices = {1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 0.0f};
const std::vector<uint32_t> indices = {0, 1, 2};
const std::vector<float> aabbs = {0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f};
const std::vector<float> transforms = {1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f};
uint8_t *mapped_vbo_buffer_data = (uint8_t *)vbo.memory().map();
std::memcpy(mapped_vbo_buffer_data, (uint8_t *)vertices.data(), sizeof(float) * vertices.size());
vbo.memory().unmap();
uint8_t *mapped_ibo_buffer_data = (uint8_t *)ibo.memory().map();
std::memcpy(mapped_ibo_buffer_data, (uint8_t *)indices.data(), sizeof(uint32_t) * indices.size());
ibo.memory().unmap();
uint8_t *mapped_tbo_buffer_data = (uint8_t *)tbo.memory().map();
std::memcpy(mapped_tbo_buffer_data, (uint8_t *)transforms.data(), sizeof(float) * transforms.size());
tbo.memory().unmap();
uint8_t *mapped_aabbbo_buffer_data = (uint8_t *)aabbbo.memory().map();
std::memcpy(mapped_aabbbo_buffer_data, (uint8_t *)aabbs.data(), sizeof(float) * aabbs.size());
aabbbo.memory().unmap();
VkGeometryNV valid_geometry_triangles = {};
valid_geometry_triangles.sType = VK_STRUCTURE_TYPE_GEOMETRY_NV;
valid_geometry_triangles.geometryType = VK_GEOMETRY_TYPE_TRIANGLES_NV;
valid_geometry_triangles.geometry.triangles.sType = VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV;
valid_geometry_triangles.geometry.triangles.vertexData = vbo.handle();
valid_geometry_triangles.geometry.triangles.vertexOffset = 0;
valid_geometry_triangles.geometry.triangles.vertexCount = 3;
valid_geometry_triangles.geometry.triangles.vertexStride = 12;
valid_geometry_triangles.geometry.triangles.vertexFormat = VK_FORMAT_R32G32B32_SFLOAT;
valid_geometry_triangles.geometry.triangles.indexData = ibo.handle();
valid_geometry_triangles.geometry.triangles.indexOffset = 0;
valid_geometry_triangles.geometry.triangles.indexCount = 3;
valid_geometry_triangles.geometry.triangles.indexType = VK_INDEX_TYPE_UINT32;
valid_geometry_triangles.geometry.triangles.transformData = tbo.handle();
valid_geometry_triangles.geometry.triangles.transformOffset = 0;
valid_geometry_triangles.geometry.aabbs = {};
valid_geometry_triangles.geometry.aabbs.sType = VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV;
VkGeometryNV valid_geometry_aabbs = {};
valid_geometry_aabbs.sType = VK_STRUCTURE_TYPE_GEOMETRY_NV;
valid_geometry_aabbs.geometryType = VK_GEOMETRY_TYPE_AABBS_NV;
valid_geometry_aabbs.geometry.triangles = {};
valid_geometry_aabbs.geometry.triangles.sType = VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV;
valid_geometry_aabbs.geometry.aabbs = {};
valid_geometry_aabbs.geometry.aabbs.sType = VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV;
valid_geometry_aabbs.geometry.aabbs.aabbData = aabbbo.handle();
valid_geometry_aabbs.geometry.aabbs.numAABBs = 1;
valid_geometry_aabbs.geometry.aabbs.offset = 0;
valid_geometry_aabbs.geometry.aabbs.stride = 24;
PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV = reinterpret_cast<PFN_vkCreateAccelerationStructureNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCreateAccelerationStructureNV"));
assert(vkCreateAccelerationStructureNV != nullptr);
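// Helper that builds a bottom-level acceleration structure create info referencing a single geometry; each subtest below tweaks one field of a valid triangle geometry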
const auto GetCreateInfo = [](const VkGeometryNV &geometry) {
VkAccelerationStructureCreateInfoNV as_create_info = {};
as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
as_create_info.info.instanceCount = 0;
as_create_info.info.geometryCount = 1;
as_create_info.info.pGeometries = &geometry;
return as_create_info;
};
VkAccelerationStructureNV as;
// Invalid vertex format.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.vertexFormat = VK_FORMAT_R64_UINT;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-vertexFormat-02430");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid vertex offset - not multiple of component size.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.vertexOffset = 1;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-vertexOffset-02429");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid vertex offset - bigger than buffer.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.vertexOffset = 12 * 1024;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-vertexOffset-02428");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid vertex buffer - no such buffer.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.vertexData = VkBuffer(123456789);
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-vertexData-parameter");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
#if 0
// XXX Subtest disabled because this is the wrong VUID.
// No VUIDs currently exist to require memory is bound (spec bug).
// Invalid vertex buffer - no memory bound.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.vertexData = unbound_buffer.handle();
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-vertexOffset-02428");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
#endif
// Invalid index offset - not multiple of index size.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.indexOffset = 1;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-indexOffset-02432");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid index offset - bigger than buffer.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.indexOffset = 2048;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-indexOffset-02431");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid index count - must be 0 if type is VK_INDEX_TYPE_NONE_NV.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.indexType = VK_INDEX_TYPE_NONE_NV;
geometry.geometry.triangles.indexData = VK_NULL_HANDLE;
geometry.geometry.triangles.indexCount = 1;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-indexCount-02436");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid index data - must be VK_NULL_HANDLE if type is VK_INDEX_TYPE_NONE_NV.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.indexType = VK_INDEX_TYPE_NONE_NV;
geometry.geometry.triangles.indexData = ibo.handle();
geometry.geometry.triangles.indexCount = 0;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-indexData-02434");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid transform offset - not multiple of 16.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.transformOffset = 1;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-transformOffset-02438");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid transform offset - bigger than buffer.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.transformOffset = 2048;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-transformOffset-02437");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid aabb offset - not multiple of 8.
{
VkGeometryNV geometry = valid_geometry_aabbs;
geometry.geometry.aabbs.offset = 1;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryAABBNV-offset-02440");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid aabb offset - bigger than buffer.
{
VkGeometryNV geometry = valid_geometry_aabbs;
geometry.geometry.aabbs.offset = 8 * 1024;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryAABBNV-offset-02439");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid aabb stride - not multiple of 8.
{
VkGeometryNV geometry = valid_geometry_aabbs;
geometry.geometry.aabbs.stride = 1;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryAABBNV-stride-02441");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// geometryType must be VK_GEOMETRY_TYPE_TRIANGLES_NV or VK_GEOMETRY_TYPE_AABBS_NV
{
VkGeometryNV geometry = valid_geometry_aabbs;
geometry.geometry.aabbs.stride = 1;
geometry.geometryType = VK_GEOMETRY_TYPE_INSTANCES_KHR;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryNV-geometryType-03503");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ValidateCreateAccelerationStructureNV) {
TEST_DESCRIPTION("Validate acceleration structure creation.");
if (!InitFrameworkForRayTracingTest(this, false, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV = reinterpret_cast<PFN_vkCreateAccelerationStructureNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCreateAccelerationStructureNV"));
assert(vkCreateAccelerationStructureNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV as_create_info = {};
as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
VkAccelerationStructureNV as = VK_NULL_HANDLE;
// Top level can not have geometry
{
VkAccelerationStructureCreateInfoNV bad_top_level_create_info = as_create_info;
bad_top_level_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
bad_top_level_create_info.info.instanceCount = 0;
bad_top_level_create_info.info.geometryCount = 1;
bad_top_level_create_info.info.pGeometries = &geometry;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureInfoNV-type-02425");
vkCreateAccelerationStructureNV(m_device->handle(), &bad_top_level_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Bot level can not have instances
{
VkAccelerationStructureCreateInfoNV bad_bot_level_create_info = as_create_info;
bad_bot_level_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
bad_bot_level_create_info.info.instanceCount = 1;
bad_bot_level_create_info.info.geometryCount = 0;
bad_bot_level_create_info.info.pGeometries = nullptr;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureInfoNV-type-02426");
vkCreateAccelerationStructureNV(m_device->handle(), &bad_bot_level_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Can not prefer both fast trace and fast build
{
VkAccelerationStructureCreateInfoNV bad_flags_level_create_info = as_create_info;
bad_flags_level_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
bad_flags_level_create_info.info.instanceCount = 0;
bad_flags_level_create_info.info.geometryCount = 1;
bad_flags_level_create_info.info.pGeometries = &geometry;
bad_flags_level_create_info.info.flags =
VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV | VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureInfoNV-flags-02592");
vkCreateAccelerationStructureNV(m_device->handle(), &bad_flags_level_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Can not have geometry or instance for compacting
{
VkAccelerationStructureCreateInfoNV bad_compacting_as_create_info = as_create_info;
bad_compacting_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
bad_compacting_as_create_info.info.instanceCount = 0;
bad_compacting_as_create_info.info.geometryCount = 1;
bad_compacting_as_create_info.info.pGeometries = &geometry;
bad_compacting_as_create_info.info.flags = 0;
bad_compacting_as_create_info.compactedSize = 1024;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureCreateInfoNV-compactedSize-02421");
vkCreateAccelerationStructureNV(m_device->handle(), &bad_compacting_as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Can not mix different geometry types into single bottom level acceleration structure
{
VkGeometryNV aabb_geometry = {};
aabb_geometry.sType = VK_STRUCTURE_TYPE_GEOMETRY_NV;
aabb_geometry.geometryType = VK_GEOMETRY_TYPE_AABBS_NV;
aabb_geometry.geometry.triangles.sType = VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV;
aabb_geometry.geometry.aabbs = {};
aabb_geometry.geometry.aabbs.sType = VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV;
// Buffer contents do not matter for this test.
aabb_geometry.geometry.aabbs.aabbData = geometry.geometry.triangles.vertexData;
aabb_geometry.geometry.aabbs.numAABBs = 1;
aabb_geometry.geometry.aabbs.offset = 0;
aabb_geometry.geometry.aabbs.stride = 24;
std::vector<VkGeometryNV> geometries = {geometry, aabb_geometry};
VkAccelerationStructureCreateInfoNV mix_geometry_types_as_create_info = as_create_info;
mix_geometry_types_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
mix_geometry_types_as_create_info.info.instanceCount = 0;
mix_geometry_types_as_create_info.info.geometryCount = static_cast<uint32_t>(geometries.size());
mix_geometry_types_as_create_info.info.pGeometries = geometries.data();
mix_geometry_types_as_create_info.info.flags = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAccelerationStructureInfoNV-type-02786");
vkCreateAccelerationStructureNV(m_device->handle(), &mix_geometry_types_as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ValidateCreateAccelerationStructureKHR) {
TEST_DESCRIPTION("Validate acceleration structure creation.");
if (!InitFrameworkForRayTracingTest(this, true, m_instance_extension_names, m_device_extension_names, m_errorMonitor, false,
false, true)) {
return;
}
auto ray_tracing_features = lvl_init_struct<VkPhysicalDeviceRayTracingFeaturesKHR>();
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&ray_tracing_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (ray_tracing_features.rayQuery == VK_FALSE && ray_tracing_features.rayTracing == VK_FALSE) {
printf("%s Both of the required features rayQuery and rayTracing are not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &ray_tracing_features));
PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR = reinterpret_cast<PFN_vkCreateAccelerationStructureKHR>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCreateAccelerationStructureKHR"));
assert(vkCreateAccelerationStructureKHR != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
// Get an NV geometry in the helper, then pull out the bits we need for Create
VkGeometryNV geometryNV;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometryNV);
VkAccelerationStructureCreateGeometryTypeInfoKHR geometryInfo = {};
geometryInfo.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR;
geometryInfo.geometryType = geometryNV.geometryType;
geometryInfo.maxPrimitiveCount = 1024;
geometryInfo.indexType = geometryNV.geometry.triangles.indexType;
geometryInfo.maxVertexCount = 1024;
geometryInfo.vertexFormat = geometryNV.geometry.triangles.vertexFormat;
geometryInfo.allowsTransforms = VK_TRUE;
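// Each subtest below copies this valid create info and breaks a single requirement.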
VkAccelerationStructureCreateInfoKHR as_create_info = {};
as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR;
VkAccelerationStructureKHR as = VK_NULL_HANDLE;
// Top level can not have geometry
{
VkAccelerationStructureCreateInfoKHR bad_top_level_create_info = as_create_info;
bad_top_level_create_info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR;
bad_top_level_create_info.maxGeometryCount = 1;
bad_top_level_create_info.pGeometryInfos = &geometryInfo;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAccelerationStructureCreateInfoKHR-type-03496");
vkCreateAccelerationStructureKHR(m_device->handle(), &bad_top_level_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// If type is VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR
// and compactedSize is 0, maxGeometryCount must be 1
// also tests If compactedSize is 0 then maxGeometryCount must not be 0
{
VkAccelerationStructureCreateInfoKHR bad_top_level_create_info = as_create_info;
bad_top_level_create_info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR;
bad_top_level_create_info.maxGeometryCount = 0;
bad_top_level_create_info.compactedSize = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkAccelerationStructureCreateInfoKHR-compactedSize-02993");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAccelerationStructureCreateInfoKHR-type-03495");
vkCreateAccelerationStructureKHR(m_device->handle(), &bad_top_level_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Bot level can not have instances
{
VkAccelerationStructureCreateInfoKHR bad_bot_level_create_info = as_create_info;
bad_bot_level_create_info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
bad_bot_level_create_info.maxGeometryCount = 1;
VkAccelerationStructureCreateGeometryTypeInfoKHR geometryInfo2 = geometryInfo;
geometryInfo2.geometryType = VK_GEOMETRY_TYPE_INSTANCES_KHR;
bad_bot_level_create_info.pGeometryInfos = &geometryInfo2;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAccelerationStructureCreateInfoKHR-type-03497");
vkCreateAccelerationStructureKHR(m_device->handle(), &bad_bot_level_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Can not prefer both fast trace and fast build
{
VkAccelerationStructureCreateInfoKHR bad_flags_level_create_info = as_create_info;
bad_flags_level_create_info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
bad_flags_level_create_info.maxGeometryCount = 1;
bad_flags_level_create_info.pGeometryInfos = &geometryInfo;
bad_flags_level_create_info.flags =
VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR | VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkAccelerationStructureCreateInfoKHR-flags-03499");
vkCreateAccelerationStructureKHR(m_device->handle(), &bad_flags_level_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Can not have geometry or instance for compacting
{
VkAccelerationStructureCreateInfoKHR bad_compacting_as_create_info = as_create_info;
bad_compacting_as_create_info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
bad_compacting_as_create_info.maxGeometryCount = 1;
bad_compacting_as_create_info.pGeometryInfos = &geometryInfo;
bad_compacting_as_create_info.flags = 0;
bad_compacting_as_create_info.compactedSize = 1024;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkAccelerationStructureCreateInfoKHR-compactedSize-03490");
vkCreateAccelerationStructureKHR(m_device->handle(), &bad_compacting_as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Can not mix different geometry types into single bottom level acceleration structure
{
VkAccelerationStructureCreateGeometryTypeInfoKHR aabb_geometry = geometryInfo;
aabb_geometry.geometryType = VK_GEOMETRY_TYPE_AABBS_KHR;
std::vector<VkAccelerationStructureCreateGeometryTypeInfoKHR> geometries = {geometryInfo, aabb_geometry};
VkAccelerationStructureCreateInfoKHR mix_geometry_types_as_create_info = as_create_info;
mix_geometry_types_as_create_info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
mix_geometry_types_as_create_info.maxGeometryCount = static_cast<uint32_t>(geometries.size());
mix_geometry_types_as_create_info.pGeometryInfos = geometries.data();
mix_geometry_types_as_create_info.flags = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAccelerationStructureCreateInfoKHR-type-03498");
vkCreateAccelerationStructureKHR(m_device->handle(), &mix_geometry_types_as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// If geometryType is VK_GEOMETRY_TYPE_TRIANGLES_KHR, indexType must be
// VK_INDEX_TYPE_UINT16, VK_INDEX_TYPE_UINT32, or VK_INDEX_TYPE_NONE_KHR
{
VkAccelerationStructureCreateGeometryTypeInfoKHR invalid_index = geometryInfo;
invalid_index.geometryType = VK_GEOMETRY_TYPE_TRIANGLES_KHR;
invalid_index.indexType = VK_INDEX_TYPE_UINT8_EXT;
VkAccelerationStructureCreateInfoKHR invalid_index_geometry_types_as_create_info = as_create_info;
invalid_index_geometry_types_as_create_info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
invalid_index_geometry_types_as_create_info.pGeometryInfos = &invalid_index;
invalid_index_geometry_types_as_create_info.maxGeometryCount = 1;
invalid_index_geometry_types_as_create_info.flags = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkAccelerationStructureCreateGeometryTypeInfoKHR-geometryType-03502");
vkCreateAccelerationStructureKHR(m_device->handle(), &invalid_index_geometry_types_as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// flags must be a valid combination of VkBuildAccelerationStructureFlagBitsKHR
{
VkAccelerationStructureCreateInfoKHR invalid_flag = as_create_info;
invalid_flag.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
invalid_flag.flags = VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_KHR;
invalid_flag.pGeometryInfos = &geometryInfo;
invalid_flag.maxGeometryCount = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkAccelerationStructureCreateInfoKHR-flags-parameter");
vkCreateAccelerationStructureKHR(m_device->handle(), &invalid_flag, nullptr, &as);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ValidateBindAccelerationStructureNV) {
TEST_DESCRIPTION("Validate acceleration structure binding.");
if (!InitFrameworkForRayTracingTest(this, false, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV =
reinterpret_cast<PFN_vkBindAccelerationStructureMemoryNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkBindAccelerationStructureMemoryNV"));
assert(vkBindAccelerationStructureMemoryNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV as_create_info = {};
as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
as_create_info.info.geometryCount = 1;
as_create_info.info.pGeometries = &geometry;
as_create_info.info.instanceCount = 0;
VkAccelerationStructureObj as(*m_device, as_create_info, false);
m_errorMonitor->VerifyNotFound();
VkMemoryRequirements as_memory_requirements = as.memory_requirements().memoryRequirements;
VkBindAccelerationStructureMemoryInfoNV as_bind_info = {};
as_bind_info.sType = VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV;
as_bind_info.accelerationStructure = as.handle();
VkMemoryAllocateInfo as_memory_alloc = {};
as_memory_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
as_memory_alloc.allocationSize = as_memory_requirements.size;
ASSERT_TRUE(m_device->phy().set_memory_type(as_memory_requirements.memoryTypeBits, &as_memory_alloc, 0));
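// Each subtest below starts from this valid allocation/bind info and violates exactly one requirement.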
// Can not bind already freed memory
{
VkDeviceMemory as_memory_freed = VK_NULL_HANDLE;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc, NULL, &as_memory_freed));
vk::FreeMemory(device(), as_memory_freed, NULL);
VkBindAccelerationStructureMemoryInfoNV as_bind_info_freed = as_bind_info;
as_bind_info_freed.memory = as_memory_freed;
as_bind_info_freed.memoryOffset = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindAccelerationStructureMemoryInfoKHR-memory-parameter");
(void)vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_freed);
m_errorMonitor->VerifyFound();
}
// Can not bind with bad alignment
if (as_memory_requirements.alignment > 1) {
VkMemoryAllocateInfo as_memory_alloc_bad_alignment = as_memory_alloc;
as_memory_alloc_bad_alignment.allocationSize += 1;
VkDeviceMemory as_memory_bad_alignment = VK_NULL_HANDLE;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc_bad_alignment, NULL, &as_memory_bad_alignment));
VkBindAccelerationStructureMemoryInfoNV as_bind_info_bad_alignment = as_bind_info;
as_bind_info_bad_alignment.memory = as_memory_bad_alignment;
as_bind_info_bad_alignment.memoryOffset = 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindAccelerationStructureMemoryInfoKHR-memoryOffset-02594");
(void)vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_bad_alignment);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), as_memory_bad_alignment, NULL);
}
// Can not bind with offset outside the allocation
{
VkDeviceMemory as_memory_bad_offset = VK_NULL_HANDLE;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc, NULL, &as_memory_bad_offset));
VkBindAccelerationStructureMemoryInfoNV as_bind_info_bad_offset = as_bind_info;
as_bind_info_bad_offset.memory = as_memory_bad_offset;
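// Pick an aligned offset that lies past the end of the allocation (alignment is a power of two).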
as_bind_info_bad_offset.memoryOffset =
(as_memory_alloc.allocationSize + as_memory_requirements.alignment) & ~(as_memory_requirements.alignment - 1);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindAccelerationStructureMemoryInfoKHR-memoryOffset-02451");
(void)vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_bad_offset);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), as_memory_bad_offset, NULL);
}
// Can not bind with offset that doesn't leave enough size
{
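// Round (size - 1) down to the alignment so the offset leaves less than the required size available.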
VkDeviceSize offset = (as_memory_requirements.size - 1) & ~(as_memory_requirements.alignment - 1);
if (offset > 0 && (as_memory_requirements.size < (as_memory_alloc.allocationSize - as_memory_requirements.alignment))) {
VkDeviceMemory as_memory_bad_offset = VK_NULL_HANDLE;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc, NULL, &as_memory_bad_offset));
VkBindAccelerationStructureMemoryInfoNV as_bind_info_bad_offset = as_bind_info;
as_bind_info_bad_offset.memory = as_memory_bad_offset;
as_bind_info_bad_offset.memoryOffset = offset;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindAccelerationStructureMemoryInfoKHR-size-02595");
(void)vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_bad_offset);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), as_memory_bad_offset, NULL);
}
}
// Can not bind with memory that has unsupported memory type
{
VkPhysicalDeviceMemoryProperties memory_properties = {};
vk::GetPhysicalDeviceMemoryProperties(m_device->phy().handle(), &memory_properties);
uint32_t supported_memory_type_bits = as_memory_requirements.memoryTypeBits;
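// Invert the supported mask, restricted to the memory types that actually exist on this device.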
uint32_t unsupported_mem_type_bits = ((1 << memory_properties.memoryTypeCount) - 1) & ~supported_memory_type_bits;
if (unsupported_mem_type_bits != 0) {
VkMemoryAllocateInfo as_memory_alloc_bad_type = as_memory_alloc;
ASSERT_TRUE(m_device->phy().set_memory_type(unsupported_mem_type_bits, &as_memory_alloc_bad_type, 0));
VkDeviceMemory as_memory_bad_type = VK_NULL_HANDLE;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc_bad_type, NULL, &as_memory_bad_type));
VkBindAccelerationStructureMemoryInfoNV as_bind_info_bad_type = as_bind_info;
as_bind_info_bad_type.memory = as_memory_bad_type;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindAccelerationStructureMemoryInfoKHR-memory-02593");
(void)vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_bad_type);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), as_memory_bad_type, NULL);
}
}
// Can not bind memory twice
{
VkAccelerationStructureObj as_twice(*m_device, as_create_info, false);
VkDeviceMemory as_memory_twice_1 = VK_NULL_HANDLE;
VkDeviceMemory as_memory_twice_2 = VK_NULL_HANDLE;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc, NULL, &as_memory_twice_1));
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc, NULL, &as_memory_twice_2));
VkBindAccelerationStructureMemoryInfoNV as_bind_info_twice_1 = as_bind_info;
VkBindAccelerationStructureMemoryInfoNV as_bind_info_twice_2 = as_bind_info;
as_bind_info_twice_1.accelerationStructure = as_twice.handle();
as_bind_info_twice_2.accelerationStructure = as_twice.handle();
as_bind_info_twice_1.memory = as_memory_twice_1;
as_bind_info_twice_2.memory = as_memory_twice_2;
ASSERT_VK_SUCCESS(vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_twice_1));
m_errorMonitor->VerifyNotFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit,
"VUID-VkBindAccelerationStructureMemoryInfoKHR-accelerationStructure-02450");
(void)vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_twice_2);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), as_memory_twice_1, NULL);
vk::FreeMemory(device(), as_memory_twice_2, NULL);
}
}
TEST_F(VkLayerTest, ValidateWriteDescriptorSetAccelerationStructureNV) {
TEST_DESCRIPTION("Validate acceleration structure descriptor writing.");
if (!InitFrameworkForRayTracingTest(this, false, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
OneOffDescriptorSet ds(m_device,
{
{0, VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV, 1, VK_SHADER_STAGE_RAYGEN_BIT_NV, nullptr},
});
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV;
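// A handle value that was never returned by the driver; writing it should fail handle validation.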
VkAccelerationStructureNV badHandle = (VkAccelerationStructureNV)12345678;
VkWriteDescriptorSetAccelerationStructureKHR acc = {};
acc.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV;
acc.accelerationStructureCount = 1;
acc.pAccelerationStructures = &badHandle;
descriptor_write.pNext = &acc;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit,
"VUID-VkWriteDescriptorSetAccelerationStructureKHR-pAccelerationStructures-parameter");
vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
VkAccelerationStructureCreateInfoNV top_level_as_create_info = {};
top_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
top_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
top_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
top_level_as_create_info.info.instanceCount = 1;
top_level_as_create_info.info.geometryCount = 0;
VkAccelerationStructureObj top_level_as(*m_device, top_level_as_create_info);
acc.pAccelerationStructures = &top_level_as.handle();
m_errorMonitor->ExpectSuccess();
vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkLayerTest, ValidateCmdBuildAccelerationStructureNV) {
TEST_DESCRIPTION("Validate acceleration structure building.");
if (!InitFrameworkForRayTracingTest(this, false, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV =
reinterpret_cast<PFN_vkCmdBuildAccelerationStructureNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCmdBuildAccelerationStructureNV"));
assert(vkCmdBuildAccelerationStructureNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV bot_level_as_create_info = {};
bot_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
bot_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
bot_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
bot_level_as_create_info.info.instanceCount = 0;
bot_level_as_create_info.info.geometryCount = 1;
bot_level_as_create_info.info.pGeometries = &geometry;
VkAccelerationStructureObj bot_level_as(*m_device, bot_level_as_create_info);
m_errorMonitor->VerifyNotFound();
VkBufferObj bot_level_as_scratch;
bot_level_as.create_scratch_buffer(*m_device, &bot_level_as_scratch);
// Command buffer must be in recording state
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-commandBuffer-recording");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_FALSE,
bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->begin();
// Incompatible type
VkAccelerationStructureInfoNV as_build_info_with_incompatible_type = bot_level_as_create_info.info;
as_build_info_with_incompatible_type.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
as_build_info_with_incompatible_type.instanceCount = 1;
as_build_info_with_incompatible_type.geometryCount = 0;
// This is duplicated since it triggers one error for different types and one error for lower instance count - the
// build info is incompatible but still needs to be valid to get past the stateless checks.
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &as_build_info_with_incompatible_type, VK_NULL_HANDLE, 0, VK_FALSE,
bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
// Incompatible flags
VkAccelerationStructureInfoNV as_build_info_with_incompatible_flags = bot_level_as_create_info.info;
as_build_info_with_incompatible_flags.flags = VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &as_build_info_with_incompatible_flags, VK_NULL_HANDLE, 0,
VK_FALSE, bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
// Incompatible build size
VkGeometryNV geometry_with_more_vertices = geometry;
geometry_with_more_vertices.geometry.triangles.vertexCount += 1;
VkAccelerationStructureInfoNV as_build_info_with_incompatible_geometry = bot_level_as_create_info.info;
as_build_info_with_incompatible_geometry.pGeometries = &geometry_with_more_vertices;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &as_build_info_with_incompatible_geometry, VK_NULL_HANDLE, 0,
VK_FALSE, bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
// Scratch buffer too small
VkBufferCreateInfo too_small_scratch_buffer_info = {};
too_small_scratch_buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
too_small_scratch_buffer_info.usage = VK_BUFFER_USAGE_RAY_TRACING_BIT_NV;
too_small_scratch_buffer_info.size = 1;
VkBufferObj too_small_scratch_buffer(*m_device, too_small_scratch_buffer_info);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-update-02491");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_FALSE,
bot_level_as.handle(), VK_NULL_HANDLE, too_small_scratch_buffer.handle(), 0);
m_errorMonitor->VerifyFound();
// Scratch buffer with offset too small
VkDeviceSize scratch_buffer_offset = 5;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-update-02491");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_FALSE,
bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), scratch_buffer_offset);
m_errorMonitor->VerifyFound();
// Src must have been built before
VkAccelerationStructureObj bot_level_as_updated(*m_device, bot_level_as_create_info);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-update-02489");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_TRUE,
bot_level_as_updated.handle(), bot_level_as.handle(), bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
// Src must have been built before with the VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV flag
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_FALSE,
bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-update-02489");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_TRUE,
bot_level_as_updated.handle(), bot_level_as.handle(), bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
// Invalid scratch buffer
VkBufferObj bot_level_as_invalid_scratch;
VkBufferCreateInfo create_info = {};
create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
// Invalid usage: missing VK_BUFFER_USAGE_RAY_TRACING_BIT_NV
create_info.usage = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR;
bot_level_as.create_scratch_buffer(*m_device, &bot_level_as_invalid_scratch, &create_info);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureInfoNV-scratch-02781");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_FALSE,
bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_invalid_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
// Invalid instance data: buffer was not created with VK_BUFFER_USAGE_RAY_TRACING_BIT_NV usage.
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureInfoNV-instanceData-02782");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info,
bot_level_as_invalid_scratch.handle(), 0, VK_FALSE, bot_level_as.handle(), VK_NULL_HANDLE,
bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
// must be called outside renderpass
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-renderpass");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_FALSE,
bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
m_commandBuffer->EndRenderPass();
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ValidateGetAccelerationStructureHandleNV) {
TEST_DESCRIPTION("Validate acceleration structure handle querying.");
if (!InitFrameworkForRayTracingTest(this, false, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV =
reinterpret_cast<PFN_vkGetAccelerationStructureHandleNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkGetAccelerationStructureHandleNV"));
assert(vkGetAccelerationStructureHandleNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV bot_level_as_create_info = {};
bot_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
bot_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
bot_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
bot_level_as_create_info.info.instanceCount = 0;
bot_level_as_create_info.info.geometryCount = 1;
bot_level_as_create_info.info.pGeometries = &geometry;
// Not enough space for the handle
{
VkAccelerationStructureObj bot_level_as(*m_device, bot_level_as_create_info);
m_errorMonitor->VerifyNotFound();
uint64_t handle = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetAccelerationStructureHandleNV-dataSize-02240");
vkGetAccelerationStructureHandleNV(m_device->handle(), bot_level_as.handle(), sizeof(uint8_t), &handle);
m_errorMonitor->VerifyFound();
}
// No memory bound to acceleration structure
{
VkAccelerationStructureObj bot_level_as(*m_device, bot_level_as_create_info, /*init_memory=*/false);
m_errorMonitor->VerifyNotFound();
uint64_t handle = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-vkGetAccelerationStructureHandleNV-accelerationStructure-XXXX");
vkGetAccelerationStructureHandleNV(m_device->handle(), bot_level_as.handle(), sizeof(uint64_t), &handle);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ValidateCmdCopyAccelerationStructureNV) {
TEST_DESCRIPTION("Validate acceleration structure copying.");
if (!InitFrameworkForRayTracingTest(this, false, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV = reinterpret_cast<PFN_vkCmdCopyAccelerationStructureNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCmdCopyAccelerationStructureNV"));
assert(vkCmdCopyAccelerationStructureNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV as_create_info = {};
as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
as_create_info.info.instanceCount = 0;
as_create_info.info.geometryCount = 1;
as_create_info.info.pGeometries = &geometry;
VkAccelerationStructureObj src_as(*m_device, as_create_info);
VkAccelerationStructureObj dst_as(*m_device, as_create_info);
VkAccelerationStructureObj dst_as_without_mem(*m_device, as_create_info, false);
m_errorMonitor->VerifyNotFound();
// Command buffer must be in recording state
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyAccelerationStructureNV-commandBuffer-recording");
vkCmdCopyAccelerationStructureNV(m_commandBuffer->handle(), dst_as.handle(), src_as.handle(),
VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV);
m_errorMonitor->VerifyFound();
m_commandBuffer->begin();
// Src must have been created with allow compaction flag
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyAccelerationStructureNV-src-03411");
vkCmdCopyAccelerationStructureNV(m_commandBuffer->handle(), dst_as.handle(), src_as.handle(),
VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV);
m_errorMonitor->VerifyFound();
// Dst must have been bound with memory
m_errorMonitor->SetDesiredFailureMsg(kErrorBit,
"UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
vkCmdCopyAccelerationStructureNV(m_commandBuffer->handle(), dst_as_without_mem.handle(), src_as.handle(),
VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV);
m_errorMonitor->VerifyFound();
// mode must be VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR or VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyAccelerationStructureNV-mode-03410");
vkCmdCopyAccelerationStructureNV(m_commandBuffer->handle(), dst_as.handle(), src_as.handle(),
VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR);
m_errorMonitor->VerifyFound();
// mode must be a valid VkCopyAccelerationStructureModeKHR value
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyAccelerationStructureNV-mode-parameter");
vkCmdCopyAccelerationStructureNV(m_commandBuffer->handle(), dst_as.handle(), src_as.handle(),
VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR);
m_errorMonitor->VerifyFound();
// This command must only be called outside of a render pass instance
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyAccelerationStructureNV-renderpass");
vkCmdCopyAccelerationStructureNV(m_commandBuffer->handle(), dst_as.handle(), src_as.handle(),
VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV);
m_commandBuffer->EndRenderPass();
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, QueryPerformanceCreation) {
TEST_DESCRIPTION("Create performance query without support");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceFeatures2KHR features2 = {};
auto performance_features = lvl_init_struct<VkPhysicalDevicePerformanceQueryFeaturesKHR>();
features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&performance_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!performance_features.performanceCounterQueryPools) {
printf("%s Performance query pools are not supported.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &performance_features));
PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR =
(PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)vk::GetInstanceProcAddr(
instance(), "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
ASSERT_TRUE(vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR != nullptr);
auto queueFamilyProperties = m_device->phy().queue_properties();
uint32_t queueFamilyIndex = queueFamilyProperties.size();
std::vector<VkPerformanceCounterKHR> counters;
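// Pick the first queue family that exposes any performance counters.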
for (uint32_t idx = 0; idx < queueFamilyProperties.size(); idx++) {
uint32_t nCounters;
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, nullptr, nullptr);
if (nCounters == 0) continue;
counters.resize(nCounters);
for (auto &c : counters) {
c.sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
c.pNext = nullptr;
}
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, &counters[0], nullptr);
queueFamilyIndex = idx;
break;
}
if (counters.empty()) {
printf("%s No queue reported any performance counter.\n", kSkipPrefix);
return;
}
VkQueryPoolPerformanceCreateInfoKHR perf_query_pool_ci{};
perf_query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
perf_query_pool_ci.queueFamilyIndex = queueFamilyIndex;
perf_query_pool_ci.counterIndexCount = counters.size();
std::vector<uint32_t> counterIndices;
for (uint32_t c = 0; c < counters.size(); c++) counterIndices.push_back(c);
perf_query_pool_ci.pCounterIndices = &counterIndices[0];
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
query_pool_ci.queryCount = 1;
// Missing pNext
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkQueryPoolCreateInfo-queryType-03222");
VkQueryPool query_pool;
vk::CreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
m_errorMonitor->VerifyFound();
query_pool_ci.pNext = &perf_query_pool_ci;
// Invalid counter indices
counterIndices.push_back(counters.size());
perf_query_pool_ci.counterIndexCount++;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkQueryPoolPerformanceCreateInfoKHR-pCounterIndices-03321");
vk::CreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
m_errorMonitor->VerifyFound();
perf_query_pool_ci.counterIndexCount--;
counterIndices.pop_back();
// Success
m_errorMonitor->ExpectSuccess(kErrorBit);
vk::CreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
m_errorMonitor->VerifyNotFound();
m_commandBuffer->begin();
// Missing acquire lock
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryPool-03223");
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
m_errorMonitor->VerifyFound();
}
m_commandBuffer->end();
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
}
TEST_F(VkLayerTest, QueryPerformanceCounterCommandbufferScope) {
TEST_DESCRIPTION("Insert a performance query begin/end with respect to the command buffer counter scope");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceFeatures2KHR features2 = {};
auto performanceFeatures = lvl_init_struct<VkPhysicalDevicePerformanceQueryFeaturesKHR>();
features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&performanceFeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!performanceFeatures.performanceCounterQueryPools) {
printf("%s Performance query pools are not supported.\n", kSkipPrefix);
return;
}
VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &performanceFeatures, pool_flags));
PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR =
(PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)vk::GetInstanceProcAddr(
instance(), "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
ASSERT_TRUE(vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR != nullptr);
auto queueFamilyProperties = m_device->phy().queue_properties();
uint32_t queueFamilyIndex = queueFamilyProperties.size();
std::vector<VkPerformanceCounterKHR> counters;
std::vector<uint32_t> counterIndices;
// Find a single counter with VK_QUERY_SCOPE_COMMAND_BUFFER_KHR scope.
for (uint32_t idx = 0; idx < queueFamilyProperties.size(); idx++) {
uint32_t nCounters;
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, nullptr, nullptr);
if (nCounters == 0) continue;
counters.resize(nCounters);
for (auto &c : counters) {
c.sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
c.pNext = nullptr;
}
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, &counters[0], nullptr);
queueFamilyIndex = idx;
for (uint32_t counterIdx = 0; counterIdx < counters.size(); counterIdx++) {
if (counters[counterIdx].scope == VK_QUERY_SCOPE_COMMAND_BUFFER_KHR) {
counterIndices.push_back(counterIdx);
break;
}
}
if (counterIndices.empty()) {
counters.clear();
continue;
}
break;
}
if (counterIndices.empty()) {
printf("%s No queue reported any performance counter with command buffer scope.\n", kSkipPrefix);
return;
}
VkQueryPoolPerformanceCreateInfoKHR perf_query_pool_ci{};
perf_query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
perf_query_pool_ci.queueFamilyIndex = queueFamilyIndex;
perf_query_pool_ci.counterIndexCount = counterIndices.size();
perf_query_pool_ci.pCounterIndices = &counterIndices[0];
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.pNext = &perf_query_pool_ci;
query_pool_ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
query_pool_ci.queryCount = 1;
VkQueryPool query_pool;
vk::CreateQueryPool(device(), &query_pool_ci, nullptr, &query_pool);
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(device(), queueFamilyIndex, 0, &queue);
PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR =
(PFN_vkAcquireProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkAcquireProfilingLockKHR");
ASSERT_TRUE(vkAcquireProfilingLockKHR != nullptr);
PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR =
(PFN_vkReleaseProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkReleaseProfilingLockKHR");
ASSERT_TRUE(vkReleaseProfilingLockKHR != nullptr);
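// The profiling lock must be held while recording command buffers that contain performance queries.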
{
VkAcquireProfilingLockInfoKHR lock_info{};
lock_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR;
VkResult result = vkAcquireProfilingLockKHR(device(), &lock_info);
ASSERT_TRUE(result == VK_SUCCESS);
}
// Not the first command.
{
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buf_info.size = 4096;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
VkResult err = vk::CreateBuffer(device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = 4096;
VkDeviceMemory mem;
err = vk::AllocateMemory(device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
vk::BindBufferMemory(device(), buffer, mem, 0);
m_commandBuffer->begin();
vk::CmdFillBuffer(m_commandBuffer->handle(), buffer, 0, 4096, 0);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryPool-03224");
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(queue);
vk::DestroyBuffer(device(), buffer, nullptr);
vk::FreeMemory(device(), mem, NULL);
}
// First command: success.
{
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buf_info.size = 4096;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
VkResult err = vk::CreateBuffer(device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = 4096;
VkDeviceMemory mem;
err = vk::AllocateMemory(device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
vk::BindBufferMemory(device(), buffer, mem, 0);
m_commandBuffer->begin();
m_errorMonitor->ExpectSuccess(kErrorBit);
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
m_errorMonitor->VerifyNotFound();
vk::CmdFillBuffer(m_commandBuffer->handle(), buffer, 0, 4096, 0);
vk::CmdEndQuery(m_commandBuffer->handle(), query_pool, 0);
m_commandBuffer->end();
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(queue);
vk::DestroyBuffer(device(), buffer, nullptr);
vk::FreeMemory(device(), mem, NULL);
}
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
vkReleaseProfilingLockKHR(device());
}
TEST_F(VkLayerTest, QueryPerformanceCounterRenderPassScope) {
TEST_DESCRIPTION("Insert a performance query begin/end with respect to the render pass counter scope");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceFeatures2KHR features2 = {};
auto performanceFeatures = lvl_init_struct<VkPhysicalDevicePerformanceQueryFeaturesKHR>();
features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&performanceFeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!performanceFeatures.performanceCounterQueryPools) {
printf("%s Performance query pools are not supported.\n", kSkipPrefix);
return;
}
VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &performanceFeatures, pool_flags));
PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR =
(PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)vk::GetInstanceProcAddr(
instance(), "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
ASSERT_TRUE(vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR != nullptr);
auto queueFamilyProperties = m_device->phy().queue_properties();
uint32_t queueFamilyIndex = queueFamilyProperties.size();
std::vector<VkPerformanceCounterKHR> counters;
std::vector<uint32_t> counterIndices;
// Find a single counter with VK_QUERY_SCOPE_RENDER_PASS_KHR scope.
for (uint32_t idx = 0; idx < queueFamilyProperties.size(); idx++) {
uint32_t nCounters;
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, nullptr, nullptr);
if (nCounters == 0) continue;
counters.resize(nCounters);
for (auto &c : counters) {
c.sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
c.pNext = nullptr;
}
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, &counters[0], nullptr);
queueFamilyIndex = idx;
for (uint32_t counterIdx = 0; counterIdx < counters.size(); counterIdx++) {
if (counters[counterIdx].scope == VK_QUERY_SCOPE_RENDER_PASS_KHR) {
counterIndices.push_back(counterIdx);
break;
}
}
if (counterIndices.empty()) {
counters.clear();
continue;
}
break;
}
if (counterIndices.empty()) {
printf("%s No queue reported any performance counter with render pass scope.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPoolPerformanceCreateInfoKHR perf_query_pool_ci{};
perf_query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
perf_query_pool_ci.queueFamilyIndex = queueFamilyIndex;
perf_query_pool_ci.counterIndexCount = counterIndices.size();
perf_query_pool_ci.pCounterIndices = &counterIndices[0];
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.pNext = &perf_query_pool_ci;
query_pool_ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
query_pool_ci.queryCount = 1;
VkQueryPool query_pool;
vk::CreateQueryPool(device(), &query_pool_ci, nullptr, &query_pool);
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(device(), queueFamilyIndex, 0, &queue);
PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR =
(PFN_vkAcquireProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkAcquireProfilingLockKHR");
ASSERT_TRUE(vkAcquireProfilingLockKHR != nullptr);
PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR =
(PFN_vkReleaseProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkReleaseProfilingLockKHR");
ASSERT_TRUE(vkReleaseProfilingLockKHR != nullptr);
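// Acquire the profiling lock; it has to be held while command buffers containing performance queries are recorded and submitted.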
{
VkAcquireProfilingLockInfoKHR lock_info{};
lock_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR;
VkResult result = vkAcquireProfilingLockKHR(device(), &lock_info);
ASSERT_TRUE(result == VK_SUCCESS);
}
// Inside a render pass.
{
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
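// Beginning a query from a pool whose counters have render pass scope inside a render pass instance is expected to be flagged.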
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryPool-03225");
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(queue);
}
vkReleaseProfilingLockKHR(device());
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
}
TEST_F(VkLayerTest, QueryPerformanceReleaseProfileLockBeforeSubmit) {
TEST_DESCRIPTION("Verify that we get an error if we release the profiling lock during the recording of performance queries");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceFeatures2KHR features2 = {};
auto performanceFeatures = lvl_init_struct<VkPhysicalDevicePerformanceQueryFeaturesKHR>();
features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&performanceFeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!performanceFeatures.performanceCounterQueryPools) {
printf("%s Performance query pools are not supported.\n", kSkipPrefix);
return;
}
VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &performanceFeatures, pool_flags));
PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR =
(PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)vk::GetInstanceProcAddr(
instance(), "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
ASSERT_TRUE(vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR != nullptr);
auto queueFamilyProperties = m_device->phy().queue_properties();
uint32_t queueFamilyIndex = queueFamilyProperties.size();
std::vector<VkPerformanceCounterKHR> counters;
std::vector<uint32_t> counterIndices;
// Find a single counter with VK_QUERY_SCOPE_COMMAND_KHR scope.
for (uint32_t idx = 0; idx < queueFamilyProperties.size(); idx++) {
uint32_t nCounters;
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, nullptr, nullptr);
if (nCounters == 0) continue;
counters.resize(nCounters);
for (auto &c : counters) {
c.sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
c.pNext = nullptr;
}
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, &counters[0], nullptr);
queueFamilyIndex = idx;
for (uint32_t counterIdx = 0; counterIdx < counters.size(); counterIdx++) {
if (counters[counterIdx].scope == VK_QUERY_SCOPE_COMMAND_KHR) {
counterIndices.push_back(counterIdx);
break;
}
}
if (counterIndices.empty()) {
counters.clear();
continue;
}
break;
}
if (counterIndices.empty()) {
printf("%s No queue reported any performance counter with render pass scope.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPoolPerformanceCreateInfoKHR perf_query_pool_ci{};
perf_query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
perf_query_pool_ci.queueFamilyIndex = queueFamilyIndex;
perf_query_pool_ci.counterIndexCount = counterIndices.size();
perf_query_pool_ci.pCounterIndices = &counterIndices[0];
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.pNext = &perf_query_pool_ci;
query_pool_ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
query_pool_ci.queryCount = 1;
VkQueryPool query_pool;
vk::CreateQueryPool(device(), &query_pool_ci, nullptr, &query_pool);
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(device(), queueFamilyIndex, 0, &queue);
PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR =
(PFN_vkAcquireProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkAcquireProfilingLockKHR");
ASSERT_TRUE(vkAcquireProfilingLockKHR != nullptr);
PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR =
(PFN_vkReleaseProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkReleaseProfilingLockKHR");
ASSERT_TRUE(vkReleaseProfilingLockKHR != nullptr);
{
VkAcquireProfilingLockInfoKHR lock_info{};
lock_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR;
VkResult result = vkAcquireProfilingLockKHR(device(), &lock_info);
ASSERT_TRUE(result == VK_SUCCESS);
}
{
m_commandBuffer->reset();
m_commandBuffer->begin();
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
m_commandBuffer->end();
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(queue);
}
{
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buf_info.size = 4096;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
VkResult err = vk::CreateBuffer(device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = 4096;
VkDeviceMemory mem;
err = vk::AllocateMemory(device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
vk::BindBufferMemory(device(), buffer, mem, 0);
m_commandBuffer->reset();
m_commandBuffer->begin();
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
// Release while recording.
vkReleaseProfilingLockKHR(device());
{
VkAcquireProfilingLockInfoKHR lock_info{};
lock_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR;
VkResult result = vkAcquireProfilingLockKHR(device(), &lock_info);
ASSERT_TRUE(result == VK_SUCCESS);
}
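// Re-acquiring the lock does not repair the command buffer: it was recorded while the lock was released, so the submit below is expected to fail.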
vk::CmdEndQuery(m_commandBuffer->handle(), query_pool, 0);
m_commandBuffer->end();
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkQueueSubmit-pCommandBuffers-03220");
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(queue);
vk::DestroyBuffer(device(), buffer, nullptr);
vk::FreeMemory(device(), mem, NULL);
}
vkReleaseProfilingLockKHR(device());
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
}
TEST_F(VkLayerTest, QueryPerformanceIncompletePasses) {
TEST_DESCRIPTION("Verify that we get an error if we don't submit a command buffer for each passes before getting the results.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
return;
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceFeatures2KHR features2 = {};
auto hostQueryResetFeatures = lvl_init_struct<VkPhysicalDeviceHostQueryResetFeaturesEXT>();
auto performanceFeatures = lvl_init_struct<VkPhysicalDevicePerformanceQueryFeaturesKHR>(&hostQueryResetFeatures);
features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&performanceFeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!performanceFeatures.performanceCounterQueryPools) {
printf("%s Performance query pools are not supported.\n", kSkipPrefix);
return;
}
if (!hostQueryResetFeatures.hostQueryReset) {
printf("%s Missing host query reset.\n", kSkipPrefix);
return;
}
VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &performanceFeatures, pool_flags));
PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR =
(PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)vk::GetInstanceProcAddr(
instance(), "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
ASSERT_TRUE(vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR != nullptr);
PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR =
(PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)vk::GetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR");
ASSERT_TRUE(vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR != nullptr);
auto queueFamilyProperties = m_device->phy().queue_properties();
uint32_t queueFamilyIndex = queueFamilyProperties.size();
std::vector<VkPerformanceCounterKHR> counters;
std::vector<uint32_t> counterIndices;
uint32_t nPasses = 0;
// Find all counters with VK_QUERY_SCOPE_COMMAND_KHR scope.
for (uint32_t idx = 0; idx < queueFamilyProperties.size(); idx++) {
uint32_t nCounters;
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, nullptr, nullptr);
if (nCounters == 0) continue;
counters.resize(nCounters);
for (auto &c : counters) {
c.sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
c.pNext = nullptr;
}
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, &counters[0], nullptr);
queueFamilyIndex = idx;
for (uint32_t counterIdx = 0; counterIdx < counters.size(); counterIdx++) {
if (counters[counterIdx].scope == VK_QUERY_SCOPE_COMMAND_KHR) counterIndices.push_back(counterIdx);
}
VkQueryPoolPerformanceCreateInfoKHR create_info{};
create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
create_info.queueFamilyIndex = idx;
create_info.counterIndexCount = counterIndices.size();
create_info.pCounterIndices = &counterIndices[0];
vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(gpu(), &create_info, &nPasses);
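// This test needs a counter set that requires at least two passes, so that one pass can be left out below.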
if (nPasses < 2) {
counters.clear();
continue;
}
break;
}
if (counterIndices.empty()) {
printf("%s No queue reported a set of counters that needs more than one pass.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPoolPerformanceCreateInfoKHR perf_query_pool_ci{};
perf_query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
perf_query_pool_ci.queueFamilyIndex = queueFamilyIndex;
perf_query_pool_ci.counterIndexCount = counterIndices.size();
perf_query_pool_ci.pCounterIndices = &counterIndices[0];
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.pNext = &perf_query_pool_ci;
query_pool_ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
query_pool_ci.queryCount = 1;
VkQueryPool query_pool;
vk::CreateQueryPool(device(), &query_pool_ci, nullptr, &query_pool);
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(device(), queueFamilyIndex, 0, &queue);
PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR =
(PFN_vkAcquireProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkAcquireProfilingLockKHR");
ASSERT_TRUE(vkAcquireProfilingLockKHR != nullptr);
PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR =
(PFN_vkReleaseProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkReleaseProfilingLockKHR");
ASSERT_TRUE(vkReleaseProfilingLockKHR != nullptr);
PFN_vkResetQueryPoolEXT fpvkResetQueryPoolEXT =
(PFN_vkResetQueryPoolEXT)vk::GetInstanceProcAddr(instance(), "vkResetQueryPoolEXT");
{
VkAcquireProfilingLockInfoKHR lock_info{};
lock_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR;
VkResult result = vkAcquireProfilingLockKHR(device(), &lock_info);
ASSERT_TRUE(result == VK_SUCCESS);
}
{
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buf_info.size = 4096;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
VkResult err = vk::CreateBuffer(device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = 4096;
VkDeviceMemory mem;
err = vk::AllocateMemory(device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
vk::BindBufferMemory(device(), buffer, mem, 0);
VkCommandBufferBeginInfo command_buffer_begin_info{};
command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
fpvkResetQueryPoolEXT(m_device->device(), query_pool, 0, 1);
m_commandBuffer->begin(&command_buffer_begin_info);
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
vk::CmdFillBuffer(m_commandBuffer->handle(), buffer, 0, 4096, 0);
vk::CmdEndQuery(m_commandBuffer->handle(), query_pool, 0);
m_commandBuffer->end();
// Invalid pass index
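// counterPassIndex must be strictly less than the pass count returned above; nPasses itself is one past the valid range.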
{
VkPerformanceQuerySubmitInfoKHR perf_submit_info{};
perf_submit_info.sType = VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR;
perf_submit_info.counterPassIndex = nPasses;
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = &perf_submit_info;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPerformanceQuerySubmitInfoKHR-counterPassIndex-03221");
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
// Leave the last pass out.
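// Submit once for every pass except the final one, so the query is still incomplete when the results are read below.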
for (uint32_t passIdx = 0; passIdx < (nPasses - 1); passIdx++) {
VkPerformanceQuerySubmitInfoKHR perf_submit_info{};
perf_submit_info.sType = VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR;
perf_submit_info.counterPassIndex = passIdx;
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = &perf_submit_info;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
vk::QueueWaitIdle(queue);
std::vector<VkPerformanceCounterResultKHR> results;
results.resize(counterIndices.size());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-queryType-03231");
vk::GetQueryPoolResults(device(), query_pool, 0, 1, sizeof(VkPerformanceCounterResultKHR) * results.size(), &results[0],
sizeof(VkPerformanceCounterResultKHR), VK_QUERY_RESULT_WAIT_BIT);
m_errorMonitor->VerifyFound();
{
VkPerformanceQuerySubmitInfoKHR perf_submit_info{};
perf_submit_info.sType = VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR;
perf_submit_info.counterPassIndex = nPasses - 1;
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = &perf_submit_info;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
vk::QueueWaitIdle(queue);
// Invalid stride
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-queryType-03229");
vk::GetQueryPoolResults(device(), query_pool, 0, 1, sizeof(VkPerformanceCounterResultKHR) * results.size(), &results[0],
sizeof(VkPerformanceCounterResultKHR) + 4, VK_QUERY_RESULT_WAIT_BIT);
m_errorMonitor->VerifyFound();
// Invalid flags
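// Availability, partial and 64-bit result flags are all rejected for performance query pools.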
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-queryType-03230");
vk::GetQueryPoolResults(device(), query_pool, 0, 1, sizeof(VkPerformanceCounterResultKHR) * results.size(), &results[0],
sizeof(VkPerformanceCounterResultKHR), VK_QUERY_RESULT_WITH_AVAILABILITY_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-queryType-03230");
vk::GetQueryPoolResults(device(), query_pool, 0, 1, sizeof(VkPerformanceCounterResultKHR) * results.size(), &results[0],
sizeof(VkPerformanceCounterResultKHR), VK_QUERY_RESULT_PARTIAL_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-queryType-03230");
vk::GetQueryPoolResults(device(), query_pool, 0, 1, sizeof(VkPerformanceCounterResultKHR) * results.size(), &results[0],
sizeof(VkPerformanceCounterResultKHR), VK_QUERY_RESULT_64_BIT);
m_errorMonitor->VerifyFound();
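// With every pass submitted and valid stride/flags, reading the results is expected to succeed.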
m_errorMonitor->ExpectSuccess(kErrorBit);
vk::GetQueryPoolResults(device(), query_pool, 0, 1, sizeof(VkPerformanceCounterResultKHR) * results.size(), &results[0],
sizeof(VkPerformanceCounterResultKHR), VK_QUERY_RESULT_WAIT_BIT);
m_errorMonitor->VerifyNotFound();
vk::DestroyBuffer(device(), buffer, nullptr);
vk::FreeMemory(device(), mem, NULL);
}
vkReleaseProfilingLockKHR(device());
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
}
TEST_F(VkLayerTest, QueryPerformanceResetAndBegin) {
TEST_DESCRIPTION("Verify that we get an error if we reset & begin a performance query within the same primary command buffer.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
return;
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceFeatures2KHR features2 = {};
auto hostQueryResetFeatures = lvl_init_struct<VkPhysicalDeviceHostQueryResetFeaturesEXT>();
auto performanceFeatures = lvl_init_struct<VkPhysicalDevicePerformanceQueryFeaturesKHR>(&hostQueryResetFeatures);
features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&performanceFeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!performanceFeatures.performanceCounterQueryPools) {
printf("%s Performance query pools are not supported.\n", kSkipPrefix);
return;
}
if (!hostQueryResetFeatures.hostQueryReset) {
printf("%s Missing host query reset.\n", kSkipPrefix);
return;
}
VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &performanceFeatures, pool_flags));
PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR =
(PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)vk::GetInstanceProcAddr(
instance(), "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
ASSERT_TRUE(vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR != nullptr);
PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR =
(PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)vk::GetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR");
ASSERT_TRUE(vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR != nullptr);
auto queueFamilyProperties = m_device->phy().queue_properties();
uint32_t queueFamilyIndex = queueFamilyProperties.size();
std::vector<VkPerformanceCounterKHR> counters;
std::vector<uint32_t> counterIndices;
// Find a single counter with VK_QUERY_SCOPE_COMMAND_KHR scope.
for (uint32_t idx = 0; idx < queueFamilyProperties.size(); idx++) {
uint32_t nCounters;
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, nullptr, nullptr);
if (nCounters == 0) continue;
counters.resize(nCounters);
for (auto &c : counters) {
c.sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
c.pNext = nullptr;
}
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, &counters[0], nullptr);
queueFamilyIndex = idx;
for (uint32_t counterIdx = 0; counterIdx < counters.size(); counterIdx++) {
if (counters[counterIdx].scope == VK_QUERY_SCOPE_COMMAND_KHR) {
counterIndices.push_back(counterIdx);
break;
}
}
break;
}
if (counterIndices.empty()) {
printf("%s No queue reported a set of counters that needs more than one pass.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPoolPerformanceCreateInfoKHR perf_query_pool_ci{};
perf_query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
perf_query_pool_ci.queueFamilyIndex = queueFamilyIndex;
perf_query_pool_ci.counterIndexCount = counterIndices.size();
perf_query_pool_ci.pCounterIndices = &counterIndices[0];
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.pNext = &perf_query_pool_ci;
query_pool_ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
query_pool_ci.queryCount = 1;
VkQueryPool query_pool;
vk::CreateQueryPool(device(), &query_pool_ci, nullptr, &query_pool);
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(device(), queueFamilyIndex, 0, &queue);
PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR =
(PFN_vkAcquireProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkAcquireProfilingLockKHR");
ASSERT_TRUE(vkAcquireProfilingLockKHR != nullptr);
PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR =
(PFN_vkReleaseProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkReleaseProfilingLockKHR");
ASSERT_TRUE(vkReleaseProfilingLockKHR != nullptr);
{
VkAcquireProfilingLockInfoKHR lock_info{};
lock_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR;
VkResult result = vkAcquireProfilingLockKHR(device(), &lock_info);
ASSERT_TRUE(result == VK_SUCCESS);
}
{
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buf_info.size = 4096;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
VkResult err = vk::CreateBuffer(device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = 4096;
VkDeviceMemory mem;
err = vk::AllocateMemory(device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
vk::BindBufferMemory(device(), buffer, mem, 0);
VkCommandBufferBeginInfo command_buffer_begin_info{};
command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
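// Resetting and beginning a performance query in the same primary command buffer is expected to be flagged.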
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-None-02863");
m_commandBuffer->reset();
m_commandBuffer->begin(&command_buffer_begin_info);
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
vk::CmdEndQuery(m_commandBuffer->handle(), query_pool, 0);
m_commandBuffer->end();
{
VkPerformanceQuerySubmitInfoKHR perf_submit_info{};
perf_submit_info.sType = VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR;
perf_submit_info.counterPassIndex = 0;
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = &perf_submit_info;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
vk::QueueWaitIdle(queue);
m_errorMonitor->VerifyFound();
vk::DestroyBuffer(device(), buffer, nullptr);
vk::FreeMemory(device(), mem, NULL);
}
vkReleaseProfilingLockKHR(device());
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
}
TEST_F(VkLayerTest, QueueSubmitNoTimelineSemaphoreInfo) {
TEST_DESCRIPTION("Submit a queue with a timeline semaphore but not a VkTimelineSemaphoreSubmitInfoKHR.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
return;
}
if (!CheckTimelineSemaphoreSupportAndInitState(this)) {
printf("%s Timeline semaphore not supported, skipping test\n", kSkipPrefix);
return;
}
VkSemaphoreTypeCreateInfoKHR semaphore_type_create_info{};
semaphore_type_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR;
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &semaphore_type_create_info;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkPipelineStageFlags stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
VkSubmitInfo submit_info[2] = {};
submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[0].commandBufferCount = 0;
submit_info[0].pWaitDstStageMask = &stageFlags;
submit_info[0].signalSemaphoreCount = 1;
submit_info[0].pSignalSemaphores = &semaphore;
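// Signaling a timeline semaphore without chaining a VkTimelineSemaphoreSubmitInfoKHR is expected to fail.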
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pWaitSemaphores-03239");
vk::QueueSubmit(m_device->m_queue, 1, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
VkTimelineSemaphoreSubmitInfoKHR timeline_semaphore_submit_info{};
uint64_t signalValue = 1;
timeline_semaphore_submit_info.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR;
timeline_semaphore_submit_info.signalSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pSignalSemaphoreValues = &signalValue;
submit_info[0].pNext = &timeline_semaphore_submit_info;
submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[1].commandBufferCount = 0;
submit_info[1].pWaitDstStageMask = &stageFlags;
submit_info[1].waitSemaphoreCount = 1;
submit_info[1].pWaitSemaphores = &semaphore;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pWaitSemaphores-03239");
vk::QueueSubmit(m_device->m_queue, 2, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
}
TEST_F(VkLayerTest, QueueSubmitTimelineSemaphoreBadValue) {
TEST_DESCRIPTION("Submit a queue with a timeline semaphore using a wrong payload value.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
return;
}
if (!CheckTimelineSemaphoreSupportAndInitState(this)) {
printf("%s Timeline semaphore not supported, skipping test\n", kSkipPrefix);
return;
}
PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
(PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr);
auto timelineproperties = lvl_init_struct<VkPhysicalDeviceTimelineSemaphorePropertiesKHR>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&timelineproperties);
vkGetPhysicalDeviceProperties2KHR(gpu(), &prop2);
VkSemaphoreTypeCreateInfoKHR semaphore_type_create_info{};
semaphore_type_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR;
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &semaphore_type_create_info;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkTimelineSemaphoreSubmitInfoKHR timeline_semaphore_submit_info = {};
uint64_t signalValue = 1;
uint64_t waitValue = 3;
timeline_semaphore_submit_info.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR;
timeline_semaphore_submit_info.signalSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pSignalSemaphoreValues = &signalValue;
timeline_semaphore_submit_info.waitSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pWaitSemaphoreValues = &waitValue;
VkPipelineStageFlags stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
VkSubmitInfo submit_info[2] = {};
submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[0].pNext = &timeline_semaphore_submit_info;
submit_info[0].pWaitDstStageMask = &stageFlags;
submit_info[0].signalSemaphoreCount = 1;
submit_info[0].pSignalSemaphores = &semaphore;
submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[1].pNext = &timeline_semaphore_submit_info;
submit_info[1].pWaitDstStageMask = &stageFlags;
submit_info[1].waitSemaphoreCount = 1;
submit_info[1].pWaitSemaphores = &semaphore;
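// When timeline semaphores are used, the value counts in VkTimelineSemaphoreSubmitInfoKHR must match the semaphore counts in VkSubmitInfo.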
timeline_semaphore_submit_info.signalSemaphoreValueCount = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pNext-03241");
vk::QueueSubmit(m_device->m_queue, 1, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
timeline_semaphore_submit_info.signalSemaphoreValueCount = 1;
timeline_semaphore_submit_info.waitSemaphoreValueCount = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pNext-03240");
vk::QueueSubmit(m_device->m_queue, 2, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
timeline_semaphore_submit_info.waitSemaphoreValueCount = 1;
semaphore_type_create_info.initialValue = 5;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pSignalSemaphores-03242");
vk::QueueSubmit(m_device->m_queue, 1, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
// Check if we can test violations of maxTimelineSemaphoreValueDifference
if (timelineproperties.maxTimelineSemaphoreValueDifference < UINT64_MAX) {
semaphore_type_create_info.initialValue = 0;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
signalValue = timelineproperties.maxTimelineSemaphoreValueDifference + 1;
timeline_semaphore_submit_info.pSignalSemaphoreValues = &signalValue;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pSignalSemaphores-03244");
vk::QueueSubmit(m_device->m_queue, 1, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
if (signalValue < UINT64_MAX) {
waitValue = signalValue + 1;
signalValue = 1;
timeline_semaphore_submit_info.waitSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pWaitSemaphoreValues = &waitValue;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pWaitSemaphores-03243");
vk::QueueSubmit(m_device->m_queue, 2, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
}
}
TEST_F(VkLayerTest, QueueSubmitBinarySemaphoreNotSignaled) {
TEST_DESCRIPTION("Submit a queue with a waiting binary semaphore not previously signaled.");
bool timelineSemaphoresExtensionSupported = true;
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
timelineSemaphoresExtensionSupported = false;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (timelineSemaphoresExtensionSupported &&
DeviceExtensionSupported(gpu(), nullptr, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
} else {
timelineSemaphoresExtensionSupported = false;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphore semaphore[3];
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore[0]));
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore[1]));
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore[2]));
VkPipelineStageFlags stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
VkSubmitInfo submit_info[3] = {};
submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[0].pWaitDstStageMask = &stageFlags;
submit_info[0].waitSemaphoreCount = 1;
submit_info[0].pWaitSemaphores = &(semaphore[0]);
submit_info[0].signalSemaphoreCount = 1;
submit_info[0].pSignalSemaphores = &(semaphore[1]);
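// semaphore[0] has no pending signal operation yet, so waiting on it is expected to fail.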
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, timelineSemaphoresExtensionSupported
? "VUID-vkQueueSubmit-pWaitSemaphores-03238"
: "VUID-vkQueueSubmit-pWaitSemaphores-00069");
vk::QueueSubmit(m_device->m_queue, 1, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[1].pWaitDstStageMask = &stageFlags;
submit_info[1].waitSemaphoreCount = 1;
submit_info[1].pWaitSemaphores = &(semaphore[1]);
submit_info[1].signalSemaphoreCount = 1;
submit_info[1].pSignalSemaphores = &(semaphore[2]);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, timelineSemaphoresExtensionSupported
? "VUID-vkQueueSubmit-pWaitSemaphores-03238"
: "VUID-vkQueueSubmit-pWaitSemaphores-00069");
vk::QueueSubmit(m_device->m_queue, 2, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
submit_info[2].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[2].signalSemaphoreCount = 1;
submit_info[2].pSignalSemaphores = &(semaphore[0]);
ASSERT_VK_SUCCESS(vk::QueueSubmit(m_device->m_queue, 1, &(submit_info[2]), VK_NULL_HANDLE));
ASSERT_VK_SUCCESS(vk::QueueSubmit(m_device->m_queue, 2, submit_info, VK_NULL_HANDLE));
ASSERT_VK_SUCCESS(vk::QueueWaitIdle(m_device->m_queue));
vk::DestroySemaphore(m_device->device(), semaphore[0], nullptr);
vk::DestroySemaphore(m_device->device(), semaphore[1], nullptr);
vk::DestroySemaphore(m_device->device(), semaphore[2], nullptr);
}
TEST_F(VkLayerTest, QueueSubmitTimelineSemaphoreOutOfOrder) {
TEST_DESCRIPTION("Submit out-of-order timeline semaphores.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
return;
}
if (!CheckTimelineSemaphoreSupportAndInitState(this)) {
printf("%s Timeline semaphore not supported, skipping test\n", kSkipPrefix);
return;
}
// We need two queues for this
uint32_t queue_count;
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_count, NULL);
std::vector<VkQueueFamilyProperties> queue_props(queue_count);
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_count, queue_props.data());
uint32_t family_index[2] = {0};
uint32_t queue_index[2] = {0};
if (queue_count > 1) {
family_index[1]++;
} else {
// If there's only one family index, check if it supports more than 1 queue
if (queue_props[0].queueCount > 1) {
queue_index[1]++;
} else {
printf("%s Multiple queues are required to run this test. .\n", kSkipPrefix);
return;
}
}
float priorities[] = {1.0f, 1.0f};
VkDeviceQueueCreateInfo queue_info[2] = {};
queue_info[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info[0].queueFamilyIndex = family_index[0];
queue_info[0].queueCount = queue_count > 1 ? 1 : 2;
queue_info[0].pQueuePriorities = &(priorities[0]);
queue_info[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info[1].queueFamilyIndex = family_index[1];
queue_info[1].queueCount = queue_count > 1 ? 1 : 2;
queue_info[1].pQueuePriorities = &(priorities[0]);
VkDeviceCreateInfo dev_info{};
dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
dev_info.queueCreateInfoCount = queue_count > 1 ? 2 : 1;
dev_info.pQueueCreateInfos = &(queue_info[0]);
dev_info.enabledLayerCount = 0;
dev_info.enabledExtensionCount = m_device_extension_names.size();
dev_info.ppEnabledExtensionNames = m_device_extension_names.data();
auto timeline_semaphore_features = lvl_init_struct<VkPhysicalDeviceTimelineSemaphoreFeatures>();
timeline_semaphore_features.timelineSemaphore = true;
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&timeline_semaphore_features);
dev_info.pNext = &features2;
VkDevice dev;
ASSERT_VK_SUCCESS(vk::CreateDevice(gpu(), &dev_info, nullptr, &dev));
VkQueue queue[2];
vk::GetDeviceQueue(dev, family_index[0], queue_index[0], &(queue[0]));
vk::GetDeviceQueue(dev, family_index[1], queue_index[1], &(queue[1]));
VkSemaphoreTypeCreateInfoKHR semaphore_type_create_info{};
semaphore_type_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR;
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR;
semaphore_type_create_info.initialValue = 5;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &semaphore_type_create_info;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(dev, &semaphore_create_info, nullptr, &semaphore));
uint64_t semaphoreValues[] = {10, 100, 0, 10};
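// The first submit waits on value 10, which is only produced by the second submit on the other queue; timeline semaphores allow this out-of-order submission.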
VkTimelineSemaphoreSubmitInfoKHR timeline_semaphore_submit_info{};
timeline_semaphore_submit_info.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR;
timeline_semaphore_submit_info.waitSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pWaitSemaphoreValues = &(semaphoreValues[0]);
timeline_semaphore_submit_info.signalSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pSignalSemaphoreValues = &(semaphoreValues[1]);
VkPipelineStageFlags stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = &timeline_semaphore_submit_info;
submit_info.pWaitDstStageMask = &stageFlags;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &semaphore;
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
ASSERT_VK_SUCCESS(vk::QueueSubmit(queue[0], 1, &submit_info, VK_NULL_HANDLE));
timeline_semaphore_submit_info.pWaitSemaphoreValues = &(semaphoreValues[2]);
timeline_semaphore_submit_info.pSignalSemaphoreValues = &(semaphoreValues[3]);
ASSERT_VK_SUCCESS(vk::QueueSubmit(queue[1], 1, &submit_info, VK_NULL_HANDLE));
vk::DeviceWaitIdle(dev);
vk::DestroySemaphore(dev, semaphore, nullptr);
vk::DestroyDevice(dev, nullptr);
}
TEST_F(VkLayerTest, InvalidExternalSemaphore) {
TEST_DESCRIPTION("Import and export invalid external semaphores, no queue sumbits involved.");
#ifdef _WIN32
printf("%s Test doesn't currently support Win32 semaphore, skipping test\n", kSkipPrefix);
return;
#else
const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME;
// Check for external semaphore instance extensions
if (InstanceExtensionSupported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME);
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
// Check for external semaphore device extensions
if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) {
m_device_extension_names.push_back(extension_name);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Create a semaphore for importing
VkSemaphoreCreateInfo semaphore_create_info = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO};
semaphore_create_info.pNext = nullptr;
semaphore_create_info.flags = 0;
VkSemaphore import_semaphore;
VkResult err = vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &import_semaphore);
ASSERT_VK_SUCCESS(err);
int fd = 0;
VkImportSemaphoreFdInfoKHR import_semaphore_fd_info = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR};
import_semaphore_fd_info.pNext = nullptr;
import_semaphore_fd_info.semaphore = import_semaphore;
import_semaphore_fd_info.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR;
import_semaphore_fd_info.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT;
import_semaphore_fd_info.fd = fd;
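// D3D12 fence handles cannot be imported through the fd path, so this import is expected to fail.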
auto vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)vk::GetDeviceProcAddr(m_device->device(), "vkImportSemaphoreFdKHR");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImportSemaphoreFdInfoKHR-handleType-01143");
vkImportSemaphoreFdKHR(device(), &import_semaphore_fd_info);
m_errorMonitor->VerifyFound();
// Cleanup
vk::DestroySemaphore(device(), import_semaphore, nullptr);
#endif
}
TEST_F(VkLayerTest, InvalidWaitSemaphoresType) {
TEST_DESCRIPTION("Wait for a non Timeline Semaphore");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
return;
}
if (!CheckTimelineSemaphoreSupportAndInitState(this)) {
printf("%s Timeline semaphore not supported, skipping test\n", kSkipPrefix);
return;
}
VkSemaphoreTypeCreateInfoKHR semaphore_type_create_info{};
semaphore_type_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR;
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &semaphore_type_create_info;
VkSemaphore semaphore[2];
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &(semaphore[0])));
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_BINARY;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &(semaphore[1])));
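// semaphore[1] is a binary semaphore, so including it in the wait info is expected to fail.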
VkSemaphoreWaitInfo semaphore_wait_info{};
semaphore_wait_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO;
semaphore_wait_info.semaphoreCount = 2;
semaphore_wait_info.pSemaphores = &semaphore[0];
const uint64_t wait_values[] = {10, 40};
semaphore_wait_info.pValues = &wait_values[0];
auto vkWaitSemaphoresKHR = (PFN_vkWaitSemaphoresKHR)vk::GetDeviceProcAddr(m_device->device(), "vkWaitSemaphoresKHR");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSemaphoreWaitInfo-pSemaphores-03256");
vkWaitSemaphoresKHR(m_device->device(), &semaphore_wait_info, 10000);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore[0], nullptr);
vk::DestroySemaphore(m_device->device(), semaphore[1], nullptr);
}
TEST_F(VkLayerTest, InvalidSignalSemaphoreType) {
TEST_DESCRIPTION("Signal a non Timeline Semaphore");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
auto timelinefeatures = lvl_init_struct<VkPhysicalDeviceTimelineSemaphoreFeaturesKHR>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&timelinefeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!timelinefeatures.timelineSemaphore) {
printf("%s Timeline semaphores are not supported.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkSemaphoreSignalInfo semaphore_signal_info{};
semaphore_signal_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO;
semaphore_signal_info.semaphore = semaphore;
semaphore_signal_info.value = 10;
auto vkSignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR)vk::GetDeviceProcAddr(m_device->device(), "vkSignalSemaphoreKHR");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSemaphoreSignalInfo-semaphore-03257");
vkSignalSemaphoreKHR(m_device->device(), &semaphore_signal_info);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
}
TEST_F(VkLayerTest, InvalidSignalSemaphoreValue) {
TEST_DESCRIPTION("Signal a Timeline Semaphore with invalid values");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
return;
}
if (!CheckTimelineSemaphoreSupportAndInitState(this)) {
printf("%s Timeline semaphore not supported, skipping test\n", kSkipPrefix);
return;
}
PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
(PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr);
auto timelineproperties = lvl_init_struct<VkPhysicalDeviceTimelineSemaphorePropertiesKHR>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&timelineproperties);
vkGetPhysicalDeviceProperties2KHR(gpu(), &prop2);
VkSemaphoreTypeCreateInfoKHR semaphore_type_create_info{};
semaphore_type_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR;
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR;
semaphore_type_create_info.initialValue = 5;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &semaphore_type_create_info;
VkSemaphore semaphore[2];
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore[0]));
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore[1]));
VkSemaphoreSignalInfo semaphore_signal_info{};
semaphore_signal_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO;
semaphore_signal_info.semaphore = semaphore[0];
semaphore_signal_info.value = 3;
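// The semaphore's current payload is 5, so signaling a smaller value is expected to fail.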
auto vkSignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR)vk::GetDeviceProcAddr(m_device->device(), "vkSignalSemaphoreKHR");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSemaphoreSignalInfo-value-03258");
vkSignalSemaphoreKHR(m_device->device(), &semaphore_signal_info);
m_errorMonitor->VerifyFound();
semaphore_signal_info.value = 10;
ASSERT_VK_SUCCESS(vkSignalSemaphoreKHR(m_device->device(), &semaphore_signal_info));
VkTimelineSemaphoreSubmitInfoKHR timeline_semaphore_submit_info{};
uint64_t waitValue = 10;
uint64_t signalValue = 20;
timeline_semaphore_submit_info.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR;
timeline_semaphore_submit_info.waitSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pWaitSemaphoreValues = &waitValue;
timeline_semaphore_submit_info.signalSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pSignalSemaphoreValues = &signalValue;
VkPipelineStageFlags stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = &timeline_semaphore_submit_info;
submit_info.pWaitDstStageMask = &stageFlags;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &(semaphore[1]);
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &(semaphore[0]);
ASSERT_VK_SUCCESS(vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE));
semaphore_signal_info.value = 25;
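// A pending submit will signal semaphore[0] to 20, so a host signal of 25 is expected to fail.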
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSemaphoreSignalInfo-value-03259");
vkSignalSemaphoreKHR(m_device->device(), &semaphore_signal_info);
m_errorMonitor->VerifyFound();
semaphore_signal_info.value = 15;
ASSERT_VK_SUCCESS(vkSignalSemaphoreKHR(m_device->device(), &semaphore_signal_info));
semaphore_signal_info.semaphore = semaphore[1];
ASSERT_VK_SUCCESS(vkSignalSemaphoreKHR(m_device->device(), &semaphore_signal_info));
// Check if we can test violations of maxTimelineSemaphoreValueDifference
if (timelineproperties.maxTimelineSemaphoreValueDifference < UINT64_MAX) {
VkSemaphore sem;
semaphore_type_create_info.initialValue = 0;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &sem));
semaphore_signal_info.semaphore = sem;
semaphore_signal_info.value = timelineproperties.maxTimelineSemaphoreValueDifference + 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSemaphoreSignalInfo-value-03260");
vkSignalSemaphoreKHR(m_device->device(), &semaphore_signal_info);
m_errorMonitor->VerifyFound();
semaphore_signal_info.value--;
ASSERT_VK_SUCCESS(vkSignalSemaphoreKHR(m_device->device(), &semaphore_signal_info));
vk::DestroySemaphore(m_device->device(), sem, nullptr);
// Regression test for value difference validations ran against binary semaphores
{
VkSemaphore timeline_sem;
VkSemaphore binary_sem;
semaphore_type_create_info.initialValue = 0;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &timeline_sem));
VkSemaphoreCreateInfo binary_semaphore_create_info{};
binary_semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &binary_semaphore_create_info, nullptr, &binary_sem));
signalValue = 1;
uint64_t offendingValue = timelineproperties.maxTimelineSemaphoreValueDifference + 1;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &timeline_sem;
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &binary_sem;
timeline_semaphore_submit_info.waitSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pWaitSemaphoreValues = &signalValue;
            // These two assignments are not required by the spec, but older versions of the validation layers would segfault without them
timeline_semaphore_submit_info.signalSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pSignalSemaphoreValues = &offendingValue;
m_errorMonitor->ExpectSuccess();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
semaphore_signal_info.semaphore = timeline_sem;
semaphore_signal_info.value = 1;
vkSignalSemaphoreKHR(m_device->device(), &semaphore_signal_info);
m_errorMonitor->VerifyNotFound();
vk::DestroySemaphore(m_device->device(), binary_sem, nullptr);
vk::DestroySemaphore(m_device->device(), timeline_sem, nullptr);
}
}
ASSERT_VK_SUCCESS(vk::QueueWaitIdle(m_device->m_queue));
vk::DestroySemaphore(m_device->device(), semaphore[0], nullptr);
vk::DestroySemaphore(m_device->device(), semaphore[1], nullptr);
}
TEST_F(VkLayerTest, InvalidSemaphoreCounterType) {
TEST_DESCRIPTION("Get payload from a non Timeline Semaphore");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
auto timelinefeatures = lvl_init_struct<VkPhysicalDeviceTimelineSemaphoreFeaturesKHR>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&timelinefeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!timelinefeatures.timelineSemaphore) {
printf("%s Timeline semaphores are not supported.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
auto vkGetSemaphoreCounterValueKHR =
(PFN_vkGetSemaphoreCounterValueKHR)vk::GetDeviceProcAddr(m_device->device(), "vkGetSemaphoreCounterValueKHR");
uint64_t value = 0xdeadbeef;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetSemaphoreCounterValue-semaphore-03255");
vkGetSemaphoreCounterValueKHR(m_device->device(), semaphore, &value);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
}
TEST_F(VkLayerTest, ImageDrmFormatModifier) {
TEST_DESCRIPTION("General testing of VK_EXT_image_drm_format_modifier");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (IsPlatform(kMockICD)) {
printf("%s Test not supported by MockICD, skipping tests\n", kSkipPrefix);
return;
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME);
} else {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME);
return;
}
PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT =
(PFN_vkGetImageDrmFormatModifierPropertiesEXT)vk::GetInstanceProcAddr(instance(),
"vkGetImageDrmFormatModifierPropertiesEXT");
ASSERT_TRUE(vkGetImageDrmFormatModifierPropertiesEXT != nullptr);
ASSERT_NO_FATAL_FAILURE(InitState());
const uint64_t dummy_modifiers[2] = {0, 1};
VkImageCreateInfo image_info = {};
image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_info.pNext = nullptr;
image_info.imageType = VK_IMAGE_TYPE_2D;
image_info.arrayLayers = 1;
image_info.extent = {64, 64, 1};
image_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_info.mipLevels = 1;
image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_info.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
image_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
VkImageFormatProperties2 image_format_prop = {};
image_format_prop.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
VkPhysicalDeviceImageFormatInfo2 image_format_info = {};
image_format_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
image_format_info.format = image_info.format;
image_format_info.tiling = image_info.tiling;
image_format_info.type = image_info.imageType;
image_format_info.usage = image_info.usage;
VkPhysicalDeviceImageDrmFormatModifierInfoEXT drm_format_mod_info = {};
drm_format_mod_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT;
drm_format_mod_info.pNext = nullptr;
drm_format_mod_info.drmFormatModifier = dummy_modifiers[0];
drm_format_mod_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
drm_format_mod_info.queueFamilyIndexCount = 0;
image_format_info.pNext = (void *)&drm_format_mod_info;
vk::GetPhysicalDeviceImageFormatProperties2(m_device->phy().handle(), &image_format_info, &image_format_prop);
VkSubresourceLayout dummyPlaneLayout = {0, 0, 0, 0, 0};
VkImageDrmFormatModifierListCreateInfoEXT drm_format_mod_list = {};
drm_format_mod_list.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT;
drm_format_mod_list.pNext = nullptr;
drm_format_mod_list.drmFormatModifierCount = 2;
drm_format_mod_list.pDrmFormatModifiers = dummy_modifiers;
VkImageDrmFormatModifierExplicitCreateInfoEXT drm_format_mod_explicit = {};
drm_format_mod_explicit.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT;
drm_format_mod_explicit.pNext = nullptr;
drm_format_mod_explicit.drmFormatModifierPlaneCount = 1;
drm_format_mod_explicit.pPlaneLayouts = &dummyPlaneLayout;
VkImage image = VK_NULL_HANDLE;
// No pNext
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-tiling-02261");
vk::CreateImage(device(), &image_info, nullptr, &image);
m_errorMonitor->VerifyFound();
    // Positive check with only one struct in the pNext chain
image_info.pNext = (void *)&drm_format_mod_list;
m_errorMonitor->ExpectSuccess();
vk::CreateImage(device(), &image_info, nullptr, &image);
vk::DestroyImage(device(), image, nullptr);
m_errorMonitor->VerifyNotFound();
image_info.pNext = (void *)&drm_format_mod_explicit;
m_errorMonitor->ExpectSuccess();
vk::CreateImage(device(), &image_info, nullptr, &image);
vk::DestroyImage(device(), image, nullptr);
m_errorMonitor->VerifyNotFound();
// Having both in pNext
drm_format_mod_explicit.pNext = (void *)&drm_format_mod_list;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-tiling-02261");
vk::CreateImage(device(), &image_info, nullptr, &image);
m_errorMonitor->VerifyFound();
// Only 1 pNext but wrong tiling
image_info.pNext = (void *)&drm_format_mod_list;
image_info.tiling = VK_IMAGE_TILING_LINEAR;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-02262");
vk::CreateImage(device(), &image_info, nullptr, &image);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ValidateNVDeviceDiagnosticCheckpoints) {
TEST_DESCRIPTION("General testing of VK_NV_device_diagnostic_checkpoints");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME);
} else {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix,
VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
auto vkGetQueueCheckpointDataNV =
(PFN_vkGetQueueCheckpointDataNV)vk::GetDeviceProcAddr(m_device->device(), "vkGetQueueCheckpointDataNV");
auto vkCmdSetCheckpointNV = (PFN_vkCmdSetCheckpointNV)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetCheckpointNV");
ASSERT_TRUE(vkGetQueueCheckpointDataNV != nullptr);
ASSERT_TRUE(vkCmdSetCheckpointNV != nullptr);
uint32_t data = 100;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetCheckpointNV-commandBuffer-recording");
vkCmdSetCheckpointNV(m_commandBuffer->handle(), &data);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidGetDeviceQueue) {
TEST_DESCRIPTION("General testing of vkGetDeviceQueue and general Device creation cases");
SetTargetApiVersion(VK_API_VERSION_1_1);
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkDevice test_device;
VkQueue test_queue;
VkResult result;
    // Use the first physical device and queue family
    // Makes the test more portable, as every driver exposes at least one queue family with a queueCount of at least 1
uint32_t queue_family_count = 1;
uint32_t queue_family_index = 0;
VkQueueFamilyProperties queue_properties;
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_family_count, &queue_properties);
float queue_priority = 1.0;
VkDeviceQueueCreateInfo queue_create_info = {};
queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_create_info.flags = VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT;
queue_create_info.pNext = nullptr;
queue_create_info.queueFamilyIndex = queue_family_index;
queue_create_info.queueCount = 1;
queue_create_info.pQueuePriorities = &queue_priority;
VkPhysicalDeviceProtectedMemoryFeatures protect_features = {};
protect_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
protect_features.pNext = nullptr;
protect_features.protectedMemory = VK_FALSE; // Starting with it off
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = &protect_features;
device_create_info.flags = 0;
device_create_info.pQueueCreateInfos = &queue_create_info;
device_create_info.queueCreateInfoCount = 1;
device_create_info.pEnabledFeatures = nullptr;
device_create_info.enabledLayerCount = 0;
device_create_info.enabledExtensionCount = 0;
    // protectedMemory feature not enabled
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceQueueCreateInfo-flags-02861");
vk::CreateDevice(gpu(), &device_create_info, nullptr, &test_device);
m_errorMonitor->VerifyFound();
VkPhysicalDeviceFeatures2 features2;
features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
features2.pNext = &protect_features;
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (protect_features.protectedMemory == VK_TRUE) {
result = vk::CreateDevice(gpu(), &device_create_info, nullptr, &test_device);
if (result != VK_SUCCESS) {
printf("%s CreateDevice returned back %s, skipping rest of tests\n", kSkipPrefix, string_VkResult(result));
return;
}
        // TODO: Re-enable test when Vulkan-Loader issue #384 is resolved and upstreamed
        // Try using GetDeviceQueue with a queue that has the protected flag set
// m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetDeviceQueue-flags-01841");
// vk::GetDeviceQueue(test_device, queue_family_index, 0, &test_queue);
// m_errorMonitor->VerifyFound();
vk::DestroyDevice(test_device, nullptr);
}
// Create device without protected queue
protect_features.protectedMemory = VK_FALSE;
queue_create_info.flags = 0;
result = vk::CreateDevice(gpu(), &device_create_info, nullptr, &test_device);
if (result != VK_SUCCESS) {
printf("%s CreateDevice returned back %s, skipping rest of tests\n", kSkipPrefix, string_VkResult(result));
return;
}
    // TODO: Re-enable test when Vulkan-Loader issue #384 is resolved and upstreamed
    // Set queueIndex to one past queueCount
// m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetDeviceQueue-queueIndex-00385");
// vk::GetDeviceQueue(test_device, queue_family_index, queue_properties.queueCount, &test_queue);
// m_errorMonitor->VerifyFound();
// Use an unknown queue family index
// m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetDeviceQueue-queueFamilyIndex-00384");
// vk::GetDeviceQueue(test_device, queue_family_index + 1, 0, &test_queue);
// m_errorMonitor->VerifyFound();
    // Sanity check that we can still get the queue
m_errorMonitor->ExpectSuccess();
vk::GetDeviceQueue(test_device, queue_family_index, 0, &test_queue);
m_errorMonitor->VerifyNotFound();
vk::DestroyDevice(test_device, nullptr);
}
TEST_F(VkLayerTest, DisabledProtectedMemory) {
TEST_DESCRIPTION("Validate cases where protectedMemory feature is not enabled");
SetTargetApiVersion(VK_API_VERSION_1_1);
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s test requires Vulkan 1.1 extensions, not available. Skipping.\n", kSkipPrefix);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
auto protected_memory_features = lvl_init_struct<VkPhysicalDeviceProtectedMemoryFeatures>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&protected_memory_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
// Set false to trigger VUs
protected_memory_features.protectedMemory = VK_FALSE;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info = {};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.pNext = nullptr;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_PROTECTED_BIT;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkCommandPoolCreateInfo-flags-02860");
vk::CreateCommandPool(device(), &pool_create_info, nullptr, &command_pool);
m_errorMonitor->VerifyFound();
VkBuffer buffer;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.pNext = nullptr;
buffer_create_info.flags = VK_BUFFER_CREATE_PROTECTED_BIT;
buffer_create_info.size = 4096;
buffer_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buffer_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBufferCreateInfo-flags-01887");
vk::CreateBuffer(device(), &buffer_create_info, nullptr, &buffer);
m_errorMonitor->VerifyFound();
VkImage image;
VkImageCreateInfo image_create_info{};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = nullptr;
image_create_info.flags = VK_IMAGE_CREATE_PROTECTED_BIT;
image_create_info.extent = {64, 64, 1};
image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.arrayLayers = 1;
image_create_info.mipLevels = 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-flags-01890");
vk::CreateImage(device(), &image_create_info, nullptr, &image);
m_errorMonitor->VerifyFound();
    // Try to find any memory type with the protected bit set
VkDeviceMemory memory_protected = VK_NULL_HANDLE;
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = nullptr;
alloc_info.allocationSize = 4096;
VkPhysicalDeviceMemoryProperties phys_mem_props;
vk::GetPhysicalDeviceMemoryProperties(gpu(), &phys_mem_props);
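    // Use an out-of-range index as a sentinel; it is only overwritten if a protected memory type is found below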
alloc_info.memoryTypeIndex = phys_mem_props.memoryTypeCount + 1;
for (uint32_t i = 0; i < phys_mem_props.memoryTypeCount; i++) {
        // Check only whether the protected bit is present in this memory type
if ((phys_mem_props.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0) {
alloc_info.memoryTypeIndex = i;
break;
}
}
if (alloc_info.memoryTypeIndex < phys_mem_props.memoryTypeCount) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-01872");
vk::AllocateMemory(device(), &alloc_info, NULL, &memory_protected);
m_errorMonitor->VerifyFound();
}
VkProtectedSubmitInfo protected_submit_info = {};
protected_submit_info.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
protected_submit_info.pNext = nullptr;
protected_submit_info.protectedSubmit = VK_TRUE;
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = &protected_submit_info;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
m_commandBuffer->begin();
m_commandBuffer->end();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkProtectedSubmitInfo-protectedSubmit-01816");
m_errorMonitor->SetUnexpectedError("VUID-VkSubmitInfo-pNext-04148");
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidProtectedMemory) {
TEST_DESCRIPTION("Validate cases where protectedMemory feature is enabled and usages are invalid");
SetTargetApiVersion(VK_API_VERSION_1_1);
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
auto protected_memory_features = lvl_init_struct<VkPhysicalDeviceProtectedMemoryFeatures>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&protected_memory_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (protected_memory_features.protectedMemory == VK_FALSE) {
printf("%s protectedMemory feature not supported, skipped.\n", kSkipPrefix);
return;
    }
// Turns m_commandBuffer into a protected command buffer
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, VK_COMMAND_POOL_CREATE_PROTECTED_BIT));
bool sparse_support = (m_device->phy().features().sparseBinding == VK_TRUE);
if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s Tests requires Vulkan 1.1+, skipping test\n", kSkipPrefix);
return;
}
VkBuffer buffer_protected = VK_NULL_HANDLE;
VkBuffer buffer_unprotected = VK_NULL_HANDLE;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.pNext = nullptr;
buffer_create_info.flags = VK_BUFFER_CREATE_PROTECTED_BIT | VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
buffer_create_info.size = 1 << 20; // 1 MB
buffer_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buffer_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (sparse_support == true) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBufferCreateInfo-None-01888");
vk::CreateBuffer(device(), &buffer_create_info, nullptr, &buffer_protected);
m_errorMonitor->VerifyFound();
}
// Create actual protected and unprotected buffers
m_errorMonitor->ExpectSuccess();
buffer_create_info.flags = VK_BUFFER_CREATE_PROTECTED_BIT;
vk::CreateBuffer(device(), &buffer_create_info, nullptr, &buffer_protected);
buffer_create_info.flags = 0;
vk::CreateBuffer(device(), &buffer_create_info, nullptr, &buffer_unprotected);
m_errorMonitor->VerifyNotFound();
VkImage image_protected = VK_NULL_HANDLE;
VkImage image_unprotected = VK_NULL_HANDLE;
VkImageCreateInfo image_create_info{};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = nullptr;
image_create_info.flags = VK_IMAGE_CREATE_PROTECTED_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
image_create_info.extent = {8, 8, 1};
image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.arrayLayers = 1;
image_create_info.mipLevels = 1;
if (sparse_support == true) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-None-01891");
vk::CreateImage(device(), &image_create_info, nullptr, &image_protected);
m_errorMonitor->VerifyFound();
}
// Create actual protected and unprotected images
m_errorMonitor->ExpectSuccess();
image_create_info.flags = VK_IMAGE_CREATE_PROTECTED_BIT;
vk::CreateImage(device(), &image_create_info, nullptr, &image_protected);
image_create_info.flags = 0;
vk::CreateImage(device(), &image_create_info, nullptr, &image_unprotected);
m_errorMonitor->VerifyNotFound();
    // Create protected and unprotected memory
VkDeviceMemory memory_protected = VK_NULL_HANDLE;
VkDeviceMemory memory_unprotected = VK_NULL_HANDLE;
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = nullptr;
alloc_info.allocationSize = 0;
    // Set allocationSize from the buffer requirements (they are larger than the image's), but also query the image to avoid a best-practices warning
VkMemoryRequirements mem_reqs_protected;
vk::GetImageMemoryRequirements(device(), image_protected, &mem_reqs_protected);
vk::GetBufferMemoryRequirements(device(), buffer_protected, &mem_reqs_protected);
VkMemoryRequirements mem_reqs_unprotected;
vk::GetImageMemoryRequirements(device(), image_unprotected, &mem_reqs_unprotected);
vk::GetBufferMemoryRequirements(device(), buffer_unprotected, &mem_reqs_unprotected);
// Get memory index for a protected and unprotected memory
VkPhysicalDeviceMemoryProperties phys_mem_props;
vk::GetPhysicalDeviceMemoryProperties(gpu(), &phys_mem_props);
uint32_t memory_type_protected = phys_mem_props.memoryTypeCount + 1;
uint32_t memory_type_unprotected = phys_mem_props.memoryTypeCount + 1;
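    // Out-of-range sentinels; each is only overwritten if a suitable memory type is found, and both are checked below before allocating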
for (uint32_t i = 0; i < phys_mem_props.memoryTypeCount; i++) {
if ((mem_reqs_unprotected.memoryTypeBits & (1 << i)) &&
((phys_mem_props.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) ==
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {
memory_type_unprotected = i;
}
        // Check only whether the protected bit is present in this memory type
if ((mem_reqs_protected.memoryTypeBits & (1 << i)) &&
((phys_mem_props.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)) {
memory_type_protected = i;
}
}
if ((memory_type_protected >= phys_mem_props.memoryTypeCount) || (memory_type_unprotected >= phys_mem_props.memoryTypeCount)) {
printf("%s No valid memory type index could be found; skipped.\n", kSkipPrefix);
vk::DestroyImage(device(), image_protected, nullptr);
vk::DestroyImage(device(), image_unprotected, nullptr);
vk::DestroyBuffer(device(), buffer_protected, nullptr);
vk::DestroyBuffer(device(), buffer_unprotected, nullptr);
return;
}
alloc_info.memoryTypeIndex = memory_type_protected;
alloc_info.allocationSize = mem_reqs_protected.size;
vk::AllocateMemory(device(), &alloc_info, NULL, &memory_protected);
alloc_info.allocationSize = mem_reqs_unprotected.size;
alloc_info.memoryTypeIndex = memory_type_unprotected;
vk::AllocateMemory(device(), &alloc_info, NULL, &memory_unprotected);
// Bind protected buffer with unprotected memory
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkBindBufferMemory-None-01898");
m_errorMonitor->SetUnexpectedError("VUID-vkBindBufferMemory-memory-01035");
vk::BindBufferMemory(device(), buffer_protected, memory_unprotected, 0);
m_errorMonitor->VerifyFound();
// Bind unprotected buffer with protected memory
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkBindBufferMemory-None-01899");
m_errorMonitor->SetUnexpectedError("VUID-vkBindBufferMemory-memory-01035");
vk::BindBufferMemory(device(), buffer_unprotected, memory_protected, 0);
m_errorMonitor->VerifyFound();
// Bind protected image with unprotected memory
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkBindImageMemory-None-01901");
m_errorMonitor->SetUnexpectedError("VUID-vkBindImageMemory-memory-01047");
vk::BindImageMemory(device(), image_protected, memory_unprotected, 0);
m_errorMonitor->VerifyFound();
// Bind unprotected image with protected memory
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkBindImageMemory-None-01902");
m_errorMonitor->SetUnexpectedError("VUID-vkBindImageMemory-memory-01047");
vk::BindImageMemory(device(), image_unprotected, memory_protected, 0);
m_errorMonitor->VerifyFound();
vk::DestroyImage(device(), image_protected, nullptr);
vk::DestroyImage(device(), image_unprotected, nullptr);
vk::DestroyBuffer(device(), buffer_protected, nullptr);
vk::DestroyBuffer(device(), buffer_unprotected, nullptr);
vk::FreeMemory(device(), memory_protected, nullptr);
vk::FreeMemory(device(), memory_unprotected, nullptr);
}
TEST_F(VkLayerTest, ValidateCmdTraceRaysKHR) {
TEST_DESCRIPTION("Validate vkCmdTraceRaysKHR.");
if (!InitFrameworkForRayTracingTest(this, true, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
VkBuffer buffer;
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buf_info.size = 4096;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkResult err = vk::CreateBuffer(device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = 4096;
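    // Note: the allocation size is hardcoded (it matches buf_info.size); mem_reqs is queried above but not consulted here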
VkDeviceMemory mem;
err = vk::AllocateMemory(device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
vk::BindBufferMemory(device(), buffer, mem, 0);
PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
(PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr);
auto ray_tracing_properties = lvl_init_struct<VkPhysicalDeviceRayTracingPropertiesKHR>();
auto properties2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&ray_tracing_properties);
vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2);
PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR)vk::GetInstanceProcAddr(instance(), "vkCmdTraceRaysKHR");
ASSERT_TRUE(vkCmdTraceRaysKHR != nullptr);
VkStridedBufferRegionKHR stridebufregion = {};
stridebufregion.buffer = buffer;
stridebufregion.offset = 0;
stridebufregion.stride = ray_tracing_properties.shaderGroupHandleSize;
stridebufregion.size = buf_info.size;
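    // Baseline valid region: offset 0, stride equal to shaderGroupHandleSize, covering the whole buffer; each case below perturbs one field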
    // Invalid offset
{
VkStridedBufferRegionKHR invalid_offset = stridebufregion;
invalid_offset.offset = ray_tracing_properties.shaderGroupBaseAlignment + 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysKHR-offset-04038");
vkCmdTraceRaysKHR(m_commandBuffer->handle(), &stridebufregion, &stridebufregion, &stridebufregion, &invalid_offset, 100,
100, 1);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysKHR-offset-04032");
vkCmdTraceRaysKHR(m_commandBuffer->handle(), &stridebufregion, &stridebufregion, &invalid_offset, &stridebufregion, 100,
100, 1);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysKHR-offset-04026");
vkCmdTraceRaysKHR(m_commandBuffer->handle(), &stridebufregion, &invalid_offset, &stridebufregion, &stridebufregion, 100,
100, 1);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysKHR-pRayGenShaderBindingTable-04021");
vkCmdTraceRaysKHR(m_commandBuffer->handle(), &invalid_offset, &stridebufregion, &stridebufregion, &stridebufregion, 100,
100, 1);
m_errorMonitor->VerifyFound();
}
// Invalid stride multiplier
{
VkStridedBufferRegionKHR invalid_stride = stridebufregion;
invalid_stride.stride = 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysKHR-stride-04040");
vkCmdTraceRaysKHR(m_commandBuffer->handle(), &stridebufregion, &stridebufregion, &stridebufregion, &invalid_stride, 100,
100, 1);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysKHR-stride-04034");
vkCmdTraceRaysKHR(m_commandBuffer->handle(), &stridebufregion, &stridebufregion, &invalid_stride, &stridebufregion, 100,
100, 1);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysKHR-stride-04028");
vkCmdTraceRaysKHR(m_commandBuffer->handle(), &stridebufregion, &invalid_stride, &stridebufregion, &stridebufregion, 100,
100, 1);
m_errorMonitor->VerifyFound();
}
// Invalid stride, greater than maxShaderGroupStride
{
VkStridedBufferRegionKHR invalid_stride = stridebufregion;
uint32_t align = ray_tracing_properties.shaderGroupHandleSize;
invalid_stride.stride =
ray_tracing_properties.maxShaderGroupStride + (align - (ray_tracing_properties.maxShaderGroupStride % align));
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysKHR-stride-04041");
vkCmdTraceRaysKHR(m_commandBuffer->handle(), &stridebufregion, &stridebufregion, &stridebufregion, &invalid_stride, 100,
100, 1);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysKHR-stride-04035");
vkCmdTraceRaysKHR(m_commandBuffer->handle(), &stridebufregion, &stridebufregion, &invalid_stride, &stridebufregion, 100,
100, 1);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysKHR-stride-04029");
vkCmdTraceRaysKHR(m_commandBuffer->handle(), &stridebufregion, &invalid_stride, &stridebufregion, &stridebufregion, 100,
100, 1);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ValidateCmdTraceRaysIndirectKHR) {
TEST_DESCRIPTION("Validate vkCmdTraceRaysIndirectKHR.");
if (!InitFrameworkForRayTracingTest(this, true, m_instance_extension_names, m_device_extension_names, m_errorMonitor, false,
false, true)) {
return;
}
auto ray_tracing_features = lvl_init_struct<VkPhysicalDeviceRayTracingFeaturesKHR>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&ray_tracing_features);
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (ray_tracing_features.rayTracingIndirectTraceRays == VK_FALSE) {
printf("%s rayTracingIndirectTraceRays not supported, skipping tests\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &ray_tracing_features));
VkBuffer buffer;
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buf_info.size = 4096;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkResult err = vk::CreateBuffer(device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = 4096;
VkDeviceMemory mem;
err = vk::AllocateMemory(device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
vk::BindBufferMemory(device(), buffer, mem, 0);
PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
(PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr);
auto ray_tracing_properties = lvl_init_struct<VkPhysicalDeviceRayTracingPropertiesKHR>();
auto properties2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&ray_tracing_properties);
vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2);
PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR =
(PFN_vkCmdTraceRaysIndirectKHR)vk::GetInstanceProcAddr(instance(), "vkCmdTraceRaysIndirectKHR");
ASSERT_TRUE(vkCmdTraceRaysIndirectKHR != nullptr);
VkStridedBufferRegionKHR stridebufregion = {};
stridebufregion.buffer = buffer;
stridebufregion.offset = 0;
stridebufregion.stride = ray_tracing_properties.shaderGroupHandleSize;
stridebufregion.size = buf_info.size;
    // Invalid offset
{
VkStridedBufferRegionKHR invalid_offset = stridebufregion;
invalid_offset.offset = ray_tracing_properties.shaderGroupBaseAlignment + 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysIndirectKHR-offset-04038");
vkCmdTraceRaysIndirectKHR(m_commandBuffer->handle(), &stridebufregion, &stridebufregion, &stridebufregion, &invalid_offset,
buffer, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysIndirectKHR-offset-04032");
vkCmdTraceRaysIndirectKHR(m_commandBuffer->handle(), &stridebufregion, &stridebufregion, &invalid_offset, &stridebufregion,
buffer, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysIndirectKHR-offset-04026");
vkCmdTraceRaysIndirectKHR(m_commandBuffer->handle(), &stridebufregion, &invalid_offset, &stridebufregion, &stridebufregion,
buffer, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysIndirectKHR-pRayGenShaderBindingTable-04021");
vkCmdTraceRaysIndirectKHR(m_commandBuffer->handle(), &invalid_offset, &stridebufregion, &stridebufregion, &stridebufregion,
buffer, 0);
m_errorMonitor->VerifyFound();
}
// Invalid stride multiplier
{
VkStridedBufferRegionKHR invalid_stride = stridebufregion;
invalid_stride.stride = 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysIndirectKHR-stride-04040");
vkCmdTraceRaysIndirectKHR(m_commandBuffer->handle(), &stridebufregion, &stridebufregion, &stridebufregion, &invalid_stride,
buffer, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysIndirectKHR-stride-04034");
vkCmdTraceRaysIndirectKHR(m_commandBuffer->handle(), &stridebufregion, &stridebufregion, &invalid_stride, &stridebufregion,
buffer, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysIndirectKHR-stride-04028");
vkCmdTraceRaysIndirectKHR(m_commandBuffer->handle(), &stridebufregion, &invalid_stride, &stridebufregion, &stridebufregion,
buffer, 0);
m_errorMonitor->VerifyFound();
}
// Invalid stride, greater than maxShaderGroupStride
{
VkStridedBufferRegionKHR invalid_stride = stridebufregion;
uint32_t align = ray_tracing_properties.shaderGroupHandleSize;
invalid_stride.stride =
ray_tracing_properties.maxShaderGroupStride + (align - (ray_tracing_properties.maxShaderGroupStride % align));
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysIndirectKHR-stride-04041");
vkCmdTraceRaysIndirectKHR(m_commandBuffer->handle(), &stridebufregion, &stridebufregion, &stridebufregion, &invalid_stride,
buffer, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysIndirectKHR-stride-04035");
vkCmdTraceRaysIndirectKHR(m_commandBuffer->handle(), &stridebufregion, &stridebufregion, &invalid_stride, &stridebufregion,
buffer, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdTraceRaysIndirectKHR-stride-04029");
vkCmdTraceRaysIndirectKHR(m_commandBuffer->handle(), &stridebufregion, &invalid_stride, &stridebufregion, &stridebufregion,
buffer, 0);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ValidateVkAccelerationStructureVersionKHR) {
TEST_DESCRIPTION("Validate VkAccelerationStructureVersionKHR.");
if (!InitFrameworkForRayTracingTest(this, true, m_instance_extension_names, m_device_extension_names, m_errorMonitor, false,
false, true)) {
return;
}
auto ray_tracing_features = lvl_init_struct<VkPhysicalDeviceRayTracingFeaturesKHR>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&ray_tracing_features);
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (ray_tracing_features.rayTracing == VK_FALSE) {
printf("%s rayTracing not supported, skipping tests\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &ray_tracing_features));
PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR =
(PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)vk::GetInstanceProcAddr(
instance(), "vkGetDeviceAccelerationStructureCompatibilityKHR");
ASSERT_TRUE(vkGetDeviceAccelerationStructureCompatibilityKHR != nullptr);
VkAccelerationStructureVersionKHR valid_version = {};
uint8_t mode[] = {VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR, VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR};
valid_version.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_KHR;
valid_version.versionData = mode;
{
VkAccelerationStructureVersionKHR invalid_version = valid_version;
invalid_version.sType = VK_STRUCTURE_TYPE_MAX_ENUM;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureVersionKHR-sType-sType");
vkGetDeviceAccelerationStructureCompatibilityKHR(m_device->handle(), &invalid_version);
m_errorMonitor->VerifyFound();
}
{
VkAccelerationStructureVersionKHR invalid_version = valid_version;
invalid_version.versionData = NULL;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureVersionKHR-versionData-parameter");
vkGetDeviceAccelerationStructureCompatibilityKHR(m_device->handle(), &invalid_version);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ValidateCmdBuildAccelerationStructureKHR) {
TEST_DESCRIPTION("Validate acceleration structure building.");
if (!InitFrameworkForRayTracingTest(this, true, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
PFN_vkCmdBuildAccelerationStructureKHR vkCmdBuildAccelerationStructureKHR =
(PFN_vkCmdBuildAccelerationStructureKHR)vk::GetDeviceProcAddr(device(), "vkCmdBuildAccelerationStructureKHR");
PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR =
(PFN_vkGetBufferDeviceAddressKHR)vk::GetDeviceProcAddr(device(), "vkGetBufferDeviceAddressKHR");
assert(vkCmdBuildAccelerationStructureKHR != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometryNV;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometryNV);
VkAccelerationStructureCreateGeometryTypeInfoKHR geometryInfo = {};
geometryInfo.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR;
geometryInfo.geometryType = geometryNV.geometryType;
geometryInfo.maxPrimitiveCount = 1024;
geometryInfo.indexType = geometryNV.geometry.triangles.indexType;
geometryInfo.maxVertexCount = 1024;
geometryInfo.vertexFormat = geometryNV.geometry.triangles.vertexFormat;
geometryInfo.allowsTransforms = VK_TRUE;
VkAccelerationStructureCreateInfoKHR bot_level_as_create_info = {};
bot_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR;
bot_level_as_create_info.pNext = NULL;
bot_level_as_create_info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
bot_level_as_create_info.maxGeometryCount = 1;
bot_level_as_create_info.pGeometryInfos = &geometryInfo;
VkAccelerationStructureObj bot_level_as(*m_device, bot_level_as_create_info);
VkBufferDeviceAddressInfo vertexAddressInfo = {VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, NULL,
geometryNV.geometry.triangles.vertexData};
VkDeviceAddress vertexAddress = vkGetBufferDeviceAddressKHR(m_device->handle(), &vertexAddressInfo);
VkBufferDeviceAddressInfo indexAddressInfo = {VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, NULL,
geometryNV.geometry.triangles.indexData};
VkDeviceAddress indexAddress = vkGetBufferDeviceAddressKHR(m_device->handle(), &indexAddressInfo);
VkAccelerationStructureGeometryKHR valid_geometry_triangles = {VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR};
valid_geometry_triangles.geometryType = geometryNV.geometryType;
valid_geometry_triangles.geometry.triangles.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR;
valid_geometry_triangles.geometry.triangles.pNext = NULL;
valid_geometry_triangles.geometry.triangles.vertexFormat = geometryNV.geometry.triangles.vertexFormat;
valid_geometry_triangles.geometry.triangles.vertexData.deviceAddress = vertexAddress;
valid_geometry_triangles.geometry.triangles.vertexStride = 8;
valid_geometry_triangles.geometry.triangles.indexType = VK_INDEX_TYPE_UINT32;
valid_geometry_triangles.geometry.triangles.indexData.deviceAddress = indexAddress;
valid_geometry_triangles.geometry.triangles.transformData.deviceAddress = 0;
valid_geometry_triangles.flags = 0;
VkAccelerationStructureGeometryKHR *pGeometry_triangles = &valid_geometry_triangles;
VkAccelerationStructureBuildGeometryInfoKHR valid_asInfo_triangles = {
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR};
valid_asInfo_triangles.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
valid_asInfo_triangles.flags = 0;
valid_asInfo_triangles.update = VK_FALSE;
valid_asInfo_triangles.srcAccelerationStructure = VK_NULL_HANDLE;
valid_asInfo_triangles.dstAccelerationStructure = bot_level_as.handle();
valid_asInfo_triangles.geometryArrayOfPointers = VK_FALSE;
valid_asInfo_triangles.geometryCount = 1;
valid_asInfo_triangles.ppGeometries = &pGeometry_triangles;
valid_asInfo_triangles.scratchData.deviceAddress = 0;
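    // One primitive with zero offsets; the fields are primitiveCount, primitiveOffset, firstVertex, transformOffset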
VkAccelerationStructureBuildOffsetInfoKHR buildOffsetInfo = {
1,
0,
0,
0,
};
const VkAccelerationStructureBuildOffsetInfoKHR *pBuildOffsetInfo = &buildOffsetInfo;
m_commandBuffer->begin();
    // Build a valid src acceleration structure for the update == VK_TRUE case, with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set
VkAccelerationStructureBuildGeometryInfoKHR valid_src_asInfo_triangles = valid_asInfo_triangles;
valid_src_asInfo_triangles.flags = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR;
valid_src_asInfo_triangles.srcAccelerationStructure = bot_level_as.handle();
valid_src_asInfo_triangles.dstAccelerationStructure = bot_level_as.handle();
vkCmdBuildAccelerationStructureKHR(m_commandBuffer->handle(), 1, &valid_src_asInfo_triangles, &pBuildOffsetInfo);
    // Positive test
{
VkAccelerationStructureBuildGeometryInfoKHR asInfo_validupdate = valid_asInfo_triangles;
asInfo_validupdate.update = VK_TRUE;
asInfo_validupdate.srcAccelerationStructure = bot_level_as.handle();
m_errorMonitor->ExpectSuccess();
vkCmdBuildAccelerationStructureKHR(m_commandBuffer->handle(), 1, &asInfo_validupdate, &pBuildOffsetInfo);
m_errorMonitor->VerifyNotFound();
}
// If update is VK_TRUE, srcAccelerationStructure must not be VK_NULL_HANDLE
{
VkAccelerationStructureBuildGeometryInfoKHR asInfo_invalidupdate = valid_asInfo_triangles;
asInfo_invalidupdate.update = VK_TRUE;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-update-03537");
vkCmdBuildAccelerationStructureKHR(m_commandBuffer->handle(), 1, &asInfo_invalidupdate, &pBuildOffsetInfo);
m_errorMonitor->VerifyFound();
}
{
VkAccelerationStructureBuildGeometryInfoKHR invalid_src_asInfo_triangles = valid_src_asInfo_triangles;
invalid_src_asInfo_triangles.flags = 0;
invalid_src_asInfo_triangles.srcAccelerationStructure = bot_level_as.handle();
invalid_src_asInfo_triangles.dstAccelerationStructure = bot_level_as.handle();
        // Build the src acceleration structure without the VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR flag set
vkCmdBuildAccelerationStructureKHR(m_commandBuffer->handle(), 1, &invalid_src_asInfo_triangles, &pBuildOffsetInfo);
VkAccelerationStructureBuildGeometryInfoKHR asInfo_invalidupdate = valid_asInfo_triangles;
asInfo_invalidupdate.update = VK_TRUE;
asInfo_invalidupdate.srcAccelerationStructure = bot_level_as.handle();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-update-03538");
vkCmdBuildAccelerationStructureKHR(m_commandBuffer->handle(), 1, &asInfo_invalidupdate, &pBuildOffsetInfo);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ValidateImportMemoryHandleType) {
TEST_DESCRIPTION("Validate import memory handleType for buffers and images");
#ifdef _WIN32
const auto ext_mem_extension_name = VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR;
#else
const auto ext_mem_extension_name = VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
const auto wrong_handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
// Check for external memory instance extensions
std::vector<const char *> reqd_instance_extensions = {
{VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME}};
for (auto extension_name : reqd_instance_extensions) {
if (InstanceExtensionSupported(extension_name)) {
m_instance_extension_names.push_back(extension_name);
} else {
printf("%s Required instance extension %s not supported, skipping test\n", kSkipPrefix, extension_name);
return;
}
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
auto vkGetPhysicalDeviceExternalBufferPropertiesKHR =
(PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)vk::GetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceExternalBufferPropertiesKHR");
// Check for import/export capability
    // Export is used to create the memory that the import test consumes
VkPhysicalDeviceExternalBufferInfoKHR ebi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR, nullptr, 0,
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, handle_type};
VkExternalBufferPropertiesKHR ebp = {VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR, nullptr, {0, 0, 0}};
ASSERT_TRUE(vkGetPhysicalDeviceExternalBufferPropertiesKHR != nullptr);
vkGetPhysicalDeviceExternalBufferPropertiesKHR(gpu(), &ebi, &ebp);
if (!(ebp.externalMemoryProperties.compatibleHandleTypes & handle_type) ||
!(ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) ||
!(ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR)) {
printf("%s External buffer does not support importing and exporting, skipping test\n", kSkipPrefix);
return;
}
// Always use dedicated allocation
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
} else {
printf("%s Dedicated allocation extension not supported, skipping test\n", kSkipPrefix);
return;
}
// Check for external memory device extensions
if (DeviceExtensionSupported(gpu(), nullptr, ext_mem_extension_name)) {
m_device_extension_names.push_back(ext_mem_extension_name);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
} else {
printf("%s External memory extension not supported, skipping test\n", kSkipPrefix);
return;
}
// Check for bind memory 2
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
} else {
printf("%s bind memory 2 extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkBindBufferMemory2KHR vkBindBufferMemory2Function =
(PFN_vkBindBufferMemory2KHR)vk::GetDeviceProcAddr(m_device->handle(), "vkBindBufferMemory2KHR");
PFN_vkBindImageMemory2KHR vkBindImageMemory2Function =
(PFN_vkBindImageMemory2KHR)vk::GetDeviceProcAddr(m_device->handle(), "vkBindImageMemory2KHR");
m_errorMonitor->ExpectSuccess(kErrorBit | kWarningBit);
VkMemoryPropertyFlags mem_flags = 0;
const VkDeviceSize buffer_size = 1024;
// Create export and import buffers
VkExternalMemoryBufferCreateInfoKHR external_buffer_info = {VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR, nullptr,
handle_type};
auto buffer_info = VkBufferObj::create_info(buffer_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT);
buffer_info.pNext = &external_buffer_info;
VkBufferObj buffer_export;
buffer_export.init_no_mem(*m_device, buffer_info);
external_buffer_info.handleTypes = wrong_handle_type;
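    // buffer_import advertises a different (wrong) external handle type, so binding the imported memory to it later
    // must trigger the handleType-mismatch VUs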
VkBufferObj buffer_import;
buffer_import.init_no_mem(*m_device, buffer_info);
// Allocation info
auto alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_export.memory_requirements(), mem_flags);
// Add export allocation info to pNext chain
VkMemoryDedicatedAllocateInfoKHR dedicated_info = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR, nullptr,
VK_NULL_HANDLE, buffer_export.handle()};
VkExportMemoryAllocateInfoKHR export_info = {VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR, &dedicated_info, handle_type};
alloc_info.pNext = &export_info;
// Allocate memory to be exported
vk_testing::DeviceMemory memory_buffer_export;
memory_buffer_export.init(*m_device, alloc_info);
// Bind exported memory
buffer_export.bind_memory(memory_buffer_export, 0);
VkExternalMemoryImageCreateInfoKHR external_image_info = {VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, nullptr,
handle_type};
VkImageCreateInfo image_info{};
image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_info.pNext = &external_image_info;
image_info.extent = {64, 64, 1};
image_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_info.imageType = VK_IMAGE_TYPE_2D;
image_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_info.arrayLayers = 1;
image_info.mipLevels = 1;
VkImageObj image_export(m_device);
image_export.init_no_mem(*m_device, image_info);
external_image_info.handleTypes = wrong_handle_type;
VkImageObj image_import(m_device);
image_import.init_no_mem(*m_device, image_info);
// Allocation info
dedicated_info = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR, nullptr, image_export.handle(), VK_NULL_HANDLE};
alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image_export.memory_requirements(), mem_flags);
alloc_info.pNext = &export_info;
// Allocate memory to be exported
vk_testing::DeviceMemory memory_image_export;
memory_image_export.init(*m_device, alloc_info);
// Bind exported memory
image_export.bind_memory(memory_image_export, 0);
#ifdef _WIN32
// Export memory to handle
auto vkGetMemoryWin32HandleKHR =
(PFN_vkGetMemoryWin32HandleKHR)vk::GetInstanceProcAddr(instance(), "vkGetMemoryWin32HandleKHR");
ASSERT_TRUE(vkGetMemoryWin32HandleKHR != nullptr);
VkMemoryGetWin32HandleInfoKHR mghi_buffer = {VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR, nullptr,
memory_buffer_export.handle(), handle_type};
VkMemoryGetWin32HandleInfoKHR mghi_image = {VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR, nullptr,
memory_image_export.handle(), handle_type};
HANDLE handle_buffer;
HANDLE handle_image;
ASSERT_VK_SUCCESS(vkGetMemoryWin32HandleKHR(m_device->device(), &mghi_buffer, &handle_buffer));
ASSERT_VK_SUCCESS(vkGetMemoryWin32HandleKHR(m_device->device(), &mghi_image, &handle_image));
VkImportMemoryWin32HandleInfoKHR import_info_buffer = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR, nullptr,
handle_type, handle_buffer};
VkImportMemoryWin32HandleInfoKHR import_info_image = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR, nullptr,
handle_type, handle_image};
#else
// Export memory to fd
auto vkGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vk::GetInstanceProcAddr(instance(), "vkGetMemoryFdKHR");
ASSERT_TRUE(vkGetMemoryFdKHR != nullptr);
VkMemoryGetFdInfoKHR mgfi_buffer = {VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR, nullptr, memory_buffer_export.handle(),
handle_type};
VkMemoryGetFdInfoKHR mgfi_image = {VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR, nullptr, memory_image_export.handle(),
handle_type};
int fd_buffer;
int fd_image;
ASSERT_VK_SUCCESS(vkGetMemoryFdKHR(m_device->device(), &mgfi_buffer, &fd_buffer));
ASSERT_VK_SUCCESS(vkGetMemoryFdKHR(m_device->device(), &mgfi_image, &fd_image));
VkImportMemoryFdInfoKHR import_info_buffer = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR, nullptr, handle_type, fd_buffer};
VkImportMemoryFdInfoKHR import_info_image = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR, nullptr, handle_type, fd_image};
#endif
// Import memory
alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_import.memory_requirements(), mem_flags);
alloc_info.pNext = &import_info_buffer;
vk_testing::DeviceMemory memory_buffer_import;
memory_buffer_import.init(*m_device, alloc_info);
alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image_import.memory_requirements(), mem_flags);
alloc_info.pNext = &import_info_image;
vk_testing::DeviceMemory memory_image_import;
memory_image_import.init(*m_device, alloc_info);
m_errorMonitor->VerifyNotFound();
// Bind imported memory with different handleType
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkBindBufferMemory-memory-02727");
vk::BindBufferMemory(device(), buffer_import.handle(), memory_buffer_import.handle(), 0);
m_errorMonitor->VerifyFound();
VkBindBufferMemoryInfo bind_buffer_info = {};
bind_buffer_info.sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO;
bind_buffer_info.pNext = nullptr;
bind_buffer_info.buffer = buffer_import.handle();
bind_buffer_info.memory = memory_buffer_import.handle();
bind_buffer_info.memoryOffset = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindBufferMemoryInfo-memory-02727");
vkBindBufferMemory2Function(device(), 1, &bind_buffer_info);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkBindImageMemory-memory-02729");
m_errorMonitor->SetUnexpectedError("VUID-VkBindImageMemoryInfo-memory-01614");
m_errorMonitor->SetUnexpectedError("VUID-VkBindImageMemoryInfo-memory-01612");
vk::BindImageMemory(device(), image_import.handle(), memory_image_import.handle(), 0);
m_errorMonitor->VerifyFound();
VkBindImageMemoryInfo bind_image_info = {};
bind_image_info.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
bind_image_info.pNext = nullptr;
bind_image_info.image = image_import.handle();
bind_image_info.memory = memory_buffer_import.handle();
bind_image_info.memoryOffset = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindImageMemoryInfo-memory-02729");
m_errorMonitor->SetUnexpectedError("VUID-VkBindImageMemoryInfo-memory-01614");
m_errorMonitor->SetUnexpectedError("VUID-VkBindImageMemoryInfo-memory-01612");
vkBindImageMemory2Function(device(), 1, &bind_image_info);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ValidateExtendedDynamicStateDisabled) {
TEST_DESCRIPTION("Validate VK_EXT_extended_dynamic_state VUs");
uint32_t version = SetTargetApiVersion(VK_API_VERSION_1_1);
if (version < VK_API_VERSION_1_1) {
printf("%s At least Vulkan version 1.1 is required, skipping test.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME);
return;
}
auto extended_dynamic_state_features = lvl_init_struct<VkPhysicalDeviceExtendedDynamicStateFeaturesEXT>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2>(&extended_dynamic_state_features);
vk::GetPhysicalDeviceFeatures2(gpu(), &features2);
if (!extended_dynamic_state_features.extendedDynamicState) {
printf("%s Test requires (unsupported) extendedDynamicState, skipping\n", kSkipPrefix);
return;
}
// First test attempted uses of VK_EXT_extended_dynamic_state without it being enabled.
extended_dynamic_state_features.extendedDynamicState = VK_FALSE;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
auto vkCmdSetCullModeEXT = (PFN_vkCmdSetCullModeEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetCullModeEXT");
auto vkCmdSetFrontFaceEXT = (PFN_vkCmdSetFrontFaceEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetFrontFaceEXT");
auto vkCmdSetPrimitiveTopologyEXT =
(PFN_vkCmdSetPrimitiveTopologyEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetPrimitiveTopologyEXT");
auto vkCmdSetViewportWithCountEXT =
(PFN_vkCmdSetViewportWithCountEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetViewportWithCountEXT");
auto vkCmdSetScissorWithCountEXT =
(PFN_vkCmdSetScissorWithCountEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetScissorWithCountEXT");
auto vkCmdSetDepthTestEnableEXT =
(PFN_vkCmdSetDepthTestEnableEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetDepthTestEnableEXT");
auto vkCmdSetDepthWriteEnableEXT =
(PFN_vkCmdSetDepthWriteEnableEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetDepthWriteEnableEXT");
auto vkCmdSetDepthCompareOpEXT =
(PFN_vkCmdSetDepthCompareOpEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetDepthCompareOpEXT");
auto vkCmdSetDepthBoundsTestEnableEXT =
(PFN_vkCmdSetDepthBoundsTestEnableEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetDepthBoundsTestEnableEXT");
auto vkCmdSetStencilTestEnableEXT =
(PFN_vkCmdSetStencilTestEnableEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetStencilTestEnableEXT");
auto vkCmdSetStencilOpEXT = (PFN_vkCmdSetStencilOpEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetStencilOpEXT");
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
const VkDynamicState dyn_states[] = {
VK_DYNAMIC_STATE_CULL_MODE_EXT, VK_DYNAMIC_STATE_FRONT_FACE_EXT,
VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT,
VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT,
VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT, VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT,
VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT, VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT,
VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT, VK_DYNAMIC_STATE_STENCIL_OP_EXT,
};
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
pipe.dyn_state_ci_ = dyn_state_ci;
pipe.vp_state_ci_.viewportCount = 0;
pipe.vp_state_ci_.scissorCount = 0;
pipe.InitState();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378");
pipe.CreateGraphicsPipeline();
m_errorMonitor->VerifyFound();
VkCommandBufferObj commandBuffer(m_device, m_commandPool);
commandBuffer.begin();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetCullModeEXT-None-03384");
vkCmdSetCullModeEXT(commandBuffer.handle(), VK_CULL_MODE_NONE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDepthBoundsTestEnableEXT-None-03349");
vkCmdSetDepthBoundsTestEnableEXT(commandBuffer.handle(), VK_FALSE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDepthCompareOpEXT-None-03353");
vkCmdSetDepthCompareOpEXT(commandBuffer.handle(), VK_COMPARE_OP_NEVER);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDepthTestEnableEXT-None-03352");
vkCmdSetDepthTestEnableEXT(commandBuffer.handle(), VK_FALSE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDepthWriteEnableEXT-None-03354");
vkCmdSetDepthWriteEnableEXT(commandBuffer.handle(), VK_FALSE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetFrontFaceEXT-None-03383");
vkCmdSetFrontFaceEXT(commandBuffer.handle(), VK_FRONT_FACE_CLOCKWISE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetPrimitiveTopologyEXT-None-03347");
vkCmdSetPrimitiveTopologyEXT(commandBuffer.handle(), VK_PRIMITIVE_TOPOLOGY_POINT_LIST);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetScissorWithCountEXT-None-03396");
VkRect2D scissor = {{0, 0}, {1, 1}};
vkCmdSetScissorWithCountEXT(commandBuffer.handle(), 1, &scissor);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetStencilOpEXT-None-03351");
vkCmdSetStencilOpEXT(commandBuffer.handle(), VK_STENCIL_FACE_BACK_BIT, VK_STENCIL_OP_ZERO, VK_STENCIL_OP_ZERO,
VK_STENCIL_OP_ZERO, VK_COMPARE_OP_NEVER);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetStencilTestEnableEXT-None-03350");
vkCmdSetStencilTestEnableEXT(commandBuffer.handle(), VK_FALSE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewportWithCountEXT-None-03393");
VkViewport viewport = {0, 0, 1, 1, 0.0f, 0.0f};
vkCmdSetViewportWithCountEXT(commandBuffer.handle(), 1, &viewport);
m_errorMonitor->VerifyFound();
commandBuffer.end();
}
TEST_F(VkLayerTest, ValidateExtendedDynamicStateEnabled) {
TEST_DESCRIPTION("Validate VK_EXT_extended_dynamic_state VUs");
uint32_t version = SetTargetApiVersion(VK_API_VERSION_1_1);
if (version < VK_API_VERSION_1_1) {
printf("%s At least Vulkan version 1.1 is required, skipping test.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME);
return;
}
auto extended_dynamic_state_features = lvl_init_struct<VkPhysicalDeviceExtendedDynamicStateFeaturesEXT>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2>(&extended_dynamic_state_features);
vk::GetPhysicalDeviceFeatures2(gpu(), &features2);
if (!extended_dynamic_state_features.extendedDynamicState) {
printf("%s Test requires (unsupported) extendedDynamicState, skipping\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
auto vkCmdSetPrimitiveTopologyEXT =
(PFN_vkCmdSetPrimitiveTopologyEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetPrimitiveTopologyEXT");
auto vkCmdSetViewportWithCountEXT =
(PFN_vkCmdSetViewportWithCountEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetViewportWithCountEXT");
auto vkCmdSetScissorWithCountEXT =
(PFN_vkCmdSetScissorWithCountEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetScissorWithCountEXT");
auto vkCmdBindVertexBuffers2EXT =
(PFN_vkCmdBindVertexBuffers2EXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdBindVertexBuffers2EXT");
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Verify viewportCount and scissorCount are specified as zero.
{
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
const VkDynamicState dyn_states[] = {
VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT,
VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT,
};
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
pipe.dyn_state_ci_ = dyn_state_ci;
pipe.InitState();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03379");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03380");
pipe.CreateGraphicsPipeline();
m_errorMonitor->VerifyFound();
}
const VkDynamicState dyn_states[] = {
VK_DYNAMIC_STATE_CULL_MODE_EXT, VK_DYNAMIC_STATE_FRONT_FACE_EXT,
VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT,
VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT,
VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT, VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT,
VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT, VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT,
VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT, VK_DYNAMIC_STATE_STENCIL_OP_EXT,
};
// Verify dupes of every state.
for (size_t i = 0; i < size(dyn_states); ++i) {
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = 2;
VkDynamicState dyn_state_dupes[2] = {dyn_states[i], dyn_states[i]};
dyn_state_ci.pDynamicStates = dyn_state_dupes;
pipe.dyn_state_ci_ = dyn_state_ci;
if (dyn_states[i] == VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) {
pipe.vp_state_ci_.viewportCount = 0;
}
if (dyn_states[i] == VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT) {
pipe.vp_state_ci_.scissorCount = 0;
}
pipe.InitState();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442");
pipe.CreateGraphicsPipeline();
m_errorMonitor->VerifyFound();
}
// Verify each vkCmdSet command
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
pipe.dyn_state_ci_ = dyn_state_ci;
pipe.vp_state_ci_.viewportCount = 0;
pipe.vp_state_ci_.scissorCount = 0;
pipe.vi_ci_.vertexBindingDescriptionCount = 1;
VkVertexInputBindingDescription inputBinding = {0, sizeof(float), VK_VERTEX_INPUT_RATE_VERTEX};
pipe.vi_ci_.pVertexBindingDescriptions = &inputBinding;
pipe.vi_ci_.vertexAttributeDescriptionCount = 1;
VkVertexInputAttributeDescription attribute = {0, 0, VK_FORMAT_R32_SFLOAT, 0};
pipe.vi_ci_.pVertexAttributeDescriptions = &attribute;
pipe.InitState();
pipe.CreateGraphicsPipeline();
VkBufferObj buffer;
buffer.init(*m_device, 16, 0, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
std::vector<VkBuffer> buffers(m_device->props.limits.maxVertexInputBindings + 1ull, buffer.handle());
std::vector<VkDeviceSize> offsets(buffers.size(), 0);
VkCommandBufferObj commandBuffer(m_device, m_commandPool);
commandBuffer.begin();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindVertexBuffers2EXT-firstBinding-03355");
vkCmdBindVertexBuffers2EXT(commandBuffer.handle(), m_device->props.limits.maxVertexInputBindings, 1, buffers.data(),
offsets.data(), 0, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindVertexBuffers2EXT-firstBinding-03356");
vkCmdBindVertexBuffers2EXT(commandBuffer.handle(), 0, m_device->props.limits.maxVertexInputBindings + 1, buffers.data(),
offsets.data(), 0, 0);
m_errorMonitor->VerifyFound();
{
VkBufferObj bufferWrongUsage;
bufferWrongUsage.init(*m_device, 16, 0, VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03359");
VkBuffer buffers2[1] = {bufferWrongUsage.handle()};
VkDeviceSize offsets2[1] = {};
vkCmdBindVertexBuffers2EXT(commandBuffer.handle(), 0, 1, buffers2, offsets2, 0, 0);
m_errorMonitor->VerifyFound();
}
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-04111");
m_errorMonitor->SetUnexpectedError("UNASSIGNED-GeneralParameterError-RequiredParameter");
m_errorMonitor->SetUnexpectedError("VUID-vkCmdBindVertexBuffers2EXT-pBuffers-parameter");
VkBuffer buffers2[1] = {VK_NULL_HANDLE};
VkDeviceSize offsets2[1] = {16};
VkDeviceSize strides[1] = {m_device->props.limits.maxVertexInputBindingStride + 1ull};
vkCmdBindVertexBuffers2EXT(commandBuffer.handle(), 0, 1, buffers2, offsets2, 0, 0);
m_errorMonitor->VerifyFound();
buffers2[0] = buffers[0];
VkDeviceSize sizes[1] = {16};
// m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-04112");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindVertexBuffers2EXT-pOffsets-03357");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindVertexBuffers2EXT-pSizes-03358");
vkCmdBindVertexBuffers2EXT(commandBuffer.handle(), 0, 1, buffers2, offsets2, sizes, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindVertexBuffers2EXT-pStrides-03362");
vkCmdBindVertexBuffers2EXT(commandBuffer.handle(), 0, 1, buffers2, offsets2, 0, strides);
m_errorMonitor->VerifyFound();
}
commandBuffer.BeginRenderPass(m_renderPassBeginInfo);
CreatePipelineHelper pipe2(*this);
pipe2.InitInfo();
VkPipelineDynamicStateCreateInfo dyn_state_ci2 = {};
dyn_state_ci2.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci2.dynamicStateCount = 1;
VkDynamicState dynamic_state2 = VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT;
dyn_state_ci2.pDynamicStates = &dynamic_state2;
pipe2.dyn_state_ci_ = dyn_state_ci2;
pipe2.vp_state_ci_.viewportCount = 0;
pipe2.InitState();
pipe2.CreateGraphicsPipeline();
vk::CmdBindPipeline(commandBuffer.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe2.pipeline_);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-viewportCount-03417");
vk::CmdDraw(commandBuffer.handle(), 1, 1, 0, 0);
m_errorMonitor->VerifyFound();
CreatePipelineHelper pipe3(*this);
pipe3.InitInfo();
VkPipelineDynamicStateCreateInfo dyn_state_ci3 = {};
dyn_state_ci3.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci3.dynamicStateCount = 1;
VkDynamicState dynamic_state3 = VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT;
dyn_state_ci3.pDynamicStates = &dynamic_state3;
pipe3.dyn_state_ci_ = dyn_state_ci3;
pipe3.vp_state_ci_.scissorCount = 0;
pipe3.InitState();
pipe3.CreateGraphicsPipeline();
vk::CmdBindPipeline(commandBuffer.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe3.pipeline_);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-scissorCount-03418");
vk::CmdDraw(commandBuffer.handle(), 1, 1, 0, 0);
m_errorMonitor->VerifyFound();
vk::CmdBindPipeline(commandBuffer.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
VkDeviceSize strides[1] = {0};
vkCmdSetPrimitiveTopologyEXT(commandBuffer.handle(), VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP);
vkCmdBindVertexBuffers2EXT(commandBuffer.handle(), 0, 1, buffers.data(), offsets.data(), 0, strides);
VkRect2D scissor = {{0, 0}, {1, 1}};
vkCmdSetScissorWithCountEXT(commandBuffer.handle(), 1, &scissor);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindVertexBuffers2EXT-pStrides-03363");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-viewportCount-03419");
vk::CmdDraw(commandBuffer.handle(), 1, 1, 0, 0);
m_errorMonitor->VerifyFound();
VkViewport viewport = {0, 0, 1, 1, 0.0f, 0.0f};
vkCmdSetViewportWithCountEXT(commandBuffer.handle(), 1, &viewport);
strides[0] = 4;
vkCmdBindVertexBuffers2EXT(commandBuffer.handle(), 0, 1, buffers.data(), offsets.data(), 0, strides);
vkCmdSetPrimitiveTopologyEXT(commandBuffer.handle(), VK_PRIMITIVE_TOPOLOGY_LINE_LIST);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-primitiveTopology-03420");
vk::CmdDraw(commandBuffer.handle(), 1, 1, 0, 0);
m_errorMonitor->VerifyFound();
vk::CmdEndRenderPass(commandBuffer.handle());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewportWithCountEXT-viewportCount-03394");
m_errorMonitor->SetUnexpectedError("VUID-vkCmdSetViewportWithCountEXT-viewportCount-arraylength");
VkViewport viewport2 = {
0, 0, 1, 1, 0.0f, 0.0f,
};
vkCmdSetViewportWithCountEXT(commandBuffer.handle(), 0, &viewport2);
m_errorMonitor->VerifyFound();
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetScissorWithCountEXT-offset-03400");
VkRect2D scissor2 = {{1, 0}, {INT32_MAX, 16}};
vkCmdSetScissorWithCountEXT(commandBuffer.handle(), 1, &scissor2);
m_errorMonitor->VerifyFound();
}
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetScissorWithCountEXT-offset-03401");
VkRect2D scissor2 = {{0, 1}, {16, INT32_MAX}};
vkCmdSetScissorWithCountEXT(commandBuffer.handle(), 1, &scissor2);
m_errorMonitor->VerifyFound();
}
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetScissorWithCountEXT-scissorCount-03397");
m_errorMonitor->SetUnexpectedError("VUID-vkCmdSetScissorWithCountEXT-scissorCount-arraylength");
vkCmdSetScissorWithCountEXT(commandBuffer.handle(), 0, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetScissorWithCountEXT-x-03399");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetScissorWithCountEXT-x-03399");
VkRect2D scissor3 = {{-1, -1}, {0, 0}};
vkCmdSetScissorWithCountEXT(commandBuffer.handle(), 1, &scissor3);
m_errorMonitor->VerifyFound();
commandBuffer.end();
}
TEST_F(VkLayerTest, ValidateExtendedDynamicStateEnabledNoMultiview) {
TEST_DESCRIPTION("Validate VK_EXT_extended_dynamic_state VUs");
uint32_t version = SetTargetApiVersion(VK_API_VERSION_1_1);
if (version < VK_API_VERSION_1_1) {
printf("%s At least Vulkan version 1.1 is required, skipping test.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME);
return;
}
auto extended_dynamic_state_features = lvl_init_struct<VkPhysicalDeviceExtendedDynamicStateFeaturesEXT>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2>(&extended_dynamic_state_features);
vk::GetPhysicalDeviceFeatures2(gpu(), &features2);
if (!extended_dynamic_state_features.extendedDynamicState) {
printf("%s Test requires (unsupported) extendedDynamicState, skipping\n", kSkipPrefix);
return;
}
features2.features.multiViewport = VK_FALSE;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
auto vkCmdSetViewportWithCountEXT =
(PFN_vkCmdSetViewportWithCountEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetViewportWithCountEXT");
auto vkCmdSetScissorWithCountEXT =
(PFN_vkCmdSetScissorWithCountEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetScissorWithCountEXT");
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkCommandBufferObj commandBuffer(m_device, m_commandPool);
commandBuffer.begin();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewportWithCountEXT-viewportCount-03395");
VkViewport viewport = {0, 0, 1, 1, 0.0f, 0.0f};
VkViewport viewports[] = {viewport, viewport};
vkCmdSetViewportWithCountEXT(commandBuffer.handle(), size(viewports), viewports);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetScissorWithCountEXT-scissorCount-03398");
VkRect2D scissor = {{0, 0}, {1, 1}};
VkRect2D scissors[] = {scissor, scissor};
vkCmdSetScissorWithCountEXT(commandBuffer.handle(), size(scissors), scissors);
m_errorMonitor->VerifyFound();
commandBuffer.end();
}
| 1 | 14,169 | I am now confused here; the spec says > If format is VK_FORMAT_UNDEFINED, all members of samplerYcbcrConversionComponents must be the identity swizzle. The spec also says > samplerYcbcrConversionComponents is the component swizzle that **should** be used in VkSamplerYcbcrConversionCreateInfo. So you are allowed to set the swizzle to anything you want, and I guess this is valid usage here... but now I am more curious: if I have an external format, does it not have to follow any component swizzle rules, and can it be anything regardless of the `ycbcrModel`? (I guess this is really a question for @critsec and is not a blocking concern for the scope of this PR) | KhronosGroup-Vulkan-ValidationLayers | cpp
@@ -47,8 +47,8 @@ func handleSequelProCommand(appLocation string) (string, error) {
return "", err
}
- if app.SiteStatus() != "running" {
- return "", errors.New("app not running locally. Try `ddev start`")
+ if app.SiteStatus() != platform.SiteRunning {
+ return "", errors.New("App not running locally. Try `ddev start`")
}
db, err := app.FindContainerByType("db") | 1 | package cmd
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"strconv"
"runtime"
"github.com/drud/ddev/pkg/appports"
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/plugins/platform"
"github.com/drud/ddev/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// SequelproLoc is where we expect to find the sequel pro.app
// It's global so it can be mocked in testing.
var SequelproLoc = "/Applications/sequel pro.app"
// localDevSequelproCmd represents the sequelpro command
var localDevSequelproCmd = &cobra.Command{
Use: "sequelpro",
Short: "Easily connect local site to sequelpro",
Long: `A helper command for easily using sequelpro (OSX database browser) with a ddev app that has been initialized locally.`,
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 0 {
log.Fatalf("invalid arguments to sequelpro command: %v", args)
}
out, err := handleSequelProCommand(SequelproLoc)
if err != nil {
log.Fatalf("Could not run sequelpro command: %s", err)
}
util.Success(out)
},
}
// handleSequelProCommand() is the "real" handler for the real command
func handleSequelProCommand(appLocation string) (string, error) {
app, err := getActiveApp("")
if err != nil {
return "", err
}
if app.SiteStatus() != "running" {
return "", errors.New("app not running locally. Try `ddev start`")
}
db, err := app.FindContainerByType("db")
if err != nil {
return "", err
}
dbPrivatePort, err := strconv.ParseInt(appports.GetPort("db"), 10, 64)
if err != nil {
return "", err
}
dbPublishPort := fmt.Sprint(dockerutil.GetPublishedPort(dbPrivatePort, db))
tmpFilePath := filepath.Join(app.AppRoot(), ".ddev/sequelpro.spf")
tmpFile, err := os.Create(tmpFilePath)
if err != nil {
log.Fatalln(err)
}
defer util.CheckClose(tmpFile)
_, err = tmpFile.WriteString(fmt.Sprintf(
platform.SequelproTemplate,
"db", //dbname
"127.0.0.1", //host
app.HostName(), //connection name
"db", // dbpass
dbPublishPort, // port
"db", //dbuser
))
util.CheckErr(err)
err = exec.Command("open", tmpFilePath).Run()
if err != nil {
return "", err
}
return "sequelpro command finished successfully!", nil
}
// dummyDevSequelproCmd represents the "not available" sequelpro command
var dummyDevSequelproCmd = &cobra.Command{
Use: "sequelpro",
Short: "This command is not available since sequel pro.app is not installed",
Long: `Where installed, "ddev sequelpro" launches the sequel pro database browser`,
Run: func(cmd *cobra.Command, args []string) {
util.Failed("The sequelpro command is not available because sequel pro.app is not detected on your workstation")
},
}
// init installs the real command if it's available, otherwise dummy command (if on OSX), otherwise no command
func init() {
switch {
case detectSequelpro():
RootCmd.AddCommand(localDevSequelproCmd)
case runtime.GOOS == "darwin":
RootCmd.AddCommand(dummyDevSequelproCmd)
}
}
// detectSequelpro looks for the sequel pro app in /Applications; returns true if found
func detectSequelpro() bool {
if _, err := os.Stat(SequelproLoc); err == nil {
return true
}
return false
}
| 1 | 11,413 | I'd say the error should be an error, not instructions to the user. So the error would be something like "site should be running and is not" | drud-ddev | php
@@ -894,8 +894,11 @@ class SNSTest(unittest.TestCase):
return queue_name, queue_arn, queue_url
def test_publish_sms_endpoint(self):
+ # Clean posible previous sms messages
+ sns_backend = SNSBackend.get()
+ sns_backend.sms_messages = []
+
def check_messages():
- sns_backend = SNSBackend.get()
self.assertEqual(len(list_of_contacts), len(sns_backend.sms_messages))
list_of_contacts = ["+10123456789", "+10000000000", "+19876543210"] | 1 | # -*- coding: utf-8 -*-
import json
import os
import time
import unittest
import pytest
import requests
from botocore.exceptions import ClientError
from localstack import config
from localstack.config import external_service_url
from localstack.constants import TEST_AWS_ACCOUNT_ID
from localstack.services.generic_proxy import ProxyListener
from localstack.services.infra import start_proxy
from localstack.services.install import SQS_BACKEND_IMPL
from localstack.services.sns.sns_listener import SNSBackend
from localstack.utils import testutil
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
get_free_tcp_port,
get_service_protocol,
retry,
short_uid,
to_str,
wait_for_port_open,
)
from localstack.utils.testutil import check_expected_lambda_log_events_length
from .lambdas import lambda_integration
from .test_lambda import LAMBDA_RUNTIME_PYTHON36, TEST_LAMBDA_LIBS, TEST_LAMBDA_PYTHON
TEST_TOPIC_NAME = "TestTopic_snsTest"
TEST_QUEUE_NAME = "TestQueue_snsTest"
TEST_QUEUE_DLQ_NAME = "TestQueue_DLQ_snsTest"
TEST_TOPIC_NAME_2 = "topic-test-2"
PUBLICATION_TIMEOUT = 0.500
PUBLICATION_RETRIES = 4
THIS_FOLDER = os.path.dirname(os.path.realpath(__file__))
TEST_LAMBDA_ECHO_FILE = os.path.join(THIS_FOLDER, "lambdas", "lambda_echo.py")
class SNSTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sqs_client = aws_stack.connect_to_service("sqs")
cls.sns_client = aws_stack.connect_to_service("sns")
cls.topic_arn = cls.sns_client.create_topic(Name=TEST_TOPIC_NAME)["TopicArn"]
cls.queue_url = cls.sqs_client.create_queue(QueueName=TEST_QUEUE_NAME)["QueueUrl"]
cls.dlq_url = cls.sqs_client.create_queue(QueueName=TEST_QUEUE_DLQ_NAME)["QueueUrl"]
@classmethod
def tearDownClass(cls):
cls.sqs_client.delete_queue(QueueUrl=cls.queue_url)
cls.sqs_client.delete_queue(QueueUrl=cls.dlq_url)
cls.sns_client.delete_topic(TopicArn=cls.topic_arn)
def test_publish_unicode_chars(self):
# connect an SNS topic to a new SQS queue
_, queue_arn, queue_url = self._create_queue()
self.sns_client.subscribe(TopicArn=self.topic_arn, Protocol="sqs", Endpoint=queue_arn)
# publish message to SNS, receive it from SQS, assert that messages are equal
message = 'ö§a1"_!?,. £$-'
self.sns_client.publish(TopicArn=self.topic_arn, Message=message)
def check_message():
msgs = self.sqs_client.receive_message(QueueUrl=queue_url)
msg_received = msgs["Messages"][0]
msg_received = json.loads(to_str(msg_received["Body"]))
msg_received = msg_received["Message"]
self.assertEqual(message, msg_received)
retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# clean up
self.sqs_client.delete_queue(QueueUrl=queue_url)
def test_subscribe_http_endpoint(self):
# create HTTP endpoint and connect it to SNS topic
class MyUpdateListener(ProxyListener):
def forward_request(self, method, path, data, headers):
records.append((json.loads(to_str(data)), headers))
return 200
records = []
local_port = get_free_tcp_port()
proxy = start_proxy(local_port, backend_url=None, update_listener=MyUpdateListener())
wait_for_port_open(local_port)
queue_arn = "%s://localhost:%s" % (get_service_protocol(), local_port)
self.sns_client.subscribe(TopicArn=self.topic_arn, Protocol="http", Endpoint=queue_arn)
def received():
self.assertEqual(records[0][0]["Type"], "SubscriptionConfirmation")
self.assertEqual(records[0][1]["x-amz-sns-message-type"], "SubscriptionConfirmation")
token = records[0][0]["Token"]
subscribe_url = records[0][0]["SubscribeURL"]
self.assertEqual(
subscribe_url,
"%s/?Action=ConfirmSubscription&TopicArn=%s&Token=%s"
% (external_service_url("sns"), self.topic_arn, token),
)
self.assertIn("Signature", records[0][0])
self.assertIn("SigningCertURL", records[0][0])
retry(received, retries=5, sleep=1)
proxy.stop()
def test_attribute_raw_subscribe(self):
# create SNS topic and connect it to an SQS queue
queue_arn = aws_stack.sqs_queue_arn(TEST_QUEUE_NAME)
self.sns_client.subscribe(
TopicArn=self.topic_arn,
Protocol="sqs",
Endpoint=queue_arn,
Attributes={"RawMessageDelivery": "true"},
)
# fetch subscription information
subscription_list = self.sns_client.list_subscriptions()
subscription_arn = ""
for subscription in subscription_list["Subscriptions"]:
if subscription["TopicArn"] == self.topic_arn:
subscription_arn = subscription["SubscriptionArn"]
actual_attributes = self.sns_client.get_subscription_attributes(
SubscriptionArn=subscription_arn
)["Attributes"]
# assert the attributes are well set
self.assertTrue(actual_attributes["RawMessageDelivery"])
# publish message to SNS, receive it from SQS, assert that messages are equal and that they are Raw
message = "This is a test message"
binary_attribute = b"\x02\x03\x04"
# extending this test case to test support for binary message attribute data
# https://github.com/localstack/localstack/issues/2432
self.sns_client.publish(
TopicArn=self.topic_arn,
Message=message,
MessageAttributes={"store": {"DataType": "Binary", "BinaryValue": binary_attribute}},
)
def check_message():
msgs = self.sqs_client.receive_message(
QueueUrl=self.queue_url, MessageAttributeNames=["All"]
)
msg_received = msgs["Messages"][0]
self.assertEqual(message, msg_received["Body"])
self.assertEqual(
binary_attribute,
msg_received["MessageAttributes"]["store"]["BinaryValue"],
)
retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
def test_filter_policy(self):
# connect SNS topic to an SQS queue
queue_name, queue_arn, queue_url = self._create_queue()
filter_policy = {"attr1": [{"numeric": [">", 0, "<=", 100]}]}
self.sns_client.subscribe(
TopicArn=self.topic_arn,
Protocol="sqs",
Endpoint=queue_arn,
Attributes={"FilterPolicy": json.dumps(filter_policy)},
)
# get number of messages
num_msgs_0 = len(self.sqs_client.receive_message(QueueUrl=queue_url).get("Messages", []))
# publish message that satisfies the filter policy, assert that message is received
message = "This is a test message"
self.sns_client.publish(
TopicArn=self.topic_arn,
Message=message,
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": "99"}},
)
def check_message():
num_msgs_1 = len(
self.sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0)["Messages"]
)
self.assertEqual(num_msgs_1, num_msgs_0 + 1)
return num_msgs_1
num_msgs_1 = retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# publish message that does not satisfy the filter policy, assert that message is not received
message = "This is a test message"
self.sns_client.publish(
TopicArn=self.topic_arn,
Message=message,
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": "111"}},
)
def check_message2():
num_msgs_2 = len(
self.sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0)["Messages"]
)
self.assertEqual(num_msgs_2, num_msgs_1)
return num_msgs_2
retry(check_message2, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# clean up
self.sqs_client.delete_queue(QueueUrl=queue_url)
def test_exists_filter_policy(self):
# connect SNS topic to an SQS queue
queue_name, queue_arn, queue_url = self._create_queue()
filter_policy = {"store": [{"exists": True}]}
def do_subscribe(self, filter_policy, queue_arn):
self.sns_client.subscribe(
TopicArn=self.topic_arn,
Protocol="sqs",
Endpoint=queue_arn,
Attributes={"FilterPolicy": json.dumps(filter_policy)},
)
do_subscribe(self, filter_policy, queue_arn)
# get number of messages
num_msgs_0 = len(self.sqs_client.receive_message(QueueUrl=queue_url).get("Messages", []))
# publish message that satisfies the filter policy, assert that message is received
message = "This is a test message"
self.sns_client.publish(
TopicArn=self.topic_arn,
Message=message,
MessageAttributes={
"store": {"DataType": "Number", "StringValue": "99"},
"def": {"DataType": "Number", "StringValue": "99"},
},
)
def check_message1():
num_msgs_1 = len(
self.sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0)["Messages"]
)
self.assertEqual(num_msgs_1, num_msgs_0 + 1)
return num_msgs_1
num_msgs_1 = retry(check_message1, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# publish message that does not satisfy the filter policy, assert that message is not received
message = "This is a test message"
self.sns_client.publish(
TopicArn=self.topic_arn,
Message=message,
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": "111"}},
)
def check_message2():
num_msgs_2 = len(
self.sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0)["Messages"]
)
self.assertEqual(num_msgs_2, num_msgs_1)
return num_msgs_2
retry(check_message2, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# test with exist operator set to false.
queue_arn = aws_stack.sqs_queue_arn(TEST_QUEUE_NAME)
filter_policy = {"store": [{"exists": False}]}
do_subscribe(self, filter_policy, queue_arn)
# get number of messages
num_msgs_0 = len(
self.sqs_client.receive_message(QueueUrl=self.queue_url).get("Messages", [])
)
# publish message with the attribute and see if its getting filtered.
message = "This is a test message"
self.sns_client.publish(
TopicArn=self.topic_arn,
Message=message,
MessageAttributes={
"store": {"DataType": "Number", "StringValue": "99"},
"def": {"DataType": "Number", "StringValue": "99"},
},
)
def check_message():
num_msgs_1 = len(
self.sqs_client.receive_message(QueueUrl=self.queue_url, VisibilityTimeout=0).get(
"Messages", []
)
)
self.assertEqual(num_msgs_1, num_msgs_0)
return num_msgs_1
num_msgs_1 = retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# publish message that without the attribute and see if its getting filtered.
message = "This is a test message"
self.sns_client.publish(
TopicArn=self.topic_arn,
Message=message,
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": "111"}},
)
def check_message3():
num_msgs_2 = len(
self.sqs_client.receive_message(QueueUrl=self.queue_url, VisibilityTimeout=0).get(
"Messages", []
)
)
self.assertEqual(num_msgs_2, num_msgs_1)
return num_msgs_2
retry(check_message3, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# clean up
self.sqs_client.delete_queue(QueueUrl=queue_url)
def test_subscribe_sqs_queue(self):
_, queue_arn, queue_url = self._create_queue()
# publish message
subscription = self._publish_sns_message_with_attrs(queue_arn, "sqs")
# assert that message is received
def check_message():
messages = self.sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=0)[
"Messages"
]
self.assertEqual(
json.loads(messages[0]["Body"])["MessageAttributes"]["attr1"]["Value"],
"99.12",
)
retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# clean up
self.sqs_client.delete_queue(QueueUrl=queue_url)
self.sns_client.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"])
def test_subscribe_platform_endpoint(self):
sns = self.sns_client
sns_backend = SNSBackend.get()
app_arn = sns.create_platform_application(Name="app1", Platform="p1", Attributes={})[
"PlatformApplicationArn"
]
platform_arn = sns.create_platform_endpoint(
PlatformApplicationArn=app_arn, Token="token_1"
)["EndpointArn"]
subscription = self._publish_sns_message_with_attrs(platform_arn, "application")
# assert that message has been received
def check_message():
self.assertGreater(len(sns_backend.platform_endpoint_messages[platform_arn]), 0)
retry(check_message, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
# clean up
sns.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"])
sns.delete_endpoint(EndpointArn=platform_arn)
sns.delete_platform_application(PlatformApplicationArn=app_arn)
def _publish_sns_message_with_attrs(self, endpoint_arn, protocol):
# create subscription with filter policy
filter_policy = {"attr1": [{"numeric": [">", 0, "<=", 100]}]}
subscription = self.sns_client.subscribe(
TopicArn=self.topic_arn,
Protocol=protocol,
Endpoint=endpoint_arn,
Attributes={"FilterPolicy": json.dumps(filter_policy)},
)
# publish message that satisfies the filter policy
message = "This is a test message"
self.sns_client.publish(
TopicArn=self.topic_arn,
Message=message,
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": "99.12"}},
)
time.sleep(PUBLICATION_TIMEOUT)
return subscription
def test_unknown_topic_publish(self):
fake_arn = "arn:aws:sns:us-east-1:123456789012:i_dont_exist"
message = "This is a test message"
try:
self.sns_client.publish(TopicArn=fake_arn, Message=message)
self.fail("This call should not be successful as the topic does not exist")
except ClientError as e:
self.assertEqual(e.response["Error"]["Code"], "NotFound")
self.assertEqual(e.response["Error"]["Message"], "Topic does not exist")
self.assertEqual(e.response["ResponseMetadata"]["HTTPStatusCode"], 404)
def test_publish_sms(self):
response = self.sns_client.publish(PhoneNumber="+33000000000", Message="This is a SMS")
self.assertTrue("MessageId" in response)
self.assertEqual(response["ResponseMetadata"]["HTTPStatusCode"], 200)
def test_publish_target(self):
response = self.sns_client.publish(
TargetArn="arn:aws:sns:us-east-1:000000000000:endpoint/APNS/abcdef/0f7d5971-aa8b-4bd5-b585-0826e9f93a66",
Message="This is a push notification",
)
self.assertTrue("MessageId" in response)
self.assertEqual(response["ResponseMetadata"]["HTTPStatusCode"], 200)
def test_tags(self):
self.sns_client.tag_resource(
ResourceArn=self.topic_arn,
Tags=[
{"Key": "123", "Value": "abc"},
{"Key": "456", "Value": "def"},
{"Key": "456", "Value": "def"},
],
)
tags = self.sns_client.list_tags_for_resource(ResourceArn=self.topic_arn)
distinct_tags = [
tag for idx, tag in enumerate(tags["Tags"]) if tag not in tags["Tags"][:idx]
]
# test for duplicate tags
self.assertEqual(len(tags["Tags"]), len(distinct_tags))
self.assertEqual(len(tags["Tags"]), 2)
self.assertEqual(tags["Tags"][0]["Key"], "123")
self.assertEqual(tags["Tags"][0]["Value"], "abc")
self.assertEqual(tags["Tags"][1]["Key"], "456")
self.assertEqual(tags["Tags"][1]["Value"], "def")
self.sns_client.untag_resource(ResourceArn=self.topic_arn, TagKeys=["123"])
tags = self.sns_client.list_tags_for_resource(ResourceArn=self.topic_arn)
self.assertEqual(len(tags["Tags"]), 1)
self.assertEqual(tags["Tags"][0]["Key"], "456")
self.assertEqual(tags["Tags"][0]["Value"], "def")
self.sns_client.tag_resource(
ResourceArn=self.topic_arn, Tags=[{"Key": "456", "Value": "pqr"}]
)
tags = self.sns_client.list_tags_for_resource(ResourceArn=self.topic_arn)
self.assertEqual(len(tags["Tags"]), 1)
self.assertEqual(tags["Tags"][0]["Key"], "456")
self.assertEqual(tags["Tags"][0]["Value"], "pqr")
def test_topic_subscription(self):
subscription = self.sns_client.subscribe(
TopicArn=self.topic_arn, Protocol="email", Endpoint="[email protected]"
)
sns_backend = SNSBackend.get()
def check_subscription():
subscription_arn = subscription["SubscriptionArn"]
subscription_obj = sns_backend.subscription_status[subscription_arn]
self.assertEqual(subscription_obj["Status"], "Not Subscribed")
_token = subscription_obj["Token"]
self.sns_client.confirm_subscription(TopicArn=self.topic_arn, Token=_token)
self.assertEqual(subscription_obj["Status"], "Subscribed")
retry(check_subscription, retries=PUBLICATION_RETRIES, sleep=PUBLICATION_TIMEOUT)
def test_dead_letter_queue(self):
lambda_name = "test-%s" % short_uid()
lambda_arn = aws_stack.lambda_function_arn(lambda_name)
topic_name = "test-%s" % short_uid()
topic_arn = self.sns_client.create_topic(Name=topic_name)["TopicArn"]
queue_name = "test-%s" % short_uid()
queue_url = self.sqs_client.create_queue(QueueName=queue_name)["QueueUrl"]
queue_arn = aws_stack.sqs_queue_arn(queue_name)
testutil.create_lambda_function(
func_name=lambda_name,
handler_file=TEST_LAMBDA_PYTHON,
libs=TEST_LAMBDA_LIBS,
runtime=LAMBDA_RUNTIME_PYTHON36,
DeadLetterConfig={"TargetArn": queue_arn},
)
self.sns_client.subscribe(TopicArn=topic_arn, Protocol="lambda", Endpoint=lambda_arn)
payload = {
lambda_integration.MSG_BODY_RAISE_ERROR_FLAG: 1,
}
self.sns_client.publish(TopicArn=topic_arn, Message=json.dumps(payload))
def receive_dlq():
result = self.sqs_client.receive_message(
QueueUrl=queue_url, MessageAttributeNames=["All"]
)
msg_attrs = result["Messages"][0]["MessageAttributes"]
self.assertGreater(len(result["Messages"]), 0)
self.assertIn("RequestID", msg_attrs)
self.assertIn("ErrorCode", msg_attrs)
self.assertIn("ErrorMessage", msg_attrs)
retry(receive_dlq, retries=8, sleep=2)
def unsubscribe_all_from_sns(self):
for subscription_arn in self.sns_client.list_subscriptions()["Subscriptions"]:
self.sns_client.unsubscribe(SubscriptionArn=subscription_arn["SubscriptionArn"])
def test_redrive_policy_http_subscription(self):
self.unsubscribe_all_from_sns()
# create HTTP endpoint and connect it to SNS topic
class MyUpdateListener(ProxyListener):
def forward_request(self, method, path, data, headers):
records.append((json.loads(to_str(data)), headers))
return 200
records = []
local_port = get_free_tcp_port()
proxy = start_proxy(local_port, backend_url=None, update_listener=MyUpdateListener())
wait_for_port_open(local_port)
http_endpoint = "%s://localhost:%s" % (get_service_protocol(), local_port)
subscription = self.sns_client.subscribe(
TopicArn=self.topic_arn, Protocol="http", Endpoint=http_endpoint
)
self.sns_client.set_subscription_attributes(
SubscriptionArn=subscription["SubscriptionArn"],
AttributeName="RedrivePolicy",
AttributeValue=json.dumps(
{"deadLetterTargetArn": aws_stack.sqs_queue_arn(TEST_QUEUE_DLQ_NAME)}
),
)
proxy.stop()
# for some reason, it takes a long time to stop the proxy thread -> TODO investigate
time.sleep(5)
self.sns_client.publish(
TopicArn=self.topic_arn,
Message=json.dumps({"message": "test_redrive_policy"}),
)
def receive_dlq():
result = self.sqs_client.receive_message(
QueueUrl=self.dlq_url, MessageAttributeNames=["All"]
)
self.assertGreater(len(result["Messages"]), 0)
self.assertEqual(
json.loads(json.loads(result["Messages"][0]["Body"])["Message"][0])["message"],
"test_redrive_policy",
)
retry(receive_dlq, retries=7, sleep=2.5)
def test_redrive_policy_lambda_subscription(self):
self.unsubscribe_all_from_sns()
lambda_name = "test-%s" % short_uid()
lambda_arn = aws_stack.lambda_function_arn(lambda_name)
testutil.create_lambda_function(
func_name=lambda_name,
libs=TEST_LAMBDA_LIBS,
handler_file=TEST_LAMBDA_PYTHON,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
subscription = self.sns_client.subscribe(
TopicArn=self.topic_arn, Protocol="lambda", Endpoint=lambda_arn
)
self.sns_client.set_subscription_attributes(
SubscriptionArn=subscription["SubscriptionArn"],
AttributeName="RedrivePolicy",
AttributeValue=json.dumps(
{"deadLetterTargetArn": aws_stack.sqs_queue_arn(TEST_QUEUE_DLQ_NAME)}
),
)
testutil.delete_lambda_function(lambda_name)
self.sns_client.publish(
TopicArn=self.topic_arn,
Message=json.dumps({"message": "test_redrive_policy"}),
)
def receive_dlq():
result = self.sqs_client.receive_message(
QueueUrl=self.dlq_url, MessageAttributeNames=["All"]
)
self.assertGreater(len(result["Messages"]), 0)
self.assertEqual(
json.loads(json.loads(result["Messages"][0]["Body"])["Message"][0])["message"],
"test_redrive_policy",
)
retry(receive_dlq, retries=10, sleep=2)
def test_redrive_policy_queue_subscription(self):
self.unsubscribe_all_from_sns()
topic_arn = self.sns_client.create_topic(Name="topic-%s" % short_uid())["TopicArn"]
invalid_queue_arn = aws_stack.sqs_queue_arn("invalid_queue")
# subscribe with an invalid queue ARN, to trigger event on DLQ below
subscription = self.sns_client.subscribe(
TopicArn=topic_arn, Protocol="sqs", Endpoint=invalid_queue_arn
)
self.sns_client.set_subscription_attributes(
SubscriptionArn=subscription["SubscriptionArn"],
AttributeName="RedrivePolicy",
AttributeValue=json.dumps(
{"deadLetterTargetArn": aws_stack.sqs_queue_arn(TEST_QUEUE_DLQ_NAME)}
),
)
self.sns_client.publish(
TopicArn=topic_arn, Message=json.dumps({"message": "test_redrive_policy"})
)
def receive_dlq():
result = self.sqs_client.receive_message(
QueueUrl=self.dlq_url, MessageAttributeNames=["All"]
)
self.assertGreater(len(result["Messages"]), 0)
self.assertEqual(
json.loads(json.loads(result["Messages"][0]["Body"])["Message"][0])["message"],
"test_redrive_policy",
)
retry(receive_dlq, retries=10, sleep=2)
def test_publish_with_empty_subject(self):
topic_arn = self.sns_client.create_topic(Name=TEST_TOPIC_NAME_2)["TopicArn"]
# Publish without subject
rs = self.sns_client.publish(
TopicArn=topic_arn, Message=json.dumps({"message": "test_publish"})
)
self.assertEqual(rs["ResponseMetadata"]["HTTPStatusCode"], 200)
try:
# Publish with empty subject
self.sns_client.publish(
TopicArn=topic_arn,
Subject="",
Message=json.dumps({"message": "test_publish"}),
)
self.fail("This call should not be successful as the subject is empty")
except ClientError as e:
self.assertEqual(e.response["Error"]["Code"], "InvalidParameter")
# clean up
self.sns_client.delete_topic(TopicArn=topic_arn)
def test_create_topic_test_arn(self):
response = self.sns_client.create_topic(Name=TEST_TOPIC_NAME)
topic_arn_params = response["TopicArn"].split(":")
self.assertEqual(topic_arn_params[4], TEST_AWS_ACCOUNT_ID)
self.assertEqual(topic_arn_params[5], TEST_TOPIC_NAME)
def test_publish_message_by_target_arn(self):
self.unsubscribe_all_from_sns()
topic_name = "queue-{}".format(short_uid())
func_name = "lambda-%s" % short_uid()
topic_arn = self.sns_client.create_topic(Name=topic_name)["TopicArn"]
testutil.create_lambda_function(
handler_file=TEST_LAMBDA_ECHO_FILE,
func_name=func_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
lambda_arn = aws_stack.lambda_function_arn(func_name)
subscription_arn = self.sns_client.subscribe(
TopicArn=topic_arn, Protocol="lambda", Endpoint=lambda_arn
)["SubscriptionArn"]
self.sns_client.publish(
TopicArn=topic_arn, Message="test_message_1", Subject="test subject"
)
# Lambda invoked 1 time
events = retry(
check_expected_lambda_log_events_length,
retries=3,
sleep=1,
function_name=func_name,
expected_length=1,
)
message = events[0]["Records"][0]
self.assertEqual(message["EventSubscriptionArn"], subscription_arn)
self.sns_client.publish(
TargetArn=topic_arn, Message="test_message_2", Subject="test subject"
)
events = retry(
check_expected_lambda_log_events_length,
retries=3,
sleep=1,
function_name=func_name,
expected_length=2,
)
# Lambda invoked 1 more time
self.assertEqual(len(events), 2)
for event in events:
message = event["Records"][0]
self.assertEqual(message["EventSubscriptionArn"], subscription_arn)
# clean up
self.sns_client.delete_topic(TopicArn=topic_arn)
lambda_client = aws_stack.connect_to_service("lambda")
lambda_client.delete_function(FunctionName=func_name)
def test_publish_message_after_subscribe_topic(self):
self.unsubscribe_all_from_sns()
topic_name = "queue-{}".format(short_uid())
queue_name = "test-%s" % short_uid()
topic_arn = self.sns_client.create_topic(Name=topic_name)["TopicArn"]
queue_url = self.sqs_client.create_queue(QueueName=queue_name)["QueueUrl"]
queue_arn = aws_stack.sqs_queue_arn(queue_name)
rs = self.sns_client.publish(
TopicArn=topic_arn, Subject="test subject", Message="test_message_1"
)
self.assertEqual(rs["ResponseMetadata"]["HTTPStatusCode"], 200)
self.sns_client.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_arn)
message_subject = "sqs subject"
message_body = "test_message_2"
rs = self.sns_client.publish(
TopicArn=topic_arn, Subject=message_subject, Message=message_body
)
self.assertEqual(rs["ResponseMetadata"]["HTTPStatusCode"], 200)
message_id = rs["MessageId"]
def get_message(q_url):
resp = self.sqs_client.receive_message(QueueUrl=q_url)
return json.loads(resp["Messages"][0]["Body"])
message = retry(get_message, retries=3, sleep=2, q_url=queue_url)
self.assertEqual(message["MessageId"], message_id)
self.assertEqual(message["Subject"], message_subject)
self.assertEqual(message["Message"], message_body)
# clean up
self.sns_client.delete_topic(TopicArn=topic_arn)
self.sqs_client.delete_queue(QueueUrl=queue_url)
def test_create_duplicate_topic_with_different_tags(self):
topic_name = "test-%s" % short_uid()
topic_arn = self.sns_client.create_topic(Name=topic_name)["TopicArn"]
with self.assertRaises(ClientError) as ctx:
self.sns_client.create_topic(Name=topic_name, Tags=[{"Key": "456", "Value": "pqr"}])
self.fail(
"This call should not be successful as the topic already exists with different tags"
)
e = ctx.exception
self.assertEqual(e.response["Error"]["Code"], "InvalidParameter")
self.assertEqual(e.response["Error"]["Message"], "Topic already exists with different tags")
self.assertEqual(e.response["ResponseMetadata"]["HTTPStatusCode"], 400)
# clean up
self.sns_client.delete_topic(TopicArn=topic_arn)
def test_create_duplicate_topic_check_idempotentness(self):
topic_name = "test-%s" % short_uid()
tags = [{"Key": "a", "Value": "1"}, {"Key": "b", "Value": "2"}]
kwargs = [
{"Tags": tags}, # to create topic with two tags
{"Tags": tags}, # to create the same topic again with same tags
{"Tags": [tags[0]]}, # to create the same topic again with one of the tags from above
{"Tags": []}, # to create the same topic again with no tags
]
responses = []
for arg in kwargs:
responses.append(self.sns_client.create_topic(Name=topic_name, **arg))
# assert TopicArn is returned by all the above create_topic calls
for i in range(len(responses)):
self.assertIn("TopicArn", responses[i])
# clean up
self.sns_client.delete_topic(TopicArn=responses[0]["TopicArn"])
def test_create_platform_endpoint_check_idempotentness(self):
response = self.sns_client.create_platform_application(
Name="test-%s" % short_uid(),
Platform="GCM",
Attributes={"PlatformCredential": "123"},
)
kwargs_list = [
{"Token": "test1", "CustomUserData": "test-data"},
{"Token": "test1", "CustomUserData": "test-data"},
{"Token": "test1"},
{"Token": "test1"},
]
platform_arn = response["PlatformApplicationArn"]
responses = []
for kwargs in kwargs_list:
responses.append(
self.sns_client.create_platform_endpoint(
PlatformApplicationArn=platform_arn, **kwargs
)
)
# Assert endpointarn is returned in every call create platform call
for i in range(len(responses)):
self.assertIn("EndpointArn", responses[i])
endpoint_arn = responses[0]["EndpointArn"]
# clean up
self.sns_client.delete_endpoint(EndpointArn=endpoint_arn)
self.sns_client.delete_platform_application(PlatformApplicationArn=platform_arn)
def test_publish_by_path_parameters(self):
topic_name = "topic-{}".format(short_uid())
queue_name = "queue-{}".format(short_uid())
message = "test message {}".format(short_uid())
topic_arn = self.sns_client.create_topic(Name=topic_name)["TopicArn"]
base_url = "{}://{}:{}".format(
get_service_protocol(), config.LOCALSTACK_HOSTNAME, config.PORT_SNS
)
path = "Action=Publish&Version=2010-03-31&TopicArn={}&Message={}".format(topic_arn, message)
queue_url = self.sqs_client.create_queue(QueueName=queue_name)["QueueUrl"]
queue_arn = aws_stack.sqs_queue_arn(queue_name)
self.sns_client.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_arn)
r = requests.post(
url="{}/?{}".format(base_url, path),
headers=aws_stack.mock_aws_request_headers("sns"),
)
self.assertEqual(r.status_code, 200)
def get_notification(q_url):
resp = self.sqs_client.receive_message(QueueUrl=q_url)
return json.loads(resp["Messages"][0]["Body"])
notification = retry(get_notification, retries=3, sleep=2, q_url=queue_url)
self.assertEqual(notification["TopicArn"], topic_arn)
self.assertEqual(notification["Message"], message)
# clean up
self.sns_client.delete_topic(TopicArn=topic_arn)
self.sqs_client.delete_queue(QueueUrl=queue_url)
def test_multiple_subscriptions_http_endpoint(self):
self.unsubscribe_all_from_sns()
# create HTTP endpoint and connect it to SNS topic
class MyUpdateListener(ProxyListener):
def forward_request(self, method, path, data, headers):
records.append((json.loads(to_str(data)), headers))
return 429
number_of_subscriptions = 4
records = []
proxies = []
for _ in range(number_of_subscriptions):
local_port = get_free_tcp_port()
proxies.append(
start_proxy(local_port, backend_url=None, update_listener=MyUpdateListener())
)
wait_for_port_open(local_port)
http_endpoint = "%s://localhost:%s" % (get_service_protocol(), local_port)
self.sns_client.subscribe(
TopicArn=self.topic_arn, Protocol="http", Endpoint=http_endpoint
)
# fetch subscription information
subscription_list = self.sns_client.list_subscriptions()
self.assertEqual(subscription_list["ResponseMetadata"]["HTTPStatusCode"], 200)
self.assertEqual(len(subscription_list["Subscriptions"]), number_of_subscriptions)
self.assertEqual(number_of_subscriptions, len(records))
for proxy in proxies:
proxy.stop()
def _create_queue(self):
queue_name = "queue-%s" % short_uid()
queue_arn = aws_stack.sqs_queue_arn(queue_name)
queue_url = self.sqs_client.create_queue(QueueName=queue_name)["QueueUrl"]
return queue_name, queue_arn, queue_url
def test_publish_sms_endpoint(self):
def check_messages():
sns_backend = SNSBackend.get()
self.assertEqual(len(list_of_contacts), len(sns_backend.sms_messages))
list_of_contacts = ["+10123456789", "+10000000000", "+19876543210"]
message = "Good news everyone!"
# Add SMS Subscribers
for number in list_of_contacts:
self.sns_client.subscribe(TopicArn=self.topic_arn, Protocol="sms", Endpoint=number)
# Publish a message.
self.sns_client.publish(Message=message, TopicArn=self.topic_arn)
retry(check_messages, retries=3, sleep=0.5)
def test_publish_sqs_from_sns(self):
topic = self.sns_client.create_topic(Name="test_topic3")
topic_arn = topic["TopicArn"]
test_queue = self.sqs_client.create_queue(QueueName="test_queue3")
queue_url = test_queue["QueueUrl"]
subscription_arn = self.sns_client.subscribe(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint=queue_url,
Attributes={"RawMessageDelivery": "true"},
)["SubscriptionArn"]
self.sns_client.publish(
TargetArn=topic_arn,
Message="Test msg",
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": "99.12"}},
)
def get_message_with_attributes(queue_url):
response = self.sqs_client.receive_message(
QueueUrl=queue_url, MessageAttributeNames=["All"]
)
self.assertEqual(
response["Messages"][0]["MessageAttributes"],
{"attr1": {"DataType": "Number", "StringValue": "99.12"}},
)
self.sqs_client.delete_message(
QueueUrl=queue_url, ReceiptHandle=response["Messages"][0]["ReceiptHandle"]
)
retry(get_message_with_attributes, retries=3, sleep=10, queue_url=queue_url)
self.sns_client.set_subscription_attributes(
SubscriptionArn=subscription_arn,
AttributeName="RawMessageDelivery",
AttributeValue="false",
)
self.sns_client.publish(
TargetArn=topic_arn,
Message="Test msg",
MessageAttributes={"attr1": {"DataType": "Number", "StringValue": "100.12"}},
)
def get_message_without_attributes(queue_url):
response = self.sqs_client.receive_message(
QueueUrl=queue_url, MessageAttributeNames=["All"]
)
self.assertIsNone(response["Messages"][0].get("MessageAttributes"))
self.assertIn(
"100.12",
response["Messages"][0]["Body"],
)
self.sqs_client.delete_message(
QueueUrl=queue_url, ReceiptHandle=response["Messages"][0]["ReceiptHandle"]
)
retry(get_message_without_attributes, retries=3, sleep=10, queue_url=queue_url)
def add_xray_header(self, request, **kwargs):
request.headers[
"X-Amzn-Trace-Id"
] = "Root=1-3152b799-8954dae64eda91bc9a23a7e8;Parent=7fa8c0f79203be72;Sampled=1"
def test_publish_sqs_from_sns_with_xray_propagation(self):
if SQS_BACKEND_IMPL != "elasticmq":
pytest.skip("not using elasticmq as SQS backend")
self.sns_client.meta.events.register("before-send.sns.Publish", self.add_xray_header)
topic = self.sns_client.create_topic(Name="test_topic4")
topic_arn = topic["TopicArn"]
test_queue = self.sqs_client.create_queue(QueueName="test_queue4")
queue_url = test_queue["QueueUrl"]
self.sns_client.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_url)
self.sns_client.publish(TargetArn=topic_arn, Message="X-Ray propagation test msg")
response = self.sqs_client.receive_message(
QueueUrl=queue_url,
AttributeNames=["SentTimestamp", "AWSTraceHeader"],
MaxNumberOfMessages=1,
MessageAttributeNames=["All"],
VisibilityTimeout=2,
WaitTimeSeconds=2,
)
self.assertEqual(len(response["Messages"]), 1)
message = response["Messages"][0]
self.assertTrue("Attributes" in message)
self.assertTrue("AWSTraceHeader" in message["Attributes"])
self.assertEqual(
message["Attributes"]["AWSTraceHeader"],
"Root=1-3152b799-8954dae64eda91bc9a23a7e8;Parent=7fa8c0f79203be72;Sampled=1",
)
def test_create_topic_after_delete_with_new_tags(self):
topic_name = "test-%s" % short_uid()
topic = self.sns_client.create_topic(
Name=topic_name, Tags=[{"Key": "Name", "Value": "pqr"}]
)
self.sns_client.delete_topic(TopicArn=topic["TopicArn"])
topic1 = self.sns_client.create_topic(
Name=topic_name, Tags=[{"Key": "Name", "Value": "abc"}]
)
self.assertEqual(topic["TopicArn"], topic1["TopicArn"])
# cleanup
self.sns_client.delete_topic(TopicArn=topic1["TopicArn"])
def test_not_found_error_on_get_subscription_attributes(self):
topic_name = "queue-{}".format(short_uid())
queue_name = "test-%s" % short_uid()
topic_arn = self.sns_client.create_topic(Name=topic_name)["TopicArn"]
queue = self.sqs_client.create_queue(QueueName=queue_name)
queue_url = queue["QueueUrl"]
queue_arn = aws_stack.sqs_queue_arn(queue_name)
subscription = self.sns_client.subscribe(
TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_arn
)
subscription_attributes = self.sns_client.get_subscription_attributes(
SubscriptionArn=subscription["SubscriptionArn"]
)
self.assertEqual(
subscription_attributes.get("Attributes").get("SubscriptionArn"),
subscription["SubscriptionArn"],
)
self.sns_client.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"])
with self.assertRaises(ClientError) as ctx:
self.sns_client.get_subscription_attributes(
SubscriptionArn=subscription["SubscriptionArn"]
)
self.assertEqual(ctx.exception.response["Error"]["Code"], "NotFound")
self.assertEqual(ctx.exception.response["ResponseMetadata"]["HTTPStatusCode"], 404)
# cleanup
self.sns_client.delete_topic(TopicArn=topic_arn)
self.sqs_client.delete_queue(QueueUrl=queue_url)
| 1 | 13,730 | Thanks for this PR @pinzon ! Rather than resetting the `sms_messages` list, can we fix the assertion in line 902 - rather than asserting `len` equality, we should assert that all expected messages are contained in the list. (In this case the phone numbers should be reasonably random, and ideally we should not have duplicates across different tests). We're planning to introduce test parallelization in the near future, and this will help us avoiding undesired side-effects. Thanks | localstack-localstack | py |
@@ -21,10 +21,12 @@ import (
"context"
"errors"
"sync"
- "time"
+
+ "github.com/asaskevich/EventBus"
log "github.com/cihub/seelog"
- "github.com/mysteriumnetwork/node/client/stats"
+ stats_dto "github.com/mysteriumnetwork/node/client/stats/dto"
+
"github.com/mysteriumnetwork/node/communication"
"github.com/mysteriumnetwork/node/firewall"
"github.com/mysteriumnetwork/node/identity" | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package connection
import (
"context"
"errors"
"sync"
"time"
log "github.com/cihub/seelog"
"github.com/mysteriumnetwork/node/client/stats"
"github.com/mysteriumnetwork/node/communication"
"github.com/mysteriumnetwork/node/firewall"
"github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/service_discovery/dto"
"github.com/mysteriumnetwork/node/session"
)
const managerLogPrefix = "[connection-manager] "
var (
// ErrNoConnection error indicates that action applied to manager expects active connection (i.e. disconnect)
ErrNoConnection = errors.New("no connection exists")
// ErrAlreadyExists error indicates that action applied to manager expects no active connection (i.e. connect)
ErrAlreadyExists = errors.New("connection already exists")
// ErrConnectionCancelled indicates that connection in progress was cancelled by request of api user
ErrConnectionCancelled = errors.New("connection was cancelled")
// ErrConnectionFailed indicates that Connect method didn't reach "Connected" phase due to connection error
ErrConnectionFailed = errors.New("connection has failed")
// ErrUnsupportedServiceType indicates that target proposal contains unsupported service type
ErrUnsupportedServiceType = errors.New("unsupported service type in proposal")
)
// ConnectionCreator creates new connection by given options and uses state channel to report state changes
// Given options:
// - session,
// - consumer identity
// - service provider identity
// - service proposal
type ConnectionCreator func(ConnectOptions, StateChannel) (Connection, error)
type sessionSaver interface {
Save(Session) error
Update(session.ID, time.Time, stats.SessionStats, SessionStatus) error
}
type connectionManager struct {
//these are passed on creation
newDialog DialogCreator
newPromiseIssuer PromiseIssuerCreator
newConnection ConnectionCreator
statsKeeper stats.SessionStatsKeeper
sessionStorage sessionSaver
//these are populated by Connect at runtime
ctx context.Context
mutex sync.RWMutex
status ConnectionStatus
cleanConnection func()
}
// NewManager creates connection manager with given dependencies
func NewManager(
dialogCreator DialogCreator,
promiseIssuerCreator PromiseIssuerCreator,
connectionCreator ConnectionCreator,
statsKeeper stats.SessionStatsKeeper,
sessionStorage sessionSaver,
) *connectionManager {
return &connectionManager{
statsKeeper: statsKeeper,
newDialog: dialogCreator,
newPromiseIssuer: promiseIssuerCreator,
newConnection: connectionCreator,
status: statusNotConnected(),
cleanConnection: warnOnClean,
sessionStorage: sessionStorage,
}
}
func (manager *connectionManager) Connect(consumerID identity.Identity, proposal dto.ServiceProposal, params ConnectParams) (err error) {
if manager.status.State != NotConnected {
return ErrAlreadyExists
}
manager.mutex.Lock()
manager.ctx, manager.cleanConnection = context.WithCancel(context.Background())
manager.status = statusConnecting()
manager.mutex.Unlock()
defer func() {
if err != nil {
manager.mutex.Lock()
manager.status = statusNotConnected()
manager.mutex.Unlock()
}
}()
err = manager.startConnection(consumerID, proposal, params)
if err == context.Canceled {
return ErrConnectionCancelled
}
return err
}
func (manager *connectionManager) startConnection(consumerID identity.Identity, proposal dto.ServiceProposal, params ConnectParams) (err error) {
manager.mutex.Lock()
cancelCtx := manager.cleanConnection
manager.mutex.Unlock()
var cancel []func()
defer func() {
manager.cleanConnection = func() {
manager.status = statusDisconnecting()
cancelCtx()
for _, f := range cancel {
f()
}
}
if err != nil {
log.Info(managerLogPrefix, "Cancelling connection initiation")
defer manager.cleanConnection()
}
}()
providerID := identity.FromAddress(proposal.ProviderID)
dialog, err := manager.newDialog(consumerID, providerID, proposal.ProviderContacts[0])
if err != nil {
return err
}
cancel = append(cancel, func() { dialog.Close() })
sessionID, sessionConfig, err := session.RequestSessionCreate(dialog, proposal.ID)
if err != nil {
return err
}
promiseIssuer := manager.newPromiseIssuer(consumerID, dialog)
err = promiseIssuer.Start(proposal)
if err != nil {
return err
}
cancel = append(cancel, func() { promiseIssuer.Stop() })
stateChannel := make(chan State, 10)
connectOptions := ConnectOptions{
SessionID: sessionID,
SessionConfig: sessionConfig,
ConsumerID: consumerID,
ProviderID: providerID,
Proposal: proposal,
}
connection, err := manager.newConnection(connectOptions, stateChannel)
if err != nil {
return err
}
err = manager.saveSession(connectOptions)
if err != nil {
return err
}
if err = connection.Start(); err != nil {
return err
}
cancel = append(cancel, connection.Stop)
err = manager.waitForConnectedState(stateChannel, sessionID)
if err != nil {
return err
}
if !params.DisableKillSwitch {
// TODO: Implement fw based kill switch for respective OS
		// we may need to wait for tun device to be set up
firewall.NewKillSwitch().Enable()
}
go connectionWaiter(connection, dialog, promiseIssuer)
go manager.consumeConnectionStates(stateChannel, sessionID)
return nil
}
func (manager *connectionManager) Status() ConnectionStatus {
manager.mutex.RLock()
defer manager.mutex.RUnlock()
return manager.status
}
func (manager *connectionManager) Disconnect() error {
manager.mutex.RLock()
defer manager.mutex.RUnlock()
if manager.status.State == NotConnected {
return ErrNoConnection
}
manager.cleanConnection()
return nil
}
func warnOnClean() {
log.Warn(managerLogPrefix, "Trying to close when there is nothing to close. Possible bug or race condition")
}
func connectionWaiter(connection Connection, dialog communication.Dialog, promiseIssuer PromiseIssuer) {
err := connection.Wait()
if err != nil {
log.Warn(managerLogPrefix, "Connection exited with error: ", err)
} else {
log.Info(managerLogPrefix, "Connection exited")
}
promiseIssuer.Stop()
dialog.Close()
}
func (manager *connectionManager) waitForConnectedState(stateChannel <-chan State, sessionID session.ID) error {
for {
select {
case state, more := <-stateChannel:
if !more {
return ErrConnectionFailed
}
switch state {
case Connected:
manager.onStateChanged(state, sessionID)
return nil
default:
manager.onStateChanged(state, sessionID)
}
case <-manager.ctx.Done():
return manager.ctx.Err()
}
}
}
func (manager *connectionManager) consumeConnectionStates(stateChannel <-chan State, sessionID session.ID) {
for state := range stateChannel {
manager.onStateChanged(state, sessionID)
}
manager.mutex.Lock()
defer manager.mutex.Unlock()
manager.status = statusNotConnected()
log.Debug(managerLogPrefix, "State updater stopCalled")
}
func (manager *connectionManager) onStateChanged(state State, sessionID session.ID) {
manager.mutex.Lock()
defer manager.mutex.Unlock()
switch state {
case Connected:
manager.statsKeeper.MarkSessionStart()
manager.status = statusConnected(sessionID)
case Disconnecting:
manager.statsKeeper.MarkSessionEnd()
manager.sessionStorage.Update(sessionID, time.Now(), manager.statsKeeper.Retrieve(), SessionStatusCompleted)
case Reconnecting:
manager.status = statusReconnecting()
}
}
func (manager *connectionManager) saveSession(connectOptions ConnectOptions) error {
providerCountry := connectOptions.Proposal.ServiceDefinition.GetLocation().Country
se := NewSession(connectOptions.SessionID, connectOptions.ProviderID, connectOptions.Proposal.ServiceType, providerCountry)
return manager.sessionStorage.Save(*se)
}
| 1 | 12,766 | Some weird blocks formation again. | mysteriumnetwork-node | go |
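
The "weird blocks formation" remark refers to the irregular blank lines the patch above introduces inside the import block. As a purely illustrative sketch (not the project's mandated layout), the same imports grouped in the usual goimports style — standard library first, external packages next, project packages last — would look roughly like this; the three-group split assumes the project treats github.com/mysteriumnetwork/node as its local prefix:

import (
	"context"
	"errors"
	"sync"

	"github.com/asaskevich/EventBus"
	log "github.com/cihub/seelog"

	stats_dto "github.com/mysteriumnetwork/node/client/stats/dto"
	"github.com/mysteriumnetwork/node/communication"
	"github.com/mysteriumnetwork/node/firewall"
	"github.com/mysteriumnetwork/node/identity"
	// remaining project imports from the file above stay unchanged
)
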
@@ -39,7 +39,7 @@ func TestOvirtActuator(t *testing.T) {
clusterDeployment: testOvirtClusterDeployment(),
pool: testOvirtPool(),
expectedMachineSetReplicas: map[string]int64{
- fmt.Sprintf("%s-worker-0", testInfraID): 3,
+ fmt.Sprintf("%s-worker", testInfraID): 3,
},
},
} | 1 | package remotemachineset
import (
"fmt"
"testing"
"github.com/golang/mock/gomock"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
ovirtprovider "github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1"
machineapi "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1"
hivev1 "github.com/openshift/hive/apis/hive/v1"
hivev1ovirt "github.com/openshift/hive/apis/hive/v1/ovirt"
)
const (
vmTypeServer string = "server"
cores int32 = 4
sockets int32 = 1
memoryMB int32 = 16384
sizeGB int64 = 120
)
func TestOvirtActuator(t *testing.T) {
tests := []struct {
name string
clusterDeployment *hivev1.ClusterDeployment
pool *hivev1.MachinePool
expectedMachineSetReplicas map[string]int64
expectedErr bool
}{
{
name: "generate machineset",
clusterDeployment: testOvirtClusterDeployment(),
pool: testOvirtPool(),
expectedMachineSetReplicas: map[string]int64{
fmt.Sprintf("%s-worker-0", testInfraID): 3,
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
actuator := &OvirtActuator{
logger: log.WithField("actuator", "ovirtactuator_test"),
}
generatedMachineSets, _, err := actuator.GenerateMachineSets(test.clusterDeployment, test.pool, actuator.logger)
if test.expectedErr {
assert.Error(t, err, "expected error for test case")
} else {
require.NoError(t, err, "unexpected error for test cast")
validateOvirtMachineSets(t, generatedMachineSets, test.expectedMachineSetReplicas)
}
})
}
}
func validateOvirtMachineSets(t *testing.T, mSets []*machineapi.MachineSet, expectedMSReplicas map[string]int64) {
assert.Equal(t, len(expectedMSReplicas), len(mSets), "different number of machine sets generated than expected")
for _, ms := range mSets {
expectedReplicas, ok := expectedMSReplicas[ms.Name]
if assert.True(t, ok, "unexpected machine set") {
assert.Equal(t, expectedReplicas, int64(*ms.Spec.Replicas), "replica mismatch")
}
ovirtProvider, ok := ms.Spec.Template.Spec.ProviderSpec.Value.Object.(*ovirtprovider.OvirtMachineProviderSpec)
if assert.True(t, ok, "failed to convert to ovirt provider spec") {
			assert.Equal(t, memoryMB, ovirtProvider.MemoryMB, "unexpected MemoryMB")
assert.Equal(t, cores, ovirtProvider.CPU.Cores, "unexpected number of CPU Cores")
assert.Equal(t, sockets, ovirtProvider.CPU.Sockets, "unexpected number of CPU Sockets")
assert.Equal(t, sizeGB, ovirtProvider.OSDisk.SizeGB, "unexpected DiskGiB")
assert.Equal(t, vmTypeServer, ovirtProvider.VMType, "unexpected VMType")
}
}
}
func testOvirtPool() *hivev1.MachinePool {
p := testMachinePool()
p.Spec.Platform = hivev1.MachinePoolPlatform{
Ovirt: &hivev1ovirt.MachinePool{
CPU: &hivev1ovirt.CPU{
Cores: cores,
Sockets: sockets,
},
MemoryMB: memoryMB,
OSDisk: &hivev1ovirt.Disk{
SizeGB: sizeGB,
},
VMType: hivev1ovirt.VMType(vmTypeServer),
},
}
return p
}
func testOvirtClusterDeployment() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.Platform = hivev1.Platform{
Ovirt: &hivev1ovirt.Platform{
CredentialsSecretRef: corev1.LocalObjectReference{
Name: "ovirt-credentials",
},
},
}
return cd
}
| 1 | 19,500 | This is worrisome. | openshift-hive | go |
@@ -219,6 +219,9 @@ type FilesystemTrimDriver interface {
// AutoFilesystemTrimStatus returns the status of auto fs trim
// operations on volumes
AutoFilesystemTrimStatus(request *api.SdkAutoFSTrimStatusRequest) (*api.SdkAutoFSTrimStatusResponse, error)
+ // AutoFilesystemTrimUsage returns the usage of auto fs trim
+ // operations on locally mounted volumes
+ AutoFilesystemTrimUsage(request *api.SdkAutoFSTrimUsageRequest) (*api.SdkAutoFSTrimUsageResponse, error)
// FilesystemTrimStop stops a filesystem trim background operation on
// a specified volume, if any
FilesystemTrimStop(request *api.SdkFilesystemTrimStopRequest) (*api.SdkFilesystemTrimStopResponse, error) | 1 | package volume
import (
"context"
"errors"
"github.com/libopenstorage/openstorage/api"
)
var (
// ErrAlreadyShutdown returned when driver is shutdown
ErrAlreadyShutdown = errors.New("VolumeDriverProvider already shutdown")
	// ErrExist returned when driver is already registered
ErrExist = errors.New("Already exists")
// ErrDriverNotFound returned when a driver is not registered
ErrDriverNotFound = errors.New("Driver implementation not found")
// ErrDriverInitializing returned when a driver is initializing
ErrDriverInitializing = errors.New("Driver is initializing")
// ErrEnoEnt returned when volume does not exist
ErrEnoEnt = errors.New("Volume does not exist.")
// ErrEnomem returned when we are out of memory
ErrEnomem = errors.New("Out of memory.")
// ErrEinval returned when an invalid input is provided
ErrEinval = errors.New("Invalid argument")
// ErrVolDetached returned when volume is in detached state
ErrVolDetached = errors.New("Volume is detached")
// ErrAttachedHostSpecNotFound returned when the attached host's spec is not found
ErrAttachedHostSpecNotFound = errors.New("Spec of the attached host is not found")
// ErrVolAttached returned when volume is in attached state
ErrVolAttached = errors.New("Volume is attached")
	// ErrVolAttachedOnRemoteNode returned when volume is attached on a different node
ErrVolAttachedOnRemoteNode = errors.New("Volume is attached on another node")
// ErrVolAttachedScale returned when volume is attached and can be scaled
ErrVolAttachedScale = errors.New("Volume is attached on another node." +
" Increase scale factor to create more instances")
// ErrVolHasSnaps returned when volume has previous snapshots
ErrVolHasSnaps = errors.New("Volume has snapshots associated")
// ErrNotSupported returned when the operation is not supported
ErrNotSupported = errors.New("Operation not supported")
// ErrVolBusy returned when volume is in busy state
ErrVolBusy = errors.New("Volume is busy")
// ErrAborted returned when capacityUsageInfo cannot be returned
ErrAborted = errors.New("Aborted CapacityUsage request")
// ErrInvalidName returned when Cloudbackup Name/request is invalid
ErrInvalidName = errors.New("Invalid name for cloud backup/restore request")
// ErrFsResizeFailed returned when Filesystem resize failed because of filesystem
// errors
ErrFsResizeFailed = errors.New("Filesystem Resize failed due to filesystem errors")
// ErrNoVolumeUpdate is returned when a volume update has no changes requested
ErrNoVolumeUpdate = errors.New("No change requested")
)
// Constants used by the VolumeDriver
const (
// APIVersion for the volume management apis
APIVersion = "v1"
// PluginAPIBase where the docker unix socket resides
PluginAPIBase = "/run/docker/plugins/"
// DriverAPIBase where the osd unix socket resides
DriverAPIBase = "/var/lib/osd/driver/"
// MountBase for osd mountpoints
MountBase = "/var/lib/osd/mounts/"
// VolumeBase for osd volumes
VolumeBase = "/var/lib/osd/"
)
const (
	// LocationConstraint is a label that specifies data location constraint.
LocationConstraint = "LocationConstraint"
// LocalNode is an alias for this node - similar to localhost.
LocalNode = "LocalNode"
	// FromTrashCan is a label that specifies a volume being in the TrashCan
FromTrashCan = "FromTrashCan"
)
// Store defines the interface for basic volume store operations
type Store interface {
// Lock volume specified by volumeID.
Lock(volumeID string) (interface{}, error)
	// Unlock volume with token obtained from call to Lock.
Unlock(token interface{}) error
	// CreateVol returns error if volume with the same ID already exists.
CreateVol(vol *api.Volume) error
// GetVol from volumeID.
GetVol(volumeID string) (*api.Volume, error)
// UpdateVol with vol
UpdateVol(vol *api.Volume) error
// DeleteVol. Returns error if volume does not exist.
DeleteVol(volumeID string) error
}
// VolumeDriver is the main interface to be implemented by any storage driver.
// Every driver must at minimum implement the ProtoDriver sub interface.
type VolumeDriver interface {
IODriver
ProtoDriver
BlockDriver
Enumerator
}
// IODriver interfaces applicable to object store interfaces.
type IODriver interface {
// Read sz bytes from specified volume at specified offset.
// Return number of bytes read and error.
Read(volumeID string, buf []byte, sz uint64, offset int64) (int64, error)
// Write sz bytes from specified volume at specified offset.
// Return number of bytes written and error.
Write(volumeID string, buf []byte, sz uint64, offset int64) (int64, error)
// Flush writes to stable storage.
// Return error.
Flush(volumeID string) error
}
// SnapshotDriver interface provides snapshot capability
type SnapshotDriver interface {
	// Snapshot creates volume snapshot.
// Errors ErrEnoEnt may be returned
Snapshot(volumeID string, readonly bool, locator *api.VolumeLocator, noRetry bool) (string, error)
// Restore restores volume to specified snapshot.
Restore(volumeID string, snapshotID string) error
// SnapshotGroup takes a snapshot of a group of volumes that can be specified with either of the following
// 1. group ID
// 2. labels
// 3. volumeIDs
// deleteOnFailure indicates whether to delete the successful snaps if some of the snapshots failed
SnapshotGroup(groupID string, labels map[string]string, volumeIDs []string, deleteOnFailure bool) (*api.GroupSnapCreateResponse, error)
}
// StatsDriver interface provides stats features
type StatsDriver interface {
// Stats for specified volume.
// cumulative stats are /proc/diskstats style stats.
// nonCumulative stats are stats for specific duration.
// Errors ErrEnoEnt may be returned
Stats(volumeID string, cumulative bool) (*api.Stats, error)
// UsedSize returns currently used volume size.
// Errors ErrEnoEnt may be returned.
UsedSize(volumeID string) (uint64, error)
// GetActiveRequests get active requests
GetActiveRequests() (*api.ActiveRequests, error)
// CapacityUsage returns both exclusive and shared usage
// of a snap/volume
CapacityUsage(ID string) (*api.CapacityUsageResponse, error)
// VolumeUsageByNode returns capacity usage of all volumes and snaps for a
// given node
VolumeUsageByNode(nodeID string) (*api.VolumeUsageByNode, error)
// RelaxedReclaimPurge triggers the purge of RelaxedReclaim queue for a
// given node
RelaxedReclaimPurge(nodeID string) (*api.RelaxedReclaimPurge, error)
}
type QuiesceDriver interface {
// Freezes mounted filesystem resulting in a quiesced volume state.
// Only one freeze operation may be active at any given time per volume.
// Unfreezes after timeout seconds if it is non-zero.
// An optional quiesceID can be passed for driver-specific use.
Quiesce(volumeID string, timeoutSeconds uint64, quiesceID string) error
// Unfreezes mounted filesystem if it was frozen.
Unquiesce(volumeID string) error
}
// CloudBackupDriver interface provides Cloud backup features
type CloudBackupDriver interface {
// CloudBackupCreate uploads snapshot of a volume to the cloud
CloudBackupCreate(input *api.CloudBackupCreateRequest) (*api.CloudBackupCreateResponse, error)
// CloudBackupGroupCreate creates and then uploads volumegroup snapshots
CloudBackupGroupCreate(input *api.CloudBackupGroupCreateRequest) (*api.CloudBackupGroupCreateResponse, error)
// CloudBackupRestore downloads a cloud backup and restores it to a volume
CloudBackupRestore(input *api.CloudBackupRestoreRequest) (*api.CloudBackupRestoreResponse, error)
// CloudBackupEnumerate enumerates the backups for a given cluster/credential/volumeID
CloudBackupEnumerate(input *api.CloudBackupEnumerateRequest) (*api.CloudBackupEnumerateResponse, error)
// CloudBackupDelete deletes the specified backup in cloud
CloudBackupDelete(input *api.CloudBackupDeleteRequest) error
	// CloudBackupDeleteAll deletes all the backups for a given volume in cloud
CloudBackupDeleteAll(input *api.CloudBackupDeleteAllRequest) error
// CloudBackupStatus indicates the most recent status of backup/restores
CloudBackupStatus(input *api.CloudBackupStatusRequest) (*api.CloudBackupStatusResponse, error)
// CloudBackupCatalog displays listing of backup content
CloudBackupCatalog(input *api.CloudBackupCatalogRequest) (*api.CloudBackupCatalogResponse, error)
// CloudBackupHistory displays past backup/restore operations on a volume
CloudBackupHistory(input *api.CloudBackupHistoryRequest) (*api.CloudBackupHistoryResponse, error)
	// CloudBackupStateChange allows a current backup state transition (pause/resume/stop)
CloudBackupStateChange(input *api.CloudBackupStateChangeRequest) error
// CloudBackupSchedCreate creates a schedule to backup volume to cloud
CloudBackupSchedCreate(input *api.CloudBackupSchedCreateRequest) (*api.CloudBackupSchedCreateResponse, error)
// CloudBackupGroupSchedCreate creates a schedule to backup a volumegroup to cloud
CloudBackupGroupSchedCreate(input *api.CloudBackupGroupSchedCreateRequest) (*api.CloudBackupSchedCreateResponse, error)
	// CloudBackupSchedUpdate updates a schedule to backup volume to cloud
CloudBackupSchedUpdate(input *api.CloudBackupSchedUpdateRequest) error
	// CloudBackupGroupSchedUpdate updates a schedule to backup a volumegroup to cloud
CloudBackupGroupSchedUpdate(input *api.CloudBackupGroupSchedUpdateRequest) error
// CloudBackupSchedDelete delete a backup schedule
CloudBackupSchedDelete(input *api.CloudBackupSchedDeleteRequest) error
// CloudBackupSchedEnumerate enumerates the configured backup schedules in the cluster
CloudBackupSchedEnumerate() (*api.CloudBackupSchedEnumerateResponse, error)
// CloudBackupSize fetches the size of a cloud backup
CloudBackupSize(input *api.SdkCloudBackupSizeRequest) (*api.SdkCloudBackupSizeResponse, error)
}
// CloudMigrateDriver interface provides Cloud migration features
type CloudMigrateDriver interface {
// CloudMigrateStart starts a migrate operation
CloudMigrateStart(request *api.CloudMigrateStartRequest) (*api.CloudMigrateStartResponse, error)
// CloudMigrateCancel cancels a migrate operation
CloudMigrateCancel(request *api.CloudMigrateCancelRequest) error
// CloudMigrateStatus returns status for the migration operations
CloudMigrateStatus(request *api.CloudMigrateStatusRequest) (*api.CloudMigrateStatusResponse, error)
}
// FilesystemTrimDriver interface exposes APIs to manage filesystem trim
// operation on a volume
type FilesystemTrimDriver interface {
// FilesystemTrimStart starts a filesystem trim background operation on a
// specified volume
FilesystemTrimStart(request *api.SdkFilesystemTrimStartRequest) (*api.SdkFilesystemTrimStartResponse, error)
// FilesystemTrimStatus returns the status of a filesystem trim
// background operation on a specified volume, if any
FilesystemTrimStatus(request *api.SdkFilesystemTrimStatusRequest) (*api.SdkFilesystemTrimStatusResponse, error)
// AutoFilesystemTrimStatus returns the status of auto fs trim
// operations on volumes
AutoFilesystemTrimStatus(request *api.SdkAutoFSTrimStatusRequest) (*api.SdkAutoFSTrimStatusResponse, error)
// FilesystemTrimStop stops a filesystem trim background operation on
// a specified volume, if any
FilesystemTrimStop(request *api.SdkFilesystemTrimStopRequest) (*api.SdkFilesystemTrimStopResponse, error)
}
// FilesystemCheckDriver interface exposes APIs to manage filesystem check
// operation on a volume
type FilesystemCheckDriver interface {
// FilesystemCheckStart starts a filesystem check background operation
// on a specified volume
FilesystemCheckStart(request *api.SdkFilesystemCheckStartRequest) (*api.SdkFilesystemCheckStartResponse, error)
// FilesystemCheckStatus returns the status of a filesystem check
// background operation on the filesystem of a specified volume, if any.
FilesystemCheckStatus(request *api.SdkFilesystemCheckStatusRequest) (*api.SdkFilesystemCheckStatusResponse, error)
// FilesystemCheckStop stops the filesystem check background operation on
// the filesystem of a specified volume, if any.
FilesystemCheckStop(request *api.SdkFilesystemCheckStopRequest) (*api.SdkFilesystemCheckStopResponse, error)
}
// ProtoDriver must be implemented by all volume drivers. It specifies the
// most basic functionality, such as creating and deleting volumes.
type ProtoDriver interface {
SnapshotDriver
StatsDriver
QuiesceDriver
CredsDriver
CloudBackupDriver
CloudMigrateDriver
FilesystemTrimDriver
FilesystemCheckDriver
// Name returns the name of the driver.
Name() string
// Type of this driver
Type() api.DriverType
// Version information of the driver
Version() (*api.StorageVersion, error)
// Create a new Vol for the specific volume spec.
// It returns a system generated VolumeID that uniquely identifies the volume
Create(locator *api.VolumeLocator, Source *api.Source, spec *api.VolumeSpec) (string, error)
// Delete volume.
// Errors ErrEnoEnt, ErrVolHasSnaps may be returned.
Delete(volumeID string) error
// Mount volume at specified path
// Errors ErrEnoEnt, ErrVolDetached may be returned.
Mount(ctx context.Context, volumeID string, mountPath string, options map[string]string) error
// MountedAt return volume mounted at specified mountpath.
MountedAt(ctx context.Context, mountPath string) string
// Unmount volume at specified path
// Errors ErrEnoEnt, ErrVolDetached may be returned.
Unmount(ctx context.Context, volumeID string, mountPath string, options map[string]string) error
// Update not all fields of the spec are supported, ErrNotSupported will be thrown for unsupported
// updates.
Set(volumeID string, locator *api.VolumeLocator, spec *api.VolumeSpec) error
// Status returns a set of key-value pairs which give low
// level diagnostic status about this driver.
Status() [][2]string
// Shutdown and cleanup.
Shutdown()
// DU specified volume and potentially the subfolder if provided.
Catalog(volumeid, subfolder string, depth string) (api.CatalogResponse, error)
	// Does a Filesystem Trim operation to free unused space to block device (block discard)
VolService(volumeID string, vsreq *api.VolumeServiceRequest) (*api.VolumeServiceResponse, error)
}
// Enumerator provides a set of interfaces to get details on a set of volumes.
type Enumerator interface {
// Inspect specified volumes.
// Returns slice of volumes that were found.
Inspect(volumeIDs []string) ([]*api.Volume, error)
// Enumerate volumes that map to the volumeLocator. Locator fields may be regexp.
// If locator fields are left blank, this will return all volumes.
Enumerate(locator *api.VolumeLocator, labels map[string]string) ([]*api.Volume, error)
// Enumerate snaps for specified volumes
SnapEnumerate(volID []string, snapLabels map[string]string) ([]*api.Volume, error)
}
// StoreEnumerator combines Store and Enumerator capabilities
type StoreEnumerator interface {
Store
Enumerator
}
// BlockDriver needs to be implemented by block volume drivers. Filesystem volume
// drivers can ignore this interface and include the builtin DefaultBlockDriver.
type BlockDriver interface {
// Attach map device to the host.
// On success the devicePath specifies location where the device is exported
// Errors ErrEnoEnt, ErrVolAttached may be returned.
Attach(ctx context.Context, volumeID string, attachOptions map[string]string) (string, error)
// Detach device from the host.
// Errors ErrEnoEnt, ErrVolDetached may be returned.
Detach(ctx context.Context, volumeID string, options map[string]string) error
}
// CredsDriver provides methods to handle credentials
type CredsDriver interface {
// CredsCreate creates credential for a given cloud provider
CredsCreate(params map[string]string) (string, error)
// CredsUpdate updates credential for an already configured credential
CredsUpdate(name string, params map[string]string) error
// CredsEnumerate lists the configured credentials in the cluster
CredsEnumerate() (map[string]interface{}, error)
// CredsDelete deletes the credential associated credUUID
CredsDelete(credUUID string) error
// CredsValidate validates the credential associated credUUID
CredsValidate(credUUID string) error
	// CredsDeleteReferences deletes any references associated with the credUUID
CredsDeleteReferences(credUUID string) error
}
// VolumeDriverProvider provides VolumeDrivers.
type VolumeDriverProvider interface {
// Get gets the VolumeDriver for the given name.
// If a VolumeDriver was not created for the given name, the error ErrDriverNotFound is returned.
Get(name string) (VolumeDriver, error)
// Shutdown shuts down all volume drivers.
Shutdown() error
}
// VolumeDriverRegistry registers VolumeDrivers.
type VolumeDriverRegistry interface {
VolumeDriverProvider
	// Register creates the VolumeDriver for the given name.
// If a VolumeDriver was already created for the given name, the error ErrExist is returned.
Register(name string, params map[string]string) error
// Add inserts a new VolumeDriver provider with a well known name.
Add(name string, init func(map[string]string) (VolumeDriver, error)) error
// Removes driver from registry. Does nothing if driver name does not exist.
Remove(name string)
}
// NewVolumeDriverRegistry constructs a new VolumeDriverRegistry.
func NewVolumeDriverRegistry(nameToInitFunc map[string]func(map[string]string) (VolumeDriver, error)) VolumeDriverRegistry {
return newVolumeDriverRegistry(nameToInitFunc)
}
| 1 | 8,987 | nit try rephrasing to something like " AutoFilesystemTrimUsage returns the volume usage and trimmable space of locally mounted pxd volumes" | libopenstorage-openstorage | go |
@@ -64,7 +64,7 @@ module Unix
h = self.foss_defaults
h['puppetserver-confdir'] = '/etc/puppetlabs/puppetserver/conf.d'
h['puppetservice'] = 'puppetserver'
- h['puppetbindir'] = '/opt/puppetlabs/agent/bin'
+ h['puppetbindir'] = '/opt/puppetlabs/agent/bin:/opt/puppetlabs/bin'
h['puppetbin'] = "#{h['puppetbindir']}/puppet"
h['puppetpath'] = '/etc/puppetlabs/agent'
h['puppetconfdir'] = "#{h['puppetpath']}/config" | 1 | [ 'host', 'command_factory', 'command', 'options' ].each do |lib|
require "beaker/#{lib}"
end
module Unix
class Host < Beaker::Host
[ 'user', 'group', 'exec', 'pkg', 'file' ].each do |lib|
require "beaker/host/unix/#{lib}"
end
include Unix::User
include Unix::Group
include Unix::File
include Unix::Exec
include Unix::Pkg
def self.pe_defaults
h = Beaker::Options::OptionsHash.new
h.merge({
'user' => 'root',
'group' => 'pe-puppet',
'puppetserver-confdir' => '/etc/puppetlabs/puppetserver/conf.d',
'puppetservice' => 'pe-httpd',
'puppetpath' => '/etc/puppetlabs/puppet',
'puppetconfdir' => '/etc/puppetlabs/puppet',
'puppetbin' => '/opt/puppet/bin/puppet',
'puppetbindir' => '/opt/puppet/bin',
'puppetsbindir' => '/opt/puppet/sbin',
'systembindir' => '/opt/puppet/bin',
'puppetvardir' => '/var/opt/lib/pe-puppet',
'hieradatadir' => '/var/lib/hiera',
'hieraconf' => '/etc/puppetlabs/puppet/hiera.yaml',
'distmoduledir' => '/etc/puppetlabs/puppet/modules',
'sitemoduledir' => '/opt/puppet/share/puppet/modules',
'pathseparator' => ':',
})
end
def self.foss_defaults
h = Beaker::Options::OptionsHash.new
h.merge({
'user' => 'root',
'group' => 'puppet',
'puppetserver-confdir' => '/etc/puppetserver/conf.d',
'puppetservice' => 'puppetmaster',
'puppetpath' => '/etc/puppet',
'puppetconfdir' => '/etc/puppet',
'puppetvardir' => '/var/lib/puppet',
'puppetbin' => '/usr/bin/puppet',
'puppetbindir' => '/usr/bin',
'systembindir' => '/usr/bin',
'hieralibdir' => '/opt/puppet-git-repos/hiera/lib',
'hierapuppetlibdir' => '/opt/puppet-git-repos/hiera-puppet/lib',
'hierabindir' => '/opt/puppet-git-repos/hiera/bin',
'hieradatadir' => '/etc/puppet/hieradata',
'hieraconf' => '/etc/puppet/hiera.yaml',
'distmoduledir' => '/etc/puppet/modules',
'sitemoduledir' => '/usr/share/puppet/modules',
'pathseparator' => ':',
})
end
def self.aio_defaults
h = self.foss_defaults
h['puppetserver-confdir'] = '/etc/puppetlabs/puppetserver/conf.d'
h['puppetservice'] = 'puppetserver'
h['puppetbindir'] = '/opt/puppetlabs/agent/bin'
h['puppetbin'] = "#{h['puppetbindir']}/puppet"
h['puppetpath'] = '/etc/puppetlabs/agent'
h['puppetconfdir'] = "#{h['puppetpath']}/config"
h['puppetcodedir'] = "#{h['puppetpath']}/code"
h['puppetvardir'] = '/opt/puppetlabs/agent/cache'
h['distmoduledir'] = "#{h['puppetcodedir']}/modules"
h['sitemoduledir'] = '/opt/puppetlabs/agent/modules'
h['hieraconf'] = "#{h['puppetcodedir']}/hiera.yaml"
h['hieradatadir'] = "#{h['puppetcodedir']}/hieradata"
h
end
end
end
| 1 | 8,947 | currently the puppet-agent package doesn't symlink files into /opt/puppetlabs/bin. Also many puppet acceptance tests that execute on Unix assume puppetbindir is a single directory (windows doesn't have this issue). So I think if we want to unblock testing, this should just be changed to `h['puppetbindir'] = '/opt/puppetlabs/puppet/bin` | voxpupuli-beaker | rb |
@@ -4,10 +4,10 @@ import Ember from 'ember';
import PouchAdapterUtils from 'hospitalrun/mixins/pouch-adapter-utils';
export default Adapter.extend(PouchAdapterUtils, {
- databaseName: 'config',
- db: Ember.computed.alias('pouchDBService.configDB'),
- pouchDBService: Ember.inject.service('pouchdb'),
-
+ databaseName: 'config',
+ database: Ember.inject.service(),
+ db: Ember.computed.alias('database.configDB'),
+
_mapQuery: function(doc, emit) {
if (doc._id) {
emit(doc._id, null); | 1 | import { Adapter } from 'ember-pouch';
import Ember from 'ember';
import PouchAdapterUtils from 'hospitalrun/mixins/pouch-adapter-utils';
export default Adapter.extend(PouchAdapterUtils, {
databaseName: 'config',
db: Ember.computed.alias('pouchDBService.configDB'),
pouchDBService: Ember.inject.service('pouchdb'),
_mapQuery: function(doc, emit) {
if (doc._id) {
emit(doc._id, null);
}
},
findQuery: function(store, type, query, options) {
if (!query.exactKeys) {
return this._super(store, type, query, options);
} else {
var self = this,
queryParams = {
reduce: false,
include_docs: true
};
queryParams.keys = query.exactKeys.map(function(key) {
return this.get('pouchDBService').getPouchId(key, 'config');
}.bind(this));
return new Ember.RSVP.Promise(function(resolve, reject){
self._getDb().then(function(db){
try {
db.query(self._mapQuery, queryParams, function(err, response) {
if (err) {
self._pouchError(reject)(err);
} else {
if (response.rows) {
var data = Ember.A(response.rows).mapBy('doc');
Ember.run(null, resolve, data);
}
}
});
} catch (err){
self._pouchError(reject)(err);
}
}, self._pouchError(reject));
}, "findQuery in config-pouchdb-adapter");
}
}
}); | 1 | 12,421 | any point in keeping this here ? | HospitalRun-hospitalrun-frontend | js |
@@ -233,8 +233,8 @@ type GossipData struct {
}
func (g *GossipData) Merge(o router.GossipData) {
- checkAndPanic(CaseSensitive(g.Entries))
- defer func() { checkAndPanic(CaseSensitive(g.Entries)) }()
+ checkAndPanic(CaseInsensitive(g.Entries))
+ defer func() { checkAndPanic(CaseInsensitive(g.Entries)) }()
other := o.(*GossipData)
g.Entries.merge(other.Entries)
if g.Timestamp < other.Timestamp { | 1 | package nameserver
import (
"bytes"
"encoding/gob"
"fmt"
"sort"
"strings"
"time"
"github.com/weaveworks/weave/net/address"
"github.com/weaveworks/weave/router"
)
var now = func() int64 { return time.Now().Unix() }
type Entry struct {
ContainerID string
Origin router.PeerName
Addr address.Address
Hostname string
Version int
Tombstone int64 // timestamp of when it was deleted
}
type Entries []Entry
type CaseSensitive Entries
type CaseInsensitive Entries
type SortableEntries interface {
sort.Interface
Get(i int) Entry
}
// Gossip messages are sorted in a case sensitive order...
func (es CaseSensitive) Len() int { return len(es) }
func (es CaseSensitive) Swap(i, j int) { es[i], es[j] = es[j], es[i] }
func (es CaseSensitive) Get(i int) Entry { return es[i] }
func (es CaseSensitive) Less(i, j int) bool { return es[i].less(&es[j]) }
// ... but we store entries in a case insensitive order.
func (es CaseInsensitive) Len() int { return len(es) }
func (es CaseInsensitive) Swap(i, j int) { es[i], es[j] = es[j], es[i] }
func (es CaseInsensitive) Get(i int) Entry { return es[i] }
func (es CaseInsensitive) Less(i, j int) bool { return es[i].insensitiveLess(&es[j]) }
func (e1 Entry) equal(e2 Entry) bool {
return e1.ContainerID == e2.ContainerID &&
e1.Origin == e2.Origin &&
e1.Addr == e2.Addr &&
e1.Hostname == e2.Hostname
}
func (e1 *Entry) less(e2 *Entry) bool {
// Entries are kept sorted by Hostname, Origin, ContainerID then address
switch {
case e1.Hostname != e2.Hostname:
return e1.Hostname < e2.Hostname
case e1.Origin != e2.Origin:
return e1.Origin < e2.Origin
case e1.ContainerID != e2.ContainerID:
return e1.ContainerID < e2.ContainerID
default:
return e1.Addr < e2.Addr
}
}
func (e1 *Entry) insensitiveLess(e2 *Entry) bool {
// Entries are kept sorted by Hostname, Origin, ContainerID then address
e1Hostname, e2Hostname := strings.ToLower(e1.Hostname), strings.ToLower(e2.Hostname)
switch {
case e1Hostname != e2Hostname:
return e1Hostname < e2Hostname
case e1.Origin != e2.Origin:
return e1.Origin < e2.Origin
case e1.ContainerID != e2.ContainerID:
return e1.ContainerID < e2.ContainerID
default:
return e1.Addr < e2.Addr
}
}
// returns true to indicate a change
func (e1 *Entry) merge(e2 *Entry) bool {
	// we know container id, origin, addr and hostname are equal
if e2.Version > e1.Version {
e1.Version = e2.Version
e1.Tombstone = e2.Tombstone
return true
}
return false
}
func (e1 *Entry) String() string {
return fmt.Sprintf("%s -> %s", e1.Hostname, e1.Addr.String())
}
func check(es SortableEntries) error {
if !sort.IsSorted(es) {
return fmt.Errorf("Not sorted!")
}
for i := 1; i < es.Len(); i++ {
if es.Get(i).equal(es.Get(i - 1)) {
return fmt.Errorf("Duplicate entry: %d:%v and %d:%v", i-1, es.Get(i-1), i, es.Get(i))
}
}
return nil
}
func checkAndPanic(es SortableEntries) {
if err := check(es); err != nil {
panic(err)
}
}
func (es *Entries) checkAndPanic() *Entries {
checkAndPanic(CaseInsensitive(*es))
return es
}
func (es *Entries) add(hostname, containerid string, origin router.PeerName, addr address.Address) Entry {
defer es.checkAndPanic().checkAndPanic()
entry := Entry{Hostname: hostname, Origin: origin, ContainerID: containerid, Addr: addr}
i := sort.Search(len(*es), func(i int) bool {
return !(*es)[i].insensitiveLess(&entry)
})
if i < len(*es) && (*es)[i].equal(entry) {
if (*es)[i].Tombstone > 0 {
(*es)[i].Tombstone = 0
(*es)[i].Version++
}
} else {
*es = append(*es, Entry{})
copy((*es)[i+1:], (*es)[i:])
(*es)[i] = entry
}
return (*es)[i]
}
func (es *Entries) merge(incoming Entries) Entries {
defer es.checkAndPanic().checkAndPanic()
newEntries := Entries{}
i := 0
for _, entry := range incoming {
for i < len(*es) && (*es)[i].insensitiveLess(&entry) {
i++
}
if i < len(*es) && (*es)[i].equal(entry) {
if (*es)[i].merge(&entry) {
newEntries = append(newEntries, entry)
}
} else {
*es = append(*es, Entry{})
copy((*es)[i+1:], (*es)[i:])
(*es)[i] = entry
newEntries = append(newEntries, entry)
}
}
return newEntries
}
// f returning true means keep the entry.
func (es *Entries) tombstone(ourname router.PeerName, f func(*Entry) bool) Entries {
defer es.checkAndPanic().checkAndPanic()
tombstoned := Entries{}
for i, e := range *es {
if f(&e) && e.Origin == ourname {
e.Version++
e.Tombstone = now()
(*es)[i] = e
tombstoned = append(tombstoned, e)
}
}
return tombstoned
}
func (es *Entries) filter(f func(*Entry) bool) {
defer es.checkAndPanic().checkAndPanic()
i := 0
for _, e := range *es {
if !f(&e) {
continue
}
(*es)[i] = e
i++
}
*es = (*es)[:i]
}
func (es Entries) lookup(hostname string) Entries {
es.checkAndPanic()
lowerHostname := strings.ToLower(hostname)
i := sort.Search(len(es), func(i int) bool {
return strings.ToLower(es[i].Hostname) >= lowerHostname
})
if i >= len(es) || strings.ToLower(es[i].Hostname) != lowerHostname {
return Entries{}
}
j := sort.Search(len(es)-i, func(j int) bool {
return strings.ToLower(es[i+j].Hostname) > lowerHostname
})
return es[i : i+j]
}
func (es *Entries) first(f func(*Entry) bool) (*Entry, error) {
es.checkAndPanic()
for _, e := range *es {
if f(&e) {
return &e, nil
}
}
return nil, fmt.Errorf("Not found")
}
type GossipData struct {
Timestamp int64
Entries
}
func (g *GossipData) Merge(o router.GossipData) {
checkAndPanic(CaseSensitive(g.Entries))
defer func() { checkAndPanic(CaseSensitive(g.Entries)) }()
other := o.(*GossipData)
g.Entries.merge(other.Entries)
if g.Timestamp < other.Timestamp {
g.Timestamp = other.Timestamp
}
}
func (g *GossipData) Encode() [][]byte {
checkAndPanic(CaseSensitive(g.Entries))
buf := &bytes.Buffer{}
if err := gob.NewEncoder(buf).Encode(g); err != nil {
panic(err)
}
return [][]byte{buf.Bytes()}
}
| 1 | 11,526 | I don't think we need this check. Instead `Entries.merge` should check that its argument is correctly sorted (in addition to the existing pre/post check on the method receiver). | weaveworks-weave | go |
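
Sketch of one way the suggestion above might be implemented against the Entries.merge method shown earlier: the incoming argument is validated inside merge itself, in addition to the existing pre/post checks on the receiver. All names here come from the file above; the only assumption is that incoming is expected in the same case-insensitive order the store uses. The merge loop itself is elided and stays as in the original:

func (es *Entries) merge(incoming Entries) Entries {
	// validate the argument as well as the receiver: the linear merge below
	// relies on both slices being sorted case-insensitively
	checkAndPanic(CaseInsensitive(incoming))
	defer es.checkAndPanic().checkAndPanic()
	newEntries := Entries{}
	// ... existing merge loop from the file above, unchanged ...
	return newEntries
}
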
@@ -1,6 +1,11 @@
#include "engine/routing_algorithms/shortest_path.hpp"
+
+#ifdef OSRM_EXTERNAL_MEMORY
+#include "routing/routing_base_offline.hpp"
+#else
#include "engine/routing_algorithms/routing_base_ch.hpp"
#include "engine/routing_algorithms/routing_base_mld.hpp"
+#endif
#include <boost/assert.hpp>
#include <boost/optional.hpp> | 1 | #include "engine/routing_algorithms/shortest_path.hpp"
#include "engine/routing_algorithms/routing_base_ch.hpp"
#include "engine/routing_algorithms/routing_base_mld.hpp"
#include <boost/assert.hpp>
#include <boost/optional.hpp>
#include <memory>
namespace osrm
{
namespace engine
{
namespace routing_algorithms
{
namespace
{
const static constexpr bool DO_NOT_FORCE_LOOP = false;
// allows a uturn at the target_phantom
// searches source forward/reverse -> target forward/reverse
template <typename Algorithm>
void searchWithUTurn(SearchEngineData<Algorithm> &engine_working_data,
const DataFacade<Algorithm> &facade,
typename SearchEngineData<Algorithm>::QueryHeap &forward_heap,
typename SearchEngineData<Algorithm>::QueryHeap &reverse_heap,
const bool search_from_forward_node,
const bool search_from_reverse_node,
const bool search_to_forward_node,
const bool search_to_reverse_node,
const PhantomNode &source_phantom,
const PhantomNode &target_phantom,
const int total_weight_to_forward,
const int total_weight_to_reverse,
int &new_total_weight,
std::vector<NodeID> &leg_packed_path)
{
forward_heap.Clear();
reverse_heap.Clear();
if (search_from_forward_node)
{
forward_heap.Insert(source_phantom.forward_segment_id.id,
-source_phantom.GetForwardWeightPlusOffset(),
source_phantom.forward_segment_id.id);
}
if (search_from_reverse_node)
{
forward_heap.Insert(source_phantom.reverse_segment_id.id,
-source_phantom.GetReverseWeightPlusOffset(),
source_phantom.reverse_segment_id.id);
}
if (search_to_forward_node)
{
reverse_heap.Insert(target_phantom.forward_segment_id.id,
target_phantom.GetForwardWeightPlusOffset(),
target_phantom.forward_segment_id.id);
}
if (search_to_reverse_node)
{
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
target_phantom.GetReverseWeightPlusOffset(),
target_phantom.reverse_segment_id.id);
}
    // this is only relevant if source and target are on the same compressed edge
auto is_oneway_source = !(search_from_forward_node && search_from_reverse_node);
auto is_oneway_target = !(search_to_forward_node && search_to_reverse_node);
// we only enable loops here if we can't search from forward to backward node
auto needs_loop_forwards = is_oneway_source && needsLoopForward(source_phantom, target_phantom);
auto needs_loop_backwards =
is_oneway_target && needsLoopBackwards(source_phantom, target_phantom);
search(engine_working_data,
facade,
forward_heap,
reverse_heap,
new_total_weight,
leg_packed_path,
needs_loop_forwards,
needs_loop_backwards,
{source_phantom, target_phantom});
// if no route is found between two parts of the via-route, the entire route becomes
// invalid. Adding to invalid edge weight sadly doesn't return an invalid edge weight. Here
// we prevent the possible overflow, faking the addition of infinity + x == infinity
if (new_total_weight != INVALID_EDGE_WEIGHT)
new_total_weight += std::min(total_weight_to_forward, total_weight_to_reverse);
}
// searches shortest path between:
// source forward/reverse -> target forward
// source forward/reverse -> target reverse
template <typename Algorithm>
void search(SearchEngineData<Algorithm> &engine_working_data,
const DataFacade<Algorithm> &facade,
typename SearchEngineData<Algorithm>::QueryHeap &forward_heap,
typename SearchEngineData<Algorithm>::QueryHeap &reverse_heap,
const bool search_from_forward_node,
const bool search_from_reverse_node,
const bool search_to_forward_node,
const bool search_to_reverse_node,
const PhantomNode &source_phantom,
const PhantomNode &target_phantom,
const int total_weight_to_forward,
const int total_weight_to_reverse,
int &new_total_weight_to_forward,
int &new_total_weight_to_reverse,
std::vector<NodeID> &leg_packed_path_forward,
std::vector<NodeID> &leg_packed_path_reverse)
{
if (search_to_forward_node)
{
forward_heap.Clear();
reverse_heap.Clear();
reverse_heap.Insert(target_phantom.forward_segment_id.id,
target_phantom.GetForwardWeightPlusOffset(),
target_phantom.forward_segment_id.id);
if (search_from_forward_node)
{
forward_heap.Insert(source_phantom.forward_segment_id.id,
total_weight_to_forward -
source_phantom.GetForwardWeightPlusOffset(),
source_phantom.forward_segment_id.id);
}
if (search_from_reverse_node)
{
forward_heap.Insert(source_phantom.reverse_segment_id.id,
total_weight_to_reverse -
source_phantom.GetReverseWeightPlusOffset(),
source_phantom.reverse_segment_id.id);
}
search(engine_working_data,
facade,
forward_heap,
reverse_heap,
new_total_weight_to_forward,
leg_packed_path_forward,
needsLoopForward(source_phantom, target_phantom),
routing_algorithms::DO_NOT_FORCE_LOOP,
{source_phantom, target_phantom});
}
if (search_to_reverse_node)
{
forward_heap.Clear();
reverse_heap.Clear();
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
target_phantom.GetReverseWeightPlusOffset(),
target_phantom.reverse_segment_id.id);
if (search_from_forward_node)
{
forward_heap.Insert(source_phantom.forward_segment_id.id,
total_weight_to_forward -
source_phantom.GetForwardWeightPlusOffset(),
source_phantom.forward_segment_id.id);
}
if (search_from_reverse_node)
{
forward_heap.Insert(source_phantom.reverse_segment_id.id,
total_weight_to_reverse -
source_phantom.GetReverseWeightPlusOffset(),
source_phantom.reverse_segment_id.id);
}
search(engine_working_data,
facade,
forward_heap,
reverse_heap,
new_total_weight_to_reverse,
leg_packed_path_reverse,
routing_algorithms::DO_NOT_FORCE_LOOP,
needsLoopBackwards(source_phantom, target_phantom),
{source_phantom, target_phantom});
}
}
template <typename Algorithm>
void unpackLegs(const DataFacade<Algorithm> &facade,
const std::vector<PhantomNodes> &phantom_nodes_vector,
const std::vector<NodeID> &total_packed_path,
const std::vector<std::size_t> &packed_leg_begin,
const EdgeWeight shortest_path_weight,
InternalRouteResult &raw_route_data)
{
raw_route_data.unpacked_path_segments.resize(packed_leg_begin.size() - 1);
raw_route_data.shortest_path_weight = shortest_path_weight;
for (const auto current_leg : util::irange<std::size_t>(0UL, packed_leg_begin.size() - 1))
{
auto leg_begin = total_packed_path.begin() + packed_leg_begin[current_leg];
auto leg_end = total_packed_path.begin() + packed_leg_begin[current_leg + 1];
const auto &unpack_phantom_node_pair = phantom_nodes_vector[current_leg];
unpackPath(facade,
leg_begin,
leg_end,
unpack_phantom_node_pair,
raw_route_data.unpacked_path_segments[current_leg]);
raw_route_data.source_traversed_in_reverse.push_back(
(*leg_begin != phantom_nodes_vector[current_leg].source_phantom.forward_segment_id.id));
raw_route_data.target_traversed_in_reverse.push_back(
(*std::prev(leg_end) !=
phantom_nodes_vector[current_leg].target_phantom.forward_segment_id.id));
}
}
}
template <typename Algorithm>
InternalRouteResult shortestPathSearch(SearchEngineData<Algorithm> &engine_working_data,
const DataFacade<Algorithm> &facade,
const std::vector<PhantomNodes> &phantom_nodes_vector,
const boost::optional<bool> continue_straight_at_waypoint)
{
InternalRouteResult raw_route_data;
raw_route_data.segment_end_coordinates = phantom_nodes_vector;
const bool allow_uturn_at_waypoint =
!(continue_straight_at_waypoint ? *continue_straight_at_waypoint
: facade.GetContinueStraightDefault());
engine_working_data.InitializeOrClearFirstThreadLocalStorage(facade.GetNumberOfNodes());
auto &forward_heap = *engine_working_data.forward_heap_1;
auto &reverse_heap = *engine_working_data.reverse_heap_1;
int total_weight_to_forward = 0;
int total_weight_to_reverse = 0;
bool search_from_forward_node =
phantom_nodes_vector.front().source_phantom.IsValidForwardSource();
bool search_from_reverse_node =
phantom_nodes_vector.front().source_phantom.IsValidReverseSource();
std::vector<NodeID> prev_packed_leg_to_forward;
std::vector<NodeID> prev_packed_leg_to_reverse;
std::vector<NodeID> total_packed_path_to_forward;
std::vector<std::size_t> packed_leg_to_forward_begin;
std::vector<NodeID> total_packed_path_to_reverse;
std::vector<std::size_t> packed_leg_to_reverse_begin;
std::size_t current_leg = 0;
// this implements a dynamic program that finds the shortest route through
// a list of vias
for (const auto &phantom_node_pair : phantom_nodes_vector)
{
int new_total_weight_to_forward = INVALID_EDGE_WEIGHT;
int new_total_weight_to_reverse = INVALID_EDGE_WEIGHT;
std::vector<NodeID> packed_leg_to_forward;
std::vector<NodeID> packed_leg_to_reverse;
const auto &source_phantom = phantom_node_pair.source_phantom;
const auto &target_phantom = phantom_node_pair.target_phantom;
bool search_to_forward_node = target_phantom.IsValidForwardTarget();
bool search_to_reverse_node = target_phantom.IsValidReverseTarget();
BOOST_ASSERT(!search_from_forward_node || source_phantom.IsValidForwardSource());
BOOST_ASSERT(!search_from_reverse_node || source_phantom.IsValidReverseSource());
if (search_to_reverse_node || search_to_forward_node)
{
if (allow_uturn_at_waypoint)
{
searchWithUTurn(engine_working_data,
facade,
forward_heap,
reverse_heap,
search_from_forward_node,
search_from_reverse_node,
search_to_forward_node,
search_to_reverse_node,
source_phantom,
target_phantom,
total_weight_to_forward,
total_weight_to_reverse,
new_total_weight_to_forward,
packed_leg_to_forward);
// if only the reverse node is valid (e.g. when using the match plugin) we
// actually need to move
if (!target_phantom.IsValidForwardTarget())
{
BOOST_ASSERT(target_phantom.IsValidReverseTarget());
new_total_weight_to_reverse = new_total_weight_to_forward;
packed_leg_to_reverse = std::move(packed_leg_to_forward);
new_total_weight_to_forward = INVALID_EDGE_WEIGHT;
// (*)
//
// Below we have to check if new_total_weight_to_forward is invalid.
// This prevents use-after-move on packed_leg_to_forward.
}
else if (target_phantom.IsValidReverseTarget())
{
new_total_weight_to_reverse = new_total_weight_to_forward;
packed_leg_to_reverse = packed_leg_to_forward;
}
}
else
{
search(engine_working_data,
facade,
forward_heap,
reverse_heap,
search_from_forward_node,
search_from_reverse_node,
search_to_forward_node,
search_to_reverse_node,
source_phantom,
target_phantom,
total_weight_to_forward,
total_weight_to_reverse,
new_total_weight_to_forward,
new_total_weight_to_reverse,
packed_leg_to_forward,
packed_leg_to_reverse);
}
}
// Note: To make sure we do not access the moved-from packed_leg_to_forward
// we guard its access by a check for invalid edge weight. See (*) above.
// No path found for both target nodes?
if ((INVALID_EDGE_WEIGHT == new_total_weight_to_forward) &&
(INVALID_EDGE_WEIGHT == new_total_weight_to_reverse))
{
return raw_route_data;
}
// we need to figure out how the new legs connect to the previous ones
if (current_leg > 0)
{
bool forward_to_forward =
(new_total_weight_to_forward != INVALID_EDGE_WEIGHT) &&
packed_leg_to_forward.front() == source_phantom.forward_segment_id.id;
bool reverse_to_forward =
(new_total_weight_to_forward != INVALID_EDGE_WEIGHT) &&
packed_leg_to_forward.front() == source_phantom.reverse_segment_id.id;
bool forward_to_reverse =
(new_total_weight_to_reverse != INVALID_EDGE_WEIGHT) &&
packed_leg_to_reverse.front() == source_phantom.forward_segment_id.id;
bool reverse_to_reverse =
(new_total_weight_to_reverse != INVALID_EDGE_WEIGHT) &&
packed_leg_to_reverse.front() == source_phantom.reverse_segment_id.id;
BOOST_ASSERT(!forward_to_forward || !reverse_to_forward);
BOOST_ASSERT(!forward_to_reverse || !reverse_to_reverse);
// in this case we always need to copy
if (forward_to_forward && forward_to_reverse)
{
// in this case we copy the path leading to the source forward node
// and change the case
total_packed_path_to_reverse = total_packed_path_to_forward;
packed_leg_to_reverse_begin = packed_leg_to_forward_begin;
forward_to_reverse = false;
reverse_to_reverse = true;
}
else if (reverse_to_forward && reverse_to_reverse)
{
total_packed_path_to_forward = total_packed_path_to_reverse;
packed_leg_to_forward_begin = packed_leg_to_reverse_begin;
reverse_to_forward = false;
forward_to_forward = true;
}
BOOST_ASSERT(!forward_to_forward || !forward_to_reverse);
BOOST_ASSERT(!reverse_to_forward || !reverse_to_reverse);
// in this case we just need to swap to regain the correct mapping
if (reverse_to_forward || forward_to_reverse)
{
total_packed_path_to_forward.swap(total_packed_path_to_reverse);
packed_leg_to_forward_begin.swap(packed_leg_to_reverse_begin);
}
}
if (new_total_weight_to_forward != INVALID_EDGE_WEIGHT)
{
BOOST_ASSERT(target_phantom.IsValidForwardTarget());
packed_leg_to_forward_begin.push_back(total_packed_path_to_forward.size());
total_packed_path_to_forward.insert(total_packed_path_to_forward.end(),
packed_leg_to_forward.begin(),
packed_leg_to_forward.end());
search_from_forward_node = true;
}
else
{
total_packed_path_to_forward.clear();
packed_leg_to_forward_begin.clear();
search_from_forward_node = false;
}
if (new_total_weight_to_reverse != INVALID_EDGE_WEIGHT)
{
BOOST_ASSERT(target_phantom.IsValidReverseTarget());
packed_leg_to_reverse_begin.push_back(total_packed_path_to_reverse.size());
total_packed_path_to_reverse.insert(total_packed_path_to_reverse.end(),
packed_leg_to_reverse.begin(),
packed_leg_to_reverse.end());
search_from_reverse_node = true;
}
else
{
total_packed_path_to_reverse.clear();
packed_leg_to_reverse_begin.clear();
search_from_reverse_node = false;
}
prev_packed_leg_to_forward = std::move(packed_leg_to_forward);
prev_packed_leg_to_reverse = std::move(packed_leg_to_reverse);
total_weight_to_forward = new_total_weight_to_forward;
total_weight_to_reverse = new_total_weight_to_reverse;
++current_leg;
}
BOOST_ASSERT(total_weight_to_forward != INVALID_EDGE_WEIGHT ||
total_weight_to_reverse != INVALID_EDGE_WEIGHT);
// We make sure the fastest route is always in packed_legs_to_forward
if (total_weight_to_forward < total_weight_to_reverse ||
(total_weight_to_forward == total_weight_to_reverse &&
total_packed_path_to_forward.size() < total_packed_path_to_reverse.size()))
{
// insert sentinel
packed_leg_to_forward_begin.push_back(total_packed_path_to_forward.size());
BOOST_ASSERT(packed_leg_to_forward_begin.size() == phantom_nodes_vector.size() + 1);
unpackLegs(facade,
phantom_nodes_vector,
total_packed_path_to_forward,
packed_leg_to_forward_begin,
total_weight_to_forward,
raw_route_data);
}
else
{
// insert sentinel
packed_leg_to_reverse_begin.push_back(total_packed_path_to_reverse.size());
BOOST_ASSERT(packed_leg_to_reverse_begin.size() == phantom_nodes_vector.size() + 1);
unpackLegs(facade,
phantom_nodes_vector,
total_packed_path_to_reverse,
packed_leg_to_reverse_begin,
total_weight_to_reverse,
raw_route_data);
}
return raw_route_data;
}
template InternalRouteResult
shortestPathSearch(SearchEngineData<ch::Algorithm> &engine_working_data,
const DataFacade<ch::Algorithm> &facade,
const std::vector<PhantomNodes> &phantom_nodes_vector,
const boost::optional<bool> continue_straight_at_waypoint);
template InternalRouteResult
shortestPathSearch(SearchEngineData<corech::Algorithm> &engine_working_data,
const DataFacade<corech::Algorithm> &facade,
const std::vector<PhantomNodes> &phantom_nodes_vector,
const boost::optional<bool> continue_straight_at_waypoint);
template InternalRouteResult
shortestPathSearch(SearchEngineData<mld::Algorithm> &engine_working_data,
const DataFacade<mld::Algorithm> &facade,
const std::vector<PhantomNodes> &phantom_nodes_vector,
const boost::optional<bool> continue_straight_at_waypoint);
} // namespace routing_algorithms
} // namespace engine
} // namespace osrm
| 1 | 22,435 | This is not really good to mix different code bases. @TheMarex I understand the point about placing code into cpp file, but we still need to move templated version into an impl header, remove references to offline code here and keep template instantiations in a cpp file to avoid rebuild. So offline code could instantiate own implementations using private impl headers. Otherwise we again make wrong dependencies into OSRM code and block changes in separate projects. | Project-OSRM-osrm-backend | cpp |