| patch (stringlengths, 17 to 31.2k) | y (int64, 1 to 1) | oldf (stringlengths, 0 to 2.21M) | idx (int64, 1 to 1) | id (int64, 4.29k to 68.4k) | msg (stringlengths, 8 to 843) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values) |
| --- | --- | --- | --- | --- | --- | --- | --- |
@@ -0,0 +1,15 @@
+import re
+import logging
+from localstack.utils.common import to_str, to_bytes
+from localstack.services.generic_proxy import ProxyListener
+from moto.elbv2 import urls
+
+LOG = logging.getLogger(__name__)
+
+
+class ProxyListenerELBV2(ProxyListener):
+ pass
+
+
+# # instantiate listener
+UPDATE_ELBV2 = ProxyListenerELBV2() | 1 | 1 | 12,106 | As we're currently not applying any logic in the proxy listener, I guess we can simply remove this file (and remove the `listener` argument from the `register_plugin(..)` call above). | localstack-localstack | py |
|
@@ -86,6 +86,7 @@ func (w *DefaultWorker) Generate(ctx context.Context,
Proof: proof,
StateRoot: newStateTreeCid,
Ticket: ticket,
+ Timestamp: types.Uint64(time.Now().Unix()),
}
for i, msg := range res.PermanentFailures { | 1 | package mining
// Block generation is part of the logic of the DefaultWorker.
// 'generate' is that function that actually creates a new block from a base
// TipSet using the DefaultWorker's many utilities.
import (
"context"
"time"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/types"
"github.com/filecoin-project/go-filecoin/vm"
)
// Generate returns a new block created from the messages in the pool.
func (w *DefaultWorker) Generate(ctx context.Context,
baseTipSet types.TipSet,
ticket types.Signature,
proof types.PoStProof,
nullBlockCount uint64) (*types.Block, error) {
generateTimer := time.Now()
defer func() {
log.Infof("[TIMER] DefaultWorker.Generate baseTipset: %s - elapsed time: %s", baseTipSet.String(), time.Since(generateTimer).Round(time.Millisecond))
}()
stateTree, err := w.getStateTree(ctx, baseTipSet)
if err != nil {
return nil, errors.Wrap(err, "get state tree")
}
if !w.powerTable.HasPower(ctx, stateTree, w.blockstore, w.minerAddr) {
return nil, errors.Errorf("bad miner address, miner must store files before mining: %s", w.minerAddr)
}
weight, err := w.getWeight(ctx, baseTipSet)
if err != nil {
return nil, errors.Wrap(err, "get weight")
}
baseHeight, err := baseTipSet.Height()
if err != nil {
return nil, errors.Wrap(err, "get base tip set height")
}
blockHeight := baseHeight + nullBlockCount + 1
ancestors, err := w.getAncestors(ctx, baseTipSet, types.NewBlockHeight(blockHeight))
if err != nil {
return nil, errors.Wrap(err, "get base tip set ancestors")
}
pending := w.messageSource.Pending()
mq := NewMessageQueue(pending)
messages := mq.Drain()
vms := vm.NewStorageMap(w.blockstore)
res, err := w.processor.ApplyMessagesAndPayRewards(ctx, stateTree, vms, messages, w.minerOwnerAddr, types.NewBlockHeight(blockHeight), ancestors)
if err != nil {
return nil, errors.Wrap(err, "generate apply messages")
}
newStateTreeCid, err := stateTree.Flush(ctx)
if err != nil {
return nil, errors.Wrap(err, "generate flush state tree")
}
if err = vms.Flush(); err != nil {
return nil, errors.Wrap(err, "generate flush vm storage map")
}
var receipts []*types.MessageReceipt
for _, r := range res.Results {
receipts = append(receipts, r.Receipt)
}
next := &types.Block{
Miner: w.minerAddr,
Height: types.Uint64(blockHeight),
Messages: res.SuccessfulMessages,
MessageReceipts: receipts,
Parents: baseTipSet.ToSortedCidSet(),
ParentWeight: types.Uint64(weight),
Proof: proof,
StateRoot: newStateTreeCid,
Ticket: ticket,
}
for i, msg := range res.PermanentFailures {
// We will not be able to apply this message in the future because the error was permanent.
// Therefore, we will remove it from the MessagePool now.
// There might be better places to do this, such as wherever successful messages are removed
// from the pool, or by posting the failure to an event bus to be handled async.
log.Infof("permanent ApplyMessage failure, [%s] (%s)", msg, res.PermanentErrors[i])
mc, err := msg.Cid()
if err == nil {
w.messageSource.Remove(mc)
} else {
log.Warningf("failed to get CID from message", err)
}
}
for i, msg := range res.TemporaryFailures {
// We might be able to apply this message in the future because the error was temporary.
// Therefore, we will leave it in the MessagePool for now.
log.Infof("temporary ApplyMessage failure, [%s] (%s)", msg, res.TemporaryErrors[i])
}
return next, nil
}
| 1 | 19,642 | Isn't the point of the clock module to encapsulate access to `time.Now()`? Coming later? | filecoin-project-venus | go |
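The review comment above alludes to a common testing pattern: rather than calling `time.Now()` directly when stamping a block, the code would ask an injected clock, so tests can control the timestamp. The sketch below is only a generic illustration of that pattern, written in Python even though the row itself is Go; the class and function names are invented for the example and are not part of go-filecoin.

```python
import time


class SystemClock:
    """Production clock: reads the real wall-clock time."""

    def now(self) -> float:
        return time.time()


class FakeClock:
    """Test clock: always returns the instant the test configured."""

    def __init__(self, fixed: float) -> None:
        self._fixed = fixed

    def now(self) -> float:
        return self._fixed


def build_block_header(clock) -> dict:
    # The block-building code consults the injected clock rather than
    # calling time.time() directly, so tests get deterministic timestamps.
    return {"timestamp": int(clock.now())}


# Production wiring: build_block_header(SystemClock())
# In a test:         build_block_header(FakeClock(1_560_000_000.0))
```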
@@ -117,6 +117,10 @@ func generateFastUpgradeConsensus() (fastUpgradeProtocols config.ConsensusProtoc
}
fastUpgradeProtocols[consensusTestFastUpgrade(proto)] = fastParams
+
+ // support the ALGOSMALLLAMBDAMSEC = 500 env variable
+ fastParams.AgreementFilterTimeout = time.Second
+ fastParams.AgreementFilterTimeoutPeriod0 = time.Second
}
return
} | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package upgrades
import (
"math/rand"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
)
func GenerateRandomBytes(n int) []byte {
b := make([]byte, n)
_, err := rand.Read(b)
// Note that err == nil only if we read len(b) bytes.
if err != nil {
return nil
}
return b
}
// this test checks that two accounts can send money to one another
// across a protocol upgrade.
func TestAccountsCanSendMoneyAcrossUpgradeV7toV8(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV7Upgrade.json"))
}
func TestAccountsCanSendMoneyAcrossUpgradeV8toV9(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV8Upgrade.json"))
}
func TestAccountsCanSendMoneyAcrossUpgradeV9toV10(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV9Upgrade.json"))
}
func TestAccountsCanSendMoneyAcrossUpgradeV10toV11(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV10Upgrade.json"))
}
func TestAccountsCanSendMoneyAcrossUpgradeV11toV12(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV11Upgrade.json"))
}
func TestAccountsCanSendMoneyAcrossUpgradeV12toV13(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV12Upgrade.json"))
}
func TestAccountsCanSendMoneyAcrossUpgradeV13toV14(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV13Upgrade.json"))
}
func TestAccountsCanSendMoneyAcrossUpgradeV14toV15(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV14Upgrade.json"))
}
func TestAccountsCanSendMoneyAcrossUpgradeV15toV16(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV15Upgrade.json"))
}
func TestAccountsCanSendMoneyAcrossUpgradeV21toV22(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV21Upgrade.json"))
}
func TestAccountsCanSendMoneyAcrossUpgradeV22toV23(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV22Upgrade.json"))
}
func TestAccountsCanSendMoneyAcrossUpgradeV23toV24(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV23Upgrade.json"))
}
// ConsensusTestFastUpgrade is meant for testing of protocol upgrades:
// during testing, it is equivalent to another protocol with the exception
// of the upgrade parameters, which allow for upgrades to take place after
// only a few rounds.
func consensusTestFastUpgrade(proto protocol.ConsensusVersion) protocol.ConsensusVersion {
return "test-fast-upgrade-" + proto
}
func generateFastUpgradeConsensus() (fastUpgradeProtocols config.ConsensusProtocols) {
fastUpgradeProtocols = make(config.ConsensusProtocols)
for proto, params := range config.Consensus {
fastParams := params
fastParams.UpgradeVoteRounds = 5
fastParams.UpgradeThreshold = 3
fastParams.DefaultUpgradeWaitRounds = 5
fastParams.MinUpgradeWaitRounds = 0
fastParams.MaxUpgradeWaitRounds = 0
fastParams.MaxVersionStringLen += len(consensusTestFastUpgrade(""))
fastParams.ApprovedUpgrades = make(map[protocol.ConsensusVersion]uint64)
for ver := range params.ApprovedUpgrades {
fastParams.ApprovedUpgrades[consensusTestFastUpgrade(ver)] = 0
}
fastUpgradeProtocols[consensusTestFastUpgrade(proto)] = fastParams
}
return
}
func testAccountsCanSendMoneyAcrossUpgrade(t *testing.T, templatePath string) {
t.Parallel()
a := require.New(t)
os.Setenv("ALGOSMALLLAMBDAMSEC", "500")
consensus := generateFastUpgradeConsensus()
var fixture fixtures.RestClientFixture
fixture.SetConsensus(consensus)
fixture.Setup(t, templatePath)
defer fixture.Shutdown()
c := fixture.LibGoalClient
initialStatus, err := c.Status()
a.NoError(err, "getting status")
pingClient := fixture.LibGoalClient
pingAccountList, err := fixture.GetWalletsSortedByBalance()
a.NoError(err, "fixture should be able to get wallets sorted by balance")
pingAccount := pingAccountList[0].Address
pongClient := fixture.GetLibGoalClientForNamedNode("Node")
wh, err := pongClient.GetUnencryptedWalletHandle()
a.NoError(err)
pongAccountList, err := pongClient.ListAddresses(wh)
a.NoError(err)
pongAccount := pongAccountList[0]
pingBalance, err := c.GetBalance(pingAccount)
pongBalance, err := c.GetBalance(pongAccount)
a.Equal(pingBalance, pongBalance, "both accounts should start with same balance")
a.NotEqual(pingAccount, pongAccount, "accounts under study should be different")
expectedPingBalance := pingBalance
expectedPongBalance := pongBalance
const transactionFee = uint64(9000)
const amountPongSendsPing = uint64(10000)
const amountPingSendsPong = uint64(11000)
curStatus, err := c.Status()
a.NoError(err, "getting status")
var pingTxids []string
var pongTxids []string
startTime := time.Now()
for curStatus.LastVersion == initialStatus.LastVersion {
pongTx, err := pongClient.SendPaymentFromUnencryptedWallet(pongAccount, pingAccount, transactionFee, amountPongSendsPing, GenerateRandomBytes(8))
a.NoError(err, "fixture should be able to send money (pong -> ping)")
pongTxids = append(pongTxids, pongTx.ID().String())
pingTx, err := pingClient.SendPaymentFromUnencryptedWallet(pingAccount, pongAccount, transactionFee, amountPingSendsPong, GenerateRandomBytes(8))
a.NoError(err, "fixture should be able to send money (ping -> pong)")
pingTxids = append(pingTxids, pingTx.ID().String())
expectedPingBalance = expectedPingBalance - transactionFee - amountPingSendsPong + amountPongSendsPing
expectedPongBalance = expectedPongBalance - transactionFee - amountPongSendsPing + amountPingSendsPong
curStatus, err = pongClient.Status()
a.NoError(err)
time.Sleep(time.Second)
if time.Now().After(startTime.Add(3 * time.Minute)) {
a.Fail("upgrade taking too long")
}
}
initialStatus, err = c.Status()
a.NoError(err, "getting status")
// submit a few more transactions to make sure payments work in new protocol
// perform this for two rounds.
for {
curStatus, err = pongClient.Status()
a.NoError(err)
if curStatus.LastRound > initialStatus.LastRound+2 {
break
}
pongTx, err := pongClient.SendPaymentFromUnencryptedWallet(pongAccount, pingAccount, transactionFee, amountPongSendsPing, GenerateRandomBytes(8))
a.NoError(err, "fixture should be able to send money (pong -> ping)")
pongTxids = append(pongTxids, pongTx.ID().String())
pingTx, err := pingClient.SendPaymentFromUnencryptedWallet(pingAccount, pongAccount, transactionFee, amountPingSendsPong, GenerateRandomBytes(8))
a.NoError(err, "fixture should be able to send money (ping -> pong)")
pingTxids = append(pingTxids, pingTx.ID().String())
expectedPingBalance = expectedPingBalance - transactionFee - amountPingSendsPong + amountPongSendsPing
expectedPongBalance = expectedPongBalance - transactionFee - amountPongSendsPing + amountPingSendsPong
time.Sleep(time.Second)
}
curStatus, err = pongClient.Status()
a.NoError(err)
// wait for all transactions to confirm
for _, txid := range pingTxids {
_, err = fixture.WaitForConfirmedTxn(curStatus.LastRound+5, pingAccount, txid)
a.NoError(err, "waiting for txn")
}
for _, txid := range pongTxids {
_, err = fixture.WaitForConfirmedTxn(curStatus.LastRound+5, pongAccount, txid)
a.NoError(err, "waiting for txn")
}
// check balances
pingBalance, err = c.GetBalance(pingAccount)
a.NoError(err)
pongBalance, err = c.GetBalance(pongAccount)
a.NoError(err)
a.True(expectedPingBalance <= pingBalance, "ping balance is different than expected")
a.True(expectedPongBalance <= pongBalance, "pong balance is different than expected")
}
| 1 | 40,226 | As before - if you've set this, you should be able to get rid of the `os.Setenv("ALGOSMALLLAMBDAMSEC", "500")`. Make sure that the various tests still have the same execution time. | algorand-go-algorand | go
@@ -450,8 +450,8 @@ module Travis
mactex = 'BasicTeX.pkg'
# TODO(craigcitro): Confirm that this will route us to the
# nearest mirror.
- sh.cmd 'wget http://mirror.ctan.org/systems/mac/mactex/'\
- "#{mactex} -O \"/tmp/#{mactex}\""
+ sh.cmd 'curl -fLo \"/tmp/#{mactex}\" --retry 3 http://mirror.ctan.org/systems/mac/mactex/'\
+ '#{mactex}'
sh.echo 'Installing OS X binary package for MacTeX'
sh.cmd "sudo installer -pkg \"/tmp/#{mactex}\" -target /" | 1 | # Maintained by:
# Jim Hester @jimhester [email protected]
# Craig Citro @craigcitro [email protected]
#
module Travis
module Build
class Script
class R < Script
DEFAULTS = {
# Basic config options
cran: 'https://cloud.r-project.org',
repos: {},
warnings_are_errors: true,
# Dependencies (installed in this order)
apt_packages: [],
brew_packages: [],
r_binary_packages: [],
r_packages: [],
bioc_packages: [],
r_github_packages: [],
# Build/test options
r_build_args: '',
r_check_args: '--as-cran',
r_check_revdep: false,
# Heavy dependencies
pandoc: true,
latex: true,
pandoc_version: '1.15.2',
# Bioconductor
bioc: 'https://bioconductor.org/biocLite.R',
bioc_required: false,
bioc_use_devel: false,
disable_homebrew: false,
r: 'release'
}
def initialize(data)
# TODO(craigcitro): Is there a way to avoid explicitly
# naming arguments here?
super
@devtools_installed = false
@bioc_installed = false
end
def export
super
sh.export 'TRAVIS_R_VERSION', r_version, echo: false
sh.export 'TRAVIS_R_VERSION_STRING', config[:r].to_s, echo: false
sh.export 'R_LIBS_USER', '~/R/Library', echo: false
sh.export 'R_LIBS_SITE', '/usr/local/lib/R/site-library:/usr/lib/R/site-library', echo: false
sh.export '_R_CHECK_CRAN_INCOMING_', 'false', echo: false
sh.export 'NOT_CRAN', 'true', echo: false
sh.export 'R_PROFILE', "~/.Rprofile.site", echo: false
end
def configure
super
sh.echo 'R for Travis-CI is not officially supported, '\
'but is community maintained.', ansi: :green
sh.echo 'Please file any issues at https://github.com/travis-ci/travis-ci/issues'
sh.echo 'and mention @craigcitro and @jimhester in the issue'
sh.fold 'R-install' do
sh.with_options({ assert: true, echo: true, timing: true }) do
sh.echo 'Installing R', ansi: :yellow
case config[:os]
when 'linux'
# Set up our CRAN mirror.
sh.cmd 'sudo add-apt-repository '\
"\"deb #{repos[:CRAN]}/bin/linux/ubuntu "\
"$(lsb_release -cs)/\""
sh.cmd 'sudo apt-key adv --keyserver keyserver.ubuntu.com '\
'--recv-keys E084DAB9'
# Add marutter's c2d4u repository.
sh.cmd 'sudo add-apt-repository -y "ppa:marutter/rrutter"'
sh.cmd 'sudo add-apt-repository -y "ppa:marutter/c2d4u"'
# Update after adding all repositories. Retry several
# times to work around flaky connection to Launchpad PPAs.
sh.cmd 'sudo apt-get update -qq', retry: true
# Install precompiled R
# Install only the dependencies for an R development environment except for
# libpcre3-dev or r-base-core because they will be included in
# the R binary tarball.
# Dependencies queried with `apt-cache depends -i r-base-dev`.
# qpdf and texinfo are also needed for --as-cran # checks:
# https://stat.ethz.ch/pipermail/r-help//2012-September/335676.html
sh.cmd 'sudo apt-get install -y --no-install-recommends '\
'build-essential gcc g++ gfortran libblas-dev liblapack-dev '\
'libncurses5-dev libreadline-dev libjpeg-dev '\
'libpng-dev zlib1g-dev libbz2-dev liblzma-dev cdbs qpdf texinfo '\
'libmagick++-dev libssh2-1-dev', retry: true
r_filename = "R-#{r_version}-$(lsb_release -cs).xz"
r_url = "https://s3.amazonaws.com/rstudio-travis/#{r_filename}"
sh.cmd "curl -fLo /tmp/#{r_filename} #{r_url}", retry: true
sh.cmd "tar xJf /tmp/#{r_filename} -C ~"
sh.export 'PATH', "$HOME/R-bin/bin:$PATH", echo: false
sh.export 'LD_LIBRARY_PATH', "$HOME/R-bin/lib:$LD_LIBRARY_PATH", echo: false
sh.rm "/tmp/#{r_filename}"
sh.cmd "sudo mkdir -p /usr/local/lib/R/site-library $R_LIBS_USER"
sh.cmd 'sudo chmod 2777 /usr/local/lib/R /usr/local/lib/R/site-library $R_LIBS_USER'
when 'osx'
# We want to update, but we don't need the 800+ lines of
# output.
sh.cmd 'brew update >/dev/null', retry: true
# R-devel builds available at research.att.com
if r_version == 'devel'
r_url = "https://r.research.att.com/mavericks/R-devel/R-devel-mavericks-signed.pkg"
# The latest release is the only one available in /bin/macosx
elsif r_version == r_latest
r_url = "#{repos[:CRAN]}/bin/macosx/R-latest.pkg"
# 3.2.5 was never built for OS X so
# we need to use 3.2.4-revised, which is the same codebase
# https://stat.ethz.ch/pipermail/r-devel/2016-May/072642.html
elsif r_version == '3.2.5'
r_url = "#{repos[:CRAN]}/bin/macosx/old/R-3.2.4-revised.pkg"
# all other binaries are in /bin/macosx/old
else
r_url = "#{repos[:CRAN]}/bin/macosx/old/R-#{r_version}.pkg"
end
# Install from latest CRAN binary build for OS X
sh.cmd "curl -fLo /tmp/R.pkg #{r_url}", retry: true
sh.echo 'Installing OS X binary package for R'
sh.cmd 'sudo installer -pkg "/tmp/R.pkg" -target /'
sh.rm '/tmp/R.pkg'
# Install gfortran libraries the precompiled binaries are linked to
sh.cmd 'curl -fLo /tmp/gfortran.tar.bz2 http://r.research.att.com/libs/gfortran-4.8.2-darwin13.tar.bz2', retry: true
sh.cmd 'sudo tar fvxz /tmp/gfortran.tar.bz2 -C /'
sh.rm '/tmp/gfortran.tar.bz2'
else
sh.failure "Operating system not supported: #{config[:os]}"
end
# Set repos in ~/.Rprofile
repos_str = repos.collect {|k,v| "#{k} = \"#{v}\""}.join(", ")
options_repos = "options(repos = c(#{repos_str}))"
sh.cmd %Q{echo '#{options_repos}' > ~/.Rprofile.site}
# PDF manual requires latex
if config[:latex]
setup_latex
else
config[:r_check_args] << " --no-manual"
end
setup_bioc if needs_bioc?
setup_pandoc if config[:pandoc]
# Removes preinstalled homebrew
disable_homebrew if config[:disable_homebrew]
end
end
end
def announce
super
sh.fold 'R-session-info' do
sh.echo 'R session information', ansi: :yellow
sh.cmd 'Rscript -e \'sessionInfo()\''
end
end
def install
super
sh.if '! -e DESCRIPTION' do
sh.failure "No DESCRIPTION file found, user must supply their own install and script steps"
end
sh.fold "R-dependencies" do
sh.echo 'Installing package dependencies', ansi: :yellow
# Install any declared packages
apt_install config[:apt_packages]
brew_install config[:brew_packages]
r_binary_install config[:r_binary_packages]
r_install config[:r_packages]
r_install config[:bioc_packages]
r_github_install config[:r_github_packages]
# Install dependencies for the package we're testing.
install_deps
end
if @devtools_installed
sh.fold 'R-installed-versions' do
sh.echo 'Installed package versions', ansi: :yellow
sh.cmd 'Rscript -e \'devtools::session_info(installed.packages()[, "Package"])\''
end
end
end
def script
# Build the package
sh.if '! -e DESCRIPTION' do
sh.failure "No DESCRIPTION file found, user must supply their own install and script steps"
end
tarball_script =
'$version = $1 if (/^Version:\s(\S+)/);'\
'$package = $1 if (/^Package:\s*(\S+)/);'\
'END { print "${package}_$version.tar.gz" }'\
sh.export 'PKG_TARBALL', "$(perl -ne '#{tarball_script}' DESCRIPTION)", echo: false
sh.fold 'R-build' do
sh.echo 'Building package', ansi: :yellow
sh.echo "Building with: R CMD build ${R_BUILD_ARGS}"
sh.cmd "R CMD build #{config[:r_build_args]} .",
assert: true
end
# Check the package
sh.fold 'R-check' do
sh.echo 'Checking package', ansi: :yellow
# Test the package
sh.echo 'Checking with: R CMD check "${PKG_TARBALL}" '\
"#{config[:r_check_args]}"
sh.cmd "R CMD check \"${PKG_TARBALL}\" #{config[:r_check_args]}; "\
"CHECK_RET=$?", assert: false
end
export_rcheck_dir
if @devtools_installed
# Output check summary
sh.cmd 'Rscript -e "message(devtools::check_failures(path = \"${RCHECK_DIR}\"))"', echo: false
end
# Build fails if R CMD check fails
sh.if '$CHECK_RET -ne 0' do
dump_logs
sh.failure 'R CMD check failed'
end
# Turn warnings into errors, if requested.
if config[:warnings_are_errors]
sh.cmd 'grep -q -R "WARNING" "${RCHECK_DIR}/00check.log"', echo: false, assert: false
sh.if '$? -eq 0' do
dump_logs
sh.failure "Found warnings, treating as errors (as requested)."
end
end
# Check revdeps, if requested.
if @devtools_installed and config[:r_check_revdep]
sh.echo "Checking reverse dependencies"
revdep_script =
'library("devtools");' \
'res <- revdep_check();' \
'if (length(res) > 0) {' \
' revdep_check_summary(res);' \
' revdep_check_save_logs(res);' \
' q(status = 1, save = "no");' \
'}'
sh.cmd "Rscript -e '#{revdep_script}'", assert: true
end
end
def setup_cache
if data.cache?(:packages)
sh.fold 'package cache' do
sh.echo 'Setting up package cache', ansi: :yellow
directory_cache.add '$R_LIBS_USER'
end
end
end
def cache_slug
super << '--R-' << r_version
end
def use_directory_cache?
super || data.cache?(:packages)
end
private
def needs_bioc?
config[:bioc_required] || !config[:bioc_packages].empty?
end
def packages_as_arg(packages)
packages = Array(packages)
quoted_pkgs = packages.collect{|p| "\"#{p}\""}
"c(#{quoted_pkgs.join(', ')})"
end
def as_r_boolean(bool)
bool ? "TRUE" : "FALSE"
end
def r_install(packages)
return if packages.empty?
packages = Array(packages)
sh.echo "Installing R packages: #{packages.join(', ')}"
pkg_arg = packages_as_arg(packages)
install_script =
"install.packages(#{pkg_arg});"\
"if (!all(#{pkg_arg} %in% installed.packages())) {"\
' q(status = 1, save = "no")'\
'}'
sh.cmd "Rscript -e '#{install_script}'"
end
def r_github_install(packages)
return if packages.empty?
packages = Array(packages)
setup_devtools
sh.echo "Installing R packages from GitHub: #{packages.join(', ')}"
pkg_arg = packages_as_arg(packages)
install_script = "devtools::install_github(#{pkg_arg}, build_vignettes = FALSE)"
sh.cmd "Rscript -e '#{install_script}'"
end
def r_binary_install(packages)
return if packages.empty?
packages = Array(packages)
if config[:os] == 'linux'
if !config[:sudo] or config[:dist] == 'precise'
sh.echo "R binary packages not supported with 'sudo: false' or 'dist: precise', "\
' falling back to source install'
return r_install packages
end
sh.echo "Installing *binary* R packages: #{packages.join(', ')}"
apt_install packages.collect{|p| "r-cran-#{p.downcase}"}
else
sh.echo "R binary packages not supported on #{config[:os]}, "\
'falling back to source install'
r_install packages
end
end
def apt_install(packages)
return if packages.empty?
packages = Array(packages)
return unless (config[:os] == 'linux')
pkg_arg = packages.join(' ')
sh.echo "Installing apt packages: #{packages.join(', ')}"
sh.cmd "sudo apt-get install -y #{pkg_arg}", retry: true
end
def brew_install(packages)
return if packages.empty?
packages = Array(packages)
return unless (config[:os] == 'osx')
pkg_arg = packages.join(' ')
sh.echo "Installing brew packages: #{packages.join(', ')}"
sh.cmd "brew install #{pkg_arg}", retry: true
end
def install_deps
setup_devtools
install_script =
'deps <- devtools::dev_package_deps(dependencies = NA);'\
'devtools::install_deps(dependencies = TRUE);'\
'if (!all(deps$package %in% installed.packages())) {'\
' message("missing: ", paste(setdiff(deps$package, installed.packages()), collapse=", "));'\
' q(status = 1, save = "no")'\
'}'
sh.cmd "Rscript -e '#{install_script}'"
end
def export_rcheck_dir
# Simply strip the tarball name until the last _ and add '.Rcheck',
# relevant R code # https://github.com/wch/r-source/blob/840a972338042b14aa5855cc431b2d0decf68234/src/library/tools/R/check.R#L4608-L4615
sh.export 'RCHECK_DIR', "$(expr \"$PKG_TARBALL\" : '\\(.*\\)_').Rcheck", echo: false
end
def dump_logs
export_rcheck_dir
dump_log("fail")
dump_log("log")
dump_log("out")
end
def dump_log(type)
sh.fold "#{type} logs" do
sh.echo "R CMD check #{type} logs", ansi: :yellow
cmd =
'for name in '\
"$(find \"${RCHECK_DIR}\" -type f -name \"*#{type}\");"\
'do '\
'echo ">>> Filename: ${name} <<<";'\
'cat ${name};'\
'done'
sh.cmd cmd
end
end
def setup_bioc
unless @bioc_installed
sh.fold 'Bioconductor' do
sh.echo 'Installing Bioconductor', ansi: :yellow
bioc_install_script =
"source(\"#{config[:bioc]}\");"\
'tryCatch('\
" useDevel(#{as_r_boolean(config[:bioc_use_devel])}),"\
' error=function(e) {if (!grepl("already in use", e$message)) {e}}'\
');'\
'cat(append = TRUE, file = "~/.Rprofile.site", "options(repos = BiocInstaller::biocinstallRepos());")'
sh.cmd "Rscript -e '#{bioc_install_script}'", retry: true
end
end
@bioc_installed = true
end
def setup_devtools
unless @devtools_installed
case config[:os]
when 'linux'
if config[:sudo]
r_binary_install ['devtools']
else
r_install ['devtools']
end
else
devtools_check = '!requireNamespace("devtools", quietly = TRUE)'
devtools_install = 'install.packages("devtools")'
sh.cmd "Rscript -e 'if (#{devtools_check}) #{devtools_install}'",
retry: true
end
end
@devtools_installed = true
end
def setup_latex
case config[:os]
when 'linux'
texlive_filename = 'texlive.tar.gz'
texlive_url = 'https://github.com/jimhester/ubuntu-bin/releases/download/latest/texlive.tar.gz'
sh.cmd "curl -fLo /tmp/#{texlive_filename} #{texlive_url}"
sh.cmd "tar xzf /tmp/#{texlive_filename} -C ~"
sh.export 'PATH', "/$HOME/texlive/bin/x86_64-linux:$PATH"
sh.cmd 'tlmgr update --self', assert: false
when 'osx'
# We use basictex due to disk space constraints.
mactex = 'BasicTeX.pkg'
# TODO(craigcitro): Confirm that this will route us to the
# nearest mirror.
sh.cmd 'wget http://mirror.ctan.org/systems/mac/mactex/'\
"#{mactex} -O \"/tmp/#{mactex}\""
sh.echo 'Installing OS X binary package for MacTeX'
sh.cmd "sudo installer -pkg \"/tmp/#{mactex}\" -target /"
sh.rm "/tmp/#{mactex}"
sh.export 'PATH', '/usr/texbin:/Library/TeX/texbin:$PATH'
sh.cmd 'sudo tlmgr update --self', assert: false
# Install common packages
sh.cmd 'sudo tlmgr install inconsolata upquote '\
'courier courier-scaled helvetic', assert: false
end
end
def setup_pandoc
case config[:os]
when 'linux'
pandoc_filename = "pandoc-#{config[:pandoc_version]}-1-amd64.deb"
pandoc_url = "https://github.com/jgm/pandoc/releases/download/#{config[:pandoc_version]}/"\
"#{pandoc_filename}"
# Download and install pandoc
sh.cmd "curl -fLo /tmp/#{pandoc_filename} #{pandoc_url}"
sh.cmd "sudo dpkg -i /tmp/#{pandoc_filename}"
# Fix any missing dependencies
sh.cmd "sudo apt-get install -f"
# Cleanup
sh.rm "/tmp/#{pandoc_filename}"
when 'osx'
pandoc_filename = "pandoc-#{config[:pandoc_version]}-osx.pkg"
pandoc_url = "https://github.com/jgm/pandoc/releases/download/#{config[:pandoc_version]}/"\
"#{pandoc_filename}"
# Download and install pandoc
sh.cmd "curl -fLo /tmp/#{pandoc_filename} #{pandoc_url}"
sh.cmd "sudo installer -pkg \"/tmp/#{pandoc_filename}\" -target /"
# Cleanup
sh.rm "/tmp/#{pandoc_filename}"
end
end
# Uninstalls the preinstalled homebrew
# See FAQ: https://github.com/Homebrew/brew/blob/master/share/doc/homebrew/FAQ.md
def disable_homebrew
return unless (config[:os] == 'osx')
sh.cmd "curl -fsSOL https://raw.githubusercontent.com/Homebrew/install/master/uninstall"
sh.cmd "sudo ruby uninstall --force"
sh.cmd "rm uninstall"
end
def r_version
@r_version ||= normalized_r_version
end
def normalized_r_version(v=config[:r].to_s)
case v
when 'release' then '3.4.1'
when 'oldrel' then '3.3.3'
when '3.0' then '3.0.3'
when '3.1' then '3.1.3'
when '3.2' then '3.2.5'
when '3.3' then '3.3.3'
when '3.4' then '3.4.1'
when 'bioc-devel'
config[:bioc_required] = true
config[:bioc_use_devel] = true
'devel'
when 'bioc-release'
config[:bioc_required] = true
config[:bioc_use_devel] = false
config[:r] = 'release'
normalized_r_version
else v
end
end
def r_latest
normalized_r_version('release')
end
def repos
@repos ||= normalized_repos
end
# If CRAN is not set in repos set it with cran
def normalized_repos
v = config[:repos]
if not v.has_key?(:CRAN)
v[:CRAN] = config[:cran]
end
# If the version is less than 3.2 we need to use http repositories
if r_version < '3.2'
v.each {|_, url| url.sub!(/^https:/, "http:")}
config[:bioc].sub!(/^https:/, "http:")
end
v
end
end
end
end
end
| 1 | 15,558 | Pretty sure the single quotes here should be double quotes; single quotes are not expanded by the shell. | travis-ci-travis-build | rb
@@ -109,8 +109,11 @@ public final class ValidateConstantMessageTests {
" void f(boolean bArg, int iArg, Object oArg, Integer[] arrayArg, "
+ "Collection<String> collectionArg, Map<String, String> mapArg, String stringArg, "
+ "Iterable<String> iterableArg, double dArg) {",
+ " String localConstant = \"constant\";",
" Validate.isTrue(bArg, \"message %d\", 123L);",
" Validate.isTrue(bArg, \"message %f\", 0.0);",
+ " Validate.notBlank(stringArg, \"message %s\", compileTimeConstant);",
+ " Validate.notBlank(stringArg, \"message %s\", localConstant);",
" Validate.isTrue(bArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.notNull(oArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.notEmpty(arrayArg, \"message %s %s\", \"msg\", \"msg\");", | 1 | /*
* Copyright 2017 Palantir Technologies, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.baseline.errorprone;
import com.google.errorprone.CompilationTestHelper;
import org.junit.Before;
import org.junit.Test;
public final class ValidateConstantMessageTests {
private CompilationTestHelper compilationHelper;
@Before
public void before() {
compilationHelper = CompilationTestHelper.newInstance(ValidateConstantMessage.class, getClass());
}
private void test(String call) throws Exception {
compilationHelper
.addSourceLines(
"Test.java",
"import org.apache.commons.lang3.Validate;",
"import java.math.BigDecimal;",
"import java.util.Collection;",
"import java.util.Map;",
"class Test {",
" void f(String param, boolean bArg, int iArg, Object oArg, Integer[] arrayArg, "
+ "Collection<String> collectionArg, Map<String, String> mapArg, String stringArg, "
+ "Iterable<String> iterableArg, double dArg) {",
" // BUG: Diagnostic contains: non-constant message",
" " + call,
" }",
"}")
.doTest();
}
@Test
public void positive() throws Exception {
test("Validate.isTrue(param != \"string\", String.format(\"constant %s\", param));");
test("Validate.isTrue(param != \"string\", \"constant\" + param);");
test("Validate.isTrue(param != \"string\", \"constant\" + param, 0.0);");
test("Validate.isTrue(param != \"string\", \"constant\" + param, 123L);");
test("Validate.notNull(param, \"constant\" + param);");
test("Validate.notEmpty(collectionArg, \"constant\" + param);");
test("Validate.notEmpty(arrayArg, \"constant\" + param);");
test("Validate.notEmpty(mapArg, \"constant\" + param);");
test("Validate.notEmpty(stringArg, \"constant\" + param);");
test("Validate.notBlank(stringArg, \"constant\" + param);");
test("Validate.noNullElements(arrayArg, \"constant\" + param);");
test("Validate.noNullElements(iterableArg, \"constant\" + param);");
test("Validate.validIndex(arrayArg, 1, \"constant\" + param);");
test("Validate.validIndex(collectionArg, 1, \"constant\" + param);");
test("Validate.validIndex(stringArg, 1, \"constant\" + param);");
test("Validate.validState(bArg, \"constant\" + param);");
test("Validate.matchesPattern(stringArg, \"[A-Z]+\", \"constant\" + param);");
test("Validate.notNaN(dArg, \"constant\" + param);");
test("Validate.finite(dArg, \"constant\" + param);");
test("Validate.inclusiveBetween(BigDecimal.ZERO, BigDecimal.TEN, BigDecimal.ONE, "
+ "\"constant\" + param);");
test("Validate.inclusiveBetween(0L, 100L, 50L, \"constant\" + param);");
test("Validate.inclusiveBetween(0.0, 1.0, 0.5, \"constant\" + param);");
test("Validate.exclusiveBetween(BigDecimal.ZERO, BigDecimal.TEN, BigDecimal.ONE, "
+ "\"constant\" + param);");
test("Validate.exclusiveBetween(0L, 100L, 50L, \"constant\" + param);");
test("Validate.exclusiveBetween(0.0, 1.0, 0.5, \"constant\" + param);");
test("Validate.isInstanceOf(BigDecimal.class, BigDecimal.ONE, \"constant\" + param);");
test("Validate.isAssignableFrom(Object.class, BigDecimal.class, "
+ "\"constant\" + param);");
}
@Test
public void negative() throws Exception {
compilationHelper
.addSourceLines(
"Test.java",
"import org.apache.commons.lang3.Validate;",
"import java.math.BigDecimal;",
"import java.util.Collection;",
"import java.util.Iterator;",
"import java.util.Map;",
"class Test {",
" private static final String compileTimeConstant = \"constant\";",
" void f(boolean bArg, int iArg, Object oArg, Integer[] arrayArg, "
+ "Collection<String> collectionArg, Map<String, String> mapArg, String stringArg, "
+ "Iterable<String> iterableArg, double dArg) {",
" Validate.isTrue(bArg, \"message %d\", 123L);",
" Validate.isTrue(bArg, \"message %f\", 0.0);",
" Validate.isTrue(bArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.notNull(oArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.notEmpty(arrayArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.notEmpty(collectionArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.notEmpty(mapArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.notEmpty(stringArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.notBlank(stringArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.noNullElements(arrayArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.noNullElements(iterableArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.validIndex(arrayArg, 1, \"message %s %s\", \"msg\", \"msg\");",
" Validate.validIndex(collectionArg, 1, \"message %s %s\", \"msg\", \"msg\");",
" Validate.validIndex(stringArg, 1, \"message %s %s\", \"msg\", \"msg\");",
" Validate.validState(bArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.matchesPattern(stringArg, \"[A-Z]+\", \"message %s %s\", \"msg\", \"msg\");",
" Validate.notNaN(dArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.finite(dArg, \"message %s %s\", \"msg\", \"msg\");",
" Validate.inclusiveBetween(BigDecimal.ZERO, BigDecimal.TEN, BigDecimal.ONE,"
+ " \"message %s %s\", \"msg\", \"msg\");",
" Validate.inclusiveBetween(0L, 100L, 50L, \"message\");",
" Validate.inclusiveBetween(0.0, 1.0, 0.5, \"message\");",
" Validate.exclusiveBetween(BigDecimal.ZERO, BigDecimal.TEN, BigDecimal.ONE,"
+ " \"message %s %s\", \"msg\", \"msg\");",
" Validate.exclusiveBetween(0L, 100L, 50L, \"message\");",
" Validate.exclusiveBetween(0.0, 1.0, 0.5, \"message\");",
" Validate.isInstanceOf(BigDecimal.class, BigDecimal.ONE,"
+ " \"message %s %s\", \"msg\", \"msg\");",
" Validate.isAssignableFrom(Object.class, BigDecimal.class,"
+ " \"message %s %s\", \"msg\", \"msg\");",
" }",
"}")
.doTest();
}
}
| 1 | 6,152 | These are not really constants, right? | palantir-gradle-baseline | java
@@ -1,3 +1,4 @@
+from boto.compat import six
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
# | 1 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class Batch(object):
"""
Used to construct a BatchGet request.
:ivar table: The Table object from which the item is retrieved.
:ivar keys: A list of scalar or tuple values. Each element in the
list represents one Item to retrieve. If the schema for the
table has both a HashKey and a RangeKey, each element in the
list should be a tuple consisting of (hash_key, range_key). If
the schema for the table contains only a HashKey, each element
in the list should be a scalar value of the appropriate type
for the table schema. NOTE: The maximum number of items that
can be retrieved for a single operation is 100. Also, the
number of items retrieved is constrained by a 1 MB size limit.
:ivar attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:ivar consistent_read: Specify whether or not to use a
consistent read. Defaults to False.
"""
def __init__(self, table, keys, attributes_to_get=None,
consistent_read=False):
self.table = table
self.keys = keys
self.attributes_to_get = attributes_to_get
self.consistent_read = consistent_read
def to_dict(self):
"""
Convert the Batch object into the format required for Layer1.
"""
batch_dict = {}
key_list = []
for key in self.keys:
if isinstance(key, tuple):
hash_key, range_key = key
else:
hash_key = key
range_key = None
k = self.table.layer2.build_key_from_values(self.table.schema,
hash_key, range_key)
key_list.append(k)
batch_dict['Keys'] = key_list
if self.attributes_to_get:
batch_dict['AttributesToGet'] = self.attributes_to_get
if self.consistent_read:
batch_dict['ConsistentRead'] = True
else:
batch_dict['ConsistentRead'] = False
return batch_dict
class BatchWrite(object):
"""
Used to construct a BatchWrite request. Each BatchWrite object
represents a collection of PutItem and DeleteItem requests for
a single Table.
:ivar table: The Table object from which the item is retrieved.
:ivar puts: A list of :class:`boto.dynamodb.item.Item` objects
that you want to write to DynamoDB.
:ivar deletes: A list of scalar or tuple values. Each element in the
list represents one Item to delete. If the schema for the
table has both a HashKey and a RangeKey, each element in the
list should be a tuple consisting of (hash_key, range_key). If
the schema for the table contains only a HashKey, each element
in the list should be a scalar value of the appropriate type
for the table schema.
"""
def __init__(self, table, puts=None, deletes=None):
self.table = table
self.puts = puts or []
self.deletes = deletes or []
def to_dict(self):
"""
Convert the Batch object into the format required for Layer1.
"""
op_list = []
for item in self.puts:
d = {'Item': self.table.layer2.dynamize_item(item)}
d = {'PutRequest': d}
op_list.append(d)
for key in self.deletes:
if isinstance(key, tuple):
hash_key, range_key = key
else:
hash_key = key
range_key = None
k = self.table.layer2.build_key_from_values(self.table.schema,
hash_key, range_key)
d = {'Key': k}
op_list.append({'DeleteRequest': d})
return (self.table.name, op_list)
class BatchList(list):
"""
A subclass of a list object that contains a collection of
:class:`boto.dynamodb.batch.Batch` objects.
"""
def __init__(self, layer2):
list.__init__(self)
self.unprocessed = None
self.layer2 = layer2
def add_batch(self, table, keys, attributes_to_get=None,
consistent_read=False):
"""
Add a Batch to this BatchList.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object in which the items are contained.
:type keys: list
:param keys: A list of scalar or tuple values. Each element in the
list represents one Item to retrieve. If the schema for the
table has both a HashKey and a RangeKey, each element in the
list should be a tuple consisting of (hash_key, range_key). If
the schema for the table contains only a HashKey, each element
in the list should be a scalar value of the appropriate type
for the table schema. NOTE: The maximum number of items that
can be retrieved for a single operation is 100. Also, the
number of items retrieved is constrained by a 1 MB size limit.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
"""
self.append(Batch(table, keys, attributes_to_get, consistent_read))
def resubmit(self):
"""
Resubmit the batch to get the next result set. The request object is
rebuilt from scratch, meaning that any batches added between ``submit``
and ``resubmit`` will be lost.
Note: This method is experimental and subject to changes in future releases
"""
del self[:]
if not self.unprocessed:
return None
for table_name, table_req in self.unprocessed.iteritems():
table_keys = table_req['Keys']
table = self.layer2.get_table(table_name)
keys = []
for key in table_keys:
h = key['HashKeyElement']
r = None
if 'RangeKeyElement' in key:
r = key['RangeKeyElement']
keys.append((h, r))
attributes_to_get = None
if 'AttributesToGet' in table_req:
attributes_to_get = table_req['AttributesToGet']
self.add_batch(table, keys, attributes_to_get=attributes_to_get)
return self.submit()
def submit(self):
res = self.layer2.batch_get_item(self)
if 'UnprocessedKeys' in res:
self.unprocessed = res['UnprocessedKeys']
return res
def to_dict(self):
"""
Convert a BatchList object into format required for Layer1.
"""
d = {}
for batch in self:
b = batch.to_dict()
if b['Keys']:
d[batch.table.name] = b
return d
class BatchWriteList(list):
"""
A subclass of a list object that contains a collection of
:class:`boto.dynamodb.batch.BatchWrite` objects.
"""
def __init__(self, layer2):
list.__init__(self)
self.layer2 = layer2
def add_batch(self, table, puts=None, deletes=None):
"""
Add a BatchWrite to this BatchWriteList.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object in which the items are contained.
:type puts: list of :class:`boto.dynamodb.item.Item` objects
:param puts: A list of items that you want to write to DynamoDB.
:type deletes: A list
:param deletes: A list of scalar or tuple values. Each element
in the list represents one Item to delete. If the schema
for the table has both a HashKey and a RangeKey, each
element in the list should be a tuple consisting of
(hash_key, range_key). If the schema for the table
contains only a HashKey, each element in the list should
be a scalar value of the appropriate type for the table
schema.
"""
self.append(BatchWrite(table, puts, deletes))
def submit(self):
return self.layer2.batch_write_item(self)
def to_dict(self):
"""
Convert a BatchWriteList object into format required for Layer1.
"""
d = {}
for batch in self:
table_name, batch_dict = batch.to_dict()
d[table_name] = batch_dict
return d
| 1 | 10,151 | The import should go below the copyright statement. | boto-boto | py
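The review comment above is about placement only: the patch adds `from boto.compat import six` above the license header, and the reviewer wants it underneath. A minimal sketch of the requested ordering for the top of the file (license text elided):

```python
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# ... (license text unchanged) ...
#

# Imports follow the copyright/license block.
from boto.compat import six
```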
@@ -49,7 +49,7 @@ describe InvoiceNotifier do
def customer_should_receive_receipt_email(invoice)
email = ActionMailer::Base.deliveries.first
- email.subject.should include('receipt')
- email.to.should eq [invoice.user_email]
+ expect(email.subject).to include('receipt')
+ expect(email.to).to eq [invoice.user_email]
end
end | 1 | require 'spec_helper'
describe InvoiceNotifier do
describe '#send_receipt' do
context 'invoice has a user' do
it 'sends a receipt to the person who was charged' do
ActionMailer::Base.deliveries.clear
payment_processor = InvoiceNotifier.new(stub_invoice)
payment_processor.send_receipt
customer_should_receive_receipt_email(stub_invoice)
end
end
context 'invoice has no user' do
it 'sends a notification to Airbrake for further debugging' do
Airbrake.stubs(:notify_or_ignore)
payment_processor =
InvoiceNotifier.new(stub_invoice_with_no_user)
payment_processor.send_receipt
expect(Airbrake).to have_received(:notify_or_ignore)
end
end
end
def stub_invoice
stub(
'invoice',
user: true,
user_email: '[email protected]',
amount_paid: '$500',
stripe_invoice_id: 'stripe_id'
)
end
def stub_invoice_with_no_user
stub(
'invoice',
user: nil,
user_email: nil,
amount_paid: '$500',
stripe_invoice_id: 'stripe_id',
stripe_invoice: nil
)
end
def customer_should_receive_receipt_email(invoice)
email = ActionMailer::Base.deliveries.first
email.subject.should include('receipt')
email.to.should eq [invoice.user_email]
end
end
| 1 | 9,672 | Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping. | thoughtbot-upcase | rb |
@@ -200,11 +200,11 @@ class Field implements Translatable
}
/**
- * @return string|Markup
+ * @return string|array|Markup
*/
public function getTwigValue()
{
- $value = $this->__toString();
+ $value = $this->getFlattenedValue();
if ($this->getDefinition()->get('allow_html')) {
$value = new Markup($value, 'UTF-8'); | 1 | <?php
declare(strict_types=1);
namespace Bolt\Entity;
use Bolt\Content\FieldType;
use Doctrine\ORM\Mapping as ORM;
use Gedmo\Mapping\Annotation as Gedmo;
use Symfony\Component\Serializer\Annotation\Groups;
use Tightenco\Collect\Support\Collection as LaravelCollection;
use Twig\Markup;
/**
* @ORM\Entity(repositoryClass="Bolt\Repository\FieldRepository")
* @ORM\Table(
* uniqueConstraints={
* @ORM\UniqueConstraint(name="content_field", columns={"content_id", "name"}),
* })
* @ORM\InheritanceType("SINGLE_TABLE")
* @ORM\DiscriminatorColumn(name="type", type="string", length=191)
* @ORM\DiscriminatorMap({
* "generic" = "field",
* "block" = "Bolt\Entity\Field\BlockField",
* "checkbox" = "Bolt\Entity\Field\CheckboxField",
* "date" = "Bolt\Entity\Field\DateField",
* "embed" = "Bolt\Entity\Field\EmbedField",
* "file" = "Bolt\Entity\Field\FileField",
* "filelist" = "Bolt\Entity\Field\FilelistField",
* "float" = "Bolt\Entity\Field\FloatField",
* "geolocation" = "Bolt\Entity\Field\GeolocationField",
* "hidden" = "Bolt\Entity\Field\HiddenField",
* "html" = "Bolt\Entity\Field\HtmlField",
* "image" = "Bolt\Entity\Field\ImageField",
* "imagelist" = "Bolt\Entity\Field\ImagelistField",
* "integer" = "Bolt\Entity\Field\IntegerField",
* "markdown" = "Bolt\Entity\Field\MarkdownField",
* "number" = "Bolt\Entity\Field\NumberField",
* "repeater" = "Bolt\Entity\Field\RepeaterField",
* "select" = "Bolt\Entity\Field\SelectField",
* "slug" = "Bolt\Entity\Field\SlugField",
* "templateselect" = "Bolt\Entity\Field\TemplateselectField",
* "text" = "Bolt\Entity\Field\TextField",
* "textarea" = "Bolt\Entity\Field\TextareaField",
* "video" = "Bolt\Entity\Field\VideoField"
* })
*/
class Field implements Translatable
{
/**
* @ORM\Id()
* @ORM\GeneratedValue()
* @ORM\Column(type="integer")
* @Groups({"put"})
*/
private $id;
/**
* @ORM\Column(type="string", length=191)
* @Groups("put")
*/
public $name;
/**
* @ORM\Column(type="json")
* @Groups({"put"})
* @Gedmo\Translatable
*/
protected $value = [];
/**
* @ORM\Column(type="integer")
* @Groups({"put"})
*/
private $sortorder = 0;
/**
* @Gedmo\Locale
*
* @var string|null
*/
protected $locale;
/**
* @ORM\Column(type="integer", nullable=true)
* @Groups("public")
*/
private $version;
/**
* @ORM\ManyToOne(targetEntity="Bolt\Entity\Content", inversedBy="fields")
* @ORM\JoinColumn(nullable=false)
*/
private $content;
/**
* @ORM\ManyToOne(targetEntity="Bolt\Entity\Field")
* @Groups("public")
*/
private $parent;
/** @var ?FieldType */
private $fieldTypeDefinition;
public function __toString(): string
{
return implode(', ', $this->getValue());
}
public static function factory(LaravelCollection $definition, string $name = ''): self
{
$type = $definition['type'];
$classname = '\\Bolt\\Entity\\Field\\' . ucwords($type) . 'Field';
if (class_exists($classname)) {
$field = new $classname();
} else {
$field = new self();
}
if ($name !== '') {
$field->setName($name);
}
$field->setDefinition($type, $definition);
return $field;
}
public function getId(): ?int
{
return $this->id;
}
public function getDefinition(): FieldType
{
if ($this->fieldTypeDefinition === null && $this->getContent()) {
$this->setDefinitionFromContentDefinition();
}
return $this->fieldTypeDefinition;
}
private function setDefinitionFromContentDefinition(): void
{
$contentTypeDefinition = $this->getContent()->getDefinition();
$this->fieldTypeDefinition = FieldType::factory($this->getName(), $contentTypeDefinition);
}
public function setDefinition($name, LaravelCollection $definition): void
{
$this->fieldTypeDefinition = FieldType::mock($name, $definition);
}
public function getName(): string
{
return $this->name;
}
public function setName(string $name): self
{
$this->name = $name;
return $this;
}
public function getType(): ?string
{
return $this->getDefinition()->get('type');
}
public function get($key)
{
return isset($this->value[$key]) ? $this->value[$key] : null;
}
public function getValue(): ?array
{
return $this->value;
}
/**
* like getValue() but returns single value for single value fields
*
* @return array|mixed|null
*/
public function getFlattenedValue()
{
$value = $this->getValue();
if (is_iterable($value)) {
$count = count($value);
if ($count === 0) {
return null;
} elseif ($count === 1) {
return reset($value);
}
}
return $value;
}
/**
* @return string|Markup
*/
public function getTwigValue()
{
$value = $this->__toString();
if ($this->getDefinition()->get('allow_html')) {
$value = new Markup($value, 'UTF-8');
}
return $value;
}
public function setValue($value): self
{
$this->value = (array) $value;
return $this;
}
public function getSortorder(): ?int
{
return $this->sortorder;
}
public function setSortorder(int $sortorder): self
{
$this->sortorder = $sortorder;
return $this;
}
public function setLocale(string $locale): void
{
$this->locale = $locale;
}
public function getLocale(): ?string
{
return $this->locale;
}
public function getVersion(): ?int
{
return $this->version;
}
public function setVersion(?int $version): self
{
$this->version = $version;
return $this;
}
public function getContent(): ?Content
{
return $this->content;
}
public function setContent(?Content $content): self
{
$this->content = $content;
return $this;
}
public function getParent(): ?self
{
return $this->parent;
}
public function setParent(?self $parent): self
{
$this->parent = $parent;
return $this;
}
}
| 1 | 11,134 | `if (is_string($value) && $this->getDefinition()->get('allow_html')) {` | bolt-core | php |
@@ -133,7 +133,7 @@ namespace OpenTelemetry.Trace
private void AddInternal(string key, object value)
{
- Guard.Null(key, nameof(key));
+ Debug.Assert(key != null, $"{nameof(key)} must not be null");
this.Attributes[key] = value;
} | 1 | // <copyright file="SpanAttributes.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System.Collections.Generic;
using System.Diagnostics;
using OpenTelemetry.Internal;
namespace OpenTelemetry.Trace
{
/// <summary>
/// A class that represents the span attributes. Read more here https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/common.md#attributes.
/// </summary>
/// <remarks>SpanAttributes is a wrapper around <see cref="ActivityTagsCollection"/> class.</remarks>
public class SpanAttributes
{
/// <summary>
/// Initializes a new instance of the <see cref="SpanAttributes"/> class.
/// </summary>
public SpanAttributes()
{
this.Attributes = new ActivityTagsCollection();
}
/// <summary>
/// Initializes a new instance of the <see cref="SpanAttributes"/> class.
/// </summary>
/// <param name="attributes">Initial attributes to store in the collection.</param>
public SpanAttributes(IEnumerable<KeyValuePair<string, object>> attributes)
: this()
{
Guard.Null(attributes, nameof(attributes));
foreach (KeyValuePair<string, object> kvp in attributes)
{
this.AddInternal(kvp.Key, kvp.Value);
}
}
internal ActivityTagsCollection Attributes { get; }
/// <summary>
/// Add entry to the attributes.
/// </summary>
/// <param name="key">Entry key.</param>
/// <param name="value">Entry value.</param>
public void Add(string key, long value)
{
this.AddInternal(key, value);
}
/// <summary>
/// Add entry to the attributes.
/// </summary>
/// <param name="key">Entry key.</param>
/// <param name="value">Entry value.</param>
public void Add(string key, string value)
{
this.AddInternal(key, value);
}
/// <summary>
/// Add entry to the attributes.
/// </summary>
/// <param name="key">Entry key.</param>
/// <param name="value">Entry value.</param>
public void Add(string key, bool value)
{
this.AddInternal(key, value);
}
/// <summary>
/// Add entry to the attributes.
/// </summary>
/// <param name="key">Entry key.</param>
/// <param name="value">Entry value.</param>
public void Add(string key, double value)
{
this.AddInternal(key, value);
}
/// <summary>
/// Add entry to the attributes.
/// </summary>
/// <param name="key">Entry key.</param>
/// <param name="values">Entry value.</param>
public void Add(string key, long[] values)
{
this.AddInternal(key, values);
}
/// <summary>
/// Add entry to the attributes.
/// </summary>
/// <param name="key">Entry key.</param>
/// <param name="values">Entry value.</param>
public void Add(string key, string[] values)
{
this.AddInternal(key, values);
}
/// <summary>
/// Add entry to the attributes.
/// </summary>
/// <param name="key">Entry key.</param>
/// <param name="values">Entry value.</param>
public void Add(string key, bool[] values)
{
this.AddInternal(key, values);
}
/// <summary>
/// Add entry to the attributes.
/// </summary>
/// <param name="key">Entry key.</param>
/// <param name="values">Entry value.</param>
public void Add(string key, double[] values)
{
this.AddInternal(key, values);
}
private void AddInternal(string key, object value)
{
Guard.Null(key, nameof(key));
this.Attributes[key] = value;
}
}
}
| 1 | 22,254 | I think we have the same problem here - if folks call the `public void Add` methods with a `null` key, we need to use `Guard`. | open-telemetry-opentelemetry-dotnet | .cs
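The comment above draws the usual boundary for argument checking: public entry points validate caller input at runtime (the `Guard` helper), while private paths that only ever see already-validated values can keep a debug assertion. The sketch below shows that split generically in Python; the names are invented for illustration and this is not the OpenTelemetry .NET API.

```python
class SpanAttributesSketch:
    """Illustrative only: runtime validation at the public surface,
    assertions for internal invariants."""

    def __init__(self) -> None:
        self._attributes = {}

    def add(self, key, value) -> None:
        # Public API: callers can pass anything, so fail fast with a clear error.
        if key is None:
            raise ValueError("key must not be None")
        self._add_internal(key, value)

    def _add_internal(self, key, value) -> None:
        # Internal path: every caller above has already validated the key,
        # so an assertion is enough to document (and debug-check) the invariant.
        assert key is not None, "key must not be None"
        self._attributes[key] = value
```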
@@ -0,0 +1,12 @@
+//
+// Copyright (C) 2020 Greg Landrum
+// @@ All Rights Reserved @@
+// This file is part of the RDKit.
+// The contents are covered by the terms of the BSD license
+// which is included in the file license.txt, found at the root
+// of the RDKit source tree.
+//
+
+#define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do
+ // this in one cpp file
+#include "catch.hpp" | 1 | 1 | 22,130 | I thought that the main was in catch_qt.cpp? | rdkit-rdkit | cpp |
|
@@ -87,7 +87,7 @@ public class ImagesManageActivity extends BaseActivity {
private static final int REQUEST_UNSELECT_IMAGE_AFTER_LOGIN = 4;
public static final int REQUEST_EDIT_IMAGE = 1000;
private static final int REQUEST_CHOOSE_IMAGE = 1001;
- private static final List<ProductImageField> TYPE_IMAGE = Arrays.asList(ProductImageField.FRONT, ProductImageField.INGREDIENTS, ProductImageField.NUTRITION);
+ private static final List<ProductImageField> TYPE_IMAGE = Arrays.asList(ProductImageField.FRONT, ProductImageField.INGREDIENTS, ProductImageField.NUTRITION, ProductImageField.PACKAGING);
private ActivityFullScreenImageBinding binding;
private OpenFoodAPIClient client;
private File lastViewedImage; | 1 | /*
* Copyright 2016-2020 Open Food Facts
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openfoodfacts.github.scrachx.openfood.features;
import android.app.Activity;
import android.content.Intent;
import android.content.SharedPreferences;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.util.Log;
import android.view.View;
import android.view.ViewTreeObserver;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.Toast;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.core.app.ActivityCompat;
import androidx.core.content.ContextCompat;
import com.github.chrisbanes.photoview.PhotoViewAttacher;
import com.squareup.picasso.Callback;
import com.theartofdev.edmodo.cropper.CropImage;
import com.theartofdev.edmodo.cropper.CropImageActivity;
import org.apache.commons.lang.StringUtils;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import io.reactivex.android.schedulers.AndroidSchedulers;
import io.reactivex.disposables.CompositeDisposable;
import openfoodfacts.github.scrachx.openfood.R;
import openfoodfacts.github.scrachx.openfood.databinding.ActivityFullScreenImageBinding;
import openfoodfacts.github.scrachx.openfood.features.adapters.LanguageDataAdapter;
import openfoodfacts.github.scrachx.openfood.features.shared.BaseActivity;
import openfoodfacts.github.scrachx.openfood.images.ImageKeyHelper;
import openfoodfacts.github.scrachx.openfood.images.ImageSize;
import openfoodfacts.github.scrachx.openfood.images.ImageTransformationUtils;
import openfoodfacts.github.scrachx.openfood.images.ProductImage;
import openfoodfacts.github.scrachx.openfood.models.Product;
import openfoodfacts.github.scrachx.openfood.models.ProductImageField;
import openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIClient;
import openfoodfacts.github.scrachx.openfood.utils.FileDownloader;
import openfoodfacts.github.scrachx.openfood.utils.FileUtils;
import openfoodfacts.github.scrachx.openfood.utils.LocaleHelper;
import openfoodfacts.github.scrachx.openfood.utils.PhotoReceiverHandler;
import openfoodfacts.github.scrachx.openfood.utils.SwipeDetector;
import openfoodfacts.github.scrachx.openfood.utils.Utils;
import pl.aprilapps.easyphotopicker.EasyImage;
import smartdevelop.ir.eram.showcaseviewlib.GuideView;
import static android.Manifest.permission.CAMERA;
import static android.content.pm.PackageManager.PERMISSION_GRANTED;
import static openfoodfacts.github.scrachx.openfood.utils.Utils.MY_PERMISSIONS_REQUEST_CAMERA;
import static org.apache.commons.lang.StringUtils.isNotEmpty;
/**
* Activity to display/edit product images
*/
public class ImagesManageActivity extends BaseActivity {
private static final int RESULTCODE_MODIFIED = 1;
private static final int REQUEST_EDIT_IMAGE_AFTER_LOGIN = 1;
private static final int REQUEST_ADD_IMAGE_AFTER_LOGIN = 2;
private static final int REQUEST_CHOOSE_IMAGE_AFTER_LOGIN = 3;
private static final int REQUEST_UNSELECT_IMAGE_AFTER_LOGIN = 4;
public static final int REQUEST_EDIT_IMAGE = 1000;
private static final int REQUEST_CHOOSE_IMAGE = 1001;
private static final List<ProductImageField> TYPE_IMAGE = Arrays.asList(ProductImageField.FRONT, ProductImageField.INGREDIENTS, ProductImageField.NUTRITION);
private ActivityFullScreenImageBinding binding;
private OpenFoodAPIClient client;
private File lastViewedImage;
private PhotoViewAttacher attacher;
private SharedPreferences settings;
private CompositeDisposable disp;
public static boolean isImageModified(int requestCode, int resultCode) {
return requestCode == REQUEST_EDIT_IMAGE && resultCode == ImagesManageActivity.RESULTCODE_MODIFIED;
}
@Override
protected void onDestroy() {
super.onDestroy();
disp.dispose();
binding = null;
}
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
disp = new CompositeDisposable();
client = new OpenFoodAPIClient(this);
binding = ActivityFullScreenImageBinding.inflate(getLayoutInflater());
setContentView(binding.getRoot());
// Setup onclick listeners
binding.btnDone.setOnClickListener(v -> onExit());
binding.btnUnselectImage.setOnClickListener(v -> unSelectImage());
binding.btnChooseImage.setOnClickListener(v -> onChooseImage());
binding.btnAddImage.setOnClickListener(v -> onAddImage());
binding.btnChooseDefaultLanguage.setOnClickListener(v -> onSelectDefaultLanguage());
binding.btnEditImage.setOnClickListener(v -> onStartEditExistingImage());
binding.comboLanguages.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
@Override
public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
onLanguageChanged();
}
@Override
public void onNothingSelected(AdapterView<?> parent) {
// Do nothing
}
});
binding.comboImageType.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
@Override
public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
onImageTypeChanged();
}
@Override
public void onNothingSelected(AdapterView<?> parent) {
// Do nothing
}
});
settings = getSharedPreferences("prefs", 0);
if (settings.getBoolean(getString(R.string.check_first_time), true)) {
startShowCase(getString(R.string.title_image_type), getString(R.string.content_image_type), R.id.comboImageType, 1);
}
Intent intent = getIntent();
Product product = (Product) intent.getSerializableExtra(ImageKeyHelper.PRODUCT);
boolean canEdit = product != null;
binding.btnEditImage.setVisibility(canEdit ? View.VISIBLE : View.INVISIBLE);
binding.btnUnselectImage.setVisibility(binding.btnEditImage.getVisibility());
attacher = new PhotoViewAttacher(binding.imageViewFullScreen);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
//delaying the transition until the view has been laid out
postponeEnterTransition();
}
new SwipeDetector(binding.imageViewFullScreen, (view, swipeType) -> {
if (swipeType == SwipeDetector.SwipeTypeEnum.LEFT_TO_RIGHT) {
incrementImageType(-1);
} else if (swipeType == SwipeDetector.SwipeTypeEnum.RIGHT_TO_LEFT) {
incrementImageType(1);
} else if (swipeType == SwipeDetector.SwipeTypeEnum.TOP_TO_BOTTOM) {
onRefresh(true);
} else {
stopRefresh();
}
});
ArrayAdapter<String> adapter = new ArrayAdapter<>(this, R.layout.simple_spinner_item_white, generateImageTypeNames());
adapter.setDropDownViewResource(android.R.layout.simple_list_item_single_choice);
binding.comboImageType.setAdapter(adapter);
setSupportActionBar(binding.toolbar);
if (getSupportActionBar() != null) {
getSupportActionBar().setDisplayHomeAsUpEnabled(true);
}
loadLanguage();
binding.comboImageType.setSelection(TYPE_IMAGE.indexOf(getSelectedType()));
updateProductImagesInfo(null);
onRefresh(false);
}
private void startShowCase(String title, String content, int viewId, final int type) {
new GuideView.Builder(this)
.setTitle(title)
.setContentText(content)
.setTargetView(findViewById(viewId))
.setContentTextSize(12)
.setTitleTextSize(16)
.setDismissType(GuideView.DismissType.outside)
.setGuideListener(view -> {
switch (type) {
case 1:
startShowCase(getString(R.string.title_choose_language), getString(R.string.content_choose_language), R.id.comboLanguages, 2);
break;
case 2:
startShowCase(getString(R.string.title_add_photo), getString(R.string.content_add_photo), R.id.btnAddImage, 3);
break;
case 3:
startShowCase(getString(R.string.title_choose_photo), getString(R.string.content_choose_photo), R.id.btnChooseImage, 4);
break;
case 4:
startShowCase(getString(R.string.title_edit_photo), getString(R.string.content_edit_photo), R.id.btnEditImage, 5);
break;
case 5:
startShowCase(getString(R.string.title_unselect_photo), getString(R.string.content_unselect_photo), R.id.btnUnselectImage, 6);
break;
case 6:
startShowCase(getString(R.string.title_exit), getString(R.string.content_exit), R.id.btn_done, 7);
break;
case 7:
SharedPreferences.Editor editor = settings.edit();
editor.putBoolean(getString(R.string.check_first_time), false);
editor.apply();
break;
}
})
.build()
.show();
}
@NonNull
private List<String> generateImageTypeNames() {
List<String> images = new ArrayList<>();
for (ProductImageField type : TYPE_IMAGE) {
images.add(getResources().getString(ImageKeyHelper.getResourceId(type)));
}
return images;
}
@Override
public boolean onSupportNavigateUp() {
finish();
return true;
}
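    /**
     * Moves the image type spinner selection by the given offset, wrapping around at both ends.
     *
     * @param inc offset to apply to the current selection (may be negative)
     */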
private void incrementImageType(int inc) {
stopRefresh();
int newPosition = binding.comboImageType.getSelectedItemPosition() + inc;
final int count = binding.comboImageType.getAdapter().getCount();
if (newPosition < 0) {
newPosition = count - 1;
} else {
newPosition = newPosition % count;
}
binding.comboImageType.setSelection(newPosition, true);
}
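    /**
     * Fills the language spinner with the languages for which an image of the selected type exists,
     * followed by the remaining supported languages, and restores the current selection.
     */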
private void loadLanguage() {
Product product = getProduct();
if (product == null) {
return;
}
//we load all available languages for product/type
String currentLanguage = getCurrentLanguage();
final ProductImageField productImageField = getSelectedType();
final Set<String> addedLanguages = new HashSet<>(product.getAvailableLanguageForImage(productImageField, ImageSize.DISPLAY));
final List<LocaleHelper.LanguageData> languageForImage = LocaleHelper.getLanguageData(addedLanguages, true);
int selectedIndex = LocaleHelper.find(languageForImage, currentLanguage);
if (selectedIndex < 0) {
addedLanguages.add(currentLanguage);
languageForImage.add(LocaleHelper.getLanguageData(currentLanguage, false));
}
String[] localeValues = getResources().getStringArray(R.array.languages_array);
List<String> otherNotSupportedCode = new ArrayList<>();
for (String local : localeValues) {
if (!addedLanguages.contains(local)) {
otherNotSupportedCode.add(local);
}
}
languageForImage.addAll(LocaleHelper.getLanguageData(otherNotSupportedCode, false));
LanguageDataAdapter adapter = new LanguageDataAdapter(this, R.layout.simple_spinner_item_white, languageForImage);
adapter.setDropDownViewResource(android.R.layout.simple_list_item_single_choice);
binding.comboLanguages.setAdapter(adapter);
selectedIndex = LocaleHelper.find(languageForImage, currentLanguage);
if (selectedIndex >= 0) {
binding.comboLanguages.setSelection(selectedIndex);
}
updateLanguageStatus();
updateSelectDefaultLanguageAction();
}
/**
     * Used to warn the user that there is no image for the selected language.
*/
private boolean updateLanguageStatus() {
final ProductImageField serializableExtra = getSelectedType();
String imageUrl = getCurrentImageUrl();
String languageUsedByImage = ImageKeyHelper.getLanguageCodeFromUrl(serializableExtra, imageUrl);
String language = getCurrentLanguage();
        //if the language of the displayed image is not the same as the language in this activity
//we use the language of the image
boolean languageSupported = language.equals(languageUsedByImage);
if (languageSupported) {
binding.textInfo.setText(null);
binding.textInfo.setTextColor(ContextCompat.getColor(this, R.color.white));
} else {
binding.textInfo.setText(R.string.image_not_defined_for_language);
binding.textInfo.setTextColor(ContextCompat.getColor(this, R.color.orange));
}
binding.btnEditImage.setVisibility(languageSupported ? View.VISIBLE : View.GONE);
binding.btnUnselectImage.setVisibility(binding.btnEditImage.getVisibility());
return languageSupported;
}
private String getCurrentLanguage() {
final String language = getIntent().getStringExtra(ImageKeyHelper.LANGUAGE);
if (language == null) {
return LocaleHelper.getLanguage(getBaseContext());
}
return language;
}
private void updateToolbarTitle(Product product) {
if (product != null) {
binding.toolbar.setTitle(String.format("%s / %s",
StringUtils.defaultString(product.getLocalProductName(this)),
binding.comboImageType.getSelectedItem().toString()));
}
}
@Override
protected void onResume() {
super.onResume();
updateToolbarTitle(getProduct());
}
private void onRefresh(boolean reloadProduct) {
String imageUrl = getCurrentImageUrl();
if (reloadProduct || imageUrl == null) {
reloadProduct();
} else {
loadImage(imageUrl);
}
}
private void loadImage(String imageUrl) {
if (isNotEmpty(imageUrl)) {
String url = imageUrl;
if (FileUtils.isAbsolute(url)) {
url = "file://" + url;
}
startRefresh(getString(R.string.txtLoading));
Utils.picassoBuilder(this)
.load(url)
.into(binding.imageViewFullScreen, new Callback() {
@Override
public void onSuccess() {
attacher.update();
scheduleStartPostponedTransition(binding.imageViewFullScreen);
binding.imageViewFullScreen.setVisibility(View.VISIBLE);
stopRefresh();
}
@Override
public void onError(Exception ex) {
binding.imageViewFullScreen.setVisibility(View.VISIBLE);
Toast.makeText(ImagesManageActivity.this, getResources().getString(R.string.txtConnectionError), Toast.LENGTH_LONG).show();
stopRefresh();
}
});
} else {
binding.imageViewFullScreen.setImageDrawable(null);
stopRefresh();
}
}
/**
* Reloads product images from the server. Updates images and the language.
*/
private void reloadProduct() {
if (isFinishing()) {
return;
}
Product product = getProduct();
if (product != null) {
startRefresh(getString(R.string.loading_product,
StringUtils.defaultString(product.getLocalProductName(this) + "...")));
client.getProductImages(product.getCode(), newState -> {
final Product newStateProduct = newState.getProduct();
boolean imageReloaded = false;
if (newStateProduct != null) {
updateToolbarTitle(newStateProduct);
String imageUrl = getCurrentImageUrl();
getIntent().putExtra(ImageKeyHelper.PRODUCT, newStateProduct);
final String newImageUrl = getImageUrlToDisplay(newStateProduct);
loadLanguage();
if (imageUrl == null || !imageUrl.equals(newImageUrl)) {
getIntent().putExtra(ImageKeyHelper.IMAGE_URL, newImageUrl);
loadImage(newImageUrl);
imageReloaded = true;
}
} else {
if (StringUtils.isNotBlank(newState.getStatusVerbose())) {
Toast.makeText(ImagesManageActivity.this, newState.getStatusVerbose(), Toast.LENGTH_LONG).show();
}
}
if (!imageReloaded) {
stopRefresh();
}
});
}
}
/**
* The additional field "images" is not loaded by default by OFF as it's only used to edit an image.
* So we load the product images in background.
* Could be improved by loading only the field "images".
*/
private void updateProductImagesInfo(Runnable toDoAfter) {
Product product = getProduct();
if (product != null) {
client.getProductImages(product.getCode(), newState -> {
final Product newStateProduct = newState.getProduct();
if (newStateProduct != null) {
getIntent().putExtra(ImageKeyHelper.PRODUCT, newStateProduct);
}
if (toDoAfter != null) {
toDoAfter.run();
}
});
}
}
private String getImageUrlToDisplay(@NonNull Product product) {
return product.getSelectedImage(getCurrentLanguage(), getSelectedType(),
ImageSize.DISPLAY);
}
private String getCurrentImageUrl() {
return getIntent().getStringExtra(ImageKeyHelper.IMAGE_URL);
}
/**
* @see #startRefresh(String)
*/
private void stopRefresh() {
binding.progressBar.setVisibility(View.GONE);
updateLanguageStatus();
}
private boolean isRefreshing() {
return binding.progressBar.getVisibility() == View.VISIBLE;
}
/**
     * @param text message to display while refreshing; ignored if null
* @see #stopRefresh()
*/
private void startRefresh(@Nullable String text) {
binding.progressBar.setVisibility(View.VISIBLE);
if (text != null) {
binding.textInfo.setTextColor(ContextCompat.getColor(this, R.color.white));
binding.textInfo.setText(text);
}
}
void onSelectDefaultLanguage() {
String lang = LocaleHelper.getLocale(getProduct().getLang()).getLanguage();
LocaleHelper.getLanguageData(lang, true);
final int position = ((LanguageDataAdapter) binding.comboLanguages.getAdapter()).getPosition(lang);
if (position >= 0) {
binding.comboLanguages.setSelection(position, true);
}
}
void onExit() {
setResult(RESULT_OK);
finish();
}
private void unSelectImage() {
if (cannotEdit(REQUEST_UNSELECT_IMAGE_AFTER_LOGIN)) {
return;
}
startRefresh(getString(R.string.unselect_image));
client.unSelectImage(getProduct().getCode(), getSelectedType(), getCurrentLanguage(), (value, response) -> {
if (value) {
setResult(RESULTCODE_MODIFIED);
}
reloadProduct();
});
}
private void onChooseImage() {
if (cannotEdit(REQUEST_CHOOSE_IMAGE_AFTER_LOGIN)) {
return;
}
final Intent intent = new Intent(this, ImagesSelectActivity.class);
intent.putExtra(ImageKeyHelper.PRODUCT_BARCODE, getProduct().getCode());
intent.putExtra(ImagesSelectActivity.TOOLBAR_TITLE, binding.toolbar.getTitle());
startActivityForResult(intent, REQUEST_CHOOSE_IMAGE);
}
/**
* Check if user is able to edit or not.
*
* @param loginRequestCode request code to pass to {@link #startActivityForResult(Intent, int)}.
* @return true if user <strong>cannot edit</strong>, false otherwise.
*/
private boolean cannotEdit(int loginRequestCode) {
if (isRefreshing()) {
Toast.makeText(this, R.string.cant_modify_if_refreshing, Toast.LENGTH_SHORT).show();
return true;
}
        //if the user is not logged in, we force them to log in
if (!isUserLoggedIn()) {
startActivityForResult(new Intent(this, LoginActivity.class), loginRequestCode);
return true;
}
return false;
}
private void onAddImage() {
if (cannotEdit(REQUEST_ADD_IMAGE_AFTER_LOGIN)) {
return;
}
if (ContextCompat.checkSelfPermission(this, CAMERA) != PERMISSION_GRANTED) {
ActivityCompat.requestPermissions(this, new String[]{CAMERA}, MY_PERMISSIONS_REQUEST_CAMERA);
} else {
EasyImage.openCamera(this, 0);
}
}
@Override
public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults);
if (requestCode == MY_PERMISSIONS_REQUEST_CAMERA && Utils.isAllGranted(grantResults)) {
onAddImage();
}
}
private void updateSelectDefaultLanguageAction() {
boolean isDefault = getProduct().getLang() != null && getCurrentLanguage().equals(LocaleHelper.getLocale(getProduct().getLang()).getLanguage());
binding.btnChooseDefaultLanguage.setVisibility(isDefault ? View.INVISIBLE : View.VISIBLE);
}
private void onStartEditExistingImage() {
if (cannotEdit(REQUEST_EDIT_IMAGE_AFTER_LOGIN)) {
return;
}
Product product = getProduct();
final ProductImageField productImageField = getSelectedType();
String language = getCurrentLanguage();
//the rotation/crop set on the server
ImageTransformationUtils transformation = ImageTransformationUtils.getScreenTransformation(product, productImageField, language);
        //the first time, the image properties are not loaded yet...
if (transformation.isEmpty()) {
updateProductImagesInfo(() -> editPhoto(productImageField, ImageTransformationUtils.getScreenTransformation(product, productImageField, language)));
}
editPhoto(productImageField, transformation);
}
private void editPhoto(ProductImageField productImageField, @NonNull ImageTransformationUtils transformation) {
if (transformation.isNotEmpty()) {
disp.add(FileDownloader.download(this, transformation.getInitImageUrl())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(file -> {
//to delete the file after:
lastViewedImage = file;
cropRotateExistingImageOnServer(file, getString(ImageKeyHelper.getResourceIdForEditAction(productImageField)), transformation);
}));
}
}
private Product getProduct() {
return (Product) getIntent().getSerializableExtra(ImageKeyHelper.PRODUCT);
}
private void onLanguageChanged() {
LocaleHelper.LanguageData data = (LocaleHelper.LanguageData) binding.comboLanguages.getSelectedItem();
Product product = getProduct();
if (!data.getCode().equals(getCurrentLanguage())) {
getIntent().putExtra(ImageKeyHelper.LANGUAGE, data.getCode());
getIntent().putExtra(ImageKeyHelper.IMAGE_URL, getImageUrlToDisplay(product));
updateToolbarTitle(product);
onRefresh(false);
}
updateSelectDefaultLanguageAction();
}
private ProductImageField getSelectedType() {
return (ProductImageField) getIntent().getSerializableExtra(ImageKeyHelper.IMAGE_TYPE);
}
private void onImageTypeChanged() {
if (getProduct() == null) {
return;
}
ProductImageField newTypeSelected = TYPE_IMAGE.get(binding.comboImageType.getSelectedItemPosition());
final ProductImageField selectedType = getSelectedType();
if (newTypeSelected.equals(selectedType)) {
return;
}
getIntent().putExtra(ImageKeyHelper.IMAGE_TYPE, newTypeSelected);
getIntent().putExtra(ImageKeyHelper.IMAGE_URL, getImageUrlToDisplay(getProduct()));
onRefresh(false);
loadLanguage();
updateToolbarTitle(getProduct());
}
private void cropRotateExistingImageOnServer(File image, String title, ImageTransformationUtils transformation) {
Uri uri = Uri.fromFile(image);
final CropImage.ActivityBuilder activityBuilder = CropImage.activity(uri)
.setCropMenuCropButtonIcon(R.drawable.ic_check_white_24dp)
.setAllowFlipping(false)
//we just want crop size/rotation
.setNoOutputImage(true)
.setAllowRotation(true)
.setAllowCounterRotation(true)
.setAutoZoomEnabled(false)
.setInitialRotation(transformation.getRotationInDegree())
.setActivityTitle(title);
if (transformation.getCropRectangle() != null) {
activityBuilder.setInitialCropWindowRectangle(transformation.getCropRectangle());
} else {
activityBuilder.setInitialCropWindowPaddingRatio(0);
}
startActivityForResult(activityBuilder.getIntent(this, CropImageActivity.class), REQUEST_EDIT_IMAGE);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
// do nothing
switch (requestCode) {
case REQUEST_EDIT_IMAGE_AFTER_LOGIN:
if (resultCode == RESULT_OK) {
onStartEditExistingImage();
}
break;
case REQUEST_ADD_IMAGE_AFTER_LOGIN:
if (resultCode == RESULT_OK) {
onAddImage();
}
break;
case REQUEST_CHOOSE_IMAGE_AFTER_LOGIN:
if (resultCode == RESULT_OK) {
onChooseImage();
}
break;
case REQUEST_UNSELECT_IMAGE_AFTER_LOGIN:
if (resultCode == RESULT_OK) {
unSelectImage();
}
break;
case REQUEST_EDIT_IMAGE:
applyEditExistingImage(resultCode, data);
break;
case REQUEST_CHOOSE_IMAGE:
if (resultCode == RESULT_OK && data != null) {
File file = (File) data.getSerializableExtra(ImageKeyHelper.IMAGE_FILE);
String imgId = data.getStringExtra(ImageKeyHelper.IMG_ID);
                    //photo chosen from the gallery
if (file != null) {
onPhotoReturned(file);
} else if (StringUtils.isNotBlank(imgId)) {
HashMap<String, String> imgMap = new HashMap<>();
imgMap.put(ImageKeyHelper.IMG_ID, imgId);
postEditImage(imgMap);
}
}
break;
default:
new PhotoReceiverHandler(this::onPhotoReturned).onActivityResult(this, requestCode, resultCode, data);
break;
}
}
/**
     * @param resultCode should be {@link Activity#RESULT_OK} for the edit to be applied
     * @param dataFromCropActivity result data from the crop activity; if null, the action is ignored
*/
private void applyEditExistingImage(int resultCode, @Nullable Intent dataFromCropActivity) {
// Delete downloaded local file
deleteLocalFiles();
        // if the selected language is not the same as the language of the current image we can't modify it: only add
if (!isUserLoggedIn() || !updateLanguageStatus() || dataFromCropActivity == null) {
return;
}
if (resultCode == Activity.RESULT_OK) {
startRefresh(StringUtils.EMPTY);
CropImage.ActivityResult result = CropImage.getActivityResult(dataFromCropActivity);
final Product product = getProduct();
ImageTransformationUtils currentServerTransformation = ImageTransformationUtils.getInitialServerTransformation(product, getSelectedType(), getCurrentLanguage());
ImageTransformationUtils newServerTransformation = ImageTransformationUtils
.toServerTransformation(new ImageTransformationUtils(result.getRotation(), result.getCropRect()), product, getSelectedType(), getCurrentLanguage());
boolean isModified = !currentServerTransformation.equals(newServerTransformation);
if (isModified) {
startRefresh(getString(R.string.toastSending));
HashMap<String, String> imgMap = new HashMap<>();
imgMap.put(ImageKeyHelper.IMG_ID, newServerTransformation.getInitImageId());
ImageTransformationUtils.addTransformToMap(newServerTransformation, imgMap);
postEditImage(imgMap);
} else {
stopRefresh();
}
}
}
private void postEditImage(@NonNull HashMap<String, String> imgMap) {
final String code = getProduct().getCode();
imgMap.put(ImageKeyHelper.PRODUCT_BARCODE, code);
imgMap.put(ImageKeyHelper.IMAGE_STRING_ID, ImageKeyHelper.getImageStringKey(getSelectedType(), getCurrentLanguage()));
binding.imageViewFullScreen.setVisibility(View.INVISIBLE);
client.editImage(code, imgMap, (value, response) -> {
if (value) {
setResult(RESULTCODE_MODIFIED);
}
reloadProduct();
});
}
private void deleteLocalFiles() {
if (lastViewedImage != null) {
boolean deleted = lastViewedImage.delete();
if (!deleted) {
Log.w(ImagesManageActivity.class.getSimpleName(),
String.format("Cannot delete file %s.", lastViewedImage.getAbsolutePath()));
} else {
lastViewedImage = null;
}
}
}
/**
* For scheduling a postponed transition after the proper measures of the view are done
* and the view has been properly laid out in the View hierarchy
*/
private void scheduleStartPostponedTransition(@NonNull final View sharedElement) {
sharedElement.getViewTreeObserver().addOnPreDrawListener(
new ViewTreeObserver.OnPreDrawListener() {
@Override
public boolean onPreDraw() {
sharedElement.getViewTreeObserver().removeOnPreDrawListener(this);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
startPostponedEnterTransition();
}
return true;
}
});
}
/**
* @param newPhotoFile photo selected by the user to be sent to the server.
*/
public void onPhotoReturned(File newPhotoFile) {
startRefresh(getString(R.string.uploading_image));
ProductImage image = new ProductImage(getProduct().getCode(), getSelectedType(), newPhotoFile, getCurrentLanguage());
image.setFilePath(newPhotoFile.getAbsolutePath());
disp.add(client.postImg(image, true).observeOn(AndroidSchedulers.mainThread()).subscribe(() -> {
reloadProduct();
setResult(RESULTCODE_MODIFIED);
}, throwable -> {
Toast.makeText(ImagesManageActivity.this, throwable.getMessage(), Toast.LENGTH_LONG).show();
Log.e(ImagesManageActivity.class.getSimpleName(), throwable.getMessage(), throwable);
stopRefresh();
}));
}
}
| 1 | 68,386 | I would rename the field to IMAGE_TYPES and make it an array. I don't think we need list operations. Also, if possible, I would move the field to the ApiFields class. What do you think? | openfoodfacts-openfoodfacts-androidapp | java |
@@ -117,6 +117,17 @@ func verifyContainerRunningStateChangeWithRuntimeID(t *testing.T, taskEngine Tas
"Expected container runtimeID should not empty")
}
+func verifyExecAgentRunningStateChange(t *testing.T, taskEngine TaskEngine) {
+ stateChangeEvents := taskEngine.StateChangeEvents()
+ event := <-stateChangeEvents
+ containerEvent := event.(api.ContainerStateChange)
+ assert.NotEmpty(t, containerEvent.ManagedAgents, "Expected exec-agent event has no managed agents")
+ if containerEvent.ManagedAgents != nil {
+ assert.Equal(t, apicontainerstatus.ManagedAgentRunning, containerEvent.ManagedAgents[0].Status,
+ "expected managedAgent container state change event did not match actual event")
+ }
+}
+
func verifyContainerStoppedStateChange(t *testing.T, taskEngine TaskEngine) {
stateChangeEvents := taskEngine.StateChangeEvents()
event := <-stateChangeEvents | 1 | // +build sudo integration
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package engine
import (
"context"
"fmt"
"io/ioutil"
"os"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/aws/amazon-ecs-agent/agent/api"
apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/containermetadata"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/data"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
"github.com/aws/amazon-ecs-agent/agent/engine/execcmd"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
"github.com/aws/amazon-ecs-agent/agent/statechange"
log "github.com/cihub/seelog"
"github.com/stretchr/testify/assert"
)
var (
sdkClientFactory sdkclientfactory.Factory
)
func init() {
sdkClientFactory = sdkclientfactory.NewFactory(context.TODO(), dockerEndpoint)
}
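// defaultTestConfigIntegTest returns an agent config suitable for integration tests, with
// task-level CPU/memory limits disabled and cached images preferred on pull.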
func defaultTestConfigIntegTest() *config.Config {
cfg, _ := config.NewConfig(ec2.NewBlackholeEC2MetadataClient())
cfg.TaskCPUMemLimit.Value = config.ExplicitlyDisabled
cfg.ImagePullBehavior = config.ImagePullPreferCachedBehavior
return cfg
}
func createTestTask(arn string) *apitask.Task {
return &apitask.Task{
Arn: arn,
Family: "family",
Version: "1",
DesiredStatusUnsafe: apitaskstatus.TaskRunning,
Containers: []*apicontainer.Container{createTestContainer()},
}
}
func setupIntegTestLogs(t *testing.T) string {
// Create a directory for storing test logs.
testLogDir, err := ioutil.TempDir("", "ecs-integ-test")
require.NoError(t, err, "Unable to create directory for storing test logs")
logger, err := log.LoggerFromConfigAsString(loggerConfigIntegrationTest(testLogDir))
assert.NoError(t, err, "initialisation failed")
err = log.ReplaceLogger(logger)
assert.NoError(t, err, "unable to replace logger")
return testLogDir
}
func loggerConfigIntegrationTest(logfile string) string {
config := fmt.Sprintf(`
<seelog type="asyncloop" minlevel="debug">
<outputs formatid="main">
<console />
<rollingfile filename="%s/ecs-agent-log.log" type="date"
datepattern="2006-01-02-15" archivetype="none" maxrolls="24" />
</outputs>
<formats>
<format id="main" format="%%UTCDate(2006-01-02T15:04:05Z07:00) [%%LEVEL] %%Msg%%n" />
<format id="windows" format="%%Msg" />
</formats>
</seelog>`, logfile)
return config
}
func verifyContainerRunningStateChange(t *testing.T, taskEngine TaskEngine) {
stateChangeEvents := taskEngine.StateChangeEvents()
event := <-stateChangeEvents
assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerRunning,
"Expected container to be RUNNING")
}
func verifyContainerRunningStateChangeWithRuntimeID(t *testing.T, taskEngine TaskEngine) {
stateChangeEvents := taskEngine.StateChangeEvents()
event := <-stateChangeEvents
assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerRunning,
"Expected container to be RUNNING")
assert.NotEqual(t, "", event.(api.ContainerStateChange).RuntimeID,
"Expected container runtimeID should not empty")
}
func verifyContainerStoppedStateChange(t *testing.T, taskEngine TaskEngine) {
stateChangeEvents := taskEngine.StateChangeEvents()
event := <-stateChangeEvents
assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerStopped,
"Expected container to be STOPPED")
}
func verifyContainerStoppedStateChangeWithRuntimeID(t *testing.T, taskEngine TaskEngine) {
stateChangeEvents := taskEngine.StateChangeEvents()
event := <-stateChangeEvents
assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerStopped,
"Expected container to be STOPPED")
assert.NotEqual(t, "", event.(api.ContainerStateChange).RuntimeID,
"Expected container runtimeID should not empty")
}
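// setup creates a DockerTaskEngine backed by a real Docker client for integration tests and
// returns the engine, a function that shuts it down, and the credentials manager it uses.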
func setup(cfg *config.Config, state dockerstate.TaskEngineState, t *testing.T) (TaskEngine, func(), credentials.Manager) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
skipIntegTestIfApplicable(t)
sdkClientFactory := sdkclientfactory.NewFactory(ctx, dockerEndpoint)
dockerClient, err := dockerapi.NewDockerGoClient(sdkClientFactory, cfg, context.Background())
if err != nil {
t.Fatalf("Error creating Docker client: %v", err)
}
credentialsManager := credentials.NewManager()
if state == nil {
state = dockerstate.NewTaskEngineState()
}
imageManager := NewImageManager(cfg, dockerClient, state)
imageManager.SetDataClient(data.NewNoopClient())
metadataManager := containermetadata.NewManager(dockerClient, cfg)
taskEngine := NewDockerTaskEngine(cfg, dockerClient, credentialsManager,
eventstream.NewEventStream("ENGINEINTEGTEST", context.Background()), imageManager, state, metadataManager,
nil, execcmd.NewManager())
taskEngine.MustInit(context.TODO())
return taskEngine, func() {
taskEngine.Shutdown()
}, credentialsManager
}
func skipIntegTestIfApplicable(t *testing.T) {
if os.Getenv("ECS_SKIP_ENGINE_INTEG_TEST") != "" {
t.Skip("ECS_SKIP_ENGINE_INTEG_TEST")
}
if !isDockerRunning() {
t.Skip("Docker not running")
}
}
func createTestContainerWithImageAndName(image string, name string) *apicontainer.Container {
return &apicontainer.Container{
Name: name,
Image: image,
Command: []string{},
Essential: true,
DesiredStatusUnsafe: apicontainerstatus.ContainerRunning,
CPU: 1024,
Memory: 128,
}
}
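// waitForTaskCleanup polls the engine state until the task is no longer tracked or the given
// number of seconds has elapsed, failing the test on timeout.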
func waitForTaskCleanup(t *testing.T, taskEngine TaskEngine, taskArn string, seconds int) {
for i := 0; i < seconds; i++ {
_, ok := taskEngine.(*DockerTaskEngine).State().TaskByArn(taskArn)
if !ok {
return
}
time.Sleep(1 * time.Second)
}
t.Fatalf("timed out waiting for task to be clean up, task: %s", taskArn)
}
// A map that stores statusChangeEvents for both Tasks and Containers
// Organized first by EventType (Task or Container),
// then by StatusType (i.e. RUNNING, STOPPED, etc)
// then by Task/Container identifying string (TaskARN or ContainerName)
// EventType
// / \
// TaskEvent ContainerEvent
// / \ / \
// RUNNING STOPPED RUNNING STOPPED
// / \ / \ | |
// ARN1 ARN2 ARN3 ARN4 ARN:Cont1 ARN:Cont2
type EventSet map[statechange.EventType]statusToName
// Type definition for mapping a Status to a TaskARN/ContainerName
type statusToName map[string]nameSet
// Type definition for a generic set implemented as a map
type nameSet map[string]bool
// Holds the Events Map described above with a RW mutex
type TestEvents struct {
RecordedEvents EventSet
StateChangeEvents <-chan statechange.Event
}
// Initializes the TestEvents using the TaskEngine. Abstracts the overhead required to set up
// collecting TaskEngine stateChangeEvents.
// We must use the Golang assert library and NOT the require library to ensure the Go routine is
// stopped at the end of our tests
func InitEventCollection(taskEngine TaskEngine) *TestEvents {
stateChangeEvents := taskEngine.StateChangeEvents()
recordedEvents := make(EventSet)
testEvents := &TestEvents{
RecordedEvents: recordedEvents,
StateChangeEvents: stateChangeEvents,
}
return testEvents
}
// This method queries the TestEvents struct to check a Task Status.
// This method will block if the expected status has not yet been recorded and there are no more stateChangeEvents from the DockerTaskEngine; this is expected.
func VerifyTaskStatus(status apitaskstatus.TaskStatus, taskARN string, testEvents *TestEvents, t *testing.T) error {
for {
if _, found := testEvents.RecordedEvents[statechange.TaskEvent][status.String()][taskARN]; found {
return nil
}
event := <-testEvents.StateChangeEvents
RecordEvent(testEvents, event)
}
}
// This method queries the TestEvents struct to check a Container Status.
// This method will block if the expected status has not yet been recorded and there are no more stateChangeEvents from the DockerTaskEngine; this is expected.
func VerifyContainerStatus(status apicontainerstatus.ContainerStatus, ARNcontName string, testEvents *TestEvents, t *testing.T) error {
for {
if _, found := testEvents.RecordedEvents[statechange.ContainerEvent][status.String()][ARNcontName]; found {
return nil
}
event := <-testEvents.StateChangeEvents
RecordEvent(testEvents, event)
}
}
// Will record the event that was just collected into the TestEvents struct's RecordedEvents map
func RecordEvent(testEvents *TestEvents, event statechange.Event) {
switch event.GetEventType() {
case statechange.TaskEvent:
taskEvent := event.(api.TaskStateChange)
if _, exists := testEvents.RecordedEvents[statechange.TaskEvent]; !exists {
testEvents.RecordedEvents[statechange.TaskEvent] = make(statusToName)
}
if _, exists := testEvents.RecordedEvents[statechange.TaskEvent][taskEvent.Status.String()]; !exists {
testEvents.RecordedEvents[statechange.TaskEvent][taskEvent.Status.String()] = make(map[string]bool)
}
testEvents.RecordedEvents[statechange.TaskEvent][taskEvent.Status.String()][taskEvent.TaskARN] = true
case statechange.ContainerEvent:
containerEvent := event.(api.ContainerStateChange)
if _, exists := testEvents.RecordedEvents[statechange.ContainerEvent]; !exists {
testEvents.RecordedEvents[statechange.ContainerEvent] = make(statusToName)
}
if _, exists := testEvents.RecordedEvents[statechange.ContainerEvent][containerEvent.Status.String()]; !exists {
testEvents.RecordedEvents[statechange.ContainerEvent][containerEvent.Status.String()] = make(map[string]bool)
}
testEvents.RecordedEvents[statechange.ContainerEvent][containerEvent.Status.String()][containerEvent.TaskArn+":"+containerEvent.ContainerName] = true
}
}
| 1 | 25,471 | I'd prefer to timeout on this, but I know it's not a pattern that is being followed | aws-amazon-ecs-agent | go |
@@ -37,10 +37,7 @@ import org.apache.lucene.store.BufferedChecksumIndexInput;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.StringHelper;
+import org.apache.lucene.util.*;
/**
* Reads vector values from a simple text format. All vectors are read up front and cached in RAM in | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.codecs.simpletext;
import static org.apache.lucene.codecs.simpletext.SimpleTextVectorWriter.*;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.codecs.VectorReader;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.RandomAccessVectorValues;
import org.apache.lucene.index.RandomAccessVectorValuesProducer;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.VectorValues;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.BufferedChecksumIndexInput;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
/**
* Reads vector values from a simple text format. All vectors are read up front and cached in RAM in
* order to support random access. <b>FOR RECREATIONAL USE ONLY</b>
*
* @lucene.experimental
*/
public class SimpleTextVectorReader extends VectorReader {
private static final BytesRef EMPTY = new BytesRef("");
private final SegmentReadState readState;
private final IndexInput dataIn;
private final BytesRefBuilder scratch = new BytesRefBuilder();
private final Map<String, FieldEntry> fieldEntries = new HashMap<>();
SimpleTextVectorReader(SegmentReadState readState) throws IOException {
this.readState = readState;
String metaFileName =
IndexFileNames.segmentFileName(
readState.segmentInfo.name,
readState.segmentSuffix,
SimpleTextVectorFormat.META_EXTENSION);
String vectorFileName =
IndexFileNames.segmentFileName(
readState.segmentInfo.name,
readState.segmentSuffix,
SimpleTextVectorFormat.VECTOR_EXTENSION);
boolean success = false;
try (ChecksumIndexInput in =
readState.directory.openChecksumInput(metaFileName, IOContext.DEFAULT)) {
int fieldNumber = readInt(in, FIELD_NUMBER);
while (fieldNumber != -1) {
String fieldName = readString(in, FIELD_NAME);
String scoreFunctionName = readString(in, SCORE_FUNCTION);
VectorValues.SearchStrategy searchStrategy =
VectorValues.SearchStrategy.valueOf(scoreFunctionName);
long vectorDataOffset = readLong(in, VECTOR_DATA_OFFSET);
long vectorDataLength = readLong(in, VECTOR_DATA_LENGTH);
int dimension = readInt(in, VECTOR_DIMENSION);
int size = readInt(in, SIZE);
int[] docIds = new int[size];
for (int i = 0; i < size; i++) {
docIds[i] = readInt(in, EMPTY);
}
assert fieldEntries.containsKey(fieldName) == false;
fieldEntries.put(
fieldName,
new FieldEntry(dimension, searchStrategy, vectorDataOffset, vectorDataLength, docIds));
fieldNumber = readInt(in, FIELD_NUMBER);
}
SimpleTextUtil.checkFooter(in);
dataIn = readState.directory.openInput(vectorFileName, IOContext.DEFAULT);
success = true;
} finally {
if (success == false) {
IOUtils.closeWhileHandlingException(this);
}
}
}
@Override
public VectorValues getVectorValues(String field) throws IOException {
FieldInfo info = readState.fieldInfos.fieldInfo(field);
if (info == null) {
// mirror the handling in Lucene90VectorReader#getVectorValues
// needed to pass TestSimpleTextVectorFormat#testDeleteAllVectorDocs
return null;
}
int dimension = info.getVectorDimension();
if (dimension == 0) {
return VectorValues.EMPTY;
}
FieldEntry fieldEntry = fieldEntries.get(field);
if (fieldEntry == null) {
// mirror the handling in Lucene90VectorReader#getVectorValues
// needed to pass TestSimpleTextVectorFormat#testDeleteAllVectorDocs
return null;
}
if (dimension != fieldEntry.dimension) {
throw new IllegalStateException(
"Inconsistent vector dimension for field=\""
+ field
+ "\"; "
+ dimension
+ " != "
+ fieldEntry.dimension);
}
IndexInput bytesSlice =
dataIn.slice("vector-data", fieldEntry.vectorDataOffset, fieldEntry.vectorDataLength);
return new SimpleTextVectorValues(fieldEntry, bytesSlice);
}
@Override
public void checkIntegrity() throws IOException {
IndexInput clone = dataIn.clone();
clone.seek(0);
// checksum is fixed-width encoded with 20 bytes, plus 1 byte for newline (the space is included
// in SimpleTextUtil.CHECKSUM):
long footerStartPos = dataIn.length() - (SimpleTextUtil.CHECKSUM.length + 21);
ChecksumIndexInput input = new BufferedChecksumIndexInput(clone);
// when there's no actual vector data written (e.g. tested in
// TestSimpleTextVectorFormat#testDeleteAllVectorDocs)
    // the first line in dataInput will be the checksum line, e.g. "checksum 00000000000000000000"
if (footerStartPos == 0) {
SimpleTextUtil.checkFooter(input);
return;
}
while (true) {
SimpleTextUtil.readLine(input, scratch);
if (input.getFilePointer() >= footerStartPos) {
// Make sure we landed at precisely the right location:
if (input.getFilePointer() != footerStartPos) {
throw new CorruptIndexException(
"SimpleText failure: footer does not start at expected position current="
+ input.getFilePointer()
+ " vs expected="
+ footerStartPos,
input);
}
SimpleTextUtil.checkFooter(input);
break;
}
}
}
@Override
public long ramBytesUsed() {
return 0;
}
@Override
public void close() throws IOException {
dataIn.close();
}
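  /** Per-field metadata: vector dimension, search strategy, data slice offsets and the ord-to-doc mapping. */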
private static class FieldEntry {
final int dimension;
final VectorValues.SearchStrategy searchStrategy;
final long vectorDataOffset;
final long vectorDataLength;
final int[] ordToDoc;
FieldEntry(
int dimension,
VectorValues.SearchStrategy searchStrategy,
long vectorDataOffset,
long vectorDataLength,
int[] ordToDoc) {
this.dimension = dimension;
this.searchStrategy = searchStrategy;
this.vectorDataOffset = vectorDataOffset;
this.vectorDataLength = vectorDataLength;
this.ordToDoc = ordToDoc;
}
int size() {
return ordToDoc.length;
}
}
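  /**
   * VectorValues implementation that eagerly reads all vectors of a field into memory, supporting
   * both forward iteration and random access by ordinal.
   */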
private static class SimpleTextVectorValues extends VectorValues
implements RandomAccessVectorValues, RandomAccessVectorValuesProducer {
private final BytesRefBuilder scratch = new BytesRefBuilder();
private final FieldEntry entry;
private final IndexInput in;
private final BytesRef binaryValue;
private final float[][] values;
int curOrd;
SimpleTextVectorValues(FieldEntry entry, IndexInput in) throws IOException {
this.entry = entry;
this.in = in;
values = new float[entry.size()][entry.dimension];
binaryValue = new BytesRef(entry.dimension * Float.BYTES);
binaryValue.length = binaryValue.bytes.length;
curOrd = -1;
readAllVectors();
}
@Override
public int dimension() {
return entry.dimension;
}
@Override
public int size() {
return entry.size();
}
@Override
public SearchStrategy searchStrategy() {
return entry.searchStrategy;
}
@Override
public float[] vectorValue() {
return values[curOrd];
}
@Override
public BytesRef binaryValue() {
ByteBuffer.wrap(binaryValue.bytes).asFloatBuffer().get(values[curOrd]);
return binaryValue;
}
@Override
public RandomAccessVectorValues randomAccess() {
return this;
}
@Override
public int docID() {
if (curOrd == -1) {
return -1;
} else if (curOrd >= entry.size()) {
        // when a call to advance / nextDoc below has already returned NO_MORE_DOCS, calling docID
// immediately afterward should also return NO_MORE_DOCS
// this is needed for TestSimpleTextVectorFormat.testAdvance test case
return NO_MORE_DOCS;
}
return entry.ordToDoc[curOrd];
}
@Override
public int nextDoc() throws IOException {
if (++curOrd < entry.size()) {
return docID();
}
return NO_MORE_DOCS;
}
@Override
public int advance(int target) throws IOException {
return slowAdvance(target);
}
@Override
public long cost() {
return size();
}
private void readAllVectors() throws IOException {
for (float[] value : values) {
readVector(value);
}
}
private void readVector(float[] value) throws IOException {
SimpleTextUtil.readLine(in, scratch);
// skip leading "[" and strip trailing "]"
String s = new BytesRef(scratch.bytes(), 1, scratch.length() - 2).utf8ToString();
String[] floatStrings = s.split(",");
assert floatStrings.length == value.length
: " read " + s + " when expecting " + value.length + " floats";
for (int i = 0; i < floatStrings.length; i++) {
value[i] = Float.parseFloat(floatStrings[i]);
}
}
@Override
public float[] vectorValue(int targetOrd) throws IOException {
return values[targetOrd];
}
@Override
public BytesRef binaryValue(int targetOrd) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public TopDocs search(float[] target, int k, int fanout) throws IOException {
throw new UnsupportedOperationException();
}
}
private int readInt(IndexInput in, BytesRef field) throws IOException {
SimpleTextUtil.readLine(in, scratch);
return parseInt(field);
}
private long readLong(IndexInput in, BytesRef field) throws IOException {
SimpleTextUtil.readLine(in, scratch);
return parseLong(field);
}
private String readString(IndexInput in, BytesRef field) throws IOException {
SimpleTextUtil.readLine(in, scratch);
return stripPrefix(field);
}
private boolean startsWith(BytesRef prefix) {
return StringHelper.startsWith(scratch.get(), prefix);
}
private int parseInt(BytesRef prefix) {
assert startsWith(prefix);
return Integer.parseInt(stripPrefix(prefix));
}
private long parseLong(BytesRef prefix) {
assert startsWith(prefix);
return Long.parseLong(stripPrefix(prefix));
}
private String stripPrefix(BytesRef prefix) {
int prefixLen = prefix.length;
return new String(
scratch.bytes(), prefixLen, scratch.length() - prefixLen, StandardCharsets.UTF_8);
}
}
| 1 | 40,671 | hmm let's not use * imports please | apache-lucene-solr | java |
@@ -497,6 +497,10 @@ func generateAlertmanagerConfig(version semver.Version, am v1.AlertmanagerEndpoi
cfg = append(cfg, k8sSDWithNamespaces([]string{am.Namespace}))
}
+ if am.BearerTokenFile != "" {
+ cfg = append(cfg, yaml.MapItem{Key: "bearer_token_file", Value: am.BearerTokenFile})
+ }
+
var relabelings []yaml.MapSlice
relabelings = append(relabelings, yaml.MapSlice{ | 1 | // Copyright 2016 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"fmt"
"regexp"
"sort"
"strings"
"github.com/blang/semver"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
)
var (
invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
)
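// sanitizeLabelName replaces every character that is not valid in a Prometheus label name with an underscore.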
func sanitizeLabelName(name string) string {
return invalidLabelCharRE.ReplaceAllString(name, "_")
}
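// configMapRuleFileFolder returns the directory inside the Prometheus container where rule files
// from the ConfigMap with the given index are mounted.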
func configMapRuleFileFolder(configMapNumber int) string {
return fmt.Sprintf("/etc/prometheus/rules/rules-%d/", configMapNumber)
}
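// stringMapToMapSlice converts a string map into a yaml.MapSlice with keys sorted alphabetically
// so that the generated configuration is deterministic.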
func stringMapToMapSlice(m map[string]string) yaml.MapSlice {
res := yaml.MapSlice{}
ks := make([]string, 0)
	for k := range m {
ks = append(ks, k)
}
sort.Strings(ks)
for _, k := range ks {
res = append(res, yaml.MapItem{Key: k, Value: m[k]})
}
return res
}
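// addTLStoYaml appends a tls_config section to cfg when a TLSConfig is provided, emitting only
// the fields that are set.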
func addTLStoYaml(cfg yaml.MapSlice, tls *v1.TLSConfig) yaml.MapSlice {
if tls != nil {
tlsConfig := yaml.MapSlice{
{Key: "insecure_skip_verify", Value: tls.InsecureSkipVerify},
}
if tls.CAFile != "" {
tlsConfig = append(tlsConfig, yaml.MapItem{Key: "ca_file", Value: tls.CAFile})
}
if tls.CertFile != "" {
tlsConfig = append(tlsConfig, yaml.MapItem{Key: "cert_file", Value: tls.CertFile})
}
if tls.KeyFile != "" {
tlsConfig = append(tlsConfig, yaml.MapItem{Key: "key_file", Value: tls.KeyFile})
}
if tls.ServerName != "" {
tlsConfig = append(tlsConfig, yaml.MapItem{Key: "server_name", Value: tls.ServerName})
}
cfg = append(cfg, yaml.MapItem{Key: "tls_config", Value: tlsConfig})
}
return cfg
}
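// generateConfig assembles the complete Prometheus configuration for the given Prometheus resource:
// global settings, rule file paths, one scrape config per ServiceMonitor endpoint, the alerting
// section and optional remote read/write sections, returned as marshaled YAML.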
func generateConfig(p *v1.Prometheus, mons map[string]*v1.ServiceMonitor, ruleConfigMaps int, basicAuthSecrets map[string]BasicAuthCredentials) ([]byte, error) {
versionStr := p.Spec.Version
if versionStr == "" {
versionStr = DefaultVersion
}
version, err := semver.Parse(strings.TrimLeft(versionStr, "v"))
if err != nil {
return nil, errors.Wrap(err, "parse version")
}
cfg := yaml.MapSlice{}
scrapeInterval := "30s"
if p.Spec.ScrapeInterval != "" {
scrapeInterval = p.Spec.ScrapeInterval
}
evaluationInterval := "30s"
if p.Spec.EvaluationInterval != "" {
evaluationInterval = p.Spec.EvaluationInterval
}
cfg = append(cfg, yaml.MapItem{
Key: "global",
Value: yaml.MapSlice{
{Key: "evaluation_interval", Value: evaluationInterval},
{Key: "scrape_interval", Value: scrapeInterval},
{Key: "external_labels", Value: stringMapToMapSlice(p.Spec.ExternalLabels)},
},
})
if ruleConfigMaps > 0 {
configMaps := make([]string, ruleConfigMaps)
for i := 0; i < ruleConfigMaps; i++ {
configMaps[i] = configMapRuleFileFolder(i) + "*"
}
cfg = append(cfg, yaml.MapItem{
Key: "rule_files",
Value: configMaps,
})
}
identifiers := make([]string, len(mons))
i := 0
	for k := range mons {
identifiers[i] = k
i++
}
// Sorting ensures, that we always generate the config in the same order.
sort.Strings(identifiers)
var scrapeConfigs []yaml.MapSlice
for _, identifier := range identifiers {
for i, ep := range mons[identifier].Spec.Endpoints {
scrapeConfigs = append(scrapeConfigs, generateServiceMonitorConfig(version, mons[identifier], ep, i, basicAuthSecrets))
}
}
var alertmanagerConfigs []yaml.MapSlice
if p.Spec.Alerting != nil {
for _, am := range p.Spec.Alerting.Alertmanagers {
alertmanagerConfigs = append(alertmanagerConfigs, generateAlertmanagerConfig(version, am))
}
}
cfg = append(cfg, yaml.MapItem{
Key: "scrape_configs",
Value: scrapeConfigs,
})
cfg = append(cfg, yaml.MapItem{
Key: "alerting",
Value: yaml.MapSlice{
{
Key: "alertmanagers",
Value: alertmanagerConfigs,
},
},
})
if len(p.Spec.RemoteWrite) > 0 && version.Major >= 2 {
cfg = append(cfg, generateRemoteWriteConfig(version, p.Spec.RemoteWrite, basicAuthSecrets))
}
if len(p.Spec.RemoteRead) > 0 && version.Major >= 2 {
cfg = append(cfg, generateRemoteReadConfig(version, p.Spec.RemoteRead, basicAuthSecrets))
}
return yaml.Marshal(cfg)
}
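// generateServiceMonitorConfig builds a single scrape_config entry for one endpoint of a
// ServiceMonitor, including Kubernetes service discovery, TLS/auth settings and the relabeling
// rules that select the matching services, namespaces and ports.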
func generateServiceMonitorConfig(version semver.Version, m *v1.ServiceMonitor, ep v1.Endpoint, i int, basicAuthSecrets map[string]BasicAuthCredentials) yaml.MapSlice {
cfg := yaml.MapSlice{
{
Key: "job_name",
Value: fmt.Sprintf("%s/%s/%d", m.Namespace, m.Name, i),
},
{
Key: "honor_labels",
Value: ep.HonorLabels,
},
}
switch version.Major {
case 1:
if version.Minor < 7 {
cfg = append(cfg, k8sSDAllNamespaces())
} else {
cfg = append(cfg, k8sSDFromServiceMonitor(m))
}
case 2:
cfg = append(cfg, k8sSDFromServiceMonitor(m))
}
if ep.Interval != "" {
cfg = append(cfg, yaml.MapItem{Key: "scrape_interval", Value: ep.Interval})
}
if ep.ScrapeTimeout != "" {
cfg = append(cfg, yaml.MapItem{Key: "scrape_timeout", Value: ep.ScrapeTimeout})
}
if ep.Path != "" {
cfg = append(cfg, yaml.MapItem{Key: "metrics_path", Value: ep.Path})
}
if ep.Params != nil {
cfg = append(cfg, yaml.MapItem{Key: "params", Value: ep.Params})
}
if ep.Scheme != "" {
cfg = append(cfg, yaml.MapItem{Key: "scheme", Value: ep.Scheme})
}
cfg = addTLStoYaml(cfg, ep.TLSConfig)
if ep.BearerTokenFile != "" {
cfg = append(cfg, yaml.MapItem{Key: "bearer_token_file", Value: ep.BearerTokenFile})
}
if ep.BasicAuth != nil {
if s, ok := basicAuthSecrets[fmt.Sprintf("serviceMonitor/%s/%s/%d", m.Namespace, m.Name, i)]; ok {
cfg = append(cfg, yaml.MapItem{
Key: "basic_auth", Value: yaml.MapSlice{
{Key: "username", Value: s.username},
{Key: "password", Value: s.password},
},
})
}
}
var relabelings []yaml.MapSlice
// Filter targets by services selected by the monitor.
// Exact label matches.
labelKeys := make([]string, len(m.Spec.Selector.MatchLabels))
i = 0
	for k := range m.Spec.Selector.MatchLabels {
labelKeys[i] = k
i++
}
sort.Strings(labelKeys)
for i := range labelKeys {
k := labelKeys[i]
v := m.Spec.Selector.MatchLabels[k]
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_service_label_" + sanitizeLabelName(k)}},
{Key: "regex", Value: v},
})
}
// Set based label matching. We have to map the valid relations
// `In`, `NotIn`, `Exists`, and `DoesNotExist`, into relabeling rules.
for _, exp := range m.Spec.Selector.MatchExpressions {
switch exp.Operator {
case metav1.LabelSelectorOpIn:
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_service_label_" + sanitizeLabelName(exp.Key)}},
{Key: "regex", Value: strings.Join(exp.Values, "|")},
})
case metav1.LabelSelectorOpNotIn:
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "drop"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_service_label_" + sanitizeLabelName(exp.Key)}},
{Key: "regex", Value: strings.Join(exp.Values, "|")},
})
case metav1.LabelSelectorOpExists:
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_service_label_" + sanitizeLabelName(exp.Key)}},
{Key: "regex", Value: ".+"},
})
case metav1.LabelSelectorOpDoesNotExist:
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "drop"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_service_label_" + sanitizeLabelName(exp.Key)}},
{Key: "regex", Value: ".+"},
})
}
}
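	// For reference, a rough sketch of what one of these entries renders to in
	// the generated YAML; the label key "team" and the values "a|b" are purely
	// illustrative, not taken from any real ServiceMonitor:
	//
	//   - action: drop
	//     source_labels: [__meta_kubernetes_service_label_team]
	//     regex: a|b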
if version.Major == 1 && version.Minor < 7 {
// Filter targets based on the namespace selection configuration.
// By default we only discover services within the namespace of the
// ServiceMonitor.
// Selections allow extending this to all namespaces or to a subset
// of them specified by label or name matching.
//
		// Label selections are not supported yet as they require either support
		// in the upstream SD integration or an out-of-band implementation
		// in the operator with configuration reload.
//
// There's no explicit nil for the selector, we decide for the default
// case if it's all zero values.
nsel := m.Spec.NamespaceSelector
if !nsel.Any && len(nsel.MatchNames) == 0 {
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_namespace"}},
{Key: "regex", Value: m.Namespace},
})
} else if len(nsel.MatchNames) > 0 {
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_namespace"}},
{Key: "regex", Value: strings.Join(nsel.MatchNames, "|")},
})
}
}
// Filter targets based on correct port for the endpoint.
if ep.Port != "" {
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_endpoint_port_name"}},
{Key: "regex", Value: ep.Port},
})
} else if ep.TargetPort.StrVal != "" {
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_pod_container_port_name"}},
{Key: "regex", Value: ep.TargetPort.String()},
})
} else if ep.TargetPort.IntVal != 0 {
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_pod_container_port_number"}},
{Key: "regex", Value: ep.TargetPort.String()},
})
}
// Relabel namespace and pod and service labels into proper labels.
relabelings = append(relabelings, []yaml.MapSlice{
yaml.MapSlice{
{Key: "source_labels", Value: []string{"__meta_kubernetes_namespace"}},
{Key: "target_label", Value: "namespace"},
},
yaml.MapSlice{
{Key: "source_labels", Value: []string{"__meta_kubernetes_pod_name"}},
{Key: "target_label", Value: "pod"},
},
yaml.MapSlice{
{Key: "source_labels", Value: []string{"__meta_kubernetes_service_name"}},
{Key: "target_label", Value: "service"},
},
}...)
// By default, generate a safe job name from the service name. We also keep
// this around if a jobLabel is set in case the targets don't actually have a
// value for it. A single service may potentially have multiple metrics
	// endpoints, therefore the endpoint label is filled with the port's name or,
	// as a fallback, the port number.
relabelings = append(relabelings, yaml.MapSlice{
{Key: "source_labels", Value: []string{"__meta_kubernetes_service_name"}},
{Key: "target_label", Value: "job"},
{Key: "replacement", Value: "${1}"},
})
if m.Spec.JobLabel != "" {
relabelings = append(relabelings, yaml.MapSlice{
{Key: "source_labels", Value: []string{"__meta_kubernetes_service_label_" + sanitizeLabelName(m.Spec.JobLabel)}},
{Key: "target_label", Value: "job"},
{Key: "regex", Value: "(.+)"},
{Key: "replacement", Value: "${1}"},
})
}
if ep.Port != "" {
relabelings = append(relabelings, yaml.MapSlice{
{Key: "target_label", Value: "endpoint"},
{Key: "replacement", Value: ep.Port},
})
} else if ep.TargetPort.String() != "" {
relabelings = append(relabelings, yaml.MapSlice{
{Key: "target_label", Value: "endpoint"},
{Key: "replacement", Value: ep.TargetPort.String()},
})
}
cfg = append(cfg, yaml.MapItem{Key: "relabel_configs", Value: relabelings})
if ep.MetricRelabelConfigs != nil {
var metricRelabelings []yaml.MapSlice
for _, c := range ep.MetricRelabelConfigs {
relabeling := yaml.MapSlice{
{Key: "source_labels", Value: c.SourceLabels},
}
if c.Separator != "" {
relabeling = append(relabeling, yaml.MapItem{Key: "separator", Value: c.Separator})
}
if c.TargetLabel != "" {
relabeling = append(relabeling, yaml.MapItem{Key: "target_label", Value: c.TargetLabel})
}
if c.Regex != "" {
relabeling = append(relabeling, yaml.MapItem{Key: "regex", Value: c.Regex})
}
if c.Modulus != uint64(0) {
relabeling = append(relabeling, yaml.MapItem{Key: "modulus", Value: c.Modulus})
}
if c.Replacement != "" {
relabeling = append(relabeling, yaml.MapItem{Key: "replacement", Value: c.Replacement})
}
if c.Action != "" {
relabeling = append(relabeling, yaml.MapItem{Key: "action", Value: c.Action})
}
metricRelabelings = append(metricRelabelings, relabeling)
}
cfg = append(cfg, yaml.MapItem{Key: "metric_relabel_configs", Value: metricRelabelings})
}
return cfg
}
func k8sSDFromServiceMonitor(m *v1.ServiceMonitor) yaml.MapItem {
nsel := m.Spec.NamespaceSelector
namespaces := []string{}
if !nsel.Any && len(nsel.MatchNames) == 0 {
namespaces = append(namespaces, m.Namespace)
}
if !nsel.Any && len(nsel.MatchNames) > 0 {
for i := range nsel.MatchNames {
namespaces = append(namespaces, nsel.MatchNames[i])
}
}
return k8sSDWithNamespaces(namespaces)
}
func k8sSDWithNamespaces(namespaces []string) yaml.MapItem {
return yaml.MapItem{
Key: "kubernetes_sd_configs",
Value: []yaml.MapSlice{
yaml.MapSlice{
{
Key: "role",
Value: "endpoints",
},
{
Key: "namespaces",
Value: yaml.MapSlice{
{
Key: "names",
Value: namespaces,
},
},
},
},
},
}
}
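// For reference, a minimal sketch of the YAML fragment k8sSDWithNamespaces
// produces; the namespace name "default" is only an example:
//
//   kubernetes_sd_configs:
//   - role: endpoints
//     namespaces:
//       names:
//       - default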
func k8sSDAllNamespaces() yaml.MapItem {
return yaml.MapItem{
Key: "kubernetes_sd_configs",
Value: []yaml.MapSlice{
yaml.MapSlice{
{
Key: "role",
Value: "endpoints",
},
},
},
}
}
func generateAlertmanagerConfig(version semver.Version, am v1.AlertmanagerEndpoints) yaml.MapSlice {
if am.Scheme == "" {
am.Scheme = "http"
}
if am.PathPrefix == "" {
am.PathPrefix = "/"
}
cfg := yaml.MapSlice{
{Key: "path_prefix", Value: am.PathPrefix},
{Key: "scheme", Value: am.Scheme},
}
cfg = addTLStoYaml(cfg, am.TLSConfig)
switch version.Major {
case 1:
if version.Minor < 7 {
cfg = append(cfg, k8sSDAllNamespaces())
} else {
cfg = append(cfg, k8sSDWithNamespaces([]string{am.Namespace}))
}
case 2:
cfg = append(cfg, k8sSDWithNamespaces([]string{am.Namespace}))
}
var relabelings []yaml.MapSlice
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_service_name"}},
{Key: "regex", Value: am.Name},
})
if am.Port.StrVal != "" {
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_endpoint_port_name"}},
{Key: "regex", Value: am.Port.String()},
})
} else if am.Port.IntVal != 0 {
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_container_port_number"}},
{Key: "regex", Value: am.Port.String()},
})
}
if version.Major == 1 && version.Minor < 7 {
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_namespace"}},
{Key: "regex", Value: am.Namespace},
})
}
cfg = append(cfg, yaml.MapItem{Key: "relabel_configs", Value: relabelings})
return cfg
}
func generateRemoteReadConfig(version semver.Version, specs []v1.RemoteReadSpec, basicAuthSecrets map[string]BasicAuthCredentials) yaml.MapItem {
cfgs := []yaml.MapSlice{}
for i, spec := range specs {
//defaults
if spec.RemoteTimeout == "" {
spec.RemoteTimeout = "30s"
}
cfg := yaml.MapSlice{
{Key: "url", Value: spec.URL},
{Key: "remote_timeout", Value: spec.RemoteTimeout},
}
if len(spec.RequiredMatchers) > 0 {
cfg = append(cfg, yaml.MapItem{Key: "required_matchers", Value: stringMapToMapSlice(spec.RequiredMatchers)})
}
if spec.ReadRecent {
cfg = append(cfg, yaml.MapItem{Key: "read_recent", Value: spec.ReadRecent})
}
if spec.BasicAuth != nil {
if s, ok := basicAuthSecrets[fmt.Sprintf("remoteRead/%d", i)]; ok {
cfg = append(cfg, yaml.MapItem{
Key: "basic_auth", Value: yaml.MapSlice{
{Key: "username", Value: s.username},
{Key: "password", Value: s.password},
},
})
}
}
if spec.BearerTokenFile != "" {
cfg = append(cfg, yaml.MapItem{Key: "bearer_token_file", Value: spec.BearerTokenFile})
}
cfg = addTLStoYaml(cfg, spec.TLSConfig)
if spec.ProxyURL != "" {
cfg = append(cfg, yaml.MapItem{Key: "proxy_url", Value: spec.ProxyURL})
}
cfgs = append(cfgs, cfg)
}
return yaml.MapItem{
Key: "remote_read",
Value: cfgs,
}
}
func generateRemoteWriteConfig(version semver.Version, specs []v1.RemoteWriteSpec, basicAuthSecrets map[string]BasicAuthCredentials) yaml.MapItem {
cfgs := []yaml.MapSlice{}
for i, spec := range specs {
//defaults
if spec.RemoteTimeout == "" {
spec.RemoteTimeout = "30s"
}
cfg := yaml.MapSlice{
{Key: "url", Value: spec.URL},
{Key: "remote_timeout", Value: spec.RemoteTimeout},
}
if spec.WriteRelabelConfigs != nil {
relabelings := []yaml.MapSlice{}
for _, c := range spec.WriteRelabelConfigs {
relabeling := yaml.MapSlice{
{Key: "source_labels", Value: c.SourceLabels},
}
if c.Separator != "" {
relabeling = append(relabeling, yaml.MapItem{Key: "separator", Value: c.Separator})
}
if c.TargetLabel != "" {
relabeling = append(relabeling, yaml.MapItem{Key: "target_label", Value: c.TargetLabel})
}
if c.Regex != "" {
relabeling = append(relabeling, yaml.MapItem{Key: "regex", Value: c.Regex})
}
if c.Modulus != uint64(0) {
relabeling = append(relabeling, yaml.MapItem{Key: "modulus", Value: c.Modulus})
}
if c.Replacement != "" {
relabeling = append(relabeling, yaml.MapItem{Key: "replacement", Value: c.Replacement})
}
if c.Action != "" {
relabeling = append(relabeling, yaml.MapItem{Key: "action", Value: c.Action})
}
relabelings = append(relabelings, relabeling)
}
cfg = append(cfg, yaml.MapItem{Key: "write_relabel_configs", Value: relabelings})
}
if spec.BasicAuth != nil {
if s, ok := basicAuthSecrets[fmt.Sprintf("remoteWrite/%d", i)]; ok {
cfg = append(cfg, yaml.MapItem{
Key: "basic_auth", Value: yaml.MapSlice{
{Key: "username", Value: s.username},
{Key: "password", Value: s.password},
},
})
}
}
if spec.BearerToken != "" {
cfg = append(cfg, yaml.MapItem{Key: "bearer_token", Value: spec.BearerToken})
}
if spec.BearerTokenFile != "" {
cfg = append(cfg, yaml.MapItem{Key: "bearer_token_file", Value: spec.BearerTokenFile})
}
cfg = addTLStoYaml(cfg, spec.TLSConfig)
if spec.ProxyURL != "" {
cfg = append(cfg, yaml.MapItem{Key: "proxy_url", Value: spec.ProxyURL})
}
cfgs = append(cfgs, cfg)
}
return yaml.MapItem{
Key: "remote_write",
Value: cfgs,
}
}
| 1 | 9,896 | is this configuration key already exist in prometheus ? | prometheus-operator-prometheus-operator | go |
@@ -2,6 +2,7 @@ require "test_helper"
class DiaryEntryControllerTest < ActionController::TestCase
include ActionView::Helpers::NumberHelper
+ api_fixtures
def setup
# Create the default language for diary entries | 1 | require "test_helper"
class DiaryEntryControllerTest < ActionController::TestCase
include ActionView::Helpers::NumberHelper
def setup
# Create the default language for diary entries
create(:language, :code => "en")
# Stub nominatim response for diary entry locations
stub_request(:get, %r{^http://nominatim\.openstreetmap\.org/reverse\?})
.to_return(:status => 404)
end
##
# test all routes which lead to this controller
def test_routes
assert_routing(
{ :path => "/diary", :method => :get },
{ :controller => "diary_entry", :action => "list" }
)
assert_routing(
{ :path => "/diary/language", :method => :get },
{ :controller => "diary_entry", :action => "list", :language => "language" }
)
assert_routing(
{ :path => "/user/username/diary", :method => :get },
{ :controller => "diary_entry", :action => "list", :display_name => "username" }
)
assert_routing(
{ :path => "/diary/friends", :method => :get },
{ :controller => "diary_entry", :action => "list", :friends => true }
)
assert_routing(
{ :path => "/diary/nearby", :method => :get },
{ :controller => "diary_entry", :action => "list", :nearby => true }
)
assert_routing(
{ :path => "/diary/rss", :method => :get },
{ :controller => "diary_entry", :action => "rss", :format => :rss }
)
assert_routing(
{ :path => "/diary/language/rss", :method => :get },
{ :controller => "diary_entry", :action => "rss", :language => "language", :format => :rss }
)
assert_routing(
{ :path => "/user/username/diary/rss", :method => :get },
{ :controller => "diary_entry", :action => "rss", :display_name => "username", :format => :rss }
)
assert_routing(
{ :path => "/user/username/diary/comments", :method => :get },
{ :controller => "diary_entry", :action => "comments", :display_name => "username" }
)
assert_routing(
{ :path => "/user/username/diary/comments/1", :method => :get },
{ :controller => "diary_entry", :action => "comments", :display_name => "username", :page => "1" }
)
assert_routing(
{ :path => "/diary/new", :method => :get },
{ :controller => "diary_entry", :action => "new" }
)
assert_routing(
{ :path => "/diary/new", :method => :post },
{ :controller => "diary_entry", :action => "new" }
)
assert_routing(
{ :path => "/user/username/diary/1", :method => :get },
{ :controller => "diary_entry", :action => "view", :display_name => "username", :id => "1" }
)
assert_routing(
{ :path => "/user/username/diary/1/edit", :method => :get },
{ :controller => "diary_entry", :action => "edit", :display_name => "username", :id => "1" }
)
assert_routing(
{ :path => "/user/username/diary/1/edit", :method => :post },
{ :controller => "diary_entry", :action => "edit", :display_name => "username", :id => "1" }
)
assert_routing(
{ :path => "/user/username/diary/1/newcomment", :method => :post },
{ :controller => "diary_entry", :action => "comment", :display_name => "username", :id => "1" }
)
assert_routing(
{ :path => "/user/username/diary/1/hide", :method => :post },
{ :controller => "diary_entry", :action => "hide", :display_name => "username", :id => "1" }
)
assert_routing(
{ :path => "/user/username/diary/1/hidecomment/2", :method => :post },
{ :controller => "diary_entry", :action => "hidecomment", :display_name => "username", :id => "1", :comment => "2" }
)
assert_routing(
{ :path => "/user/username/diary/1/subscribe", :method => :post },
{ :controller => "diary_entry", :action => "subscribe", :display_name => "username", :id => "1" }
)
assert_routing(
{ :path => "/user/username/diary/1/unsubscribe", :method => :post },
{ :controller => "diary_entry", :action => "unsubscribe", :display_name => "username", :id => "1" }
)
end
def test_new_no_login
# Make sure that you are redirected to the login page when you
# are not logged in
get :new
assert_response :redirect
assert_redirected_to :controller => :user, :action => :login, :referer => "/diary/new"
end
def test_new_form
# Now try again when logged in
get :new, {}, { :user => create(:user) }
assert_response :success
assert_select "title", :text => /New Diary Entry/, :count => 1
assert_select "div.content-heading", :count => 1 do
assert_select "h1", :text => /New Diary Entry/, :count => 1
end
assert_select "div#content", :count => 1 do
assert_select "form[action='/diary/new'][method=post]", :count => 1 do
assert_select "input#diary_entry_title[name='diary_entry[title]']", :count => 1
assert_select "textarea#diary_entry_body[name='diary_entry[body]']", :text => "", :count => 1
assert_select "select#diary_entry_language_code", :count => 1
assert_select "input#latitude[name='diary_entry[latitude]']", :count => 1
assert_select "input#longitude[name='diary_entry[longitude]']", :count => 1
assert_select "input[name=commit][type=submit][value=Publish]", :count => 1
assert_select "input[name=commit][type=submit][value=Edit]", :count => 1
assert_select "input[name=commit][type=submit][value=Preview]", :count => 1
assert_select "input", :count => 7
end
end
end
def test_new_get_with_params
# Now try creating a diary entry using get
assert_difference "DiaryEntry.count", 0 do
get :new, { :commit => "save",
:diary_entry => { :title => "New Title", :body => "This is a new body for the diary entry", :latitude => "1.1",
:longitude => "2.2", :language_code => "en" } },
{ :user => create(:user).id }
end
assert_response :success
assert_template :edit
end
def test_new_no_body
    # Now try creating an invalid diary entry with an empty body
user = create(:user)
assert_no_difference "DiaryEntry.count" do
post :new, { :commit => "save",
:diary_entry => { :title => "New Title", :body => "", :latitude => "1.1",
:longitude => "2.2", :language_code => "en" } },
{ :user => user.id }
end
assert_response :success
assert_template :edit
assert_nil UserPreference.where(:user_id => user.id, :k => "diary.default_language").first
end
def test_new_post
# Now try creating a diary entry
user = create(:user)
assert_difference "DiaryEntry.count", 1 do
post :new, { :commit => "save",
:diary_entry => { :title => "New Title", :body => "This is a new body for the diary entry", :latitude => "1.1",
:longitude => "2.2", :language_code => "en" } },
{ :user => user.id }
end
assert_response :redirect
assert_redirected_to :action => :list, :display_name => user.display_name
entry = DiaryEntry.order(:id).last
assert_equal user.id, entry.user_id
assert_equal "New Title", entry.title
assert_equal "This is a new body for the diary entry", entry.body
assert_equal "1.1".to_f, entry.latitude
assert_equal "2.2".to_f, entry.longitude
assert_equal "en", entry.language_code
# checks if user was subscribed
assert_equal 1, entry.subscribers.length
assert_equal "en", UserPreference.where(:user_id => user.id, :k => "diary.default_language").first.v
end
def test_new_german
create(:language, :code => "de")
user = create(:user)
# Now try creating a diary entry in a different language
assert_difference "DiaryEntry.count", 1 do
post :new, { :commit => "save",
:diary_entry => { :title => "New Title", :body => "This is a new body for the diary entry", :latitude => "1.1",
:longitude => "2.2", :language_code => "de" } },
{ :user => user.id }
end
assert_response :redirect
assert_redirected_to :action => :list, :display_name => user.display_name
entry = DiaryEntry.order(:id).last
assert_equal user.id, entry.user_id
assert_equal "New Title", entry.title
assert_equal "This is a new body for the diary entry", entry.body
assert_equal "1.1".to_f, entry.latitude
assert_equal "2.2".to_f, entry.longitude
assert_equal "de", entry.language_code
# checks if user was subscribed
assert_equal 1, entry.subscribers.length
assert_equal "de", UserPreference.where(:user_id => user.id, :k => "diary.default_language").first.v
end
def test_new_spammy
user = create(:user)
# Generate some spammy content
spammy_title = "Spam Spam Spam Spam Spam"
spammy_body = 1.upto(50).map { |n| "http://example.com/spam#{n}" }.join(" ")
# Try creating a spammy diary entry
assert_difference "DiaryEntry.count", 1 do
post :new, { :commit => "save",
:diary_entry => { :title => spammy_title, :body => spammy_body, :language_code => "en" } },
{ :user => user.id }
end
assert_response :redirect
assert_redirected_to :action => :list, :display_name => user.display_name
entry = DiaryEntry.order(:id).last
assert_equal user.id, entry.user_id
assert_equal spammy_title, entry.title
assert_equal spammy_body, entry.body
assert_equal "en", entry.language_code
assert_equal "suspended", User.find(user.id).status
# Follow the redirect
get :list, { :display_name => user.display_name }, { :user => user }
assert_response :redirect
assert_redirected_to :controller => :user, :action => :suspended
end
def test_edit
user = create(:user)
other_user = create(:user)
entry = create(:diary_entry, :user => user)
# Make sure that you are redirected to the login page when you are
# not logged in, without and with the id of the entry you want to edit
get :edit, :display_name => entry.user.display_name, :id => entry.id
assert_response :redirect
assert_redirected_to :controller => :user, :action => :login, :referer => "/user/#{URI.encode(entry.user.display_name)}/diary/#{entry.id}/edit"
# Verify that you get a not found error, when you pass a bogus id
get :edit, { :display_name => entry.user.display_name, :id => 9999 }, { :user => entry.user }
assert_response :not_found
assert_select "div.content-heading", :count => 1 do
assert_select "h2", :text => "No entry with the id: 9999", :count => 1
end
# Verify that you get redirected to view if you are not the user
# that created the entry
get :edit, { :display_name => entry.user.display_name, :id => entry.id }, { :user => other_user }
assert_response :redirect
assert_redirected_to :action => :view, :display_name => entry.user.display_name, :id => entry.id
# Now pass the id, and check that you can edit it, when using the same
# user as the person who created the entry
get :edit, { :display_name => entry.user.display_name, :id => entry.id }, { :user => entry.user }
assert_response :success
assert_select "title", :text => /Edit diary entry/, :count => 1
assert_select "div.content-heading", :count => 1 do
assert_select "h1", :text => /Edit diary entry/, :count => 1
end
assert_select "div#content", :count => 1 do
assert_select "form[action='/user/#{URI.encode(entry.user.display_name)}/diary/#{entry.id}/edit'][method=post]", :count => 1 do
assert_select "input#diary_entry_title[name='diary_entry[title]'][value='#{entry.title}']", :count => 1
assert_select "textarea#diary_entry_body[name='diary_entry[body]']", :text => entry.body, :count => 1
assert_select "select#diary_entry_language_code", :count => 1
assert_select "input#latitude[name='diary_entry[latitude]']", :count => 1
assert_select "input#longitude[name='diary_entry[longitude]']", :count => 1
assert_select "input[name=commit][type=submit][value=Save]", :count => 1
assert_select "input[name=commit][type=submit][value=Edit]", :count => 1
assert_select "input[name=commit][type=submit][value=Preview]", :count => 1
assert_select "input", :count => 7
end
end
    # Now let's see if you can edit the diary entry
new_title = "New Title"
new_body = "This is a new body for the diary entry"
new_latitude = "1.1"
new_longitude = "2.2"
new_language_code = "en"
post :edit, { :display_name => entry.user.display_name, :id => entry.id, :commit => "save",
:diary_entry => { :title => new_title, :body => new_body, :latitude => new_latitude,
:longitude => new_longitude, :language_code => new_language_code } },
{ :user => entry.user.id }
assert_response :redirect
assert_redirected_to :action => :view, :display_name => entry.user.display_name, :id => entry.id
# Now check that the new data is rendered, when logged in
get :view, { :display_name => entry.user.display_name, :id => entry.id }, { :user => entry.user }
assert_response :success
assert_template "diary_entry/view"
assert_select "title", :text => /Users' diaries | /, :count => 1
assert_select "div.content-heading", :count => 1 do
assert_select "h2", :text => /#{entry.user.display_name}'s diary/, :count => 1
end
assert_select "div#content", :count => 1 do
assert_select "div.post_heading", :text => /#{new_title}/, :count => 1
# This next line won't work if the text has been run through the htmlize function
# due to formatting that could be introduced
assert_select "p", :text => /#{new_body}/, :count => 1
assert_select "abbr[class='geo'][title='#{number_with_precision(new_latitude, :precision => 4)}; #{number_with_precision(new_longitude, :precision => 4)}']", :count => 1
# As we're not logged in, check that you cannot edit
# print @response.body
assert_select "a[href='/user/#{URI.encode(entry.user.display_name)}/diary/#{entry.id}/edit']", :text => "Edit this entry", :count => 1
end
# and when not logged in as the user who wrote the entry
get :view, { :display_name => entry.user.display_name, :id => entry.id }, { :user => entry.user }
assert_response :success
assert_template "diary_entry/view"
assert_select "title", :text => /Users' diaries | /, :count => 1
assert_select "div.content-heading", :count => 1 do
assert_select "h2", :text => /#{entry.user.display_name}'s diary/, :count => 1
end
assert_select "div#content", :count => 1 do
assert_select "div.post_heading", :text => /#{new_title}/, :count => 1
# This next line won't work if the text has been run through the htmlize function
# due to formatting that could be introduced
assert_select "p", :text => /#{new_body}/, :count => 1
assert_select "abbr[class=geo][title='#{number_with_precision(new_latitude, :precision => 4)}; #{number_with_precision(new_longitude, :precision => 4)}']", :count => 1
# As we're not logged in, check that you cannot edit
assert_select "li[class='hidden show_if_user_#{entry.user.id}']", :count => 1 do
assert_select "a[href='/user/#{URI.encode(entry.user.display_name)}/diary/#{entry.id}/edit']", :text => "Edit this entry", :count => 1
end
end
end
def test_edit_i18n
user = create(:user)
diary_entry = create(:diary_entry, :language_code => "en", :user => user)
get :edit, { :display_name => user.display_name, :id => diary_entry.id }, { :user => user }
assert_response :success
assert_select "span[class=translation_missing]", false, "Missing translation in edit diary entry"
end
def test_comment
user = create(:user)
other_user = create(:user)
entry = create(:diary_entry, :user => user)
# Make sure that you are denied when you are not logged in
post :comment, :display_name => entry.user.display_name, :id => entry.id
assert_response :forbidden
# Verify that you get a not found error, when you pass a bogus id
post :comment, { :display_name => entry.user.display_name, :id => 9999 }, { :user => other_user }
assert_response :not_found
assert_select "div.content-heading", :count => 1 do
assert_select "h2", :text => "No entry with the id: 9999", :count => 1
end
post :subscribe, { :id => entry.id, :display_name => entry.user.display_name }, { :user => user }
# Now try an invalid comment with an empty body
assert_no_difference "ActionMailer::Base.deliveries.size" do
assert_no_difference "DiaryComment.count" do
assert_no_difference "entry.subscribers.count" do
post :comment, { :display_name => entry.user.display_name, :id => entry.id, :diary_comment => { :body => "" } }, { :user => other_user }
end
end
end
assert_response :success
assert_template :view
# Now try again with the right id
assert_difference "ActionMailer::Base.deliveries.size", entry.subscribers.count do
assert_difference "DiaryComment.count", 1 do
assert_difference "entry.subscribers.count", 1 do
post :comment, { :display_name => entry.user.display_name, :id => entry.id, :diary_comment => { :body => "New comment" } }, { :user => other_user }
end
end
end
assert_response :redirect
assert_redirected_to :action => :view, :display_name => entry.user.display_name, :id => entry.id
email = ActionMailer::Base.deliveries.first
assert_equal [user.email], email.to
assert_equal "[OpenStreetMap] #{other_user.display_name} commented on a diary entry", email.subject
assert_match /New comment/, email.text_part.decoded
assert_match /New comment/, email.html_part.decoded
ActionMailer::Base.deliveries.clear
comment = DiaryComment.order(:id).last
assert_equal entry.id, comment.diary_entry_id
assert_equal other_user.id, comment.user_id
assert_equal "New comment", comment.body
# Now view the diary entry, and check the new comment is present
get :view, :display_name => entry.user.display_name, :id => entry.id
assert_response :success
assert_select ".diary-comment", :count => 1 do
assert_select "#comment#{comment.id}", :count => 1 do
assert_select "a[href='/user/#{URI.encode(other_user.display_name)}']", :text => other_user.display_name, :count => 1
end
assert_select ".richtext", :text => /New comment/, :count => 1
end
end
def test_comment_spammy
user = create(:user)
other_user = create(:user)
# Find the entry to comment on
entry = create(:diary_entry, :user => user)
post :subscribe, { :id => entry.id, :display_name => entry.user.display_name }, { :user => user }
# Generate some spammy content
spammy_text = 1.upto(50).map { |n| "http://example.com/spam#{n}" }.join(" ")
# Try creating a spammy comment
assert_difference "ActionMailer::Base.deliveries.size", 1 do
assert_difference "DiaryComment.count", 1 do
post :comment, { :display_name => entry.user.display_name, :id => entry.id, :diary_comment => { :body => spammy_text } }, { :user => other_user }
end
end
assert_response :redirect
assert_redirected_to :action => :view, :display_name => entry.user.display_name, :id => entry.id
email = ActionMailer::Base.deliveries.first
assert_equal [user.email], email.to
assert_equal "[OpenStreetMap] #{other_user.display_name} commented on a diary entry", email.subject
assert_match %r{http://example.com/spam}, email.text_part.decoded
assert_match %r{http://example.com/spam}, email.html_part.decoded
ActionMailer::Base.deliveries.clear
comment = DiaryComment.order(:id).last
assert_equal entry.id, comment.diary_entry_id
assert_equal other_user.id, comment.user_id
assert_equal spammy_text, comment.body
assert_equal "suspended", User.find(other_user.id).status
# Follow the redirect
get :list, { :display_name => user.display_name }, { :user => other_user }
assert_response :redirect
assert_redirected_to :controller => :user, :action => :suspended
# Now view the diary entry, and check the new comment is not present
get :view, :display_name => entry.user.display_name, :id => entry.id
assert_response :success
assert_select ".diary-comment", :count => 0
end
def test_list_all
diary_entry = create(:diary_entry)
geo_entry = create(:diary_entry, :latitude => 51.50763, :longitude => -0.10781)
public_entry = create(:diary_entry, :user => create(:user))
# Try a list of all diary entries
get :list
check_diary_list diary_entry, geo_entry, public_entry
end
def test_list_user
user = create(:user)
other_user = create(:user)
diary_entry = create(:diary_entry, :user => user)
geo_entry = create(:diary_entry, :user => user, :latitude => 51.50763, :longitude => -0.10781)
_other_entry = create(:diary_entry, :user => other_user)
# Try a list of diary entries for a valid user
get :list, :display_name => user.display_name
check_diary_list diary_entry, geo_entry
# Try a list of diary entries for an invalid user
get :list, :display_name => "No Such User"
assert_response :not_found
assert_template "user/no_such_user"
end
def test_list_friends
user = create(:user)
other_user = create(:user)
friend = create(:friend, :befriender => user)
diary_entry = create(:diary_entry, :user => friend.befriendee)
_other_entry = create(:diary_entry, :user => other_user)
# Try a list of diary entries for your friends when not logged in
get :list, :friends => true
assert_response :redirect
assert_redirected_to :controller => :user, :action => :login, :referer => "/diary/friends"
# Try a list of diary entries for your friends when logged in
get :list, { :friends => true }, { :user => user }
check_diary_list diary_entry
get :list, { :friends => true }, { :user => other_user }
check_diary_list
end
def test_list_nearby
user = create(:user, :home_lat => 12, :home_lon => 12)
nearby_user = create(:user, :home_lat => 11.9, :home_lon => 12.1)
diary_entry = create(:diary_entry, :user => user)
# Try a list of diary entries for nearby users when not logged in
get :list, :nearby => true
assert_response :redirect
assert_redirected_to :controller => :user, :action => :login, :referer => "/diary/nearby"
# Try a list of diary entries for nearby users when logged in
get :list, { :nearby => true }, { :user => nearby_user }
check_diary_list diary_entry
get :list, { :nearby => true }, { :user => user }
check_diary_list
end
def test_list_language
create(:language, :code => "de")
create(:language, :code => "sl")
diary_entry_en = create(:diary_entry, :language_code => "en")
diary_entry_en2 = create(:diary_entry, :language_code => "en")
diary_entry_de = create(:diary_entry, :language_code => "de")
# Try a list of diary entries in english
get :list, :language => "en"
check_diary_list diary_entry_en, diary_entry_en2
# Try a list of diary entries in german
get :list, :language => "de"
check_diary_list diary_entry_de
# Try a list of diary entries in slovenian
get :list, :language => "sl"
check_diary_list
end
def test_rss
create(:language, :code => "de")
create(:diary_entry, :language_code => "en")
create(:diary_entry, :language_code => "en")
create(:diary_entry, :language_code => "de")
get :rss, :format => :rss
assert_response :success, "Should be able to get a diary RSS"
assert_select "rss", :count => 1 do
assert_select "channel", :count => 1 do
assert_select "channel>title", :count => 1
assert_select "image", :count => 1
assert_select "channel>item", :count => 3
end
end
end
def test_rss_language
create(:language, :code => "de")
create(:diary_entry, :language_code => "en")
create(:diary_entry, :language_code => "en")
create(:diary_entry, :language_code => "de")
get :rss, :language => "en", :format => :rss
assert_response :success, "Should be able to get a specific language diary RSS"
assert_select "rss>channel>item", :count => 2 # , "Diary entries should be filtered by language"
end
# def test_rss_nonexisting_language
# get :rss, {:language => 'xx', :format => :rss}
# assert_response :not_found, "Should not be able to get a nonexisting language diary RSS"
# end
def test_rss_language_with_no_entries
create(:language, :code => "sl")
create(:diary_entry, :language_code => "en")
get :rss, :language => "sl", :format => :rss
assert_response :success, "Should be able to get a specific language diary RSS"
assert_select "rss>channel>item", :count => 0 # , "Diary entries should be filtered by language"
end
def test_rss_user
user = create(:user)
other_user = create(:user)
create(:diary_entry, :user => user)
create(:diary_entry, :user => user)
create(:diary_entry, :user => other_user)
get :rss, :display_name => user.display_name, :format => :rss
assert_response :success, "Should be able to get a specific users diary RSS"
assert_select "rss>channel>item", :count => 2 # , "Diary entries should be filtered by user"
end
def test_rss_nonexisting_user
# Try a user that has never existed
get :rss, :display_name => "fakeUsername76543", :format => :rss
assert_response :not_found, "Should not be able to get a nonexisting users diary RSS"
# Try a suspended user
get :rss, :display_name => create(:user, :suspended).display_name, :format => :rss
assert_response :not_found, "Should not be able to get a suspended users diary RSS"
# Try a deleted user
get :rss, :display_name => create(:user, :deleted).display_name, :format => :rss
assert_response :not_found, "Should not be able to get a deleted users diary RSS"
end
def test_rss_character_escaping
create(:diary_entry, :title => "<script>")
get :rss, :format => :rss
assert_match "<title><script></title>", response.body
end
def test_view
user = create(:user)
suspended_user = create(:user, :suspended)
deleted_user = create(:user, :deleted)
# Try a normal entry that should work
diary_entry = create(:diary_entry, :user => user)
get :view, :display_name => user.display_name, :id => diary_entry.id
assert_response :success
assert_template :view
# Try a deleted entry
diary_entry_deleted = create(:diary_entry, :user => user, :visible => false)
get :view, :display_name => user.display_name, :id => diary_entry_deleted.id
assert_response :not_found
# Try an entry by a suspended user
diary_entry_suspended = create(:diary_entry, :user => suspended_user)
get :view, :display_name => suspended_user.display_name, :id => diary_entry_suspended.id
assert_response :not_found
# Try an entry by a deleted user
diary_entry_deleted = create(:diary_entry, :user => deleted_user)
get :view, :display_name => deleted_user.display_name, :id => diary_entry_deleted.id
assert_response :not_found
end
def test_view_hidden_comments
# Get a diary entry that has hidden comments
user = create(:user)
diary_entry = create(:diary_entry, :user => user)
visible_comment = create(:diary_comment, :diary_entry => diary_entry)
suspended_user_comment = create(:diary_comment, :diary_entry => diary_entry, :user => create(:user, :suspended))
deleted_user_comment = create(:diary_comment, :diary_entry => diary_entry, :user => create(:user, :deleted))
hidden_comment = create(:diary_comment, :diary_entry => diary_entry, :visible => false)
get :view, :display_name => user.display_name, :id => diary_entry.id
assert_response :success
assert_template :view
assert_select "div.comments" do
assert_select "p#comment#{visible_comment.id}", :count => 1
assert_select "p#comment#{suspended_user_comment.id}", :count => 0
assert_select "p#comment#{deleted_user_comment.id}", :count => 0
assert_select "p#comment#{hidden_comment.id}", :count => 0
end
end
def test_hide
user = create(:user)
# Try without logging in
diary_entry = create(:diary_entry, :user => user)
post :hide, :display_name => user.display_name, :id => diary_entry.id
assert_response :forbidden
assert_equal true, DiaryEntry.find(diary_entry.id).visible
# Now try as a normal user
post :hide, { :display_name => user.display_name, :id => diary_entry.id }, { :user => user }
assert_response :redirect
assert_redirected_to :action => :view, :display_name => user.display_name, :id => diary_entry.id
assert_equal true, DiaryEntry.find(diary_entry.id).visible
# Finally try as an administrator
post :hide, { :display_name => user.display_name, :id => diary_entry.id }, { :user => create(:administrator_user) }
assert_response :redirect
assert_redirected_to :action => :list, :display_name => user.display_name
assert_equal false, DiaryEntry.find(diary_entry.id).visible
end
def test_hidecomment
user = create(:user)
administrator_user = create(:administrator_user)
diary_entry = create(:diary_entry, :user => user)
diary_comment = create(:diary_comment, :diary_entry => diary_entry)
# Try without logging in
post :hidecomment, :display_name => user.display_name, :id => diary_entry.id, :comment => diary_comment.id
assert_response :forbidden
assert_equal true, DiaryComment.find(diary_comment.id).visible
# Now try as a normal user
post :hidecomment, { :display_name => user.display_name, :id => diary_entry.id, :comment => diary_comment.id }, { :user => user }
assert_response :redirect
assert_redirected_to :action => :view, :display_name => user.display_name, :id => diary_entry.id
assert_equal true, DiaryComment.find(diary_comment.id).visible
# Finally try as an administrator
post :hidecomment, { :display_name => user.display_name, :id => diary_entry.id, :comment => diary_comment.id }, { :user => administrator_user }
assert_response :redirect
assert_redirected_to :action => :view, :display_name => user.display_name, :id => diary_entry.id
assert_equal false, DiaryComment.find(diary_comment.id).visible
end
def test_comments
user = create(:user)
other_user = create(:user)
suspended_user = create(:user, :suspended)
deleted_user = create(:user, :deleted)
# Test a user with no comments
get :comments, :display_name => user.display_name
assert_response :success
assert_template :comments
assert_select "table.messages" do
assert_select "tr", :count => 1 # header, no comments
end
# Test a user with a comment
create(:diary_comment, :user => other_user)
get :comments, :display_name => other_user.display_name
assert_response :success
assert_template :comments
assert_select "table.messages" do
assert_select "tr", :count => 2 # header and one comment
end
# Test a suspended user
get :comments, :display_name => suspended_user.display_name
assert_response :not_found
# Test a deleted user
get :comments, :display_name => deleted_user.display_name
assert_response :not_found
end
def test_subscribe_success
user = create(:user)
other_user = create(:user)
diary_entry = create(:diary_entry, :user => user)
assert_difference "diary_entry.subscribers.count", 1 do
post :subscribe, { :id => diary_entry.id, :display_name => diary_entry.user.display_name }, { :user => other_user }
end
assert_response :redirect
end
def test_subscribe_fail
user = create(:user)
other_user = create(:user)
diary_entry = create(:diary_entry, :user => user)
# not signed in
assert_no_difference "diary_entry.subscribers.count" do
post :subscribe, :id => diary_entry.id, :display_name => diary_entry.user.display_name
end
assert_response :forbidden
# bad diary id
post :subscribe, { :id => 999111, :display_name => "username" }, { :user => other_user }
assert_response :not_found
# trying to subscribe when already subscribed
post :subscribe, { :id => diary_entry.id, :display_name => diary_entry.user.display_name }, { :user => other_user }
assert_no_difference "diary_entry.subscribers.count" do
post :subscribe, { :id => diary_entry.id, :display_name => diary_entry.user.display_name }, { :user => other_user }
end
end
def test_unsubscribe_success
user = create(:user)
other_user = create(:user)
diary_entry = create(:diary_entry, :user => user)
post :subscribe, { :id => diary_entry.id, :display_name => diary_entry.user.display_name }, { :user => other_user }
assert_difference "diary_entry.subscribers.count", -1 do
post :unsubscribe, { :id => diary_entry.id, :display_name => diary_entry.user.display_name }, { :user => other_user }
end
assert_response :redirect
end
def test_unsubscribe_fail
user = create(:user)
other_user = create(:user)
diary_entry = create(:diary_entry, :user => user)
# not signed in
assert_no_difference "diary_entry.subscribers.count" do
post :unsubscribe, :id => diary_entry.id, :display_name => diary_entry.user.display_name
end
assert_response :forbidden
# bad diary id
post :unsubscribe, { :id => 999111, :display_name => "username" }, { :user => other_user }
assert_response :not_found
# trying to unsubscribe when not subscribed
assert_no_difference "diary_entry.subscribers.count" do
post :unsubscribe, { :id => diary_entry.id, :display_name => diary_entry.user.display_name }, { :user => other_user }
end
end
private
def check_diary_list(*entries)
assert_response :success
assert_template "list"
assert_no_missing_translations
assert_select "div.diary_post", entries.count
entries.each do |entry|
assert_select "a[href=?]", "/user/#{URI.encode(entry.user.display_name)}/diary/#{entry.id}"
end
end
end
| 1 | 10,675 | As @gravitystorm said new tests need to be using factories, not fixtures. | openstreetmap-openstreetmap-website | rb |
@@ -171,8 +171,8 @@ bool CliManager::readLine(std::string &line, bool linebreak) {
if (!isInteractive_) {
break;
}
- auto purePrompt = folly::stringPrintf("(%s@%s:%d) [%s]> ",
- username_.c_str(), addr_.c_str(), port_,
+ auto purePrompt = folly::stringPrintf("(%s@) [%s]> ",
+ username_.c_str(),
cmdProcessor_->getSpaceName().c_str());
if (linebreak) {
purePrompt.assign(purePrompt.size() - 3, ' '); | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "base/Status.h"
#include <termios.h>
#include <unistd.h>
#include "readline/readline.h"
#include "readline/history.h"
#include "console/CliManager.h"
#include "client/cpp/GraphClient.h"
#include "fs/FileUtils.h"
DECLARE_string(u);
DECLARE_string(p);
DEFINE_bool(enable_history, false, "Whether to force saving the command history");
namespace nebula {
namespace graph {
const int32_t kMaxAuthInfoRetries = 3;
const int32_t kMaxUsernameLen = 16;
const int32_t kMaxPasswordLen = 24;
CliManager::CliManager() {
if (!fs::FileUtils::isStdinTTY()) {
enableHistroy_ = false;
isInteractive_ = false;
}
if (FLAGS_enable_history) {
enableHistroy_ = true;
}
if (enableHistroy_) {
::using_history();
}
if (isInteractive_) {
initAutoCompletion();
}
}
bool CliManager::connect(const std::string& addr,
uint16_t port,
const std::string& username,
const std::string& password) {
char user[kMaxUsernameLen + 1];
char pass[kMaxPasswordLen + 1];
strncpy(user, username.c_str(), kMaxUsernameLen);
user[kMaxUsernameLen] = '\0';
strncpy(pass, password.c_str(), kMaxPasswordLen);
pass[kMaxPasswordLen] = '\0';
// Make sure username is not empty
if (FLAGS_u.empty()) {
for (int32_t i = 0; i < kMaxAuthInfoRetries && !strlen(user); i++) {
// Need to interactively get the username
std::cout << "Username: ";
std::cin.getline(user, kMaxUsernameLen);
user[kMaxUsernameLen] = '\0';
}
} else {
strcpy(user, FLAGS_u.c_str()); // NOLINT
}
if (!strlen(user)) {
std::cout << "Authentication failed: "
"Need a valid username to authenticate\n\n";
return false;
}
// Make sure password is not empty
if (FLAGS_p.empty()) {
for (int32_t i = 0; i < kMaxAuthInfoRetries && !strlen(pass); i++) {
// Need to interactively get the password
std::cout << "Password: ";
termios oldTerminal;
tcgetattr(STDIN_FILENO, &oldTerminal);
termios newTerminal = oldTerminal;
newTerminal.c_lflag &= ~ECHO;
tcsetattr(STDIN_FILENO, TCSANOW, &newTerminal);
std::cin.getline(pass, kMaxPasswordLen);
pass[kMaxPasswordLen] = '\0';
tcsetattr(STDIN_FILENO, TCSANOW, &oldTerminal);
}
} else {
strcpy(pass, FLAGS_p.c_str()); // NOLINT
}
if (!strlen(pass)) {
std::cout << "Authentication failed: "
"Need a valid password\n\n";
return false;
}
addr_ = addr;
port_ = port;
username_ = user;
auto client = std::make_unique<GraphClient>(addr_, port_);
cpp2::ErrorCode res = client->connect(user, pass);
if (res == cpp2::ErrorCode::SUCCEEDED) {
#if defined(NEBULA_BUILD_VERSION)
std::cerr << "\nWelcome to Nebula Graph (Version "
<< NEBULA_STRINGIFY(NEBULA_BUILD_VERSION) << ")\n\n";
#else
std::cerr << "\nWelcome to Nebula Graph\n\n";
#endif
cmdProcessor_ = std::make_unique<CmdProcessor>(std::move(client));
return true;
} else {
// There is an error
std::cout << "Connection failed\n";
return false;
}
}
void CliManager::batch(const std::string& filename) {
UNUSED(filename);
}
void CliManager::loop() {
loadHistory();
while (true) {
std::string cmd;
std::string line;
auto quit = !this->readLine(line, false/*linebreak*/);
// EOF
if (quit) {
break;
}
// Empty line
if (line.empty()) {
continue;
}
// Line break
while (!quit && !line.empty() && line.back() == '\\') {
line.resize(line.size() - 1);
cmd += line;
quit = !this->readLine(line, true/*linebreak*/);
continue;
}
// EOF
if (quit) {
break;
}
// Execute the whole command
cmd += line;
if (!cmdProcessor_->process(cmd)) {
break;
}
}
saveHistory();
fprintf(stderr, "Bye!\n");
}
bool CliManager::readLine(std::string &line, bool linebreak) {
// Setup the prompt
std::string prompt;
static auto color = 0u;
do {
if (!isInteractive_) {
break;
}
auto purePrompt = folly::stringPrintf("(%s@%s:%d) [%s]> ",
username_.c_str(), addr_.c_str(), port_,
cmdProcessor_->getSpaceName().c_str());
if (linebreak) {
purePrompt.assign(purePrompt.size() - 3, ' ');
purePrompt += "-> ";
} else {
color++;
}
prompt = folly::stringPrintf(
"\001" // RL_PROMPT_START_IGNORE
"\033[1;%um" // color codes start
"\002" // RL_PROMPT_END_IGNORE
"%s" // prompt "(user@host:port) [spaceName]"
"\001" // RL_PROMPT_START_IGNORE
"\033[0m" // restore color code
"\002", // RL_PROMPT_END_IGNORE
color % 6 + 31,
purePrompt.c_str());
} while (false);
// Read one line
auto *input = ::readline(prompt.c_str());
auto ok = true;
do {
// EOF
if (input == nullptr) {
fprintf(stdout, "\n");
ok = false;
break;
}
// Empty line
if (input[0] == '\0') {
line.clear();
break;
}
line = folly::trimWhitespace(input).str();
if (!line.empty()) {
// Update command history
updateHistory(input);
}
} while (false);
::free(input);
return ok;
}
void CliManager::updateHistory(const char *line) {
if (!enableHistroy_) {
return;
}
auto **hists = ::history_list();
auto i = 0;
// Search in history
for (; i < ::history_length; i++) {
auto *hist = hists[i];
if (::strcmp(line, hist->line) == 0) {
break;
}
}
// New command
if (i == ::history_length) {
::add_history(line);
return;
}
    // Found in history, make it the latest
auto *hist = hists[i];
for (; i < ::history_length - 1; i++) {
hists[i] = hists[i + 1];
}
hists[i] = hist;
}
void CliManager::saveHistory() {
if (!enableHistroy_) {
return;
}
std::string histfile;
histfile += ::getenv("HOME");
histfile += "/.nebula_history";
auto *file = ::fopen(histfile.c_str(), "w+");
if (file == nullptr) {
return; // fail silently
}
auto **hists = ::history_list();
for (auto i = 0; i < ::history_length; i++) {
fprintf(file, "%s\n", hists[i]->line);
}
::fflush(file);
::fclose(file);
}
void CliManager::loadHistory() {
if (!enableHistroy_) {
return;
}
std::string histfile;
histfile += ::getenv("HOME");
histfile += "/.nebula_history";
auto *file = ::fopen(histfile.c_str(), "r");
if (file == nullptr) {
return; // fail silently
}
char *line = nullptr;
size_t size = 0;
ssize_t read = 0;
while ((read = ::getline(&line, &size, file)) != -1) {
line[read - 1] = '\0'; // remove the trailing newline
updateHistory(line);
}
::free(line);
::fclose(file);
}
struct StringCaseEqual {
bool operator()(const std::string &lhs, const std::string &rhs) const {
return ::strcasecmp(lhs.c_str(), rhs.c_str()) == 0;
}
};
struct StringCaseHash {
size_t operator()(const std::string &lhs) const {
std::string upper;
upper.resize(lhs.size());
auto toupper = [] (auto c) { return ::toupper(c); };
std::transform(lhs.begin(), lhs.end(), upper.begin(), toupper);
return std::hash<std::string>()(upper);
}
};
// Primary keywords, like `GO' `CREATE', etc.
static std::vector<std::string> primaryKeywords;
// Keywords along with their sub-keywords, like `SHOW': `TAGS', `SPACES'
static std::unordered_map<std::string, std::vector<std::string>,
StringCaseHash, StringCaseEqual> subKeywords;
// Typenames, like `int', `double', `string', etc.
static std::vector<std::string> typeNames;
// To fill the containers above from a json file.
static Status loadCompletions();
static Status parseKeywordsFromJson(const folly::dynamic &json);
// To retrieve matches from within the `primaryKeywords'
static std::vector<std::string>
matchFromPrimaryKeywords(const std::string &text);
// To retrieve matches from within the `subKeywords'
static std::vector<std::string> matchFromSubKeywords(const std::string &text,
const std::string &primaryKeyword);
// Given a collection of keywords, retrieve matches that prefixed with `text'
static std::vector<std::string> matchFromKeywords(const std::string &text,
const std::vector<std::string> &keywords);
// To tell if the current `text' is at the start position of a statement.
// If so, we should do completion with primary keywords.
// Otherwise, the primary keyword of the current statement
// will be set, thus we will do completion with its sub keywords.
static bool isStartOfStatement(std::string &primaryKeyword);
// Given the prefix and a collection of keywords, retrieve the longest common prefix
// e.g. given `u' as the prefix and [USE, USER, USERS] as the collection, will return `USE'
static auto longestCommonPrefix(std::string prefix,
const std::vector<std::string>& words);
// Callback by realine if an auto completion is triggered
static char** completer(const char *text, int start, int end);
auto longestCommonPrefix(std::string prefix,
const std::vector<std::string>& words) {
if (words.size() == 1) {
return words[0];
}
while (true) {
char nextChar = 0;
for (auto &word : words) {
if (word.size() <= prefix.size()) {
return word;
}
if (nextChar == 0) {
nextChar = word[prefix.size()];
continue;
}
if (::toupper(nextChar) != ::toupper(word[prefix.size()])) {
return word.substr(0, prefix.size());
}
}
prefix = words[0].substr(0, prefix.size() + 1);
}
}
char** completer(const char *text, int start, int end) {
UNUSED(start);
UNUSED(end);
    // Don't do filename completion even when there is no match.
::rl_attempted_completion_over = 1;
    // Don't do completion if in quotes
if (::rl_completion_quote_character != 0) {
return nullptr;
}
std::vector<std::string> matches;
std::string primaryKeyword; // The current primary keyword
if (isStartOfStatement(primaryKeyword)) {
matches = matchFromPrimaryKeywords(text);
} else {
matches = matchFromSubKeywords(text, primaryKeyword);
}
if (matches.empty()) {
return nullptr;
}
char **results = reinterpret_cast<char**>(malloc((2 + matches.size()) * sizeof(char*)));
// Get the longest common prefix of all matches as the echo back of this completion action
results[0] = ::strdup(longestCommonPrefix(text, matches).c_str());
auto i = 1;
for (auto &word : matches) {
results[i++] = ::strdup(word.c_str());
}
results[i] = nullptr;
return results;
}
bool isStartOfStatement(std::string &primaryKeyword) {
// If there is no input
if (::rl_line_buffer == nullptr || *::rl_line_buffer == '\0') {
return true;
}
std::string line = ::rl_line_buffer;
auto piece = folly::trimWhitespace(line);
// If the inputs are all white spaces
if (piece.empty()) {
return true;
}
// If the inputs are terminated with ';' or '|', i.e. complete statements
// Additionally, there is an incomplete primary keyword for the next statement
{
static const std::regex pattern(R"((\s*\w+[^;|]*[;|]\s*)*(\w+)?)");
std::smatch result;
if (std::regex_match(line, result, pattern)) {
return true;
}
}
// The same to the occasion above, except that the primary keyword is complete
// This is where sub keywords shall be completed
{
static const std::regex pattern(R"((\s*\w+[^;|]*[;|]\s*)*(\w+)[^;|]+)");
std::smatch result;
if (std::regex_match(line, result, pattern)) {
primaryKeyword = result[result.size() - 1].str();
return false;
}
}
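    // Illustrative examples of the two cases above (inputs are hypothetical):
    //   "SHO"            -> true, complete against primary keywords
    //   "SHOW SPA"       -> false, primaryKeyword is set to "SHOW"
    //   "USE nba; CREA"  -> true, the trailing "CREA" starts a new statement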
// TODO(dutor) There are still many scenarios we cannot cover with regular expressions.
// We have to accomplish this with the help of the actual parser.
return false;
}
std::vector<std::string> matchFromPrimaryKeywords(const std::string &text) {
return matchFromKeywords(text, primaryKeywords);
}
std::vector<std::string> matchFromSubKeywords(const std::string &text,
const std::string &primaryKeyword) {
std::vector<std::string> matches = typeNames;
auto iter = subKeywords.find(primaryKeyword);
if (iter != subKeywords.end()) {
matches.insert(matches.end(), iter->second.begin(), iter->second.end());
}
return matchFromKeywords(text, matches);
}
std::vector<std::string>
matchFromKeywords(const std::string &text, const std::vector<std::string> &keywords) {
if (keywords.empty()) {
return {};
}
std::vector<std::string> matches;
for (auto &word : keywords) {
if (text.size() > word.size()) {
continue;
}
if (::strncasecmp(text.c_str(), word.c_str(), text.size()) == 0) {
matches.emplace_back(word);
}
}
return matches;
}
Status loadCompletions() {
using fs::FileUtils;
auto dir = FileUtils::readLink("/proc/self/exe").value();
dir = FileUtils::dirname(dir.c_str()) + "/../share/resources";
std::string file = dir + "/" + "completion.json";
auto status = Status::OK();
int fd = -1;
do {
fd = ::open(file.c_str(), O_RDONLY);
if (fd == -1) {
status = Status::Error("Failed to open `%s': %s",
file.c_str(), ::strerror(errno));
break;
}
auto len = ::lseek(fd, 0, SEEK_END);
if (len == 0) {
status = Status::Error("File `%s' is empty", file.c_str());
break;
}
auto buffer = std::make_unique<char[]>(len + 1);
::lseek(fd, 0, SEEK_SET);
auto ll = ::read(fd, buffer.get(), len);
UNUSED(ll);
buffer[len] = '\0';
std::string content;
content.assign(buffer.get(), len);
try {
status = parseKeywordsFromJson(folly::parseJson(content));
} catch (const std::exception &e) {
status = Status::Error("Illegal json `%s': %s", file.c_str(), e.what());
break;
}
if (!status.ok()) {
break;
}
} while (false);
if (fd != -1) {
::close(fd);
}
return status;
}
Status parseKeywordsFromJson(const folly::dynamic &json) {
auto iter = json.find("keywords");
if (iter == json.items().end()) {
fprintf(stderr, "completions: no `keywords' found\n");
return Status::OK();
}
for (auto &pair : iter->second.items()) {
auto &pkw = pair.first;
primaryKeywords.emplace_back(pkw.asString());
auto subIter = pair.second.find("sub_keywords");
if (subIter == pair.second.items().end()) {
continue;
}
if (!subIter->second.isArray()) {
fprintf(stderr, "sub-keywords for `%s' should be an array\n",
pkw.asString().c_str());
continue;
}
for (auto &subKey : subIter->second) {
if (!subKey.isString()) {
fprintf(stderr, "keyword name should be of type string\n");
break;
}
subKeywords[pkw.asString()].emplace_back(subKey.asString());
}
}
iter = json.find("typenames");
if (iter == json.items().end()) {
fprintf(stderr, "completions: no `typenames' found\n");
return Status::OK();
}
for (auto &tname : iter->second) {
typeNames.emplace_back(tname.asString());
}
return Status::OK();
}
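// For reference, a minimal sketch of the completion.json shape this parser
// expects; the concrete keywords below are only examples:
//
//   {
//     "keywords": {
//       "SHOW": { "sub_keywords": ["TAGS", "SPACES"] },
//       "GO":   { "sub_keywords": ["FROM", "OVER"] }
//     },
//     "typenames": ["int", "double", "string"]
//   }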
void CliManager::initAutoCompletion() {
// The completion function
::rl_attempted_completion_function = completer;
// Characters that indicates begin or end of a quote
::rl_completer_quote_characters = "\"";
// Allow conditional parsing of the ~/.inputrc file
::rl_readline_name = "nebula-graph";
auto status = loadCompletions();
if (!status.ok()) {
fprintf(stderr, "%s\n", status.toString().c_str());
}
}
} // namespace graph
} // namespace nebula
| 1 | 28,362 | You can replace IP by hostname. | vesoft-inc-nebula | cpp |
@@ -123,13 +123,14 @@ def getDriversForConnectedUsbDevices():
for port in deviceInfoFetcher.comPorts if "usbID" in port)
)
for match in usbDevs:
- # check for the Braille HID protocol before any other device matching.
- if match.type == KEY_HID and match.deviceInfo.get('HIDUsagePage') == HID_USAGE_PAGE_BRAILLE:
- yield ("hid", match)
for driver, devs in _driverDevices.items():
for type, ids in devs.items():
if match.type==type and match.id in ids:
yield driver, match
+ # check for the Braille HID protocol after any other device matching.
+ if match.type == KEY_HID and match.deviceInfo.get('HIDUsagePage') == HID_USAGE_PAGE_BRAILLE:
+ yield ("hid", match)
+
def getDriversForPossibleBluetoothDevices():
"""Get any matching drivers for possible Bluetooth devices. | 1 | #bdDetect.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2013-2017 NV Access Limited
"""Support for braille display detection.
This allows devices to be automatically detected and used when they become available,
as well as providing utilities to query for possible devices for a particular driver.
To support detection for a driver, devices need to be associated
using the C{add*} functions.
Drivers distributed with NVDA do this at the bottom of this module.
For drivers in add-ons, this must be done in a global plugin.
"""
import itertools
from collections import namedtuple, defaultdict, OrderedDict
import threading
from typing import Iterable
import wx
import hwPortUtils
import braille
import winKernel
import winUser
import core
import ctypes
from logHandler import log
import config
import time
import appModuleHandler
from baseObject import AutoPropertyObject
import re
HID_USAGE_PAGE_BRAILLE = 0x41
DBT_DEVNODES_CHANGED=7
_driverDevices = OrderedDict()
USB_ID_REGEX = re.compile(r"^VID_[0-9A-F]{4}&PID_[0-9A-F]{4}$", re.U)
class DeviceMatch(
namedtuple("DeviceMatch", ("type","id", "port", "deviceInfo"))
):
"""Represents a detected device.
@ivar id: The identifier of the device.
@type id: str
@ivar port: The port that can be used by a driver to communicate with a device.
@type port: str
@ivar deviceInfo: all known information about a device.
@type deviceInfo: dict
"""
__slots__ = ()
# Device type constants
#: Key constant for HID devices
KEY_HID = "hid"
#: Key for serial devices (COM ports)
KEY_SERIAL = "serial"
#: Key for devices with a manufacturer specific driver
KEY_CUSTOM = "custom"
#: Key for bluetooth devices
KEY_BLUETOOTH = "bluetooth"
# Constants for USB and bluetooth detection to be used by the background thread scanner.
DETECT_USB = 1
DETECT_BLUETOOTH = 2
def _isDebug():
return config.conf["debugLog"]["hwIo"]
def _getDriver(driver):
try:
return _driverDevices[driver]
except KeyError:
ret = _driverDevices[driver] = defaultdict(set)
return ret
def addUsbDevices(driver, type, ids):
"""Associate USB devices with a driver.
@param driver: The name of the driver.
@type driver: str
@param type: The type of the driver, either C{KEY_HID}, C{KEY_SERIAL} or C{KEY_CUSTOM}.
@type type: str
@param ids: A set of USB IDs in the form C{"VID_xxxx&PID_XXXX"}.
Note that alphabetical characters in hexadecimal numbers should be uppercase.
@type ids: set of str
@raise ValueError: When one of the provided IDs is malformed.
"""
malformedIds = [id for id in ids if not isinstance(id, str) or not USB_ID_REGEX.match(id)]
if malformedIds:
raise ValueError("Invalid IDs provided for driver %s, type %s: %s"
% (driver, type, u", ".join(malformedIds)))
devs = _getDriver(driver)
driverUsb = devs[type]
driverUsb.update(ids)
def addBluetoothDevices(driver, matchFunc):
"""Associate Bluetooth HID or COM ports with a driver.
@param driver: The name of the driver.
@type driver: str
@param matchFunc: A function which determines whether a given Bluetooth device matches.
It takes a L{DeviceMatch} as its only argument
and returns a C{bool} indicating whether it matched.
@type matchFunc: callable
"""
devs = _getDriver(driver)
devs[KEY_BLUETOOTH] = matchFunc
def getDriversForConnectedUsbDevices():
"""Get any matching drivers for connected USB devices.
@return: Pairs of drivers and device information.
@rtype: generator of (str, L{DeviceMatch}) tuples
"""
usbDevs = itertools.chain(
(DeviceMatch(KEY_CUSTOM, port["usbID"], port["devicePath"], port)
for port in deviceInfoFetcher.usbDevices),
(DeviceMatch(KEY_HID, port["usbID"], port["devicePath"], port)
for port in deviceInfoFetcher.hidDevices if port["provider"]=="usb"),
(DeviceMatch(KEY_SERIAL, port["usbID"], port["port"], port)
for port in deviceInfoFetcher.comPorts if "usbID" in port)
)
for match in usbDevs:
# check for the Braille HID protocol before any other device matching.
if match.type == KEY_HID and match.deviceInfo.get('HIDUsagePage') == HID_USAGE_PAGE_BRAILLE:
yield ("hid", match)
for driver, devs in _driverDevices.items():
for type, ids in devs.items():
if match.type==type and match.id in ids:
yield driver, match
def getDriversForPossibleBluetoothDevices():
"""Get any matching drivers for possible Bluetooth devices.
@return: Pairs of drivers and port information.
@rtype: generator of (str, L{DeviceMatch}) tuples
"""
btDevs = itertools.chain(
(DeviceMatch(KEY_SERIAL, port["bluetoothName"], port["port"], port)
for port in deviceInfoFetcher.comPorts
if "bluetoothName" in port),
(DeviceMatch(KEY_HID, port["hardwareID"], port["devicePath"], port)
for port in deviceInfoFetcher.hidDevices if port["provider"]=="bluetooth"),
)
for match in btDevs:
# check for the Braille HID protocol before any other device matching.
if match.type == KEY_HID and match.deviceInfo.get('HIDUsagePage') == HID_USAGE_PAGE_BRAILLE:
yield ("hid", match)
for driver, devs in _driverDevices.items():
matchFunc = devs[KEY_BLUETOOTH]
if not callable(matchFunc):
continue
if matchFunc(match):
yield driver, match
class _DeviceInfoFetcher(AutoPropertyObject):
"""Utility class that caches fetched info for available devices for the duration of one core pump cycle."""
cachePropertiesByDefault = True
def _get_comPorts(self):
return list(hwPortUtils.listComPorts(onlyAvailable=True))
def _get_usbDevices(self):
return list(hwPortUtils.listUsbDevices(onlyAvailable=True))
def _get_hidDevices(self):
return list(hwPortUtils.listHidDevices(onlyAvailable=True))
#: The single instance of the device info fetcher.
#: @type: L{_DeviceInfoFetcher}
deviceInfoFetcher = _DeviceInfoFetcher()
class Detector(object):
"""Detector class used to automatically detect braille displays.
This should only be used by the L{braille} module.
"""
def __init__(self, usb=True, bluetooth=True, limitToDevices=None):
"""Constructor.
The keyword arguments initialize the detector in a particular state.
On an initialized instance, these initial arguments can be overridden by calling L{_startBgScan} or L{rescan}.
@param usb: Whether this instance should detect USB devices initially.
@type usb: bool
@param bluetooth: Whether this instance should detect Bluetooth devices initially.
@type bluetooth: bool
@param limitToDevices: Drivers to which detection should be limited initially.
C{None} if no driver filtering should occur.
"""
self._BgScanApc = winKernel.PAPCFUNC(self._bgScan)
self._btDevsLock = threading.Lock()
self._btDevs = None
core.post_windowMessageReceipt.register(self.handleWindowMessage)
appModuleHandler.post_appSwitch.register(self.pollBluetoothDevices)
self._stopEvent = threading.Event()
self._queuedScanLock = threading.Lock()
self._scanQueued = False
self._detectUsb = usb
self._detectBluetooth = bluetooth
self._limitToDevices = limitToDevices
self._runningApcLock = threading.Lock()
# Perform initial scan.
self._startBgScan(usb=usb, bluetooth=bluetooth, limitToDevices=limitToDevices)
@property
def _scanQueuedSafe(self):
"""Returns L{_scanQueued} in a thread safe way by using L{_queuedScanLock}."""
with self._queuedScanLock:
return self._scanQueued
@_scanQueuedSafe.setter
def _scanQueuedSafe(self, state):
"""Sets L{_scanQueued} in a thread safe way by using L{_queuedScanLock}."""
with self._queuedScanLock:
self._scanQueued = state
def _startBgScan(self, usb=False, bluetooth=False, limitToDevices=None):
"""Starts a scan for devices.
If a scan is already in progress, a new scan will be queued after the current scan.
To explicitly cancel a scan in progress, use L{rescan}.
@param usb: Whether USB devices should be detected for this and subsequent scans.
@type usb: bool
@param bluetooth: Whether Bluetooth devices should be detected for this and subsequent scans.
@type bluetooth: bool
@param limitToDevices: Drivers to which detection should be limited for this and subsequent scans.
C{None} if no driver filtering should occur.
"""
with self._queuedScanLock:
self._detectUsb = usb
self._detectBluetooth = bluetooth
self._limitToDevices = limitToDevices
if not self._scanQueued:
self._scanQueued = True
if self._runningApcLock.locked():
# There's currently a scan in progress.
# Since the scan is embedded in a loop, it will automatically do another scan,
# unless a display has been found.
return
braille._BgThread.queueApc(self._BgScanApc)
def _stopBgScan(self):
"""Stops the current scan as soon as possible and prevents a queued scan to start."""
if not self._runningApcLock.locked():
# No scan to stop
return
self._stopEvent.set()
self._scanQueuedSafe = False
def _bgScan(self, param):
if self._runningApcLock.locked():
log.debugWarning("Braille display detection background scan APC executed while one is already running")
return
with self._runningApcLock:
while self._scanQueuedSafe:
# Clear the stop event before a scan is started.
# Since a scan can take some time to complete, another thread can set the stop event to cancel it.
self._stopEvent.clear()
with self._queuedScanLock:
self._scanQueued = False
detectUsb = self._detectUsb
detectBluetooth = self._detectBluetooth
limitToDevices = self._limitToDevices
if detectUsb:
if self._stopEvent.isSet():
continue
for driver, match in getDriversForConnectedUsbDevices():
if self._stopEvent.isSet() or (self._limitToDevices and driver not in self._limitToDevices):
continue
if braille.handler.setDisplayByName(driver, detected=match):
return
if detectBluetooth:
if self._stopEvent.isSet():
continue
with self._btDevsLock:
if self._btDevs is None:
btDevs = list(getDriversForPossibleBluetoothDevices())
# Cache Bluetooth devices for next time.
btDevsCache = []
else:
btDevs = self._btDevs
btDevsCache = btDevs
for driver, match in btDevs:
if self._stopEvent.isSet() or (self._limitToDevices and driver not in self._limitToDevices):
continue
if btDevsCache is not btDevs:
btDevsCache.append((driver, match))
if braille.handler.setDisplayByName(driver, detected=match):
return
if self._stopEvent.isSet():
continue
if btDevsCache is not btDevs:
with self._btDevsLock:
self._btDevs = btDevsCache
def rescan(self, usb=True, bluetooth=True, limitToDevices=None):
"""Stop a current scan when in progress, and start scanning from scratch.
@param usb: Whether USB devices should be detected for this and subsequent scans.
@type usb: bool
@param bluetooth: Whether Bluetooth devices should be detected for this and subsequent scans.
@type bluetooth: bool
@param limitToDevices: Drivers to which detection should be limited for this and subsequent scans.
C{None} if no driver filtering should occur.
"""
self._stopBgScan()
with self._btDevsLock:
# A Bluetooth com port or HID device might have been added.
self._btDevs = None
self._startBgScan(usb=usb, bluetooth=bluetooth, limitToDevices=limitToDevices)
def handleWindowMessage(self, msg=None, wParam=None):
if msg == winUser.WM_DEVICECHANGE and wParam == DBT_DEVNODES_CHANGED:
self.rescan(bluetooth=self._detectBluetooth, limitToDevices=self._limitToDevices)
def pollBluetoothDevices(self):
"""Poll bluetooth devices that might be in range.
This does not cancel the current scan."""
if not self._detectBluetooth:
# Do not poll bluetooth devices at all when bluetooth is disabled.
return
with self._btDevsLock:
if not self._btDevs:
return
self._startBgScan(bluetooth=self._detectBluetooth, limitToDevices=self._limitToDevices)
def terminate(self):
appModuleHandler.post_appSwitch.unregister(self.pollBluetoothDevices)
core.post_windowMessageReceipt.unregister(self.handleWindowMessage)
self._stopBgScan()
def getConnectedUsbDevicesForDriver(driver) -> Iterable[DeviceMatch]:
"""Get any connected USB devices associated with a particular driver.
@param driver: The name of the driver.
@type driver: str
@return: Device information for each device.
@raise LookupError: If there is no detection data for this driver.
"""
usbDevs = itertools.chain(
(DeviceMatch(KEY_CUSTOM, port["usbID"], port["devicePath"], port)
for port in deviceInfoFetcher.usbDevices),
(DeviceMatch(KEY_HID, port["usbID"], port["devicePath"], port)
for port in deviceInfoFetcher.hidDevices if port["provider"]=="usb"),
(DeviceMatch(KEY_SERIAL, port["usbID"], port["port"], port)
for port in deviceInfoFetcher.comPorts if "usbID" in port)
)
for match in usbDevs:
# check for the Braille HID protocol before any other device matching.
if driver == "hid":
if match.type == KEY_HID and match.deviceInfo.get('HIDUsagePage') == HID_USAGE_PAGE_BRAILLE:
yield match
else:
devs = _driverDevices[driver]
for type, ids in devs.items():
if match.type == type and match.id in ids:
yield match
def getPossibleBluetoothDevicesForDriver(driver) -> Iterable[DeviceMatch]:
"""Get any possible Bluetooth devices associated with a particular driver.
@param driver: The name of the driver.
@type driver: str
@return: Port information for each port.
@raise LookupError: If there is no detection data for this driver.
"""
if driver == "hid":
# check for the Braille HID protocol before any other device matching.
def matchFunc(match):
return match.type == KEY_HID and match.deviceInfo.get('HIDUsagePage') == HID_USAGE_PAGE_BRAILLE
else:
matchFunc = _driverDevices[driver][KEY_BLUETOOTH]
if not callable(matchFunc):
return
btDevs = itertools.chain(
(DeviceMatch(KEY_SERIAL, port["bluetoothName"], port["port"], port)
for port in deviceInfoFetcher.comPorts
if "bluetoothName" in port),
(DeviceMatch(KEY_HID, port["hardwareID"], port["devicePath"], port)
for port in deviceInfoFetcher.hidDevices if port["provider"]=="bluetooth"),
)
for match in btDevs:
if matchFunc(match):
yield match
def driverHasPossibleDevices(driver):
"""Determine whether there are any possible devices associated with a given driver.
@param driver: The name of the driver.
@type driver: str
@return: C{True} if there are possible devices, C{False} otherwise.
@rtype: bool
@raise LookupError: If there is no detection data for this driver.
"""
return bool(next(itertools.chain(
getConnectedUsbDevicesForDriver(driver),
getPossibleBluetoothDevicesForDriver(driver)
), None))
def driverSupportsAutoDetection(driver):
"""Returns whether the provided driver supports automatic detection of displays.
@param driver: The name of the driver.
@type driver: str
@return: C{True} if the driver supports auto detection, C{False} otherwise.
@rtype: bool
"""
return driver in _driverDevices
### Detection data
# alva
addUsbDevices("alva", KEY_HID, {
"VID_0798&PID_0640", # BC640
"VID_0798&PID_0680", # BC680
"VID_0798&PID_0699", # USB protocol converter
})
addBluetoothDevices("alva", lambda m: m.id.startswith("ALVA "))
# baum
addUsbDevices("baum", KEY_HID, {
"VID_0904&PID_3001", # RefreshaBraille 18
"VID_0904&PID_6101", # VarioUltra 20
"VID_0904&PID_6103", # VarioUltra 32
"VID_0904&PID_6102", # VarioUltra 40
"VID_0904&PID_4004", # Pronto! 18 V3
"VID_0904&PID_4005", # Pronto! 40 V3
"VID_0904&PID_4007", # Pronto! 18 V4
"VID_0904&PID_4008", # Pronto! 40 V4
"VID_0904&PID_6001", # SuperVario2 40
"VID_0904&PID_6002", # SuperVario2 24
"VID_0904&PID_6003", # SuperVario2 32
"VID_0904&PID_6004", # SuperVario2 64
"VID_0904&PID_6005", # SuperVario2 80
"VID_0904&PID_6006", # Brailliant2 40
"VID_0904&PID_6007", # Brailliant2 24
"VID_0904&PID_6008", # Brailliant2 32
"VID_0904&PID_6009", # Brailliant2 64
"VID_0904&PID_600A", # Brailliant2 80
"VID_0904&PID_6201", # Vario 340
"VID_0483&PID_A1D3", # Orbit Reader 20
"VID_0904&PID_6301", # Vario 4
})
addUsbDevices("baum", KEY_SERIAL, {
"VID_0403&PID_FE70", # Vario 40
"VID_0403&PID_FE71", # PocketVario
"VID_0403&PID_FE72", # SuperVario/Brailliant 40
"VID_0403&PID_FE73", # SuperVario/Brailliant 32
"VID_0403&PID_FE74", # SuperVario/Brailliant 64
"VID_0403&PID_FE75", # SuperVario/Brailliant 80
"VID_0904&PID_2001", # EcoVario 24
"VID_0904&PID_2002", # EcoVario 40
"VID_0904&PID_2007", # VarioConnect/BrailleConnect 40
"VID_0904&PID_2008", # VarioConnect/BrailleConnect 32
"VID_0904&PID_2009", # VarioConnect/BrailleConnect 24
"VID_0904&PID_2010", # VarioConnect/BrailleConnect 64
"VID_0904&PID_2011", # VarioConnect/BrailleConnect 80
"VID_0904&PID_2014", # EcoVario 32
"VID_0904&PID_2015", # EcoVario 64
"VID_0904&PID_2016", # EcoVario 80
"VID_0904&PID_3000", # RefreshaBraille 18
})
addBluetoothDevices("baum", lambda m: any(m.id.startswith(prefix) for prefix in (
"Baum SuperVario",
"Baum PocketVario",
"Baum SVario",
"HWG Brailliant",
"Refreshabraille",
"VarioConnect",
"BrailleConnect",
"Pronto!",
"VarioUltra",
"Orbit Reader 20",
"Vario 4",
)))
# brailleNote
addUsbDevices("brailleNote", KEY_SERIAL, {
"VID_1C71&PID_C004", # Apex
})
addBluetoothDevices("brailleNote", lambda m:
any(first <= m.deviceInfo.get("bluetoothAddress",0) <= last for first, last in (
(0x0025EC000000, 0x0025EC01869F), # Apex
)) or m.id.startswith("Braillenote"))
# brailliantB
addUsbDevices("brailliantB", KEY_HID, {
"VID_1C71&PID_C111", # Mantis Q 40
"VID_1C71&PID_C101", # Chameleon 20
"VID_1C71&PID_C121", # Humanware BrailleOne 20 HID
"VID_1C71&PID_CE01", # NLS eReader 20 HID
"VID_1C71&PID_C006", # Brailliant BI 32, 40 and 80
"VID_1C71&PID_C022", # Brailliant BI 14
"VID_1C71&PID_C131", # Brailliant BI 40X
"VID_1C71&PID_C141", # Brailliant BI 20X
"VID_1C71&PID_C00A", # BrailleNote Touch
"VID_1C71&PID_C00E", # BrailleNote Touch v2
})
addUsbDevices("brailliantB", KEY_SERIAL, {
"VID_1C71&PID_C005", # Brailliant BI 32, 40 and 80
"VID_1C71&PID_C021", # Brailliant BI 14
})
addBluetoothDevices(
"brailliantB", lambda m: (
m.type == KEY_SERIAL
and (
m.id.startswith("Brailliant B")
or m.id == "Brailliant 80"
or "BrailleNote Touch" in m.id
)
)
or (
m.type == KEY_HID
and m.deviceInfo.get("manufacturer") == "Humanware"
and m.deviceInfo.get("product") in (
"Brailliant HID",
"APH Chameleon 20",
"APH Mantis Q40",
"Humanware BrailleOne",
"NLS eReader",
"NLS eReader Humanware",
"Brailliant BI 40X",
"Brailliant BI 20X",
)
)
)
# eurobraille
addUsbDevices("eurobraille", KEY_HID, {
"VID_C251&PID_1122", # Esys (version < 3.0, no SD card
"VID_C251&PID_1123", # Esys (version >= 3.0, with HID keyboard, no SD card
"VID_C251&PID_1124", # Esys (version < 3.0, with SD card
"VID_C251&PID_1125", # Esys (version >= 3.0, with HID keyboard, with SD card
"VID_C251&PID_1126", # Esys (version >= 3.0, no SD card
"VID_C251&PID_1127", # Reserved
"VID_C251&PID_1128", # Esys (version >= 3.0, with SD card
"VID_C251&PID_1129", # Reserved
"VID_C251&PID_112A", # Reserved
"VID_C251&PID_112B", # Reserved
"VID_C251&PID_112C", # Reserved
"VID_C251&PID_112D", # Reserved
"VID_C251&PID_112E", # Reserved
"VID_C251&PID_112F", # Reserved
"VID_C251&PID_1130", # Esytime
"VID_C251&PID_1131", # Reserved
"VID_C251&PID_1132", # Reserved
})
addBluetoothDevices("eurobraille", lambda m: m.id.startswith("Esys"))
# freedomScientific
addUsbDevices("freedomScientific", KEY_CUSTOM, {
"VID_0F4E&PID_0100", # Focus 1
"VID_0F4E&PID_0111", # PAC Mate
"VID_0F4E&PID_0112", # Focus 2
"VID_0F4E&PID_0114", # Focus Blue
})
addBluetoothDevices("freedomScientific", lambda m: any(m.id.startswith(prefix) for prefix in (
"F14", "Focus 14 BT",
"Focus 40 BT",
"Focus 80 BT",
)))
# handyTech
addUsbDevices("handyTech", KEY_SERIAL, {
"VID_0403&PID_6001", # FTDI chip
"VID_0921&PID_1200", # GoHubs chip
})
# Newer Handy Tech displays have a native HID processor
addUsbDevices("handyTech", KEY_HID, {
"VID_1FE4&PID_0054", # Active Braille
"VID_1FE4&PID_0055", # Connect Braille
"VID_1FE4&PID_0061", # Actilino
"VID_1FE4&PID_0064", # Active Star 40
"VID_1FE4&PID_0081", # Basic Braille 16
"VID_1FE4&PID_0082", # Basic Braille 20
"VID_1FE4&PID_0083", # Basic Braille 32
"VID_1FE4&PID_0084", # Basic Braille 40
"VID_1FE4&PID_008A", # Basic Braille 48
"VID_1FE4&PID_0086", # Basic Braille 64
"VID_1FE4&PID_0087", # Basic Braille 80
"VID_1FE4&PID_008B", # Basic Braille 160
"VID_1FE4&PID_008C", # Basic Braille 84
"VID_1FE4&PID_0093", # Basic Braille Plus 32
"VID_1FE4&PID_0094", # Basic Braille Plus 40
})
# Some older HT displays use a HID converter and an internal serial interface
addUsbDevices("handyTech", KEY_HID, {
"VID_1FE4&PID_0003", # USB-HID adapter
"VID_1FE4&PID_0074", # Braille Star 40
"VID_1FE4&PID_0044", # Easy Braille
})
addBluetoothDevices("handyTech", lambda m: any(m.id.startswith(prefix) for prefix in (
"Actilino AL",
"Active Braille AB",
"Active Star AS",
"Basic Braille BB",
"Basic Braille Plus BP",
"Braille Star 40 BS",
"Braillino BL",
"Braille Wave BW",
"Easy Braille EBR",
)))
# hims
# Bulk devices
addUsbDevices("hims", KEY_CUSTOM, {
"VID_045E&PID_930A", # Braille Sense & Smart Beetle
"VID_045E&PID_930B", # Braille EDGE 40
})
# Sync Braille, serial device
addUsbDevices("hims", KEY_SERIAL, {
"VID_0403&PID_6001",
})
addBluetoothDevices("hims", lambda m: any(m.id.startswith(prefix) for prefix in (
"BrailleSense",
"BrailleEDGE",
"SmartBeetle",
)))
# NattiqBraille
addUsbDevices("nattiqbraille", KEY_SERIAL, {
"VID_2341&PID_8036", # Atmel-based USB Serial for Nattiq nBraille
})
# superBrl
addUsbDevices("superBrl", KEY_SERIAL, {
"VID_10C4&PID_EA60", # SuperBraille 3.2
})
| 1 | 34,656 | This should say "why" NVDA should do things in this order. | nvaccess-nvda | py |
@@ -114,7 +114,7 @@ func TestPopulateLocationConstraint(t *testing.T) {
func TestNoPopulateLocationConstraintIfProvided(t *testing.T) {
s := s3.New(unit.Session)
req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{
- Bucket: aws.String("bucket"),
+ Bucket: aws.String("bucket"),
CreateBucketConfiguration: &s3.CreateBucketConfiguration{},
})
if err := req.Build(); err != nil { | 1 | package s3_test
import (
"bytes"
"io/ioutil"
"net/http"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting/unit"
"github.com/aws/aws-sdk-go/service/s3"
)
var s3LocationTests = []struct {
body string
loc string
}{
{`<?xml version="1.0" encoding="UTF-8"?><LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`, ``},
{`<?xml version="1.0" encoding="UTF-8"?><LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">EU</LocationConstraint>`, `EU`},
}
func TestGetBucketLocation(t *testing.T) {
for _, test := range s3LocationTests {
s := s3.New(unit.Session)
s.Handlers.Send.Clear()
s.Handlers.Send.PushBack(func(r *request.Request) {
reader := ioutil.NopCloser(bytes.NewReader([]byte(test.body)))
r.HTTPResponse = &http.Response{StatusCode: 200, Body: reader}
})
resp, err := s.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String("bucket")})
if err != nil {
t.Errorf("expected no error, but received %v", err)
}
if test.loc == "" {
if v := resp.LocationConstraint; v != nil {
t.Errorf("expect location constraint to be nil, got %s", *v)
}
} else {
if e, a := test.loc, *resp.LocationConstraint; e != a {
t.Errorf("expect %s location constraint, got %v", e, a)
}
}
}
}
func TestNormalizeBucketLocation(t *testing.T) {
cases := []struct {
In, Out string
}{
{"", "us-east-1"},
{"EU", "eu-west-1"},
{"us-east-1", "us-east-1"},
{"something", "something"},
}
for i, c := range cases {
actual := s3.NormalizeBucketLocation(c.In)
if e, a := c.Out, actual; e != a {
t.Errorf("%d, expect %s bucket location, got %s", i, e, a)
}
}
}
func TestWithNormalizeBucketLocation(t *testing.T) {
req := &request.Request{}
req.ApplyOptions(s3.WithNormalizeBucketLocation)
cases := []struct {
In, Out string
}{
{"", "us-east-1"},
{"EU", "eu-west-1"},
{"us-east-1", "us-east-1"},
{"something", "something"},
}
for i, c := range cases {
req.Data = &s3.GetBucketLocationOutput{
LocationConstraint: aws.String(c.In),
}
req.Handlers.Unmarshal.Run(req)
v := req.Data.(*s3.GetBucketLocationOutput).LocationConstraint
if e, a := c.Out, aws.StringValue(v); e != a {
t.Errorf("%d, expect %s bucket location, got %s", i, e, a)
}
}
}
func TestPopulateLocationConstraint(t *testing.T) {
s := s3.New(unit.Session)
in := &s3.CreateBucketInput{
Bucket: aws.String("bucket"),
}
req, _ := s.CreateBucketRequest(in)
if err := req.Build(); err != nil {
t.Fatalf("expect no error, got %v", err)
}
v, _ := awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")
if e, a := "mock-region", *(v[0].(*string)); e != a {
t.Errorf("expect %s location constraint, got %s", e, a)
}
if v := in.CreateBucketConfiguration; v != nil {
// don't modify original params
t.Errorf("expect create bucket Configuration to be nil, got %s", *v)
}
}
func TestNoPopulateLocationConstraintIfProvided(t *testing.T) {
s := s3.New(unit.Session)
req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{
Bucket: aws.String("bucket"),
CreateBucketConfiguration: &s3.CreateBucketConfiguration{},
})
if err := req.Build(); err != nil {
t.Fatalf("expect no error, got %v", err)
}
v, _ := awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")
if l := len(v); l != 0 {
t.Errorf("expect no values, got %d", l)
}
}
func TestNoPopulateLocationConstraintIfClassic(t *testing.T) {
s := s3.New(unit.Session, &aws.Config{Region: aws.String("us-east-1")})
req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{
Bucket: aws.String("bucket"),
})
if err := req.Build(); err != nil {
t.Fatalf("expect no error, got %v", err)
}
v, _ := awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")
if l := len(v); l != 0 {
t.Errorf("expect no values, got %d", l)
}
}
| 1 | 9,370 | nit these will get changed back during next release. | aws-aws-sdk-go | go |
@@ -0,0 +1,8 @@
+// Copyright 2021 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package api
+
+// Version is set in the build process.
+var Version string | 1 | 1 | 15,911 | it is actually not needed to make the member public. the build tags can also set a package scoped variable. | ethersphere-bee | go |
|
@@ -38,6 +38,7 @@ trait ContentExtrasTrait
'excerpt' => $this->contentExtension->getExcerpt($content),
'link' => $this->contentExtension->getLink($content),
'editLink' => $this->contentExtension->getEditLink($content),
+ 'icon' => $this->contentExtension->getIcon($content),
];
}
} | 1 | <?php
declare(strict_types=1);
namespace Bolt\Entity;
use Bolt\Twig\ContentExtension;
use Symfony\Component\Serializer\Annotation\Groups;
/**
* @see \Bolt\Entity\Content
*/
trait ContentExtrasTrait
{
/**
* @var ContentExtension
*/
private $contentExtension;
public function setContentExtension(ContentExtension $contentExtension): void
{
$this->contentExtension = $contentExtension;
}
/**
* @internal This should not be used outside of API. Use ContentExtension or Twig filters instead.
*
* @Groups("get_content")
*/
public function getExtras(): array
{
/** @var Content $content */
$content = $this;
return [
'title' => $this->contentExtension->getTitle($content),
'image' => $this->contentExtension->getImage($content),
'excerpt' => $this->contentExtension->getExcerpt($content),
'link' => $this->contentExtension->getLink($content),
'editLink' => $this->contentExtension->getEditLink($content),
];
}
}
| 1 | 11,751 | We already have a method for it 'icon' => $content->getIcon() | bolt-core | php |
@@ -9,6 +9,7 @@ import (
"github.com/aws/aws-sdk-go/service/kms"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
+ "golang.org/x/net/context"
)
// WrapEntry is builder that return a proper key decrypter and error | 1 | package s3crypto
import (
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
// WrapEntry is builder that return a proper key decrypter and error
type WrapEntry func(Envelope) (CipherDataDecrypter, error)
// CEKEntry is a builder that returns a proper content decrypter and error
type CEKEntry func(CipherData) (ContentCipher, error)
// DecryptionClient is an S3 crypto client. The decryption client
// will handle all get object requests from Amazon S3.
// Supported key wrapping algorithms:
// *AWS KMS
//
// Supported content ciphers:
// * AES/GCM
// * AES/CBC
type DecryptionClient struct {
S3Client s3iface.S3API
// LoadStrategy is used to load the metadata either from the metadata of the object
// or from a separate file in s3.
//
// Defaults to our default load strategy.
LoadStrategy LoadStrategy
WrapRegistry map[string]WrapEntry
CEKRegistry map[string]CEKEntry
PadderRegistry map[string]Padder
}
// NewDecryptionClient instantiates a new S3 crypto client
//
// Example:
// sess := session.New()
// svc := s3crypto.NewDecryptionClient(sess, func(svc *s3crypto.DecryptionClient) {
// // Custom client options here
// })
func NewDecryptionClient(prov client.ConfigProvider, options ...func(*DecryptionClient)) *DecryptionClient {
s3client := s3.New(prov)
client := &DecryptionClient{
S3Client: s3client,
LoadStrategy: defaultV2LoadStrategy{
client: s3client,
},
WrapRegistry: map[string]WrapEntry{
KMSWrap: (kmsKeyHandler{
kms: kms.New(prov),
}).decryptHandler,
},
CEKRegistry: map[string]CEKEntry{
AESGCMNoPadding: newAESGCMContentCipher,
strings.Join([]string{AESCBC, AESCBCPadder.Name()}, "/"): newAESCBCContentCipher,
},
PadderRegistry: map[string]Padder{
strings.Join([]string{AESCBC, AESCBCPadder.Name()}, "/"): AESCBCPadder,
"NoPadding": NoPadder,
},
}
for _, option := range options {
option(client)
}
return client
}
// GetObjectRequest will make a request to s3 and retrieve the object. In this process
// decryption will be done. The SDK only supports V2 reads of KMS and GCM.
//
// Example:
// sess := session.New()
// svc := s3crypto.NewDecryptionClient(sess)
// req, out := svc.GetObjectRequest(&s3.GetObjectInput {
// Key: aws.String("testKey"),
// Bucket: aws.String("testBucket"),
// })
// err := req.Send()
func (c *DecryptionClient) GetObjectRequest(input *s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) {
req, out := c.S3Client.GetObjectRequest(input)
req.Handlers.Unmarshal.PushBack(func(r *request.Request) {
env, err := c.LoadStrategy.Load(r)
if err != nil {
r.Error = err
out.Body.Close()
return
}
// If KMS should return the correct CEK algorithm with the proper
// KMS key provider
cipher, err := c.contentCipherFromEnvelope(env)
if err != nil {
r.Error = err
out.Body.Close()
return
}
reader, err := cipher.DecryptContents(out.Body)
if err != nil {
r.Error = err
out.Body.Close()
return
}
out.Body = reader
})
return req, out
}
// GetObject is a wrapper for GetObjectRequest
func (c *DecryptionClient) GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
req, out := c.GetObjectRequest(input)
return out, req.Send()
}
// GetObjectWithContext is a wrapper for GetObjectRequest with the additional
// context, and request options support.
//
// GetObjectWithContext is the same as GetObject with the additional support for
// Context input parameters. The Context must not be nil. A nil Context will
// cause a panic. Use the Context to add deadlining, timeouts, etc. In the future
// this may create sub-contexts for individual underlying requests.
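//
// A minimal usage sketch (illustrative only; the bucket/key values and timeout
// are examples, and svc is assumed to be a configured *DecryptionClient):
//
//   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//   defer cancel()
//   out, err := svc.GetObjectWithContext(ctx, &s3.GetObjectInput{
//       Bucket: aws.String("testBucket"),
//       Key:    aws.String("testKey"),
//   })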
func (c *DecryptionClient) GetObjectWithContext(ctx aws.Context, input *s3.GetObjectInput, opts ...request.Option) (*s3.GetObjectOutput, error) {
req, out := c.GetObjectRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
| 1 | 9,855 | Instead of importing `golang.org/x/net/context` The SDK should use `aws.BackgroundContext()` instead of `context.Background()` | aws-aws-sdk-go | go |
@@ -38,7 +38,7 @@ class MediaController extends Controller
*/
public function getMedia($id)
{
- return $this->get('sonata.media.manager.media')->findOneBy(array('id' => $id));
+ return $this->get('sonata.media.manager.media')->find($id);
}
/** | 1 | <?php
/*
* This file is part of the Sonata package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Controller;
use Symfony\Bundle\FrameworkBundle\Controller\Controller;
use Symfony\Component\HttpFoundation\BinaryFileResponse;
use Symfony\Component\HttpKernel\Exception\NotFoundHttpException;
use Sonata\MediaBundle\Model\MediaInterface;
use Sonata\MediaBundle\Provider\MediaProviderInterface;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\Security\Core\Exception\AccessDeniedException;
class MediaController extends Controller
{
/**
* @param MediaInterface $media
*
* @return MediaProviderInterface
*/
public function getProvider(MediaInterface $media)
{
return $this->get('sonata.media.pool')->getProvider($media->getProviderName());
}
/**
* @param string $id
*
* @return MediaInterface
*/
public function getMedia($id)
{
return $this->get('sonata.media.manager.media')->findOneBy(array('id' => $id));
}
/**
* @throws NotFoundHttpException
*
* @param string $id
* @param string $format
*
* @return Response
*/
public function downloadAction($id, $format = 'reference')
{
$media = $this->getMedia($id);
if (!$media) {
throw new NotFoundHttpException(sprintf('unable to find the media with the id : %s', $id));
}
if (!$this->get('sonata.media.pool')->getDownloadSecurity($media)->isGranted($media, $this->getRequest())) {
throw new AccessDeniedException();
}
$response = $this->getProvider($media)->getDownloadResponse($media, $format, $this->get('sonata.media.pool')->getDownloadMode($media));
if ($response instanceof BinaryFileResponse) {
$response->prepare($this->get('request'));
}
return $response;
}
/**
* @throws NotFoundHttpException
*
* @param string $id
* @param string $format
*
* @return Response
*/
public function viewAction($id, $format = 'reference')
{
$media = $this->getMedia($id);
if (!$media) {
throw new NotFoundHttpException(sprintf('unable to find the media with the id : %s', $id));
}
if (!$this->get('sonata.media.pool')->getDownloadSecurity($media)->isGranted($media, $this->getRequest())) {
throw new AccessDeniedException();
}
return $this->render('SonataMediaBundle:Media:view.html.twig', array(
'media' => $media,
'formats' => $this->get('sonata.media.pool')->getFormatNamesByContext($media->getContext()),
'format' => $format
));
}
/**
* This action applies a given filter to a given image,
* optionally saves the image and
* outputs it to the browser at the same time
*
* @param string $path
* @param string $filter
*
* @return Response
*/
public function liipImagineFilterAction($path, $filter)
{
if (!preg_match('@([^/]*)/(.*)/([0-9]*)_([a-z_A-Z]*).jpg@', $path, $matches)) {
throw new NotFoundHttpException();
}
$targetPath = $this->get('liip_imagine.cache.manager')->resolve($this->get('request'), $path, $filter);
if ($targetPath instanceof Response) {
return $targetPath;
}
// get the file
$media = $this->getMedia($matches[3]);
if (!$media) {
throw new NotFoundHttpException();
}
$provider = $this->getProvider($media);
$file = $provider->getReferenceFile($media);
// load the file content from the abstracted file system
$tmpFile = sprintf('%s.%s', tempnam(sys_get_temp_dir(), 'sonata_media_liip_imagine'), $media->getExtension());
file_put_contents($tmpFile, $file->getContent());
$image = $this->get('liip_imagine')->open($tmpFile);
$response = $this->get('liip_imagine.filter.manager')->get($this->get('request'), $filter, $image, $path);
if ($targetPath) {
$response = $this->get('liip_imagine.cache.manager')->store($response, $targetPath, $filter);
}
return $response;
}
}
| 1 | 6,381 | I think this is an agnostic change for the ORM right? Why was it doing this? - this does not work on PHPCR-ODM as the `id` is not a field - although I guess it could be mapped as such. | sonata-project-SonataMediaBundle | php |
@@ -73,7 +73,7 @@ class ApplicationController < ActionController::Base
helper_method :included_in_current_users_plan?
def topics
- Topic.top
+ Topic.all
end
helper_method :topics
| 1 | class ApplicationController < ActionController::Base
include Clearance::Controller
helper :all
protect_from_forgery with: :exception
before_filter :capture_campaign_params
protected
def must_be_admin
unless current_user_is_admin?
flash[:error] = 'You do not have permission to view that page.'
redirect_to root_url
end
end
def must_be_team_owner
require_login
if signed_in?
if current_team.blank? || !current_team.owner?(current_user)
deny_access("You must be the owner of the team.")
end
end
end
def must_be_subscription_owner
unless current_user_is_subscription_owner?
deny_access("You must be the owner of the subscription.")
end
end
def current_user_is_subscription_owner?
current_user_has_active_subscription? &&
current_user.subscription.owner?(current_user)
end
helper_method :current_user_is_subscription_owner?
def current_user_has_active_subscription?
current_user && current_user.has_active_subscription?
end
helper_method :current_user_has_active_subscription?
def current_user_is_eligible_for_annual_upgrade?
current_user_has_active_subscription? &&
current_user.eligible_for_annual_upgrade?
end
helper_method :current_user_is_eligible_for_annual_upgrade?
def current_user_has_access_to?(feature)
current_user && current_user.has_access_to?(feature)
end
helper_method :current_user_has_access_to?
def subscription_includes_mentor?
current_user.has_subscription_with_mentor?
end
helper_method :subscription_includes_mentor?
def current_user_is_admin?
current_user && (current_user.admin? || masquerading?)
end
def masquerading?
session[:admin_id].present?
end
helper_method :masquerading?
def included_in_current_users_plan?(licenseable)
licenseable.included_in_plan?(current_user.plan)
end
helper_method :included_in_current_users_plan?
def topics
Topic.top
end
helper_method :topics
def current_team
current_user.team
end
helper_method :current_team
def capture_campaign_params
session[:campaign_params] ||= {
utm_campaign: params[:utm_campaign],
utm_medium: params[:utm_medium],
utm_source: params[:utm_source],
}
end
end
| 1 | 14,216 | I'm not 100% sure, but I think this should be `explorable`. If not, I think `explorable` can be removed entirely. | thoughtbot-upcase | rb |
@@ -124,15 +124,13 @@ class MockTarget(target.FileSystemTarget):
self.wrapper = wrapper
def write(self, data):
- if six.PY3:
- stderrbytes = sys.stderr.buffer
- else:
- stderrbytes = sys.stderr
-
if mock_target._mirror_on_stderr:
if self._write_line:
sys.stderr.write(fn + ": ")
- stderrbytes.write(data)
+ if six.binary_type:
+ sys.stderr.write(data.decode('utf8'))
+ else:
+ sys.stderr.write(data)
if (data[-1]) == '\n':
self._write_line = True
else: | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provides a class :class:`MockTarget`, an implementation of :py:class:`~luigi.target.Target`.
:class:`MockTarget` contains all data in-memory.
The main purpose is unit testing workflows without writing to disk.
"""
import multiprocessing
from io import BytesIO
import sys
import warnings
from luigi import six
from luigi import target
from luigi.format import get_default_format, MixedUnicodeBytes
class MockFileSystem(target.FileSystem):
"""
MockFileSystem inspects/modifies _data to simulate file system operations.
"""
_data = None
def get_all_data(self):
# This starts a server in the background, so we don't want to do it in the global scope
if MockFileSystem._data is None:
MockFileSystem._data = multiprocessing.Manager().dict()
return MockFileSystem._data
def get_data(self, fn):
return self.get_all_data()[fn]
def exists(self, path):
return MockTarget(path).exists()
def remove(self, path, recursive=True, skip_trash=True):
"""
Removes the given mockfile. skip_trash doesn't have any meaning.
"""
if recursive:
to_delete = []
for s in self.get_all_data().keys():
if s.startswith(path):
to_delete.append(s)
for s in to_delete:
self.get_all_data().pop(s)
else:
self.get_all_data().pop(path)
def listdir(self, path):
"""
listdir does a prefix match of self.get_all_data(), but doesn't yet support globs.
"""
return [s for s in self.get_all_data().keys()
if s.startswith(path)]
def isdir(self, path):
return any(self.listdir(path))
def mkdir(self, path, parents=True, raise_if_exists=False):
"""
mkdir is a noop.
"""
pass
def clear(self):
self.get_all_data().clear()
class MockTarget(target.FileSystemTarget):
fs = MockFileSystem()
def __init__(self, fn, is_tmp=None, mirror_on_stderr=False, format=None):
self._mirror_on_stderr = mirror_on_stderr
self._fn = fn
if format is None:
format = get_default_format()
# Allow writing unicode to the file for backwards compatibility
if six.PY2:
format = format >> MixedUnicodeBytes
self.format = format
def exists(self,):
return self._fn in self.fs.get_all_data()
def rename(self, path, raise_if_exists=False):
if raise_if_exists and path in self.fs.get_all_data():
raise RuntimeError('Destination exists: %s' % path)
contents = self.fs.get_all_data().pop(self._fn)
self.fs.get_all_data()[path] = contents
@property
def path(self):
return self._fn
def open(self, mode):
fn = self._fn
mock_target = self
class Buffer(BytesIO):
# Just to be able to do writing + reading from the same buffer
_write_line = True
def set_wrapper(self, wrapper):
self.wrapper = wrapper
def write(self, data):
if six.PY3:
stderrbytes = sys.stderr.buffer
else:
stderrbytes = sys.stderr
if mock_target._mirror_on_stderr:
if self._write_line:
sys.stderr.write(fn + ": ")
stderrbytes.write(data)
if (data[-1]) == '\n':
self._write_line = True
else:
self._write_line = False
super(Buffer, self).write(data)
def close(self):
if mode == 'w':
try:
mock_target.wrapper.flush()
except AttributeError:
pass
mock_target.fs.get_all_data()[fn] = self.getvalue()
super(Buffer, self).close()
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_type:
self.close()
def __enter__(self):
return self
def readable(self):
return mode == 'r'
def writeable(self):
return mode == 'w'
def seekable(self):
return False
if mode == 'w':
wrapper = self.format.pipe_writer(Buffer())
wrapper.set_wrapper(wrapper)
return wrapper
else:
return self.format.pipe_reader(Buffer(self.fs.get_all_data()[fn]))
class MockFile(MockTarget):
def __init__(self, *args, **kwargs):
warnings.warn("MockFile has been renamed MockTarget", DeprecationWarning, stacklevel=2)
super(MockFile, self).__init__(*args, **kwargs)
| 1 | 14,809 | Note to self: Why was this not originally not using `stderrbytes`? | spotify-luigi | py |
@@ -364,6 +364,14 @@ bool FileUtils::exist(const std::string& path) {
return access(path.c_str(), F_OK) == 0;
}
+// static
+bool FileUtils::rename(const std::string& src, const std::string& dst) {
+ auto status = ::rename(src.c_str(), dst.c_str());
+ LOG_IF(WARNING, status < 0) << "Rename " << src << " to " << dst << " failed, the errno: "
+ << ::strerror(errno);
+ return status == 0;
+}
+
std::vector<std::string> FileUtils::listAllTypedEntitiesInDir(
const char* dirpath,
FileType type, | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "fs/FileUtils.h"
#include <dirent.h>
#include <fnmatch.h>
#include <limits.h>
#include <stdlib.h>
namespace nebula {
namespace fs {
static const int32_t kMaxPathLen = 1024;
namespace detail {
bool removeDir(const char* path, bool recursively) {
// Assuming the path is a directory
DIR *dh = opendir(path);
if (!dh) {
LOG(ERROR) << "Failed to read the directory \"" << path
<< "\" (" << errno << "): " << strerror(errno);
return false;
}
bool succeeded = true;
struct dirent *dEnt;
errno = 0;
while (succeeded && !!(dEnt = readdir(dh))) {
if (!strcmp(dEnt->d_name, ".") || !strcmp(dEnt->d_name, "..")) {
// Skip "." and ".."
continue;
}
if (FileUtils::isDir(dEnt, path) && !recursively) {
LOG(ERROR) << "Cannot remove the directory \"" << path
<< "\" because it contains sub-directory \""
<< dEnt->d_name << "\"";
succeeded = false;
} else {
// Remove the directory entry, recursive call
succeeded = FileUtils::remove(
FileUtils::joinPath(path, dEnt->d_name).c_str(), recursively);
if (!succeeded) {
LOG(ERROR) << "Failed to remove \"" << dEnt->d_name
<< "\" in \"" << path
<< "\"";
} else {
VLOG(2) << "Succeeded removing \"" << dEnt->d_name << "\"";
}
}
}
if (succeeded && errno) {
// There is an error
LOG(ERROR) << "Failed to read the directory \"" << path
<< "\" (" << errno << "): " << strerror(errno);
succeeded = false;
}
if (closedir(dh)) {
// Failed to close the directory stream
LOG(ERROR) << "Failed to close the directory stream (" << errno
<< "): " << strerror(errno);
return false;
}
if (!succeeded) {
LOG(ERROR) << "Failed to remove the content of the directory \""
<< path << "\"";
return false;
}
// All content has been removed, now remove the directory itself
if (rmdir(path)) {
LOG(ERROR) << "Failed to remove the directory \"" << path
<< "\" (" << errno << "): " << strerror(errno);
return false;
}
return true;
}
} // namespace detail
StatusOr<std::string> FileUtils::readLink(const char *path) {
char buffer[kMaxPathLen];
auto len = ::readlink(path, buffer, kMaxPathLen);
if (len == -1) {
return Status::Error("readlink %s: %s", path, ::strerror(errno));
}
return std::string(buffer, len);
}
StatusOr<std::string> FileUtils::realPath(const char *path) {
char *buffer = ::realpath(path, NULL);
if (buffer == NULL) {
return Status::Error("realpath %s: %s", path, ::strerror(errno));
}
std::string truePath(buffer);
::free(buffer);
return truePath;
}
std::string FileUtils::dirname(const char *path) {
DCHECK(path != nullptr && *path != '\0');
if (::strcmp("/", path) == 0) { // root only
return "/";
}
static const std::regex pattern("(.*)/([^/]+)/?");
std::cmatch result;
if (std::regex_match(path, result, pattern)) {
if (result[1].first == result[1].second) { // "/path" or "/path/"
return "/";
}
return result[1].str(); // "/path/to", "path/to", or "path/to/"
}
return ".";
}
std::string FileUtils::basename(const char *path) {
DCHECK(path != nullptr && *path != '\0');
if (::strcmp("/", path) == 0) {
return "";
}
static const std::regex pattern("(/*([^/]+/+)*)([^/]+)/?");
std::cmatch result;
std::regex_match(path, result, pattern);
return result[3].str();
}
const char* FileUtils::getFileTypeName(FileType type) {
static const char* kTypeNames[] = {
"Unknown",
"NotExist",
"Regular",
"Directory",
"SoftLink",
"CharDevice",
"BlockDevice",
"FIFO",
"Socket"
};
return kTypeNames[static_cast<int>(type)];
}
size_t FileUtils::fileSize(const char* path) {
struct stat st;
if (lstat(path, &st)) {
// Failed to get file stat
VLOG(3) << "Failed to get information about \"" << path
<< "\" (" << errno << "): " << strerror(errno);
return 0;
}
return st.st_size;
}
FileType FileUtils::fileType(const char* path) {
struct stat st;
if (lstat(path, &st)) {
if (errno == ENOENT) {
VLOG(3) << "The path \"" << path << "\" does not exist";
return FileType::NOTEXIST;
} else {
// Failed to get file stat
VLOG(3) << "Failed to get information about \"" << path
<< "\" (" << errno << "): " << strerror(errno);
return FileType::UNKNOWN;
}
}
if (S_ISREG(st.st_mode)) {
return FileType::REGULAR;
} else if (S_ISDIR(st.st_mode)) {
return FileType::DIRECTORY;
} else if (S_ISLNK(st.st_mode)) {
return FileType::SYM_LINK;
} else if (S_ISCHR(st.st_mode)) {
return FileType::CHAR_DEV;
} else if (S_ISBLK(st.st_mode)) {
return FileType::BLOCK_DEV;
} else if (S_ISFIFO(st.st_mode)) {
return FileType::FIFO;
} else if (S_ISSOCK(st.st_mode)) {
return FileType::SOCKET;
}
return FileType::UNKNOWN;
}
int64_t FileUtils::fileLastUpdateTime(const char* path) {
struct stat st;
if (lstat(path, &st)) {
// Failed to get file stat
LOG(ERROR) << "Failed to get file information for \"" << path
<< "\" (" << errno << "): " << strerror(errno);
return -1;
}
return st.st_mtime;
}
bool FileUtils::isStdinTTY() {
return isFdTTY(::fileno(stdin));
}
bool FileUtils::isStdoutTTY() {
return isFdTTY(::fileno(stdout));
}
bool FileUtils::isStderrTTY() {
return isFdTTY(::fileno(stderr));
}
bool FileUtils::isFdTTY(int fd) {
return ::isatty(fd) == 1;
}
std::string FileUtils::joinPath(const folly::StringPiece dir,
const folly::StringPiece filename) {
std::string buf;
std::size_t len = dir.size();
if (len == 0) {
buf.resize(filename.size() + 2);
strcpy(&(buf[0]), "./"); // NOLINT
strncpy(&(buf[2]), filename.begin(), filename.size());
return buf;
}
if (dir[len-1] == '/') {
buf.resize(len + filename.size());
strncpy(&(buf[0]), dir.data(), len);
} else {
buf.resize(len + filename.size() + 1);
strncpy(&(buf[0]), dir.data(), len);
buf[len++] = '/';
}
strncpy(&(buf[len]), filename.data(), filename.size());
return buf;
}
void FileUtils::dividePath(const folly::StringPiece path,
folly::StringPiece& parent,
folly::StringPiece& child) {
if (path.empty() || path == "/") {
// The given string is empty or just "/"
parent = folly::StringPiece();
child = path;
return;
}
folly::StringPiece pathToLook =
(path.back() == '/') ? folly::StringPiece(path.begin(), path.size() - 1)
: path;
auto pos = pathToLook.rfind('/');
if (pos == std::string::npos) {
// Not found
parent = folly::StringPiece();
child = pathToLook;
return;
}
// Found the last "/"
child = folly::StringPiece(pathToLook.begin() + pos + 1,
pathToLook.size() - pos - 1);
if (pos == 0) {
// In the root directory
parent = folly::StringPiece(pathToLook.begin(), 1);
} else {
parent = folly::StringPiece(pathToLook.begin(), pos);
}
}
bool FileUtils::remove(const char* path, bool recursively) {
auto type = fileType(path);
switch (type) {
case FileType::REGULAR:
case FileType::SYM_LINK:
// Regular file or link
if (unlink(path)) {
// Failed
LOG(ERROR) << "Failed to remove the file \"" << path
<< "\" (" << errno << "): " << strerror(errno);
return false;
}
return true;
case FileType::DIRECTORY:
// Directory
return detail::removeDir(path, recursively);
case FileType::CHAR_DEV:
case FileType::BLOCK_DEV:
case FileType::FIFO:
case FileType::SOCKET:
LOG(ERROR) << "Only a directory, a regular file, or a soft link"
<< " can be removed. But \"" << path << "\" is a "
<< getFileTypeName(type);
return false;
case FileType::NOTEXIST:
VLOG(2) << "The path \"" << path << "\" does not exist";
return true;
default:
LOG(ERROR) << "We don't know the type of \"" << path << "\"";
return false;
}
}
bool FileUtils::makeDir(const std::string& dir, uint32_t mode) {
if (dir.empty()) {
return false;
}
FileType type = fileType(dir.c_str());
if (type == FileType::DIRECTORY) {
// The directory already exists
return true;
} else if (type != FileType::NOTEXIST) {
// A file has existed, cannot create the directory
return false;
}
folly::StringPiece parent;
folly::StringPiece child;
dividePath(dir, parent, child);
// create parent if it is not empty
if (!parent.empty()) {
bool ret = makeDir(parent.toString(), mode);
if (!ret) {
return false;
}
}
int err = mkdir(dir.c_str(), mode);
if (err != 0) {
return fileType(dir.c_str()) == FileType::DIRECTORY;
}
return true;
}
bool FileUtils::exist(const std::string& path) {
if (path.empty()) {
return false;
}
return access(path.c_str(), F_OK) == 0;
}
std::vector<std::string> FileUtils::listAllTypedEntitiesInDir(
const char* dirpath,
FileType type,
bool returnFullPath,
const char* namePattern) {
std::vector<std::string> entities;
struct dirent *dirInfo;
DIR *dir = opendir(dirpath);
if (dir == nullptr) {
LOG(ERROR)<< "Failed to read the directory \"" << dirpath
<< "\" (" << errno << "): " << strerror(errno);
return entities;
}
while ((dirInfo = readdir(dir)) != nullptr) {
if ((type == FileType::REGULAR && FileUtils::isReg(dirInfo, dirpath)) ||
(type == FileType::DIRECTORY && FileUtils::isDir(dirInfo, dirpath)) ||
(type == FileType::SYM_LINK && FileUtils::isLink(dirInfo, dirpath)) ||
(type == FileType::CHAR_DEV && FileUtils::isChr(dirInfo, dirpath)) ||
(type == FileType::BLOCK_DEV && FileUtils::isBlk(dirInfo, dirpath)) ||
(type == FileType::FIFO && FileUtils::isFifo(dirInfo, dirpath)) ||
(type == FileType::SOCKET && FileUtils::isSock(dirInfo, dirpath))) {
if (!strcmp(dirInfo->d_name, ".") || !strcmp(dirInfo->d_name, "..")) {
// Skip the "." and ".."
continue;
}
if (namePattern &&
fnmatch(namePattern, dirInfo->d_name, FNM_FILE_NAME | FNM_PERIOD)) {
// Mismatched
continue;
}
// We found one entity
entities.emplace_back(
returnFullPath ? joinPath(dirpath, std::string(dirInfo->d_name))
: std::string(dirInfo->d_name));
}
}
closedir(dir);
return entities;
}
std::vector<std::string> FileUtils::listAllFilesInDir(
const char* dirpath,
bool returnFullPath,
const char* namePattern) {
return listAllTypedEntitiesInDir(dirpath,
FileType::REGULAR,
returnFullPath,
namePattern);
}
std::vector<std::string> FileUtils::listAllDirsInDir(
const char* dirpath,
bool returnFullPath,
const char* namePattern) {
return listAllTypedEntitiesInDir(dirpath,
FileType::DIRECTORY,
returnFullPath,
namePattern);
}
FileUtils::Iterator::Iterator(std::string path, const std::regex *pattern)
: path_(std::move(path)) {
pattern_ = pattern;
openFileOrDirectory();
if (status_.ok()) {
next();
}
}
FileUtils::Iterator::~Iterator() {
if (fstream_ != nullptr && fstream_->is_open()) {
fstream_->close();
}
if (dir_ != nullptr) {
::closedir(dir_);
dir_ = nullptr;
}
}
void FileUtils::Iterator::next() {
CHECK(valid());
CHECK(type_ != FileType::UNKNOWN);
while (true) {
if (type_ == FileType::DIRECTORY) {
dirNext();
} else {
fileNext();
}
if (!status_.ok()) {
return;
}
if (pattern_ != nullptr) {
if (!std::regex_search(entry_, matched_, *pattern_)) {
continue;
}
}
break;
}
}
void FileUtils::Iterator::dirNext() {
CHECK(type_ == FileType::DIRECTORY);
CHECK(dir_ != nullptr);
struct dirent *dent;
while ((dent = ::readdir(dir_)) != nullptr) {
if (dent->d_name[0] == '.') {
continue;
}
break;
}
if (dent == nullptr) {
status_ = Status::Error("EOF");
return;
}
entry_ = dent->d_name;
}
void FileUtils::Iterator::fileNext() {
CHECK(type_ == FileType::REGULAR);
CHECK(fstream_ != nullptr);
if (!std::getline(*fstream_, entry_)) {
status_ = Status::Error("EOF");
}
}
void FileUtils::Iterator::openFileOrDirectory() {
type_ = FileUtils::fileType(path_.c_str());
if (type_ == FileType::DIRECTORY) {
if ((dir_ = ::opendir(path_.c_str())) == nullptr) {
status_ = Status::Error("opendir `%s': %s", path_.c_str(), ::strerror(errno));
return;
}
} else if (type_ == FileType::REGULAR) {
fstream_ = std::make_unique<std::ifstream>();
fstream_->open(path_);
if (!fstream_->is_open()) {
status_ = Status::Error("open `%s': %s", path_.c_str(), ::strerror(errno));
return;
}
} else if (type_ == FileType::SYM_LINK) {
auto result = FileUtils::realPath(path_.c_str());
if (!result.ok()) {
status_ = std::move(result).status();
return;
}
path_ = std::move(result).value();
openFileOrDirectory();
} else {
status_ = Status::Error("Filetype not supported `%s': %s",
path_.c_str(), FileUtils::getFileTypeName(type_));
return;
}
status_ = Status::OK();
}
CHECK_TYPE(Reg, REGULAR, REG)
CHECK_TYPE(Dir, DIRECTORY, DIR)
CHECK_TYPE(Link, SYM_LINK, LNK)
CHECK_TYPE(Chr, CHAR_DEV, CHR)
CHECK_TYPE(Blk, BLOCK_DEV, BLK)
CHECK_TYPE(Fifo, FIFO, FIFO)
CHECK_TYPE(Sock, SOCKET, SOCK)
} // namespace fs
} // namespace nebula
| 1 | 25,618 | Is betterLOG_IF(ERROR, status != 0) ? | vesoft-inc-nebula | cpp |
@@ -38,6 +38,9 @@ class BaseLoadTest(TestCase):
self.session.auth = self.auth
self.session.headers.update({'Content-Type': 'application/json'})
+ self.bucket = 'default'
+ self.collection = 'default'
+
# Keep track of created objects.
self._collections_created = {}
| 1 | import json
import os
import uuid
from requests.auth import HTTPBasicAuth, AuthBase
from loads.case import TestCase
from konfig import Config
class RawAuth(AuthBase):
def __init__(self, authorization):
self.authorization = authorization
def __call__(self, r):
r.headers['Authorization'] = self.authorization
return r
class BaseLoadTest(TestCase):
def __init__(self, *args, **kwargs):
"""Initialization that happens once per user.
:note:
This method is called as many times as number of users.
"""
super(BaseLoadTest, self).__init__(*args, **kwargs)
self.conf = self._get_configuration()
if self.conf.get('smoke', False):
self.random_user = "[email protected]"
self.auth = RawAuth("Bearer %s" % self.conf.get('token'))
else:
self.random_user = uuid.uuid4().hex
self.auth = HTTPBasicAuth(self.random_user, 'secret')
self.session.auth = self.auth
self.session.headers.update({'Content-Type': 'application/json'})
# Keep track of created objects.
self._collections_created = {}
def _get_configuration(self):
# Loads is removing the extra information contained in the ini files,
# so we need to parse it again.
config_file = self.config['config']
# When copying the configuration files, we lose the config/ prefix so,
# try to read from this folder in case the file doesn't exist.
if not os.path.isfile(config_file):
config_file = os.path.basename(config_file)
if not os.path.isfile(config_file):
msg = 'Unable to locate the configuration file, aborting.'
raise LookupError(msg)
return Config(config_file).get_map('loads')
def api_url(self, path):
url = "{0}/v1/{1}".format(self.server_url.rstrip('/'), path)
return url
def bucket_url(self, bucket=None, prefix=True):
url = 'buckets/%s' % (bucket or self.bucket)
return self.api_url(url) if prefix else '/' + url
def group_url(self, bucket=None, group=None, prefix=True):
bucket_url = self.bucket_url(bucket, prefix)
group = group or self.group
return '%s/groups/%s' % (bucket_url, group)
def collection_url(self, bucket=None, collection=None, prefix=True):
bucket_url = self.bucket_url(bucket, prefix)
collection = collection or self.collection
collection_url = bucket_url + '/collections/%s' % collection
# Create collection objects.
if collection not in self._collections_created:
self.session.put(bucket_url,
data=json.dumps({'data': {}}),
headers={'If-None-Match': '*'})
self.session.put(collection_url,
data=json.dumps({'data': {}}),
headers={'If-None-Match': '*'})
self._collections_created[collection] = True
return collection_url + '/records'
def record_url(self, record_id, bucket=None, collection=None, prefix=True):
collection_url = self.collection_url(bucket, collection, prefix)
return collection_url + '/%s' % record_id
| 1 | 8,057 | I wouldn't call it default too. | Kinto-kinto | py |
@@ -2,6 +2,7 @@
* and will be replaced soon by a Vue component.
*/
+/* eslint-disable no-var */
import browser from 'browser';
import dom from 'dom';
import 'css!./navdrawer'; | 1 | /* Cleaning this file properly is not neecessary, since it's an outdated library
* and will be replaced soon by a Vue component.
*/
import browser from 'browser';
import dom from 'dom';
import 'css!./navdrawer';
import 'scrollStyles';
export default function (options) {
function getTouches(e) {
return e.changedTouches || e.targetTouches || e.touches;
}
function onMenuTouchStart(e) {
options.target.classList.remove('transition');
const touches = getTouches(e);
const touch = touches[0] || {};
menuTouchStartX = touch.clientX;
menuTouchStartY = touch.clientY;
menuTouchStartTime = new Date().getTime();
}
function setVelocity(deltaX) {
const time = new Date().getTime() - (menuTouchStartTime || 0);
velocity = Math.abs(deltaX) / time;
}
function onMenuTouchMove(e) {
const isOpen = self.visible;
const touches = getTouches(e);
const touch = touches[0] || {};
const endX = touch.clientX || 0;
const endY = touch.clientY || 0;
const deltaX = endX - (menuTouchStartX || 0);
const deltaY = endY - (menuTouchStartY || 0);
setVelocity(deltaX);
if (isOpen && dragMode !== 1 && deltaX > 0) {
dragMode = 2;
}
if (dragMode === 0 && (!isOpen || Math.abs(deltaX) >= 10) && Math.abs(deltaY) < 5) {
dragMode = 1;
scrollContainer.addEventListener('scroll', disableEvent);
self.showMask();
} else if (dragMode === 0 && Math.abs(deltaY) >= 5) {
dragMode = 2;
}
if (dragMode === 1) {
newPos = currentPos + deltaX;
self.changeMenuPos();
}
}
function onMenuTouchEnd(e) {
options.target.classList.add('transition');
scrollContainer.removeEventListener('scroll', disableEvent);
dragMode = 0;
const touches = getTouches(e);
const touch = touches[0] || {};
const endX = touch.clientX || 0;
const endY = touch.clientY || 0;
const deltaX = endX - (menuTouchStartX || 0);
const deltaY = endY - (menuTouchStartY || 0);
currentPos = deltaX;
self.checkMenuState(deltaX, deltaY);
}
function onEdgeTouchStart(e) {
if (isPeeking) {
onMenuTouchMove(e);
} else {
if (((getTouches(e)[0] || {}).clientX || 0) <= options.handleSize) {
isPeeking = true;
if (e.type === 'touchstart') {
dom.removeEventListener(edgeContainer, 'touchmove', onEdgeTouchMove, {});
dom.addEventListener(edgeContainer, 'touchmove', onEdgeTouchMove, {});
}
onMenuTouchStart(e);
}
}
}
function onEdgeTouchMove(e) {
e.preventDefault();
e.stopPropagation();
onEdgeTouchStart(e);
}
function onEdgeTouchEnd(e) {
if (isPeeking) {
isPeeking = false;
dom.removeEventListener(edgeContainer, 'touchmove', onEdgeTouchMove, {});
onMenuTouchEnd(e);
}
}
function disableEvent(e) {
e.preventDefault();
e.stopPropagation();
}
function onBackgroundTouchStart(e) {
const touches = getTouches(e);
const touch = touches[0] || {};
backgroundTouchStartX = touch.clientX;
backgroundTouchStartTime = new Date().getTime();
}
function onBackgroundTouchMove(e) {
const touches = getTouches(e);
const touch = touches[0] || {};
const endX = touch.clientX || 0;
if (endX <= options.width && self.isVisible) {
countStart++;
const deltaX = endX - (backgroundTouchStartX || 0);
if (countStart == 1) {
startPoint = deltaX;
}
if (deltaX < 0 && dragMode !== 2) {
dragMode = 1;
newPos = deltaX - startPoint + options.width;
self.changeMenuPos();
const time = new Date().getTime() - (backgroundTouchStartTime || 0);
velocity = Math.abs(deltaX) / time;
}
}
e.preventDefault();
e.stopPropagation();
}
function onBackgroundTouchEnd(e) {
const touches = getTouches(e);
const touch = touches[0] || {};
const endX = touch.clientX || 0;
const deltaX = endX - (backgroundTouchStartX || 0);
self.checkMenuState(deltaX);
countStart = 0;
}
function onMaskTransitionEnd() {
const classList = mask.classList;
if (!classList.contains('backdrop')) {
classList.add('hide');
}
}
let self;
let defaults;
let mask;
var newPos = 0;
var currentPos = 0;
var startPoint = 0;
var countStart = 0;
var velocity = 0;
options.target.classList.add('transition');
var dragMode = 0;
var scrollContainer = options.target.querySelector('.mainDrawer-scrollContainer');
scrollContainer.classList.add('scrollY');
const TouchMenuLA = function () {
self = this;
defaults = {
width: 260,
handleSize: 10,
disableMask: false,
maxMaskOpacity: 0.5
};
this.isVisible = false;
this.initialize();
};
TouchMenuLA.prototype.initElements = function () {
options.target.classList.add('touch-menu-la');
options.target.style.width = options.width + 'px';
options.target.style.left = -options.width + 'px';
if (!options.disableMask) {
mask = document.createElement('div');
mask.className = 'tmla-mask hide';
document.body.appendChild(mask);
dom.addEventListener(mask, dom.whichTransitionEvent(), onMaskTransitionEnd, {
passive: true
});
}
};
let menuTouchStartX;
let menuTouchStartY;
let menuTouchStartTime;
var edgeContainer = document.querySelector('.mainDrawerHandle');
var isPeeking = false;
TouchMenuLA.prototype.animateToPosition = function (pos) {
requestAnimationFrame(function () {
options.target.style.transform = pos ? 'translateX(' + pos + 'px)' : 'none';
});
};
TouchMenuLA.prototype.changeMenuPos = function () {
if (newPos <= options.width) {
this.animateToPosition(newPos);
}
};
TouchMenuLA.prototype.clickMaskClose = function () {
mask.addEventListener('click', function () {
self.close();
});
};
TouchMenuLA.prototype.checkMenuState = function (deltaX, deltaY) {
if (velocity >= 0.4) {
if (deltaX >= 0 || Math.abs(deltaY || 0) >= 70) {
self.open();
} else {
self.close();
}
} else {
if (newPos >= 100) {
self.open();
} else {
if (newPos) {
self.close();
}
}
}
};
TouchMenuLA.prototype.open = function () {
this.animateToPosition(options.width);
currentPos = options.width;
this.isVisible = true;
options.target.classList.add('drawer-open');
self.showMask();
self.invoke(options.onChange);
};
TouchMenuLA.prototype.close = function () {
this.animateToPosition(0);
currentPos = 0;
self.isVisible = false;
options.target.classList.remove('drawer-open');
self.hideMask();
self.invoke(options.onChange);
};
TouchMenuLA.prototype.toggle = function () {
if (self.isVisible) {
self.close();
} else {
self.open();
}
};
let backgroundTouchStartX;
let backgroundTouchStartTime;
TouchMenuLA.prototype.showMask = function () {
mask.classList.remove('hide');
mask.classList.add('backdrop');
};
TouchMenuLA.prototype.hideMask = function () {
mask.classList.add('hide');
mask.classList.remove('backdrop');
};
TouchMenuLA.prototype.invoke = function (fn) {
if (fn) {
fn.apply(self);
}
};
let _edgeSwipeEnabled;
TouchMenuLA.prototype.setEdgeSwipeEnabled = function (enabled) {
if (!options.disableEdgeSwipe) {
if (browser.touch) {
if (enabled) {
if (!_edgeSwipeEnabled) {
_edgeSwipeEnabled = true;
dom.addEventListener(edgeContainer, 'touchstart', onEdgeTouchStart, {
passive: true
});
dom.addEventListener(edgeContainer, 'touchend', onEdgeTouchEnd, {
passive: true
});
dom.addEventListener(edgeContainer, 'touchcancel', onEdgeTouchEnd, {
passive: true
});
}
} else {
if (_edgeSwipeEnabled) {
_edgeSwipeEnabled = false;
dom.removeEventListener(edgeContainer, 'touchstart', onEdgeTouchStart, {
passive: true
});
dom.removeEventListener(edgeContainer, 'touchend', onEdgeTouchEnd, {
passive: true
});
dom.removeEventListener(edgeContainer, 'touchcancel', onEdgeTouchEnd, {
passive: true
});
}
}
}
}
};
TouchMenuLA.prototype.initialize = function () {
options = Object.assign(defaults, options || {});
if (browser.edge) {
options.disableEdgeSwipe = true;
}
self.initElements();
if (browser.touch) {
dom.addEventListener(options.target, 'touchstart', onMenuTouchStart, {
passive: true
});
dom.addEventListener(options.target, 'touchmove', onMenuTouchMove, {
passive: true
});
dom.addEventListener(options.target, 'touchend', onMenuTouchEnd, {
passive: true
});
dom.addEventListener(options.target, 'touchcancel', onMenuTouchEnd, {
passive: true
});
dom.addEventListener(mask, 'touchstart', onBackgroundTouchStart, {
passive: true
});
dom.addEventListener(mask, 'touchmove', onBackgroundTouchMove, {});
dom.addEventListener(mask, 'touchend', onBackgroundTouchEnd, {
passive: true
});
dom.addEventListener(mask, 'touchcancel', onBackgroundTouchEnd, {
passive: true
});
}
self.clickMaskClose();
};
return new TouchMenuLA();
}
| 1 | 17,726 | Why disable the rule for this file? | jellyfin-jellyfin-web | js |
@@ -76,6 +76,7 @@ CTA.propTypes = {
'aria-label': PropTypes.string,
error: PropTypes.bool,
onClick: PropTypes.func,
+ ctaLinkExternal: PropTypes.bool,
};
CTA.defaultProps = { | 1 | /**
* CTA component.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import PropTypes from 'prop-types';
import classnames from 'classnames';
/**
* Internal dependencies
*/
import Link from '../Link';
const CTA = ( {
title,
description,
ctaLink,
ctaLabel,
ctaLinkExternal,
error,
onClick,
'aria-label': ariaLabel,
} ) => (
<div
className={ classnames( 'googlesitekit-cta', {
'googlesitekit-cta--error': error,
} ) }
>
{ title && <h3 className="googlesitekit-cta__title">{ title }</h3> }
{ description && typeof description === 'string' && (
<p className="googlesitekit-cta__description">{ description }</p>
) }
{ description && typeof description !== 'string' && (
<div className="googlesitekit-cta__description">
{ description }
</div>
) }
{ ctaLabel && (
<Link
href={ ctaLink }
onClick={ onClick }
inverse={ ! error }
caps
arrow
aria-label={ ariaLabel }
external={ ctaLinkExternal }
hideExternalIndicator={ ctaLinkExternal }
>
{ ctaLabel }
</Link>
) }
</div>
);
CTA.propTypes = {
title: PropTypes.string.isRequired,
description: PropTypes.oneOfType( [ PropTypes.string, PropTypes.node ] ),
ctaLink: PropTypes.string,
ctaLabel: PropTypes.string,
'aria-label': PropTypes.string,
error: PropTypes.bool,
onClick: PropTypes.func,
};
CTA.defaultProps = {
title: '',
description: '',
ctaLink: '',
ctaLabel: '',
error: false,
onClick: () => {},
};
export default CTA;
| 1 | 42,186 | Thanks for adding this :+1:. Could you please move it to go after the `ctaLink` prop? | google-site-kit-wp | js |
@@ -193,7 +193,7 @@ namespace Nethermind.Core.Collections
public T Current => _array[_index];
- object IEnumerator.Current => Current;
+ object IEnumerator.Current => Current!;
public void Dispose() { }
} | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
//
using System;
using System.Buffers;
using System.Collections;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;
namespace Nethermind.Core.Collections
{
public class ArrayPoolList<T> : IList<T>, IDisposable
{
private readonly ArrayPool<T> _arrayPool;
private T[] _array;
private int _count = 0;
private int _capacity;
private bool _disposed;
public ArrayPoolList(int capacity) : this(ArrayPool<T>.Shared, capacity)
{
}
public ArrayPoolList(ArrayPool<T> arrayPool, int capacity)
{
_arrayPool = arrayPool;
_array = arrayPool.Rent(capacity);
_capacity = _array.Length;
}
public IEnumerator<T> GetEnumerator()
{
GuardDispose();
return new ArrayPoolListEnumerator(_array, _count);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void GuardDispose()
{
if (_disposed) throw new ObjectDisposedException(nameof(ArrayPoolList<T>));
}
IEnumerator IEnumerable.GetEnumerator()
{
return GetEnumerator();
}
public void Add(T item)
{
GuardResize();
_array[_count++] = item;
}
public void Clear()
{
_count = 0;
}
public bool Contains(T item)
{
GuardDispose();
int indexOf = Array.IndexOf(_array, item);
return indexOf >= 0 && indexOf < _count;
}
public void CopyTo(T[] array, int arrayIndex)
{
GuardDispose();
_array.AsMemory(0, _count).CopyTo(array.AsMemory(arrayIndex));
}
public int Count => _count;
public int Capacity => _capacity;
public bool IsReadOnly => false;
public int IndexOf(T item)
{
GuardDispose();
int indexOf = Array.IndexOf(_array, item);
return indexOf < _count ? indexOf : -1;
}
public void Insert(int index, T item)
{
GuardResize();
GuardIndex(index, allowEqualToCount: true);
_array.AsMemory(index, _count - index).CopyTo(_array.AsMemory(index + 1));
_array[index] = item;
_count++;
}
private void GuardResize()
{
GuardDispose();
if (_count == _capacity)
{
int newCapacity = _capacity * 2;
T[] newArray = _arrayPool.Rent(newCapacity);
_array.CopyTo(newArray, 0);
T[] oldArray = Interlocked.Exchange(ref _array, newArray);
_capacity = newArray.Length;
_arrayPool.Return(oldArray);
}
}
public bool Remove(T item) => RemoveAtInternal(IndexOf(item), false);
public void RemoveAt(int index) => RemoveAtInternal(index, true);
private bool RemoveAtInternal(int index, bool shouldThrow)
{
bool isValid = GuardIndex(index, shouldThrow);
if (isValid)
{
int start = index + 1;
if (start < _count)
{
_array.AsMemory(start, _count - index).CopyTo(_array.AsMemory(index));
}
_count--;
}
return isValid;
}
public T this[int index]
{
get
{
GuardIndex(index);
return _array[index];
}
set
{
GuardIndex(index);
_array[index] = value;
}
}
private bool GuardIndex(int index, bool shouldThrow = true, bool allowEqualToCount = false)
{
GuardDispose();
if (index < 0)
{
return shouldThrow
? throw new ArgumentOutOfRangeException($"Index {index} is below 0.")
: false;
}
else if (index >= _count && (!allowEqualToCount || index > _count))
{
return shouldThrow
? throw new ArgumentOutOfRangeException($"Index {index} is above count {_count}.")
: false;
}
return true;
}
private struct ArrayPoolListEnumerator : IEnumerator<T>
{
private readonly T[] _array;
private readonly int _count;
private int _index;
public ArrayPoolListEnumerator(T[] array, int count)
{
_array = array;
_count = count;
_index = -1;
}
public bool MoveNext() => ++_index < _count;
public void Reset() => _index = -1;
public T Current => _array[_index];
object IEnumerator.Current => Current;
public void Dispose() { }
}
public void Dispose()
{
if (!_disposed)
{
_arrayPool.Return(_array);
_disposed = true;
}
}
}
}
| 1 | 26,301 | The other way around Current can be null. | NethermindEth-nethermind | .cs |
@@ -230,7 +230,8 @@ class SideMenuBuilder
$communicationMenu = $menu->addChild('communication', ['label' => t('Communication with customer')]);
$communicationMenu->addChild('mail_settings', ['route' => 'admin_mail_setting', 'label' => t('Email settings')]);
- $communicationMenu->addChild('mail_templates', ['route' => 'admin_mail_template', 'label' => t('Email templates')]);
+ $mailTemplates = $communicationMenu->addChild('mail_templates', ['route' => 'admin_mail_template', 'label' => t('Email templates')]);
+ $mailTemplates->addChild('edit_template', ['route' => 'admin_mail_edit', 'label' => t('Editing email template'), 'display' => false]);
$communicationMenu->addChild('order_confirmation', ['route' => 'admin_customercommunication_ordersubmitted', 'label' => t('Order confirmation page')]);
$listsMenu = $menu->addChild('lists', ['label' => t('Lists and nomenclatures')]); | 1 | <?php
namespace Shopsys\FrameworkBundle\Model\AdminNavigation;
use Knp\Menu\FactoryInterface;
use Knp\Menu\ItemInterface;
use Shopsys\FrameworkBundle\Component\Domain\Domain;
use Shopsys\FrameworkBundle\Model\Security\Roles;
use Symfony\Component\EventDispatcher\EventDispatcherInterface;
use Symfony\Component\Security\Core\Authorization\AuthorizationCheckerInterface;
class SideMenuBuilder
{
/**
* @var \Knp\Menu\FactoryInterface
*/
protected $menuFactory;
/**
* @var \Shopsys\FrameworkBundle\Component\Domain\Domain
*/
protected $domain;
/**
* @var \Symfony\Component\Security\Core\Authorization\AuthorizationCheckerInterface
*/
protected $authorizationChecker;
/**
* @var \Symfony\Component\EventDispatcher\EventDispatcherInterface
*/
protected $eventDispatcher;
/**
* @param \Knp\Menu\FactoryInterface $menuFactory
* @param \Shopsys\FrameworkBundle\Component\Domain\Domain $domain
* @param \Symfony\Component\Security\Core\Authorization\AuthorizationCheckerInterface $authorizationChecker
* @param \Symfony\Component\EventDispatcher\EventDispatcherInterface $eventDispatcher
*/
public function __construct(
FactoryInterface $menuFactory,
Domain $domain,
AuthorizationCheckerInterface $authorizationChecker,
EventDispatcherInterface $eventDispatcher
) {
$this->menuFactory = $menuFactory;
$this->domain = $domain;
$this->authorizationChecker = $authorizationChecker;
$this->eventDispatcher = $eventDispatcher;
}
/**
* @return \Knp\Menu\ItemInterface
*/
public function createMenu(): ItemInterface
{
$menu = $this->menuFactory->createItem('root');
$menu->addChild($this->createDashboardMenu());
$menu->addChild($this->createOrdersMenu());
$menu->addChild($this->createCustomersMenu());
$menu->addChild($this->createProductsMenu());
$menu->addChild($this->createPricingMenu());
$menu->addChild($this->createMarketingMenu());
$menu->addChild($this->createAdministratorsMenu());
$menu->addChild($this->createSettingsMenu());
$this->dispatchConfigureMenuEvent(ConfigureMenuEvent::SIDE_MENU_ROOT, $menu);
return $menu;
}
/**
* @return \Knp\Menu\ItemInterface
*/
protected function createDashboardMenu(): ItemInterface
{
$menu = $this->menuFactory->createItem('dashboard', ['route' => 'admin_default_dashboard', 'label' => t('Dashboard')]);
$menu->setExtra('icon', 'house');
$this->dispatchConfigureMenuEvent(ConfigureMenuEvent::SIDE_MENU_DASHBOARD, $menu);
return $menu;
}
/**
* @return \Knp\Menu\ItemInterface
*/
protected function createOrdersMenu(): ItemInterface
{
$menu = $this->menuFactory->createItem('orders', ['route' => 'admin_order_list', 'label' => t('Orders')]);
$menu->setExtra('icon', 'document-copy');
$menu->addChild('edit', ['route' => 'admin_order_edit', 'label' => t('Editing order'), 'display' => false]);
$this->dispatchConfigureMenuEvent(ConfigureMenuEvent::SIDE_MENU_ORDERS, $menu);
return $menu;
}
/**
* @return \Knp\Menu\ItemInterface
*/
protected function createCustomersMenu(): ItemInterface
{
$menu = $this->menuFactory->createItem('customers', ['route' => 'admin_customer_list', 'label' => t('Customers')]);
$menu->setExtra('icon', 'person-public');
$menu->addChild('new', ['route' => 'admin_customer_new', 'label' => t('New customer'), 'display' => false]);
$menu->addChild('edit', ['route' => 'admin_customer_edit', 'label' => t('Editing customer'), 'display' => false]);
$this->dispatchConfigureMenuEvent(ConfigureMenuEvent::SIDE_MENU_CUSTOMERS, $menu);
return $menu;
}
/**
* @return \Knp\Menu\ItemInterface
*/
protected function createProductsMenu(): ItemInterface
{
$menu = $this->menuFactory->createItem('products', ['label' => t('Products')]);
$menu->setExtra('icon', 'cart');
$productsMenu = $menu->addChild('products', ['route' => 'admin_product_list', 'label' => t('Products overview')]);
$productsMenu->addChild('new', ['route' => 'admin_product_new', 'label' => t('New product'), 'display' => false]);
$productsMenu->addChild('edit', ['route' => 'admin_product_edit', 'label' => t('Editing product'), 'display' => false]);
$productsMenu->addChild('new_variant', ['route' => 'admin_product_createvariant', 'label' => t('Create variant'), 'display' => false]);
$categoriesMenu = $menu->addChild('categories', ['route' => 'admin_category_list', 'label' => t('Categories')]);
$categoriesMenu->addChild('new', ['route' => 'admin_category_new', 'label' => t('New category'), 'display' => false]);
$categoriesMenu->addChild('edit', ['route' => 'admin_category_edit', 'label' => t('Editing category'), 'display' => false]);
$this->dispatchConfigureMenuEvent(ConfigureMenuEvent::SIDE_MENU_PRODUCTS, $menu);
return $menu;
}
/**
* @return \Knp\Menu\ItemInterface
*/
protected function createPricingMenu(): ItemInterface
{
$menu = $this->menuFactory->createItem('pricing', ['label' => t('Pricing')]);
$menu->setExtra('icon', 'tag');
$menu->addChild('pricing_groups', ['route' => 'admin_pricinggroup_list', 'label' => t('Pricing groups')]);
$menu->addChild('vat', ['route' => 'admin_vat_list', 'label' => t('VAT')]);
$menu->addChild('free_transport_and_payment', ['route' => 'admin_transportandpayment_freetransportandpaymentlimit', 'label' => t('Free shipping and payment')]);
$menu->addChild('currencies', ['route' => 'admin_currency_list', 'label' => t('Currencies and rounding')]);
$promoCodesMenu = $menu->addChild('promo_codes', ['route' => 'admin_promocode_list', 'label' => t('Promo codes')]);
$promoCodesMenu->addChild('new', ['route' => 'admin_promocode_new', 'label' => t('New promo code'), 'display' => false]);
$promoCodesMenu->addChild('edit', ['route' => 'admin_promocode_edit', 'label' => t('Editing promo code'), 'display' => false]);
$this->dispatchConfigureMenuEvent(ConfigureMenuEvent::SIDE_MENU_PRICING, $menu);
return $menu;
}
/**
* @return \Knp\Menu\ItemInterface
*/
protected function createMarketingMenu(): ItemInterface
{
$menu = $this->menuFactory->createItem('marketing', ['label' => t('Marketing')]);
$menu->setExtra('icon', 'chart-piece');
$articlesMenu = $menu->addChild('articles', ['route' => 'admin_article_list', 'label' => t('Articles overview')]);
$articlesMenu->addChild('new', ['route' => 'admin_article_new', 'label' => t('New article'), 'display' => false]);
$articlesMenu->addChild('edit', ['route' => 'admin_article_edit', 'label' => t('Editing article'), 'display' => false]);
$sliderMenu = $menu->addChild('slider', ['route' => 'admin_slider_list', 'label' => t('Slider on main page')]);
$sliderMenu->addChild('new_page', ['route' => 'admin_slider_new', 'label' => t('New page'), 'display' => false]);
$sliderMenu->addChild('edit_page', ['route' => 'admin_slider_edit', 'label' => t('Editing page'), 'display' => false]);
$menu->addChild('top_products', ['route' => 'admin_topproduct_list', 'label' => t('Main page products')]);
$menu->addChild('top_categories', ['route' => 'admin_topcategory_list', 'label' => t('Popular categories')]);
$advertsMenu = $menu->addChild('adverts', ['route' => 'admin_advert_list', 'label' => t('Advertising system')]);
$advertsMenu->addChild('new', ['route' => 'admin_advert_new', 'label' => t('New advertising'), 'display' => false]);
$advertsMenu->addChild('edit', ['route' => 'admin_advert_edit', 'label' => t('Editing advertising'), 'display' => false]);
$menu->addChild('feeds', ['route' => 'admin_feed_list', 'label' => t('XML Feeds')]);
$bestsellingProductsMenu = $menu->addChild('bestselling_products', ['route' => 'admin_bestsellingproduct_list', 'label' => t('Bestsellers')]);
$bestsellingProductsMenu->addChild('edit', ['route' => 'admin_bestsellingproduct_detail', 'label' => t('Editing bestseller'), 'display' => false]);
$menu->addChild('newsletter', ['route' => 'admin_newsletter_list', 'label' => t('Email newsletter')]);
$this->dispatchConfigureMenuEvent(ConfigureMenuEvent::SIDE_MENU_MARKETING, $menu);
return $menu;
}
/**
* @return \Knp\Menu\ItemInterface
*/
protected function createAdministratorsMenu(): ItemInterface
{
$menu = $this->menuFactory->createItem('administrators', ['route' => 'admin_administrator_list', 'label' => t('Administrators')]);
$menu->setExtra('icon', 'person-door-man');
$menu->addChild('new', ['route' => 'admin_administrator_new', 'label' => t('New administrator'), 'display' => false]);
$menu->addChild('edit', ['route' => 'admin_administrator_edit', 'label' => t('Editing administrator'), 'display' => false]);
$this->dispatchConfigureMenuEvent(ConfigureMenuEvent::SIDE_MENU_ADMINISTRATORS, $menu);
return $menu;
}
/**
* @return \Knp\Menu\ItemInterface
*/
protected function createSettingsMenu(): ItemInterface
{
$menu = $this->menuFactory->createItem('settings', ['label' => t('Settings')]);
$menu->setExtra('icon', 'gear');
$identificationMenu = $menu->addChild('identification', ['label' => t('E-shop identification')]);
if ($this->domain->isMultidomain()) {
$domainsMenu = $identificationMenu->addChild('domains', ['route' => 'admin_domain_list', 'label' => t('E-shop identification')]);
$domainsMenu->addChild('edit', ['route' => 'admin_domain_edit', 'label' => t('Editing domain'), 'display' => false]);
}
$identificationMenu->addChild('shop_info', ['route' => 'admin_shopinfo_setting', 'label' => t('Operator information')]);
$legalMenu = $menu->addChild('legal', ['label' => t('Legal conditions')]);
$legalMenu->addChild('legal_conditions', ['route' => 'admin_legalconditions_setting', 'label' => t('Legal conditions')]);
$legalMenu->addChild('personal_data', ['route' => 'admin_personaldata_setting', 'label' => t('Personal data access')]);
$legalMenu->addChild('cookies', ['route' => 'admin_cookies_setting', 'label' => t('Cookies information')]);
$communicationMenu = $menu->addChild('communication', ['label' => t('Communication with customer')]);
$communicationMenu->addChild('mail_settings', ['route' => 'admin_mail_setting', 'label' => t('Email settings')]);
$communicationMenu->addChild('mail_templates', ['route' => 'admin_mail_template', 'label' => t('Email templates')]);
$communicationMenu->addChild('order_confirmation', ['route' => 'admin_customercommunication_ordersubmitted', 'label' => t('Order confirmation page')]);
$listsMenu = $menu->addChild('lists', ['label' => t('Lists and nomenclatures')]);
$transportsAndPaymentsMenu = $listsMenu->addChild('transports_and_payments', ['route' => 'admin_transportandpayment_list', 'label' => t('Shippings and payments')]);
$transportsAndPaymentsMenu->addChild('new_transport', ['route' => 'admin_transport_new', 'label' => t('New shipping'), 'display' => false]);
$transportsAndPaymentsMenu->addChild('edit_transport', ['route' => 'admin_transport_edit', 'label' => t('Editing shipping'), 'display' => false]);
$transportsAndPaymentsMenu->addChild('new_payment', ['route' => 'admin_payment_new', 'label' => t('New payment'), 'display' => false]);
$transportsAndPaymentsMenu->addChild('edit_payment', ['route' => 'admin_payment_edit', 'label' => t('Editing payment'), 'display' => false]);
$listsMenu->addChild('availabilities', ['route' => 'admin_availability_list', 'label' => t('Availability')]);
$listsMenu->addChild('flags', ['route' => 'admin_flag_list', 'label' => t('Flags')]);
$listsMenu->addChild('parameters', ['route' => 'admin_parameter_list', 'label' => t('Parameters')]);
$listsMenu->addChild('order_statuses', ['route' => 'admin_orderstatus_list', 'label' => t('Status of orders')]);
$brandsMenu = $listsMenu->addChild('brands', ['route' => 'admin_brand_list', 'label' => t('Brands')]);
$brandsMenu->addChild('new', ['route' => 'admin_brand_new', 'label' => t('New brand'), 'display' => false]);
$brandsMenu->addChild('edit', ['route' => 'admin_brand_edit', 'label' => t('Editing brand'), 'display' => false]);
$listsMenu->addChild('units', ['route' => 'admin_unit_list', 'label' => t('Units')]);
$countriesMenu = $listsMenu->addChild('countries', ['route' => 'admin_country_list', 'label' => t('Countries')]);
$countriesMenu->addChild('new', ['route' => 'admin_country_new', 'label' => t('New country'), 'display' => false]);
$countriesMenu->addChild('edit', ['route' => 'admin_country_edit', 'label' => t('Editing country'), 'display' => false]);
$imagesMenu = $menu->addChild('images', ['label' => t('Image size')]);
$imagesMenu->addChild('sizes', ['route' => 'admin_image_overview', 'label' => t('Image size')]);
$seoMenu = $menu->addChild('seo', ['label' => t('SEO')]);
$seoMenu->addChild('seo', ['route' => 'admin_seo_index', 'label' => t('SEO')]);
$contactFormSettingsMenu = $menu->addChild('contact_form_settings', ['label' => t('Contact form')]);
$contactFormSettingsMenu->addChild('contact_form_settings', ['route' => 'admin_contactformsettings_index', 'label' => t('Contact form')]);
if ($this->authorizationChecker->isGranted(Roles::ROLE_SUPER_ADMIN)) {
$superadminMenu = $menu->addChild('superadmin', ['label' => t('Superadmin')]);
$superadminMenu->setExtra('superadmin', true);
$superadminMenu->addChild('modules', ['route' => 'admin_superadmin_modules', 'label' => t('Modules')]);
$superadminMenu->addChild('errors', ['route' => 'admin_superadmin_errors', 'label' => t('Error messages')]);
$superadminMenu->addChild('pricing', ['route' => 'admin_superadmin_pricing', 'label' => t('Sales including/excluding VAT settings')]);
$superadminMenu->addChild('css_docs', ['route' => 'admin_superadmin_cssdocumentation', 'label' => t('CSS documentation')]);
$superadminMenu->addChild('urls', ['route' => 'admin_superadmin_urls', 'label' => t('URL addresses')]);
}
$externalScriptsMenu = $menu->addChild('external_scripts', ['label' => t('External scripts')]);
$scriptsMenu = $externalScriptsMenu->addChild('scripts', ['route' => 'admin_script_list', 'label' => t('Scripts overview')]);
$scriptsMenu->addChild('new', ['route' => 'admin_script_new', 'label' => t('New script'), 'display' => false]);
$scriptsMenu->addChild('edit', ['route' => 'admin_script_edit', 'label' => t('Editing script'), 'display' => false]);
$externalScriptsMenu->addChild('google_analytics', ['route' => 'admin_script_googleanalytics', 'label' => t('Google analytics')]);
$heurekaMenu = $menu->addChild('heureka', ['label' => t('Heureka - Verified by Customer')]);
$heurekaMenu->addChild('settings', ['route' => 'admin_heureka_setting', 'label' => t('Heureka - Verified by Customer')]);
$this->dispatchConfigureMenuEvent(ConfigureMenuEvent::SIDE_MENU_SETTINGS, $menu);
return $menu;
}
/**
* @param string $eventName
* @param \Knp\Menu\ItemInterface $menu
* @return \Shopsys\FrameworkBundle\Model\AdminNavigation\ConfigureMenuEvent
*/
protected function dispatchConfigureMenuEvent(string $eventName, ItemInterface $menu): ConfigureMenuEvent
{
$event = new ConfigureMenuEvent($this->menuFactory, $menu);
/** @var \Shopsys\FrameworkBundle\Model\AdminNavigation\ConfigureMenuEvent $configureMenuEvent */
$configureMenuEvent = $this->eventDispatcher->dispatch($event, $eventName);
return $configureMenuEvent;
}
}
| 1 | 22,304 | why does it have to be here? Due to breadcrumb navigation? | shopsys-shopsys | php |
@@ -21,10 +21,18 @@ import (
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/openebs/maya/pkg/util"
+ "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
+//TODO: Move to some common function
+func StrToQuantity(capacity string) resource.Quantity {
+ qntCapacity, _ := resource.ParseQuantity(capacity)
+ // fmt.Printf("Error: %v", err)
+ return qntCapacity
+}
+
// TestCreateVolumeTarget is to test cStorVolume creation.
func TestCreateVolumeTarget(t *testing.T) {
testVolumeResource := map[string]struct { | 1 | // Copyright © 2017-2019 The OpenEBS Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package volume
import (
"fmt"
"reflect"
"testing"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/openebs/maya/pkg/util"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// TestCreateVolumeTarget is to test cStorVolume creation.
func TestCreateVolumeTarget(t *testing.T) {
testVolumeResource := map[string]struct {
expectedError error
test *apis.CStorVolume
}{
"img1VolumeResource": {
expectedError: nil,
test: &apis.CStorVolume{
TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{
Name: "testvol1",
UID: types.UID("abc"),
},
Spec: apis.CStorVolumeSpec{
TargetIP: "0.0.0.0",
Capacity: "5G",
Status: "init",
ReplicationFactor: 3,
ConsistencyFactor: 2,
},
},
},
}
FileOperatorVar = util.TestFileOperator{}
UnixSockVar = util.TestUnixSock{}
obtainedErr := CreateVolumeTarget(testVolumeResource["img1VolumeResource"].test)
if testVolumeResource["img1VolumeResource"].expectedError != obtainedErr {
t.Fatalf("Expected: %v, Got: %v", testVolumeResource["img1VolumeResource"].expectedError, obtainedErr)
}
}
// TestCheckValidVolume tests volume related operations.
func TestCheckValidVolume(t *testing.T) {
testVolumeResource := map[string]struct {
expectedError error
test *apis.CStorVolume
}{
"Invalid-volumeResource": {
expectedError: fmt.Errorf("Invalid volume resource"),
test: &apis.CStorVolume{
TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{
Name: "testvol1",
UID: types.UID(""),
},
Spec: apis.CStorVolumeSpec{
TargetIP: "0.0.0.0",
Capacity: "5G",
Status: "init",
ReplicationFactor: 3,
ConsistencyFactor: 2,
},
},
},
"Invalid-cstorControllerIPEmpty": {
expectedError: fmt.Errorf("targetIP cannot be empty"),
test: &apis.CStorVolume{
TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{
Name: "testvol1",
UID: types.UID("123"),
},
Spec: apis.CStorVolumeSpec{
TargetIP: "",
Capacity: "5G",
Status: "init",
ReplicationFactor: 3,
ConsistencyFactor: 2,
},
},
},
"Invalid-volumeNameEmpty": {
expectedError: fmt.Errorf("volumeName cannot be empty"),
test: &apis.CStorVolume{
TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{
Name: "",
UID: types.UID("123"),
},
Spec: apis.CStorVolumeSpec{
TargetIP: "0.0.0.0",
Capacity: "5G",
Status: "init",
ReplicationFactor: 3,
ConsistencyFactor: 2,
},
},
},
"Invalid-volumeCapacityEmpty": {
expectedError: fmt.Errorf("capacity cannot be empty"),
test: &apis.CStorVolume{
TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{
Name: "testvol1",
UID: types.UID("123"),
},
Spec: apis.CStorVolumeSpec{
TargetIP: "0.0.0.0",
Capacity: "",
Status: "init",
ReplicationFactor: 3,
ConsistencyFactor: 2,
},
},
},
"Invalid-ReplicationFactorEmpty": {
expectedError: fmt.Errorf("replicationFactor cannot be zero"),
test: &apis.CStorVolume{
TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{
Name: "testvol1",
UID: types.UID("123"),
},
Spec: apis.CStorVolumeSpec{
TargetIP: "0.0.0.0",
Capacity: "2G",
Status: "init",
ReplicationFactor: 0,
ConsistencyFactor: 2,
},
},
},
"Invalid-ConsistencyFactorEmpty": {
expectedError: fmt.Errorf("consistencyFactor cannot be zero"),
test: &apis.CStorVolume{
TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{
Name: "testvol1",
UID: types.UID("123"),
},
Spec: apis.CStorVolumeSpec{
TargetIP: "0.0.0.0",
Capacity: "2G",
Status: "init",
ReplicationFactor: 3,
ConsistencyFactor: 0,
},
},
},
"Invalid-ReplicationFactorLessThanConsistencyFactor": {
expectedError: fmt.Errorf("replicationFactor cannot be less than consistencyFactor"),
test: &apis.CStorVolume{
TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{
Name: "testvol1",
UID: types.UID("123"),
},
Spec: apis.CStorVolumeSpec{
TargetIP: "0.0.0.0",
Capacity: "2G",
Status: "init",
ReplicationFactor: 2,
ConsistencyFactor: 3,
},
},
},
}
for desc, ut := range testVolumeResource {
Obtainederr := CheckValidVolume(ut.test)
if Obtainederr != nil {
if Obtainederr.Error() != ut.expectedError.Error() {
t.Fatalf("Desc : %v, Expected error: %v, Got : %v",
desc, ut.expectedError, Obtainederr)
}
}
}
}
func TestExtractReplicaStatusFromJSON(t *testing.T) {
type args struct {
str string
}
tests := map[string]struct {
str string
resp *apis.CVStatus
wantErr bool
}{
"two replicas with one HEALTHY and one BAD status": {
`{
"volumeStatus":[
{
"name" : "pvc-c7f1a961-e0e3-11e8-b49d-42010a800233",
"status": "Healthy",
"replicaStatus" : [
{
"replicaId":"5523611450015704000",
"mode":"HEALTHY",
"checkpointedIOSeq":"0",
"inflightRead":"0",
"inflightWrite":"0",
"inflightSync":"0",
"upTime":1275
},
{
"replicaId":"23523553",
"mode":"BAD",
"checkpointedIOSeq":"0",
"inflightRead":"0",
"inflightWrite":"0",
"inflightSync":"0",
"upTime":1375
}
]
}
]
}`,
&apis.CVStatus{
Name: "pvc-c7f1a961-e0e3-11e8-b49d-42010a800233",
Status: "Healthy",
ReplicaStatuses: []apis.ReplicaStatus{
{
ID: "5523611450015704000",
Mode: "HEALTHY",
CheckpointedIOSeq: "0",
InflightRead: "0",
InflightWrite: "0",
InflightSync: "0",
UpTime: 1275,
},
{
ID: "23523553",
Mode: "BAD",
CheckpointedIOSeq: "0",
InflightRead: "0",
InflightWrite: "0",
InflightSync: "0",
UpTime: 1375,
},
},
},
false,
},
"incorrect value in replicaId": {
`{
"volumeStatus":[
{
"name" : "pvc-c7f1a961-e0e3-11e8-b49d-42010a800233",
"status": "Healthy",
"replicaStatus" : [
{
"replicaId":5523611450015704000,
"mode":"HEALTHY",
"checkpointedIOSeq":"0",
"inflightRead":"0",
"inflightWrite":"0",
"inflightSync":"0",
"upTime":1275
},
]
}
]
}`,
&apis.CVStatus{
Name: "pvc-c7f1a961-e0e3-11e8-b49d-42010a800233",
Status: "Healthy",
ReplicaStatuses: []apis.ReplicaStatus{
{
ID: "5523611450015704000",
Mode: "HEALTHY",
CheckpointedIOSeq: "0",
InflightRead: "0",
InflightWrite: "0",
InflightSync: "0",
UpTime: 1275,
},
},
},
true,
},
"valid single replica healthy status": {
`{
"volumeStatus":[
{
"name" : "pvc-c7f1a961-e0e3-11e8-b49d-42010a800233",
"status": "Healthy",
"replicaStatus" : [
{
"replicaId":5523611450015704000,
"Mode":"HEALTHY",
"checkpointedIOSeq":"0",
"Address" : "192.168.1.23",
"inflightRead":"0",
"inflightWrite":"0",
"inflightSync":"0",
"upTime":1275
},
]
}
]
}`,
&apis.CVStatus{
Name: "pvc-c7f1a961-e0e3-11e8-b49d-42010a800233",
Status: "Healthy",
ReplicaStatuses: []apis.ReplicaStatus{
{
ID: "5523611450015704000",
Mode: "HEALTHY",
CheckpointedIOSeq: "0",
InflightRead: "0",
InflightWrite: "0",
InflightSync: "0",
UpTime: 1275,
},
},
},
true,
},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
got, err := extractReplicaStatusFromJSON(mock.str)
if err != nil {
if !mock.wantErr {
t.Errorf("extractReplicaStatusFromJSON() error = %v, wantErr %v", err != nil, mock.wantErr)
}
} else {
if !reflect.DeepEqual(got, mock.resp) {
t.Errorf("extractReplicaStatusFromJSON() = %v, want %v", got, mock.resp)
}
}
})
}
}
| 1 | 17,092 | I fear moving this to some common func. We are swallowing the error here. This might be ok in UT but not in actual source code. | openebs-maya | go |
@@ -716,12 +716,9 @@ class TestMessagesStore(object):
with pytest.raises(InvalidMessageError) as cm:
store.add_renamed_message(
'W1234', 'old-msg-symbol', 'duplicate-keyword-arg')
- assert str(cm.value) == "Message id 'W1234' is already defined"
- # conflicting message symbol
- with pytest.raises(InvalidMessageError) as cm:
- store.add_renamed_message(
- 'W1337', 'msg-symbol', 'duplicate-keyword-arg')
- assert str(cm.value) == "Message symbol 'msg-symbol' is already defined"
+ expected = "Message id 'W1234' cannot have both 'msg-symbol' and \
+'old-msg-symbol' as symbolic name."
+ assert str(cm.value) == expected
def test_renamed_message_register(self, store):
assert 'msg-symbol' == store.check_message_id('W0001').symbol | 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 Charles Hebert <[email protected]>
# Copyright (c) 2011-2014 Google, Inc.
# Copyright (c) 2012 Kevin Jing Qiu <[email protected]>
# Copyright (c) 2012 Anthony VEREZ <[email protected]>
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2016 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Noam Yorav-Raphael <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016-2017 Derek Gustafson <[email protected]>
# Copyright (c) 2016 Glenn Matthews <[email protected]>
# Copyright (c) 2016 Glenn Matthews <[email protected]>
# Copyright (c) 2017 Craig Citro <[email protected]>
# Copyright (c) 2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2017 Ville Skyttä <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
from contextlib import contextmanager
import sys
import os
import re
import tempfile
from shutil import rmtree
from os import getcwd, chdir
from os.path import join, basename, dirname, isdir, abspath, sep
import six
from six.moves import reload_module
from pylint import config, lint
from pylint.lint import PyLinter, Run, preprocess_options, ArgumentPreprocessingError
from pylint.utils import MSG_STATE_SCOPE_CONFIG, MSG_STATE_SCOPE_MODULE, MSG_STATE_CONFIDENCE, \
MessagesStore, MessageDefinition, FileState, tokenize_module
from pylint.exceptions import InvalidMessageError, UnknownMessageError
import pylint.testutils as testutils
from pylint.reporters import text
from pylint import checkers
from pylint.checkers.utils import check_messages
from pylint import exceptions
from pylint import interfaces
import pytest
if os.name == 'java':
if os._name == 'nt':
HOME = 'USERPROFILE'
else:
HOME = 'HOME'
else:
if sys.platform == 'win32':
HOME = 'USERPROFILE'
else:
HOME = 'HOME'
try:
PYPY_VERSION_INFO = sys.pypy_version_info
except AttributeError:
PYPY_VERSION_INFO = None
@contextmanager
def fake_home():
folder = tempfile.mkdtemp('fake-home')
old_home = os.environ.get(HOME)
try:
os.environ[HOME] = folder
yield
finally:
os.environ.pop('PYLINTRC', '')
if old_home is None:
del os.environ[HOME]
else:
os.environ[HOME] = old_home
rmtree(folder, ignore_errors=True)
def remove(file):
try:
os.remove(file)
except OSError:
pass
HERE = abspath(dirname(__file__))
INPUTDIR = join(HERE, 'input')
@contextmanager
def tempdir():
"""Create a temp directory and change the current location to it.
This is supposed to be used with a *with* statement.
"""
tmp = tempfile.mkdtemp()
# Get real path of tempfile, otherwise test fail on mac os x
current_dir = getcwd()
chdir(tmp)
abs_tmp = abspath('.')
try:
yield abs_tmp
finally:
chdir(current_dir)
rmtree(abs_tmp)
def create_files(paths, chroot='.'):
"""Creates directories and files found in <path>.
:param paths: list of relative paths to files or directories
:param chroot: the root directory in which paths will be created
>>> from os.path import isdir, isfile
>>> isdir('/tmp/a')
False
>>> create_files(['a/b/foo.py', 'a/b/c/', 'a/b/c/d/e.py'], '/tmp')
>>> isdir('/tmp/a')
True
>>> isdir('/tmp/a/b/c')
True
>>> isfile('/tmp/a/b/c/d/e.py')
True
>>> isfile('/tmp/a/b/foo.py')
True
"""
dirs, files = set(), set()
for path in paths:
path = join(chroot, path)
filename = basename(path)
# path is a directory path
if filename == '':
dirs.add(path)
# path is a filename path
else:
dirs.add(dirname(path))
files.add(path)
for dirpath in dirs:
if not isdir(dirpath):
os.makedirs(dirpath)
for filepath in files:
open(filepath, 'w').close()
@pytest.fixture
def fake_path():
orig = list(sys.path)
fake = [1, 2, 3]
sys.path[:] = fake
yield fake
sys.path[:] = orig
def test_no_args(fake_path):
with lint.fix_import_path([]):
assert sys.path == ["."] + fake_path
assert sys.path == fake_path
@pytest.mark.parametrize("case", [
['a/b/'],
['a/b'],
['a/b/__init__.py'],
['a/'],
['a'],
])
def test_one_arg(fake_path, case):
with tempdir() as chroot:
create_files(['a/b/__init__.py'])
expected = [join(chroot, 'a')] + ["."] + fake_path
assert sys.path == fake_path
with lint.fix_import_path(case):
assert sys.path == expected
assert sys.path == fake_path
@pytest.mark.parametrize("case", [
['a/b', 'a/c'],
['a/c/', 'a/b/'],
['a/b/__init__.py', 'a/c/__init__.py'],
['a', 'a/c/__init__.py'],
])
def test_two_similar_args(fake_path, case):
with tempdir() as chroot:
create_files(['a/b/__init__.py', 'a/c/__init__.py'])
expected = [join(chroot, 'a')] + ["."] + fake_path
assert sys.path == fake_path
with lint.fix_import_path(case):
assert sys.path == expected
assert sys.path == fake_path
@pytest.mark.parametrize("case", [
['a/b/c/__init__.py', 'a/d/__init__.py', 'a/e/f.py'],
['a/b/c', 'a', 'a/e'],
['a/b/c', 'a', 'a/b/c', 'a/e', 'a'],
])
def test_more_args(fake_path, case):
with tempdir() as chroot:
create_files(['a/b/c/__init__.py', 'a/d/__init__.py', 'a/e/f.py'])
expected = [
join(chroot, suffix)
for suffix in [sep.join(('a', 'b')), 'a', sep.join(('a', 'e'))]
] + ["."] + fake_path
assert sys.path == fake_path
with lint.fix_import_path(case):
assert sys.path == expected
assert sys.path == fake_path
@pytest.fixture(scope='module')
def disable(disable):
return ['I']
@pytest.fixture(scope='module')
def reporter(reporter):
return testutils.TestReporter
@pytest.fixture
def init_linter(linter):
linter.open()
linter.set_current_module('toto')
linter.file_state = FileState('toto')
return linter
def test_pylint_visit_method_taken_in_account(linter):
class CustomChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = 'custom'
msgs = {'W9999': ('', 'custom', '')}
@check_messages('custom')
def visit_class(self, _):
pass
linter.register_checker(CustomChecker(linter))
linter.open()
out = six.moves.StringIO()
linter.set_reporter(text.TextReporter(out))
linter.check('abc')
def test_enable_message(init_linter):
linter = init_linter
assert linter.is_message_enabled('W0101')
assert linter.is_message_enabled('W0102')
linter.disable('W0101', scope='package')
linter.disable('W0102', scope='module', line=1)
assert not linter.is_message_enabled('W0101')
assert not linter.is_message_enabled('W0102', 1)
linter.set_current_module('tutu')
assert not linter.is_message_enabled('W0101')
assert linter.is_message_enabled('W0102')
linter.enable('W0101', scope='package')
linter.enable('W0102', scope='module', line=1)
assert linter.is_message_enabled('W0101')
assert linter.is_message_enabled('W0102', 1)
def test_enable_message_category(init_linter):
linter = init_linter
assert linter.is_message_enabled('W0101')
assert linter.is_message_enabled('C0202')
linter.disable('W', scope='package')
linter.disable('C', scope='module', line=1)
assert not linter.is_message_enabled('W0101')
assert linter.is_message_enabled('C0202')
assert not linter.is_message_enabled('C0202', line=1)
linter.set_current_module('tutu')
assert not linter.is_message_enabled('W0101')
assert linter.is_message_enabled('C0202')
linter.enable('W', scope='package')
linter.enable('C', scope='module', line=1)
assert linter.is_message_enabled('W0101')
assert linter.is_message_enabled('C0202')
assert linter.is_message_enabled('C0202', line=1)
def test_message_state_scope(init_linter):
class FakeConfig(object):
confidence = ['HIGH']
linter = init_linter
linter.disable('C0202')
assert MSG_STATE_SCOPE_CONFIG == linter.get_message_state_scope('C0202')
linter.disable('W0101', scope='module', line=3)
assert MSG_STATE_SCOPE_CONFIG == linter.get_message_state_scope('C0202')
assert MSG_STATE_SCOPE_MODULE == linter.get_message_state_scope('W0101', 3)
linter.enable('W0102', scope='module', line=3)
assert MSG_STATE_SCOPE_MODULE == linter.get_message_state_scope('W0102', 3)
linter.config = FakeConfig()
assert MSG_STATE_CONFIDENCE == \
linter.get_message_state_scope('this-is-bad',
confidence=interfaces.INFERENCE)
def test_enable_message_block(init_linter):
linter = init_linter
linter.open()
filepath = join(INPUTDIR, 'func_block_disable_msg.py')
linter.set_current_module('func_block_disable_msg')
astroid = linter.get_ast(filepath, 'func_block_disable_msg')
linter.process_tokens(tokenize_module(astroid))
fs = linter.file_state
fs.collect_block_lines(linter.msgs_store, astroid)
# global (module level)
assert linter.is_message_enabled('W0613')
assert linter.is_message_enabled('E1101')
# meth1
assert linter.is_message_enabled('W0613', 13)
# meth2
assert not linter.is_message_enabled('W0613', 18)
# meth3
assert not linter.is_message_enabled('E1101', 24)
assert linter.is_message_enabled('E1101', 26)
# meth4
assert not linter.is_message_enabled('E1101', 32)
assert linter.is_message_enabled('E1101', 36)
# meth5
assert not linter.is_message_enabled('E1101', 42)
assert not linter.is_message_enabled('E1101', 43)
assert linter.is_message_enabled('E1101', 46)
assert not linter.is_message_enabled('E1101', 49)
assert not linter.is_message_enabled('E1101', 51)
# meth6
assert not linter.is_message_enabled('E1101', 57)
assert linter.is_message_enabled('E1101', 61)
assert not linter.is_message_enabled('E1101', 64)
assert not linter.is_message_enabled('E1101', 66)
assert linter.is_message_enabled('E0602', 57)
assert linter.is_message_enabled('E0602', 61)
assert not linter.is_message_enabled('E0602', 62)
assert linter.is_message_enabled('E0602', 64)
assert linter.is_message_enabled('E0602', 66)
# meth7
assert not linter.is_message_enabled('E1101', 70)
assert linter.is_message_enabled('E1101', 72)
assert linter.is_message_enabled('E1101', 75)
assert linter.is_message_enabled('E1101', 77)
fs = linter.file_state
assert 17 == fs._suppression_mapping['W0613', 18]
assert 30 == fs._suppression_mapping['E1101', 33]
assert ('E1101', 46) not in fs._suppression_mapping
assert 1 == fs._suppression_mapping['C0302', 18]
assert 1 == fs._suppression_mapping['C0302', 50]
# This is tricky. While the disable in line 106 is disabling
# both 108 and 110, this is usually not what the user wanted.
# Therefore, we report the closest previous disable comment.
assert 106 == fs._suppression_mapping['E1101', 108]
assert 109 == fs._suppression_mapping['E1101', 110]
def test_enable_by_symbol(init_linter):
"""messages can be controlled by symbolic names.
The state is consistent across symbols and numbers.
"""
linter = init_linter
assert linter.is_message_enabled('W0101')
assert linter.is_message_enabled('unreachable')
assert linter.is_message_enabled('W0102')
assert linter.is_message_enabled('dangerous-default-value')
linter.disable('unreachable', scope='package')
linter.disable('dangerous-default-value', scope='module', line=1)
assert not linter.is_message_enabled('W0101')
assert not linter.is_message_enabled('unreachable')
assert not linter.is_message_enabled('W0102', 1)
assert not linter.is_message_enabled('dangerous-default-value', 1)
linter.set_current_module('tutu')
assert not linter.is_message_enabled('W0101')
assert not linter.is_message_enabled('unreachable')
assert linter.is_message_enabled('W0102')
assert linter.is_message_enabled('dangerous-default-value')
linter.enable('unreachable', scope='package')
linter.enable('dangerous-default-value', scope='module', line=1)
assert linter.is_message_enabled('W0101')
assert linter.is_message_enabled('unreachable')
assert linter.is_message_enabled('W0102', 1)
assert linter.is_message_enabled('dangerous-default-value', 1)
def test_enable_report(linter):
assert linter.report_is_enabled('RP0001')
linter.disable('RP0001')
assert not linter.report_is_enabled('RP0001')
linter.enable('RP0001')
assert linter.report_is_enabled('RP0001')
def test_report_output_format_aliased(linter):
text.register(linter)
linter.set_option('output-format', 'text')
assert linter.reporter.__class__.__name__ == 'TextReporter'
def test_set_unsupported_reporter(linter):
text.register(linter)
with pytest.raises(exceptions.InvalidReporterError):
linter.set_option('output-format', 'missing.module.Class')
def test_set_option_1(linter):
linter.set_option('disable', 'C0111,W0234')
assert not linter.is_message_enabled('C0111')
assert not linter.is_message_enabled('W0234')
assert linter.is_message_enabled('W0113')
assert not linter.is_message_enabled('missing-docstring')
assert not linter.is_message_enabled('non-iterator-returned')
def test_set_option_2(linter):
linter.set_option('disable', ('C0111', 'W0234'))
assert not linter.is_message_enabled('C0111')
assert not linter.is_message_enabled('W0234')
assert linter.is_message_enabled('W0113')
assert not linter.is_message_enabled('missing-docstring')
assert not linter.is_message_enabled('non-iterator-returned')
def test_enable_checkers(linter):
linter.disable('design')
assert not ('design' in [c.name for c in linter.prepare_checkers()])
linter.enable('design')
assert 'design' in [c.name for c in linter.prepare_checkers()]
def test_errors_only(linter):
linter.error_mode()
checkers = linter.prepare_checkers()
checker_names = set(c.name for c in checkers)
should_not = set(('design', 'format', 'metrics',
'miscellaneous', 'similarities'))
assert set() == should_not & checker_names
def test_disable_similar(linter):
linter.set_option('disable', 'RP0801')
linter.set_option('disable', 'R0801')
assert not ('similarities' in [c.name for c in linter.prepare_checkers()])
def test_disable_alot(linter):
"""check that we disabled a lot of checkers"""
linter.set_option('reports', False)
linter.set_option('disable', 'R,C,W')
checker_names = [c.name for c in linter.prepare_checkers()]
for cname in ('design', 'metrics', 'similarities'):
assert not (cname in checker_names), cname
def test_addmessage(linter):
linter.set_reporter(testutils.TestReporter())
linter.open()
linter.set_current_module('0123')
linter.add_message('C0301', line=1, args=(1, 2))
linter.add_message('line-too-long', line=2, args=(3, 4))
assert ['C: 1: Line too long (1/2)', 'C: 2: Line too long (3/4)'] == \
linter.reporter.messages
def test_addmessage_invalid(linter):
linter.set_reporter(testutils.TestReporter())
linter.open()
linter.set_current_module('0123')
with pytest.raises(InvalidMessageError) as cm:
linter.add_message('line-too-long', args=(1, 2))
assert str(cm.value) == "Message C0301 must provide line, got None"
with pytest.raises(InvalidMessageError) as cm:
linter.add_message('line-too-long', line=2, node='fake_node', args=(1, 2))
assert str(cm.value) == "Message C0301 must only provide line, got line=2, node=fake_node"
with pytest.raises(InvalidMessageError) as cm:
linter.add_message('C0321')
assert str(cm.value) == "Message C0321 must provide Node, got None"
def test_init_hooks_called_before_load_plugins():
with pytest.raises(RuntimeError):
Run(['--load-plugins', 'unexistant', '--init-hook', 'raise RuntimeError'])
with pytest.raises(RuntimeError):
Run(['--init-hook', 'raise RuntimeError', '--load-plugins', 'unexistant'])
def test_analyze_explicit_script(linter):
linter.set_reporter(testutils.TestReporter())
linter.check(os.path.join(os.path.dirname(__file__), 'data', 'ascript'))
assert ['C: 2: Line too long (175/100)'] == linter.reporter.messages
def test_python3_checker_disabled(linter):
checker_names = [c.name for c in linter.prepare_checkers()]
assert 'python3' not in checker_names
linter.set_option('enable', 'python3')
checker_names = [c.name for c in linter.prepare_checkers()]
assert 'python3' in checker_names
def test_full_documentation(linter):
out = six.StringIO()
linter.print_full_documentation(out)
output = out.getvalue()
# A few spot checks only
for re_str in [
# autogenerated text
"^Pylint global options and switches$",
"Verbatim name of the checker is ``python3``",
# messages
"^:old-octal-literal \\(E1608\\):",
# options
"^:dummy-variables-rgx:",
]:
regexp = re.compile(re_str, re.MULTILINE)
assert re.search(regexp, output)
@pytest.fixture
def pop_pylintrc():
os.environ.pop('PYLINTRC', None)
@pytest.mark.usefixtures("pop_pylintrc")
def test_pylint_home():
uhome = os.path.expanduser('~')
if uhome == '~':
expected = '.pylint.d'
else:
expected = os.path.join(uhome, '.pylint.d')
assert config.PYLINT_HOME == expected
try:
pylintd = join(tempfile.gettempdir(), '.pylint.d')
os.environ['PYLINTHOME'] = pylintd
try:
reload_module(config)
assert config.PYLINT_HOME == pylintd
finally:
try:
os.remove(pylintd)
except:
pass
finally:
del os.environ['PYLINTHOME']
@pytest.mark.skipif(PYPY_VERSION_INFO,
reason="TOX runs this test from within the repo and finds "
"the project's pylintrc.")
@pytest.mark.usefixtures("pop_pylintrc")
def test_pylintrc():
with fake_home():
current_dir = getcwd()
chdir(os.path.dirname(os.path.abspath(sys.executable)))
try:
assert config.find_pylintrc() is None
os.environ['PYLINTRC'] = join(tempfile.gettempdir(),
'.pylintrc')
assert config.find_pylintrc() is None
os.environ['PYLINTRC'] = '.'
assert config.find_pylintrc() is None
finally:
chdir(current_dir)
reload_module(config)
@pytest.mark.usefixtures("pop_pylintrc")
def test_pylintrc_parentdir():
with tempdir() as chroot:
create_files(['a/pylintrc', 'a/b/__init__.py', 'a/b/pylintrc',
'a/b/c/__init__.py', 'a/b/c/d/__init__.py',
'a/b/c/d/e/.pylintrc'])
with fake_home():
assert config.find_pylintrc() is None
results = {'a' : join(chroot, 'a', 'pylintrc'),
'a/b' : join(chroot, 'a', 'b', 'pylintrc'),
'a/b/c' : join(chroot, 'a', 'b', 'pylintrc'),
'a/b/c/d' : join(chroot, 'a', 'b', 'pylintrc'),
'a/b/c/d/e' : join(chroot, 'a', 'b', 'c', 'd', 'e', '.pylintrc'),
}
for basedir, expected in results.items():
os.chdir(join(chroot, basedir))
assert config.find_pylintrc() == expected
@pytest.mark.usefixtures("pop_pylintrc")
def test_pylintrc_parentdir_no_package():
with tempdir() as chroot:
with fake_home():
create_files(['a/pylintrc', 'a/b/pylintrc', 'a/b/c/d/__init__.py'])
assert config.find_pylintrc() is None
results = {'a' : join(chroot, 'a', 'pylintrc'),
'a/b' : join(chroot, 'a', 'b', 'pylintrc'),
'a/b/c' : None,
'a/b/c/d' : None,
}
for basedir, expected in results.items():
os.chdir(join(chroot, basedir))
assert config.find_pylintrc() == expected
class TestPreprocessOptions(object):
def _callback(self, name, value):
self.args.append((name, value))
def test_value_equal(self):
self.args = []
preprocess_options(['--foo', '--bar=baz', '--qu=ux'],
{'foo': (self._callback, False),
'qu': (self._callback, True)})
assert [('foo', None), ('qu', 'ux')] == self.args
def test_value_space(self):
self.args = []
preprocess_options(['--qu', 'ux'],
{'qu': (self._callback, True)})
assert [('qu', 'ux')] == self.args
def test_error_missing_expected_value(self):
with pytest.raises(ArgumentPreprocessingError):
preprocess_options(['--foo', '--bar', '--qu=ux'],
{'bar': (None, True)})
with pytest.raises(ArgumentPreprocessingError):
preprocess_options(['--foo', '--bar'],
{'bar': (None, True)})
def test_error_unexpected_value(self):
with pytest.raises(ArgumentPreprocessingError):
preprocess_options(['--foo', '--bar=spam', '--qu=ux'],
{'bar': (None, False)})
@pytest.fixture
def store():
store = MessagesStore()
class Checker(object):
name = 'achecker'
msgs = {
'W1234': ('message', 'msg-symbol', 'msg description.',
{'old_names': [('W0001', 'old-symbol')]}),
'E1234': ('Duplicate keyword argument %r in %s call',
'duplicate-keyword-arg',
'Used when a function call passes the same keyword argument multiple times.',
{'maxversion': (2, 6)}),
}
store.register_messages(Checker())
return store
class TestMessagesStore(object):
def _compare_messages(self, desc, msg, checkerref=False):
assert desc == msg.format_help(checkerref=checkerref)
def test_check_message_id(self, store):
assert isinstance(store.check_message_id('W1234'), MessageDefinition)
with pytest.raises(UnknownMessageError):
store.check_message_id('YB12')
def test_message_help(self, store):
msg = store.check_message_id('W1234')
self._compare_messages(
''':msg-symbol (W1234): *message*
msg description. This message belongs to the achecker checker.''',
msg, checkerref=True)
self._compare_messages(
''':msg-symbol (W1234): *message*
msg description.''',
msg, checkerref=False)
def test_message_help_minmax(self, store):
# build the message manually to be python version independent
msg = store.check_message_id('E1234')
self._compare_messages(
''':duplicate-keyword-arg (E1234): *Duplicate keyword argument %r in %s call*
Used when a function call passes the same keyword argument multiple times.
This message belongs to the achecker checker. It can't be emitted when using
Python >= 2.6.''',
msg, checkerref=True)
self._compare_messages(
''':duplicate-keyword-arg (E1234): *Duplicate keyword argument %r in %s call*
Used when a function call passes the same keyword argument multiple times.
This message can't be emitted when using Python >= 2.6.''',
msg, checkerref=False)
def test_list_messages(self, store):
sys.stdout = six.StringIO()
try:
store.list_messages()
output = sys.stdout.getvalue()
finally:
sys.stdout = sys.__stdout__
# cursory examination of the output: we're mostly testing it completes
assert ':msg-symbol (W1234): *message*' in output
def test_add_renamed_message(self, store):
store.add_renamed_message('W1234', 'old-bad-name', 'msg-symbol')
assert 'msg-symbol' == store.check_message_id('W1234').symbol
assert 'msg-symbol' == store.check_message_id('old-bad-name').symbol
def test_add_renamed_message_invalid(self, store):
# conflicting message ID
with pytest.raises(InvalidMessageError) as cm:
store.add_renamed_message(
'W1234', 'old-msg-symbol', 'duplicate-keyword-arg')
assert str(cm.value) == "Message id 'W1234' is already defined"
# conflicting message symbol
with pytest.raises(InvalidMessageError) as cm:
store.add_renamed_message(
'W1337', 'msg-symbol', 'duplicate-keyword-arg')
assert str(cm.value) == "Message symbol 'msg-symbol' is already defined"
def test_renamed_message_register(self, store):
assert 'msg-symbol' == store.check_message_id('W0001').symbol
assert 'msg-symbol' == store.check_message_id('old-symbol').symbol
def test_custom_should_analyze_file():
    '''Check that we can write a custom should_analyze_file that works
    even for arguments.
'''
class CustomPyLinter(PyLinter):
def should_analyze_file(self, modname, path, is_argument=False):
if os.path.basename(path) == 'wrong.py':
return False
return super(CustomPyLinter, self).should_analyze_file(
modname, path, is_argument=is_argument)
package_dir = os.path.join(HERE, 'regrtest_data', 'bad_package')
wrong_file = os.path.join(package_dir, 'wrong.py')
reporter = testutils.TestReporter()
linter = CustomPyLinter()
linter.config.persistent = 0
linter.open()
linter.set_reporter(reporter)
try:
sys.path.append(os.path.dirname(package_dir))
linter.check([package_dir, wrong_file])
finally:
sys.path.pop()
messages = reporter.messages
assert len(messages) == 1
assert 'invalid syntax' in messages[0]
def test_filename_with__init__(init_linter):
# This tracks a regression where a file whose name ends in __init__.py,
# such as flycheck__init__.py, would accidentally lead to linting the
# entire containing directory.
reporter = testutils.TestReporter()
linter = init_linter
linter.open()
linter.set_reporter(reporter)
filepath = join(INPUTDIR, 'not__init__.py')
linter.check([filepath])
messages = reporter.messages
assert len(messages) == 0
| 1 | 10,171 | Please don't use \ as a line continuation. Do an implicit string join instead with parens: ``` ("Message ..." "and ...") | PyCQA-pylint | py |
@@ -258,7 +258,6 @@ func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey,
return nil, fmt.Errorf("localstore: %w", err)
}
b.localstoreCloser = storer
- // register localstore unreserve function on the batchstore before batch service starts listening to blockchain events
batchStore, err := batchstore.New(stateStore, nil)
 	if err != nil {
// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package node
import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
"io"
"log"
"math/big"
"net"
"net/http"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethersphere/bee/pkg/accounting"
"github.com/ethersphere/bee/pkg/addressbook"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/debugapi"
"github.com/ethersphere/bee/pkg/feeds/factory"
"github.com/ethersphere/bee/pkg/hive"
"github.com/ethersphere/bee/pkg/kademlia"
"github.com/ethersphere/bee/pkg/localstore"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/metrics"
"github.com/ethersphere/bee/pkg/netstore"
"github.com/ethersphere/bee/pkg/p2p/libp2p"
"github.com/ethersphere/bee/pkg/pingpong"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/postage/batchservice"
"github.com/ethersphere/bee/pkg/postage/batchstore"
"github.com/ethersphere/bee/pkg/postage/listener"
"github.com/ethersphere/bee/pkg/postage/postagecontract"
"github.com/ethersphere/bee/pkg/pricing"
"github.com/ethersphere/bee/pkg/pss"
"github.com/ethersphere/bee/pkg/puller"
"github.com/ethersphere/bee/pkg/pullsync"
"github.com/ethersphere/bee/pkg/pullsync/pullstorage"
"github.com/ethersphere/bee/pkg/pusher"
"github.com/ethersphere/bee/pkg/pushsync"
"github.com/ethersphere/bee/pkg/recovery"
"github.com/ethersphere/bee/pkg/resolver/multiresolver"
"github.com/ethersphere/bee/pkg/retrieval"
settlement "github.com/ethersphere/bee/pkg/settlement"
"github.com/ethersphere/bee/pkg/settlement/pseudosettle"
"github.com/ethersphere/bee/pkg/settlement/swap"
"github.com/ethersphere/bee/pkg/settlement/swap/chequebook"
"github.com/ethersphere/bee/pkg/settlement/swap/swapprotocol"
"github.com/ethersphere/bee/pkg/settlement/swap/transaction"
"github.com/ethersphere/bee/pkg/statestore/leveldb"
mockinmem "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/ethersphere/bee/pkg/traversal"
ma "github.com/multiformats/go-multiaddr"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type Bee struct {
p2pService io.Closer
p2pCancel context.CancelFunc
apiCloser io.Closer
apiServer *http.Server
debugAPIServer *http.Server
resolverCloser io.Closer
errorLogWriter *io.PipeWriter
tracerCloser io.Closer
tagsCloser io.Closer
stateStoreCloser io.Closer
localstoreCloser io.Closer
topologyCloser io.Closer
pusherCloser io.Closer
pullerCloser io.Closer
pullSyncCloser io.Closer
pssCloser io.Closer
listenerCloser io.Closer
recoveryHandleCleanup func()
}
type Options struct {
DataDir string
DBCapacity uint64
APIAddr string
DebugAPIAddr string
Addr string
NATAddr string
EnableWS bool
EnableQUIC bool
WelcomeMessage string
Bootnodes []string
CORSAllowedOrigins []string
Logger logging.Logger
Standalone bool
TracingEnabled bool
TracingEndpoint string
TracingServiceName string
GlobalPinningEnabled bool
PaymentThreshold string
PaymentTolerance string
PaymentEarly string
ResolverConnectionCfgs []multiresolver.ConnectionConfig
GatewayMode bool
SwapEndpoint string
SwapFactoryAddress string
SwapInitialDeposit string
SwapEnable bool
PostageContractAddress string
PriceOracleAddress string
}
func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey, signer crypto.Signer, networkID uint64, logger logging.Logger, libp2pPrivateKey, pssPrivateKey *ecdsa.PrivateKey, o Options) (*Bee, error) {
tracer, tracerCloser, err := tracing.NewTracer(&tracing.Options{
Enabled: o.TracingEnabled,
Endpoint: o.TracingEndpoint,
ServiceName: o.TracingServiceName,
})
if err != nil {
return nil, fmt.Errorf("tracer: %w", err)
}
p2pCtx, p2pCancel := context.WithCancel(context.Background())
b := &Bee{
p2pCancel: p2pCancel,
errorLogWriter: logger.WriterLevel(logrus.ErrorLevel),
tracerCloser: tracerCloser,
}
var stateStore storage.StateStorer
if o.DataDir == "" {
stateStore = mockinmem.NewStateStore()
logger.Warning("using in-mem state store. no node state will be persisted")
} else {
stateStore, err = leveldb.NewStateStore(filepath.Join(o.DataDir, "statestore"))
if err != nil {
return nil, fmt.Errorf("statestore: %w", err)
}
}
b.stateStoreCloser = stateStore
addressbook := addressbook.New(stateStore)
var chequebookService chequebook.Service
var chequeStore chequebook.ChequeStore
var cashoutService chequebook.CashoutService
var overlayEthAddress common.Address
var transactionService transaction.Service
var swapBackend *ethclient.Client
var chainID *big.Int
if !o.Standalone {
swapBackend, err = ethclient.Dial(o.SwapEndpoint)
if err != nil {
return nil, err
}
transactionService, err = transaction.NewService(logger, swapBackend, signer)
if err != nil {
return nil, err
}
overlayEthAddress, err = signer.EthereumAddress()
if err != nil {
return nil, err
}
chainID, err = swapBackend.ChainID(p2pCtx)
if err != nil {
logger.Errorf("could not connect to backend at %v. A working blockchain node (for goerli network in production) is required. Check your node or specify another node using --swap-endpoint.", o.SwapEndpoint)
return nil, fmt.Errorf("could not get chain id from ethereum backend: %w", err)
}
}
if !o.Standalone && o.SwapEnable {
var factoryAddress common.Address
if o.SwapFactoryAddress == "" {
var found bool
factoryAddress, found = chequebook.DiscoverFactoryAddress(chainID.Int64())
if !found {
return nil, errors.New("no known factory address for this network")
}
logger.Infof("using default factory address for chain id %d: %x", chainID, factoryAddress)
} else if !common.IsHexAddress(o.SwapFactoryAddress) {
return nil, errors.New("malformed factory address")
} else {
factoryAddress = common.HexToAddress(o.SwapFactoryAddress)
logger.Infof("using custom factory address: %x", factoryAddress)
}
chequebookFactory, err := chequebook.NewFactory(swapBackend, transactionService, factoryAddress, chequebook.NewSimpleSwapFactoryBindingFunc)
if err != nil {
return nil, err
}
chequeSigner := chequebook.NewChequeSigner(signer, chainID.Int64())
maxDelay := 1 * time.Minute
synced, err := transaction.IsSynced(p2pCtx, swapBackend, maxDelay)
if err != nil {
return nil, err
}
if !synced {
logger.Infof("waiting for ethereum backend to be synced.")
err = transaction.WaitSynced(p2pCtx, swapBackend, maxDelay)
if err != nil {
return nil, fmt.Errorf("could not wait for ethereum backend to sync: %w", err)
}
}
swapInitialDeposit, ok := new(big.Int).SetString(o.SwapInitialDeposit, 10)
if !ok {
return nil, fmt.Errorf("invalid initial deposit: %s", swapInitialDeposit)
}
// initialize chequebook logic
chequebookService, err = chequebook.Init(p2pCtx,
chequebookFactory,
stateStore,
logger,
swapInitialDeposit,
transactionService,
swapBackend,
chainID.Int64(),
overlayEthAddress,
chequeSigner,
chequebook.NewSimpleSwapBindings,
chequebook.NewERC20Bindings)
if err != nil {
return nil, err
}
chequeStore = chequebook.NewChequeStore(stateStore, swapBackend, chequebookFactory, chainID.Int64(), overlayEthAddress, chequebook.NewSimpleSwapBindings, chequebook.RecoverCheque)
cashoutService, err = chequebook.NewCashoutService(stateStore, chequebook.NewSimpleSwapBindings, swapBackend, transactionService, chequeStore)
if err != nil {
return nil, err
}
}
// localstore depends on batchstore
var path string
if o.DataDir != "" {
path = filepath.Join(o.DataDir, "localstore")
}
lo := &localstore.Options{
Capacity: o.DBCapacity,
}
storer, err := localstore.New(path, swarmAddress.Bytes(), lo, logger)
if err != nil {
return nil, fmt.Errorf("localstore: %w", err)
}
b.localstoreCloser = storer
// register localstore unreserve function on the batchstore before batch service starts listening to blockchain events
batchStore, err := batchstore.New(stateStore, nil)
if err != nil {
return nil, fmt.Errorf("batchstore: %w", err)
}
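	// The comment above notes that the localstore unreserve hook should be registered on the
	// batchstore before the batch service starts listening to blockchain events, yet nil is
	// passed as the hook. A minimal sketch of that wiring (hypothetical: it assumes
	// batchstore.New accepts an unreserve callback and that the localstore exposes a matching
	// UnreserveBatch method; neither signature is confirmed by this file):
	//
	//   batchStore, err := batchstore.New(stateStore, storer.UnreserveBatch)
	//   if err != nil {
	//       return nil, fmt.Errorf("batchstore: %w", err)
	//   }
	//
	// With such a hook, expiring a postage batch can evict its chunks from the localstore
	// reserve before chain events are processed.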
post := postage.NewService(stateStore, chainID.Int64())
var postageContractService postagecontract.Interface
if !o.Standalone {
postageContractAddress, priceOracleAddress, found := listener.DiscoverAddresses(chainID.Int64())
if o.PostageContractAddress != "" {
if !common.IsHexAddress(o.PostageContractAddress) {
return nil, errors.New("malformed postage stamp address")
}
postageContractAddress = common.HexToAddress(o.PostageContractAddress)
}
if o.PriceOracleAddress != "" {
if !common.IsHexAddress(o.PriceOracleAddress) {
return nil, errors.New("malformed price oracle address")
}
priceOracleAddress = common.HexToAddress(o.PriceOracleAddress)
}
if (o.PostageContractAddress == "" || o.PriceOracleAddress == "") && !found {
return nil, errors.New("no known postage stamp addresses for this network")
}
eventListener := listener.New(logger, swapBackend, postageContractAddress, priceOracleAddress)
b.listenerCloser = eventListener
_ = batchservice.New(batchStore, logger, eventListener)
erc20Address, err := postagecontract.LookupERC20Address(p2pCtx, transactionService, postageContractAddress)
if err != nil {
return nil, err
}
postageContractService = postagecontract.New(
overlayEthAddress,
postageContractAddress,
erc20Address,
transactionService,
post,
)
}
p2ps, err := libp2p.New(p2pCtx, signer, networkID, swarmAddress, addr, addressbook, stateStore, logger, tracer, libp2p.Options{
PrivateKey: libp2pPrivateKey,
NATAddr: o.NATAddr,
EnableWS: o.EnableWS,
EnableQUIC: o.EnableQUIC,
Standalone: o.Standalone,
WelcomeMessage: o.WelcomeMessage,
})
if err != nil {
return nil, fmt.Errorf("p2p service: %w", err)
}
b.p2pService = p2ps
if !o.Standalone {
if natManager := p2ps.NATManager(); natManager != nil {
// wait for nat manager to init
logger.Debug("initializing NAT manager")
select {
case <-natManager.Ready():
				// this is a magic sleep to give the NAT time to sync the mappings;
				// it is a hack, kind of alchemy, and should be improved
time.Sleep(3 * time.Second)
logger.Debug("NAT manager initialized")
case <-time.After(10 * time.Second):
logger.Warning("NAT manager init timeout")
}
}
}
// Construct protocols.
pingPong := pingpong.New(p2ps, logger, tracer)
if err = p2ps.AddProtocol(pingPong.Protocol()); err != nil {
return nil, fmt.Errorf("pingpong service: %w", err)
}
hive := hive.New(p2ps, addressbook, networkID, logger)
if err = p2ps.AddProtocol(hive.Protocol()); err != nil {
return nil, fmt.Errorf("hive service: %w", err)
}
var bootnodes []ma.Multiaddr
if o.Standalone {
logger.Info("Starting node in standalone mode, no p2p connections will be made or accepted")
} else {
for _, a := range o.Bootnodes {
addr, err := ma.NewMultiaddr(a)
if err != nil {
logger.Debugf("multiaddress fail %s: %v", a, err)
logger.Warningf("invalid bootnode address %s", a)
continue
}
bootnodes = append(bootnodes, addr)
}
}
var settlement settlement.Interface
var swapService *swap.Service
if o.SwapEnable {
swapProtocol := swapprotocol.New(p2ps, logger, overlayEthAddress)
swapAddressBook := swap.NewAddressbook(stateStore)
swapService = swap.New(swapProtocol, logger, stateStore, chequebookService, chequeStore, swapAddressBook, networkID, cashoutService, p2ps)
swapProtocol.SetSwap(swapService)
if err = p2ps.AddProtocol(swapProtocol.Protocol()); err != nil {
return nil, fmt.Errorf("swap protocol: %w", err)
}
settlement = swapService
} else {
pseudosettleService := pseudosettle.New(p2ps, logger, stateStore)
if err = p2ps.AddProtocol(pseudosettleService.Protocol()); err != nil {
return nil, fmt.Errorf("pseudosettle service: %w", err)
}
settlement = pseudosettleService
}
paymentThreshold, ok := new(big.Int).SetString(o.PaymentThreshold, 10)
if !ok {
return nil, fmt.Errorf("invalid payment threshold: %s", paymentThreshold)
}
pricing := pricing.New(p2ps, logger, paymentThreshold)
if err = p2ps.AddProtocol(pricing.Protocol()); err != nil {
return nil, fmt.Errorf("pricing service: %w", err)
}
paymentTolerance, ok := new(big.Int).SetString(o.PaymentTolerance, 10)
if !ok {
return nil, fmt.Errorf("invalid payment tolerance: %s", paymentTolerance)
}
paymentEarly, ok := new(big.Int).SetString(o.PaymentEarly, 10)
if !ok {
return nil, fmt.Errorf("invalid payment early: %s", paymentEarly)
}
acc, err := accounting.NewAccounting(
paymentThreshold,
paymentTolerance,
paymentEarly,
logger,
stateStore,
settlement,
pricing,
)
if err != nil {
return nil, fmt.Errorf("accounting: %w", err)
}
settlement.SetNotifyPaymentFunc(acc.AsyncNotifyPayment)
pricing.SetPaymentThresholdObserver(acc)
kad := kademlia.New(swarmAddress, addressbook, hive, p2ps, logger, kademlia.Options{Bootnodes: bootnodes, Standalone: o.Standalone})
b.topologyCloser = kad
hive.SetAddPeersHandler(kad.AddPeers)
p2ps.SetNotifier(kad)
addrs, err := p2ps.Addresses()
if err != nil {
return nil, fmt.Errorf("get server addresses: %w", err)
}
for _, addr := range addrs {
logger.Debugf("p2p address: %s", addr)
}
retrieve := retrieval.New(swarmAddress, storer, p2ps, kad, logger, acc, accounting.NewFixedPricer(swarmAddress, 1000000000), tracer)
tagService := tags.NewTags(stateStore, logger)
b.tagsCloser = tagService
if err = p2ps.AddProtocol(retrieve.Protocol()); err != nil {
return nil, fmt.Errorf("retrieval service: %w", err)
}
pssService := pss.New(pssPrivateKey, logger)
b.pssCloser = pssService
var ns storage.Storer
if o.GlobalPinningEnabled {
// create recovery callback for content repair
recoverFunc := recovery.NewCallback(pssService)
ns = netstore.New(storer, recoverFunc, retrieve, logger)
} else {
ns = netstore.New(storer, nil, retrieve, logger)
}
traversalService := traversal.NewService(ns)
pushSyncProtocol := pushsync.New(p2ps, storer, kad, tagService, pssService.TryUnwrap, postage.ValidStamp(batchStore), logger, acc, accounting.NewFixedPricer(swarmAddress, 1000000000), tracer)
// set the pushSyncer in the PSS
pssService.SetPushSyncer(pushSyncProtocol)
if err = p2ps.AddProtocol(pushSyncProtocol.Protocol()); err != nil {
return nil, fmt.Errorf("pushsync service: %w", err)
}
if o.GlobalPinningEnabled {
// register function for chunk repair upon receiving a trojan message
chunkRepairHandler := recovery.NewRepairHandler(ns, logger, pushSyncProtocol)
b.recoveryHandleCleanup = pssService.Register(recovery.Topic, chunkRepairHandler)
}
pushSyncPusher := pusher.New(storer, kad, pushSyncProtocol, tagService, logger, tracer)
b.pusherCloser = pushSyncPusher
pullStorage := pullstorage.New(storer)
pullSync := pullsync.New(p2ps, pullStorage, pssService.TryUnwrap, postage.ValidStamp(batchStore), logger)
b.pullSyncCloser = pullSync
if err = p2ps.AddProtocol(pullSync.Protocol()); err != nil {
return nil, fmt.Errorf("pullsync protocol: %w", err)
}
puller := puller.New(stateStore, kad, pullSync, logger, puller.Options{})
b.pullerCloser = puller
multiResolver := multiresolver.NewMultiResolver(
multiresolver.WithConnectionConfigs(o.ResolverConnectionCfgs),
multiresolver.WithLogger(o.Logger),
)
b.resolverCloser = multiResolver
var apiService api.Service
if o.APIAddr != "" {
// API server
feedFactory := factory.New(ns)
apiService = api.New(tagService, ns, multiResolver, pssService, traversalService, feedFactory, post, postageContractService, signer, logger, tracer, api.Options{
CORSAllowedOrigins: o.CORSAllowedOrigins,
GatewayMode: o.GatewayMode,
WsPingPeriod: 60 * time.Second,
})
apiListener, err := net.Listen("tcp", o.APIAddr)
if err != nil {
return nil, fmt.Errorf("api listener: %w", err)
}
apiServer := &http.Server{
IdleTimeout: 30 * time.Second,
ReadHeaderTimeout: 3 * time.Second,
Handler: apiService,
ErrorLog: log.New(b.errorLogWriter, "", 0),
}
go func() {
logger.Infof("api address: %s", apiListener.Addr())
if err := apiServer.Serve(apiListener); err != nil && err != http.ErrServerClosed {
logger.Debugf("api server: %v", err)
logger.Error("unable to serve api")
}
}()
b.apiServer = apiServer
b.apiCloser = apiService
}
if o.DebugAPIAddr != "" {
// Debug API server
debugAPIService := debugapi.New(swarmAddress, publicKey, pssPrivateKey.PublicKey, overlayEthAddress, p2ps, pingPong, kad, storer, logger, tracer, tagService, acc, settlement, o.SwapEnable, swapService, chequebookService)
// register metrics from components
debugAPIService.MustRegisterMetrics(p2ps.Metrics()...)
debugAPIService.MustRegisterMetrics(pingPong.Metrics()...)
debugAPIService.MustRegisterMetrics(acc.Metrics()...)
debugAPIService.MustRegisterMetrics(storer.Metrics()...)
debugAPIService.MustRegisterMetrics(puller.Metrics()...)
debugAPIService.MustRegisterMetrics(pushSyncProtocol.Metrics()...)
debugAPIService.MustRegisterMetrics(pushSyncPusher.Metrics()...)
debugAPIService.MustRegisterMetrics(pullSync.Metrics()...)
debugAPIService.MustRegisterMetrics(retrieve.Metrics()...)
if pssServiceMetrics, ok := pssService.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(pssServiceMetrics.Metrics()...)
}
if apiService != nil {
debugAPIService.MustRegisterMetrics(apiService.Metrics()...)
}
if l, ok := logger.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(l.Metrics()...)
}
if l, ok := settlement.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(l.Metrics()...)
}
debugAPIListener, err := net.Listen("tcp", o.DebugAPIAddr)
if err != nil {
return nil, fmt.Errorf("debug api listener: %w", err)
}
debugAPIServer := &http.Server{
IdleTimeout: 30 * time.Second,
ReadHeaderTimeout: 3 * time.Second,
Handler: debugAPIService,
ErrorLog: log.New(b.errorLogWriter, "", 0),
}
go func() {
logger.Infof("debug api address: %s", debugAPIListener.Addr())
if err := debugAPIServer.Serve(debugAPIListener); err != nil && err != http.ErrServerClosed {
logger.Debugf("debug api server: %v", err)
logger.Error("unable to serve debug api")
}
}()
b.debugAPIServer = debugAPIServer
}
if err := kad.Start(p2pCtx); err != nil {
return nil, err
}
return b, nil
}
func (b *Bee) Shutdown(ctx context.Context) error {
errs := new(multiError)
if b.apiCloser != nil {
if err := b.apiCloser.Close(); err != nil {
errs.add(fmt.Errorf("api: %w", err))
}
}
var eg errgroup.Group
if b.apiServer != nil {
eg.Go(func() error {
if err := b.apiServer.Shutdown(ctx); err != nil {
return fmt.Errorf("api server: %w", err)
}
return nil
})
}
if b.debugAPIServer != nil {
eg.Go(func() error {
if err := b.debugAPIServer.Shutdown(ctx); err != nil {
return fmt.Errorf("debug api server: %w", err)
}
return nil
})
}
if err := eg.Wait(); err != nil {
errs.add(err)
}
if b.recoveryHandleCleanup != nil {
b.recoveryHandleCleanup()
}
if err := b.pusherCloser.Close(); err != nil {
errs.add(fmt.Errorf("pusher: %w", err))
}
if err := b.pullerCloser.Close(); err != nil {
errs.add(fmt.Errorf("puller: %w", err))
}
if err := b.pullSyncCloser.Close(); err != nil {
errs.add(fmt.Errorf("pull sync: %w", err))
}
if err := b.pssCloser.Close(); err != nil {
errs.add(fmt.Errorf("pss: %w", err))
}
b.p2pCancel()
if err := b.p2pService.Close(); err != nil {
errs.add(fmt.Errorf("p2p server: %w", err))
}
if err := b.tracerCloser.Close(); err != nil {
errs.add(fmt.Errorf("tracer: %w", err))
}
if err := b.tagsCloser.Close(); err != nil {
errs.add(fmt.Errorf("tag persistence: %w", err))
}
if b.listenerCloser != nil {
if err := b.listenerCloser.Close(); err != nil {
errs.add(fmt.Errorf("error listener: %w", err))
}
}
if err := b.stateStoreCloser.Close(); err != nil {
errs.add(fmt.Errorf("statestore: %w", err))
}
if err := b.localstoreCloser.Close(); err != nil {
errs.add(fmt.Errorf("localstore: %w", err))
}
if err := b.topologyCloser.Close(); err != nil {
errs.add(fmt.Errorf("topology driver: %w", err))
}
if err := b.errorLogWriter.Close(); err != nil {
errs.add(fmt.Errorf("error log writer: %w", err))
}
// Shutdown the resolver service only if it has been initialized.
if b.resolverCloser != nil {
if err := b.resolverCloser.Close(); err != nil {
errs.add(fmt.Errorf("resolver service: %w", err))
}
}
if errs.hasErrors() {
return errs
}
return nil
}
type multiError struct {
errors []error
}
func (e *multiError) Error() string {
if len(e.errors) == 0 {
return ""
}
s := e.errors[0].Error()
for _, err := range e.errors[1:] {
s += "; " + err.Error()
}
return s
}
func (e *multiError) add(err error) {
e.errors = append(e.errors, err)
}
func (e *multiError) hasErrors() bool {
return len(e.errors) > 0
}
| 1 | 14,095 | there needs to be a change here. setting up the batchstore with localstore.Unreserve hook | ethersphere-bee | go |
@@ -52,9 +52,15 @@ public final class ConfigUtil {
private static final String MICROSERVICE_CONFIG_LOADER_KEY = "cse-microservice-config-loader";
+ private static ConfigModel model = new ConfigModel();
+
private ConfigUtil() {
}
+ public static void setConfigs(Map<String, Object> config) {
+ model.setConfig(config);
+ }
+
public static Object getProperty(String key) {
Object config = DynamicPropertyFactory.getBackingConfigurationSource();
    return getProperty(config, key);
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.config;
import static org.apache.servicecomb.foundation.common.base.ServiceCombConstants.CONFIG_CSE_PREFIX;
import static org.apache.servicecomb.foundation.common.base.ServiceCombConstants.CONFIG_KEY_SPLITER;
import static org.apache.servicecomb.foundation.common.base.ServiceCombConstants.CONFIG_SERVICECOMB_PREFIX;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.commons.configuration.AbstractConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.EnvironmentConfiguration;
import org.apache.commons.configuration.SystemConfiguration;
import org.apache.servicecomb.config.archaius.scheduler.NeverStartPollingScheduler;
import org.apache.servicecomb.config.archaius.sources.ConfigModel;
import org.apache.servicecomb.config.archaius.sources.MicroserviceConfigLoader;
import org.apache.servicecomb.config.archaius.sources.MicroserviceConfigurationSource;
import org.apache.servicecomb.config.spi.ConfigCenterConfigurationSource;
import org.apache.servicecomb.foundation.common.utils.SPIServiceUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.config.ConcurrentCompositeConfiguration;
import com.netflix.config.ConcurrentMapConfiguration;
import com.netflix.config.ConfigurationManager;
import com.netflix.config.DynamicConfiguration;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.config.DynamicWatchedConfiguration;
import com.netflix.config.WatchedUpdateListener;
import com.netflix.config.WatchedUpdateResult;
public final class ConfigUtil {
private static final Logger LOGGER = LoggerFactory.getLogger(ConfigUtil.class);
private static final String MICROSERVICE_CONFIG_LOADER_KEY = "cse-microservice-config-loader";
private ConfigUtil() {
}
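  // The accompanying patch adds a bulk setter, setConfigs(Map<String, Object>), backed by a
  // static ConfigModel field, and the review suggests a per-key convenience method such as
  // addConfig(String k, Object v). A rough, hypothetical sketch of that suggestion (it assumes
  // the ConfigModel field introduced by the patch and a mutable map returned by
  // ConfigModel.getConfig(), neither of which is part of this version of the class):
  //
  //   public static void addConfig(String key, Object value) {
  //     model.getConfig().put(key, value);
  //   }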
public static Object getProperty(String key) {
Object config = DynamicPropertyFactory.getBackingConfigurationSource();
return getProperty(config, key);
}
public static Object getProperty(Object config, String key) {
if (null != config && Configuration.class.isInstance(config)) {
Configuration configuration = (Configuration) config;
return configuration.getProperty(key);
}
return null;
}
private static void setMicroserviceConfigLoader(Configuration config, MicroserviceConfigLoader loader) {
config.setProperty(MICROSERVICE_CONFIG_LOADER_KEY, loader);
}
public static MicroserviceConfigLoader getMicroserviceConfigLoader() {
return (MicroserviceConfigLoader) getProperty(MICROSERVICE_CONFIG_LOADER_KEY);
}
public static MicroserviceConfigLoader getMicroserviceConfigLoader(Configuration config) {
return (MicroserviceConfigLoader) getProperty(config, MICROSERVICE_CONFIG_LOADER_KEY);
}
public static ConcurrentCompositeConfiguration createLocalConfig() {
MicroserviceConfigLoader loader = new MicroserviceConfigLoader();
loader.loadAndSort();
LOGGER.info("create local config:");
for (ConfigModel configModel : loader.getConfigModels()) {
LOGGER.info(" {}.", configModel.getUrl());
}
ConcurrentCompositeConfiguration config = ConfigUtil.createLocalConfig(loader.getConfigModels());
ConfigUtil.setMicroserviceConfigLoader(config, loader);
return config;
}
public static ConcurrentCompositeConfiguration createLocalConfig(List<ConfigModel> configModelList) {
ConcurrentCompositeConfiguration config = new ConcurrentCompositeConfiguration();
duplicateServiceCombConfigToCse(config,
new ConcurrentMapConfiguration(new SystemConfiguration()),
"configFromSystem");
duplicateServiceCombConfigToCse(config,
convertEnvVariable(new ConcurrentMapConfiguration(new EnvironmentConfiguration())),
"configFromEnvironment");
duplicateServiceCombConfigToCse(config,
new DynamicConfiguration(
new MicroserviceConfigurationSource(configModelList), new NeverStartPollingScheduler()),
"configFromYamlFile");
return config;
}
public static AbstractConfiguration convertEnvVariable(AbstractConfiguration source) {
Iterator<String> keys = source.getKeys();
while (keys.hasNext()) {
String key = keys.next();
String[] separatedKey = key.split(CONFIG_KEY_SPLITER);
if (separatedKey.length == 1) {
continue;
}
String newKey = String.join(".", separatedKey);
source.addProperty(newKey, source.getProperty(key));
}
return source;
}
//inject a copy of cse.xxx for servicecomb.xxx
private static void duplicateServiceCombConfigToCse(AbstractConfiguration source) {
Iterator<String> keys = source.getKeys();
while (keys.hasNext()) {
String key = keys.next();
if (!key.startsWith(CONFIG_SERVICECOMB_PREFIX)) {
continue;
}
String cseKey = CONFIG_CSE_PREFIX + key.substring(key.indexOf(".") + 1);
source.addProperty(cseKey, source.getProperty(key));
}
}
private static void duplicateServiceCombConfigToCse(ConcurrentCompositeConfiguration compositeConfiguration,
AbstractConfiguration source,
String sourceName) {
duplicateServiceCombConfigToCse(source);
compositeConfiguration.addConfiguration(source, sourceName);
}
public static DynamicWatchedConfiguration createConfigFromConfigCenter(Configuration localConfiguration) {
ConfigCenterConfigurationSource configCenterConfigurationSource =
SPIServiceUtils.getTargetService(ConfigCenterConfigurationSource.class);
if (null == configCenterConfigurationSource) {
LOGGER.info(
"config center SPI service can not find, skip to load configuration from config center");
return null;
}
if (!configCenterConfigurationSource.isValidSource(localConfiguration)) {
LOGGER.info("Config Source serverUri is not correctly configured.");
return null;
}
configCenterConfigurationSource.init(localConfiguration);
return new DynamicWatchedConfiguration(configCenterConfigurationSource);
}
public static AbstractConfiguration createDynamicConfig() {
LOGGER.info("create dynamic config:");
ConcurrentCompositeConfiguration config = ConfigUtil.createLocalConfig();
DynamicWatchedConfiguration configFromConfigCenter = createConfigFromConfigCenter(config);
if (configFromConfigCenter != null) {
ConcurrentMapConfiguration injectConfig = new ConcurrentMapConfiguration();
config.addConfigurationAtFront(injectConfig, "extraInjectConfig");
duplicateServiceCombConfigToCse(configFromConfigCenter);
config.addConfigurationAtFront(configFromConfigCenter, "configCenterConfig");
configFromConfigCenter.getSource().addUpdateListener(new ServiceCombPropertyUpdateListener(injectConfig));
}
return config;
}
public static void installDynamicConfig() {
if (ConfigurationManager.isConfigurationInstalled()) {
LOGGER.warn("Configuration installed by others, will ignore this configuration.");
return;
}
AbstractConfiguration dynamicConfig = ConfigUtil.createDynamicConfig();
ConfigurationManager.install(dynamicConfig);
}
private static class ServiceCombPropertyUpdateListener implements WatchedUpdateListener {
private final ConcurrentMapConfiguration injectConfig;
ServiceCombPropertyUpdateListener(ConcurrentMapConfiguration injectConfig) {
this.injectConfig = injectConfig;
}
@Override
public void updateConfiguration(WatchedUpdateResult watchedUpdateResult) {
Map<String, Object> adds = watchedUpdateResult.getAdded();
if (adds != null) {
for (String add : adds.keySet()) {
if (add.startsWith(CONFIG_SERVICECOMB_PREFIX)) {
String key = CONFIG_CSE_PREFIX + add.substring(add.indexOf(".") + 1);
injectConfig.addProperty(key, adds.get(add));
}
}
}
Map<String, Object> deletes = watchedUpdateResult.getDeleted();
if (deletes != null) {
for (String delete : deletes.keySet()) {
if (delete.startsWith(CONFIG_SERVICECOMB_PREFIX)) {
injectConfig.clearProperty(CONFIG_CSE_PREFIX + delete.substring(delete.indexOf(".") + 1));
}
}
}
Map<String, Object> changes = watchedUpdateResult.getChanged();
if (changes != null) {
for (String change : changes.keySet()) {
if (change.startsWith(CONFIG_SERVICECOMB_PREFIX)) {
String key = CONFIG_CSE_PREFIX + change.substring(change.indexOf(".") + 1);
injectConfig.setProperty(key, changes.get(change));
}
}
}
}
}
}
| 1 | 9,200 | I think if we can provide a more convenient method to add configs . e.g. public static void addConfig(String k, Object v) | apache-servicecomb-java-chassis | java |
@@ -266,7 +266,7 @@ func TestActPool_AddActs(t *testing.T) {
err = ap.AddTsf(overBalTsf)
require.Equal(ErrBalance, errors.Cause(err))
// Case VI: over gas limit
- creationExecution, err := action.NewExecution(addr1.RawAddress, action.EmptyAddress, uint64(5), big.NewInt(int64(0)), blockchain.GasLimit+100, big.NewInt(10), []byte{})
+ creationExecution, err := action.NewExecution(addr1.RawAddress, action.EmptyAddress, uint64(5), big.NewInt(int64(0)), action.GasLimit+100, big.NewInt(10), []byte{})
require.NoError(err)
err = ap.AddExecution(creationExecution)
 	require.Equal(ErrGasHigherThanLimit, errors.Cause(err))
// Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package actpool
import (
"context"
"fmt"
"math/big"
"strings"
"testing"
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/action"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/iotxaddress"
"github.com/iotexproject/iotex-core/proto"
"github.com/iotexproject/iotex-core/test/mock/mock_blockchain"
"github.com/iotexproject/iotex-core/testutil"
)
const (
pubkeyA = "2c9ccbeb9ee91271f7e5c2103753be9c9edff847e1a51227df6a6b0765f31a4b424e84027b44a663950f013a88b8fd8cdc53b1eda1d4b73f9d9dc12546c8c87d68ff1435a0f8a006"
prikeyA = "b5affb30846a00ef5aa39b57f913d70cd8cf6badd587239863cb67feacf6b9f30c34e800"
pubkeyB = "881504d84a0659e14dcba59f24a98e71cda55b139615342668840c64678f1514941bbd053c7492fb9b719e6050cfa972efa491b79e11a1713824dda5f638fc0d9fa1b68be3c0f905"
prikeyB = "b89c1ec0fb5b192c8bb8f6fcf9a871e4a67ef462f40d2b8ff426da1d1eaedd9696dc9d00"
pubkeyC = "252fc7bc9a993b68dd7b13a00213c9cf4befe80da49940c52220f93c7147771ba2d783045cf0fbf2a86b32a62848befb96c0f38c0487a5ccc806ff28bb06d9faf803b93dda107003"
prikeyC = "3e05de562a27fb6e25ac23ff8bcaa1ada0c253fa8ff7c6d15308f65d06b6990f64ee9601"
pubkeyD = "29aa28cc21c3ee3cc658d3a322997ceb8d5d352f45d052192d3ab57cd196d3375af558067f5a2cfe5fc65d5249cc07f991bab683468382a3acaa4c8b7af35156b46aeda00620f307"
prikeyD = "d4b7b441382751d9a1955152b46a69f3c9f9559c6205757af928f5181ff207060d0dab00"
pubkeyE = "64dc2d5f445a78b884527252a3dba1f72f52251c97ec213dda99868882024d4d1442f100c8f1f833d0c687871a959ee97665dea24de1a627cce6c970d9db5859da9e4295bb602e04"
prikeyE = "53a827f7c5b4b4040b22ae9b12fcaa234e8362fa022480f50b8643981806ed67c7f77a00"
)
const (
maxNumActsPerPool = 8192
maxNumActsPerAcct = 256
)
var (
addr1 = testutil.ConstructAddress(pubkeyA, prikeyA)
addr2 = testutil.ConstructAddress(pubkeyB, prikeyB)
addr3 = testutil.ConstructAddress(pubkeyC, prikeyC)
addr4 = testutil.ConstructAddress(pubkeyD, prikeyD)
addr5 = testutil.ConstructAddress(pubkeyE, prikeyE)
)
func TestActPool_validateTsf(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
require.NoError(bc.Start(context.Background()))
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.GetFactory().RunActions(0, nil, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
// Case I: Coinbase transfer
coinbaseTsf := action.NewCoinBaseTransfer(big.NewInt(1), "1")
err = ap.validateTsf(coinbaseTsf)
require.Equal(ErrTransfer, errors.Cause(err))
// Case II: Oversized data
tmpPayload := [32769]byte{}
payload := tmpPayload[:]
tsf, err := action.NewTransfer(uint64(1), big.NewInt(1), "1", "2", payload, uint64(0), big.NewInt(0))
require.NoError(err)
err = ap.validateTsf(tsf)
require.Equal(ErrActPool, errors.Cause(err))
// Case III: Negative amount
tsf, err = action.NewTransfer(uint64(1), big.NewInt(-100), "1", "2", nil, uint64(0), big.NewInt(0))
require.NoError(err)
err = ap.validateTsf(tsf)
require.Equal(ErrBalance, errors.Cause(err))
// Case IV: Invalid address
tsf, err = action.NewTransfer(
1,
big.NewInt(1),
addr1.RawAddress,
"io1qyqsyqcyq5narhapakcsrhksfajfcpl24us3xp38zwvsep",
nil, uint64(0),
big.NewInt(0),
)
require.NoError(err)
err = ap.validateTsf(tsf)
require.Error(err)
require.True(strings.Contains(err.Error(), "error when validating recipient's address"))
// Case V: Signature verification fails
unsignedTsf, err := action.NewTransfer(uint64(1), big.NewInt(1), addr1.RawAddress, addr1.RawAddress, []byte{}, uint64(100000), big.NewInt(10))
require.NoError(err)
err = ap.validateTsf(unsignedTsf)
require.Equal(action.ErrAction, errors.Cause(err))
// Case VI: Nonce is too low
prevTsf, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(prevTsf)
require.NoError(err)
_, err = bc.GetFactory().RunActions(0, []*action.Transfer{prevTsf}, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
ap.Reset()
nTsf, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(60), []byte{}, uint64(100000), big.NewInt(10))
err = ap.validateTsf(nTsf)
require.Equal(ErrNonce, errors.Cause(err))
}
func TestActPool_validateVote(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
require.NoError(bc.Start(context.Background()))
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.GetFactory().RunActions(0, nil, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
// Case I: Invalid address
vote, err := action.NewVote(1, addr1.RawAddress, "123", 0, big.NewInt(0))
require.NoError(err)
vote.SetVoterPublicKey(addr1.PublicKey)
err = ap.validateVote(vote)
require.Error(err)
require.True(strings.Contains(err.Error(), "error when validating votee's address"))
// Case II: Signature verification fails
unsignedVote, err := action.NewVote(1, addr1.RawAddress, addr2.RawAddress, uint64(100000), big.NewInt(10))
require.NoError(err)
unsignedVote.SetVoterPublicKey(addr1.PublicKey)
require.NoError(err)
err = ap.validateVote(unsignedVote)
require.Equal(action.ErrAction, errors.Cause(err))
// Case III: Nonce is too low
prevTsf, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(prevTsf)
require.NoError(err)
_, err = bc.GetFactory().RunActions(0, []*action.Transfer{prevTsf}, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
ap.Reset()
nVote, _ := signedVote(addr1, addr1, uint64(1), uint64(100000), big.NewInt(10))
err = ap.validateVote(nVote)
require.Equal(ErrNonce, errors.Cause(err))
// Case IV: Votee is not a candidate
vote2, _ := signedVote(addr1, addr2, uint64(2), uint64(100000), big.NewInt(10))
err = ap.validateVote(vote2)
require.Equal(ErrVotee, errors.Cause(err))
}
func TestActPool_AddActs(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
require.NoError(bc.Start(context.Background()))
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(10))
require.NoError(err)
_, err = bc.GetFactory().RunActions(0, nil, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
// Test actpool status after adding a sequence of Tsfs/votes: need to check confirmed nonce, pending nonce, and pending balance
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
tsf2, _ := signedTransfer(addr1, addr1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
tsf3, _ := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
vote4, _ := signedVote(addr1, addr1, uint64(4), uint64(100000), big.NewInt(10))
tsf5, _ := signedTransfer(addr1, addr1, uint64(5), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
tsf6, _ := signedTransfer(addr2, addr2, uint64(1), big.NewInt(5), []byte{}, uint64(100000), big.NewInt(10))
tsf7, _ := signedTransfer(addr2, addr2, uint64(3), big.NewInt(1), []byte{}, uint64(100000), big.NewInt(10))
tsf8, _ := signedTransfer(addr2, addr2, uint64(4), big.NewInt(5), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(tsf1)
require.NoError(err)
err = ap.AddTsf(tsf2)
require.NoError(err)
err = ap.AddTsf(tsf3)
require.NoError(err)
err = ap.AddVote(vote4)
require.NoError(err)
err = ap.AddTsf(tsf5)
require.Equal(ErrBalance, errors.Cause(err))
err = ap.AddTsf(tsf6)
require.NoError(err)
err = ap.AddTsf(tsf7)
require.NoError(err)
err = ap.AddTsf(tsf8)
require.NoError(err)
pBalance1, _ := ap.getPendingBalance(addr1.RawAddress)
require.Equal(uint64(40), pBalance1.Uint64())
pNonce1, _ := ap.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(5), pNonce1)
pBalance2, _ := ap.getPendingBalance(addr2.RawAddress)
require.Equal(uint64(5), pBalance2.Uint64())
pNonce2, _ := ap.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(2), pNonce2)
tsf9, _ := signedTransfer(addr2, addr2, uint64(2), big.NewInt(3), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(tsf9)
require.NoError(err)
pBalance2, _ = ap.getPendingBalance(addr2.RawAddress)
require.Equal(uint64(1), pBalance2.Uint64())
pNonce2, _ = ap.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(4), pNonce2)
// Error Case Handling
// Case I: Action already exists in pool
err = ap.AddTsf(tsf1)
require.Equal(fmt.Errorf("existed transfer: %x", tsf1.Hash()), err)
err = ap.AddVote(vote4)
require.Equal(fmt.Errorf("existed vote: %x", vote4.Hash()), err)
// Case II: Pool space is full
mockBC := mock_blockchain.NewMockBlockchain(ctrl)
Ap2, err := NewActPool(mockBC, apConfig)
require.NoError(err)
ap2, ok := Ap2.(*actPool)
require.True(ok)
for i := uint64(0); i < ap2.cfg.MaxNumActsPerPool; i++ {
nTsf, err := action.NewTransfer(
i, big.NewInt(int64(i)), "1", "2", nil, uint64(0), big.NewInt(0))
require.NoError(err)
nAction := nTsf.ConvertToActionPb()
ap2.allActions[nTsf.Hash()] = nAction
}
mockBC.EXPECT().Nonce(gomock.Any()).Times(2).Return(uint64(0), nil)
mockBC.EXPECT().StateByAddr(gomock.Any()).Times(1).Return(nil, nil)
err = ap2.AddTsf(tsf1)
require.Equal(ErrActPool, errors.Cause(err))
err = ap2.AddVote(vote4)
require.Equal(ErrActPool, errors.Cause(err))
// Case III: Nonce already exists
replaceTsf, _ := signedTransfer(addr1, addr2, uint64(1), big.NewInt(1), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(replaceTsf)
require.Equal(ErrNonce, errors.Cause(err))
replaceVote, err := action.NewVote(4, addr1.RawAddress, "", uint64(100000), big.NewInt(10))
require.NoError(err)
require.NoError(action.Sign(replaceVote, addr1))
err = ap.AddVote(replaceVote)
require.Equal(ErrNonce, errors.Cause(err))
// Case IV: Nonce is too large
outOfBoundsTsf, _ := signedTransfer(addr1, addr1, ap.cfg.MaxNumActsPerAcct+1, big.NewInt(1), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(outOfBoundsTsf)
require.Equal(ErrNonce, errors.Cause(err))
// Case V: Insufficient balance
overBalTsf, _ := signedTransfer(addr2, addr2, uint64(4), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(overBalTsf)
require.Equal(ErrBalance, errors.Cause(err))
// Case VI: over gas limit
creationExecution, err := action.NewExecution(addr1.RawAddress, action.EmptyAddress, uint64(5), big.NewInt(int64(0)), blockchain.GasLimit+100, big.NewInt(10), []byte{})
require.NoError(err)
err = ap.AddExecution(creationExecution)
require.Equal(ErrGasHigherThanLimit, errors.Cause(err))
// Case VII: insufficient gas
tmpData := [1234]byte{}
creationExecution, err = action.NewExecution(
addr1.RawAddress,
action.EmptyAddress,
uint64(5),
big.NewInt(int64(0)),
10,
big.NewInt(10),
tmpData[:],
)
require.NoError(err)
err = ap.AddExecution(creationExecution)
require.Equal(ErrInsufficientGas, errors.Cause(err))
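	// Cases VI and VII above exercise the pool's gas checks: an execution that asks for more
	// gas than the configured ceiling (blockchain.GasLimit here; the accompanying patch renames
	// it to action.GasLimit) is rejected with ErrGasHigherThanLimit, while one whose gas limit
	// cannot cover its intrinsic cost fails with ErrInsufficientGas. A rough sketch of the
	// validation being tested (hypothetical helper, not the actual actpool implementation):
	//
	//   func checkGas(gasLimit, intrinsicGas uint64) error {
	//       if gasLimit > blockchain.GasLimit {
	//           return ErrGasHigherThanLimit
	//       }
	//       if intrinsicGas > gasLimit {
	//           return ErrInsufficientGas
	//       }
	//       return nil
	//   }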
}
func TestActPool_PickActs(t *testing.T) {
createActPool := func(cfg config.ActPool) (*actPool, []*action.Transfer, []*action.Vote, []*action.Execution) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
require.NoError(bc.Start(context.Background()))
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(10))
require.NoError(err)
_, err = bc.GetFactory().RunActions(0, nil, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
// Create actpool
Ap, err := NewActPool(bc, cfg)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
tsf2, _ := signedTransfer(addr1, addr1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
tsf3, _ := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
tsf4, _ := signedTransfer(addr1, addr1, uint64(4), big.NewInt(40), []byte{}, uint64(100000), big.NewInt(10))
tsf5, _ := signedTransfer(addr1, addr1, uint64(5), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
vote6, _ := signedVote(addr1, addr1, uint64(6), uint64(100000), big.NewInt(10))
vote7, _ := signedVote(addr2, addr2, uint64(1), uint64(100000), big.NewInt(10))
tsf8, _ := signedTransfer(addr2, addr2, uint64(3), big.NewInt(5), []byte{}, uint64(100000), big.NewInt(10))
tsf9, _ := signedTransfer(addr2, addr2, uint64(4), big.NewInt(1), []byte{}, uint64(100000), big.NewInt(10))
tsf10, _ := signedTransfer(addr2, addr2, uint64(5), big.NewInt(5), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(tsf1)
require.NoError(err)
err = ap.AddTsf(tsf2)
require.NoError(err)
err = ap.AddTsf(tsf3)
require.NoError(err)
err = ap.AddTsf(tsf4)
require.NoError(err)
err = ap.AddTsf(tsf5)
require.Equal(ErrBalance, errors.Cause(err))
err = ap.AddVote(vote6)
require.NoError(err)
err = ap.AddVote(vote7)
require.NoError(err)
err = ap.AddTsf(tsf8)
require.NoError(err)
err = ap.AddTsf(tsf9)
require.NoError(err)
err = ap.AddTsf(tsf10)
require.NoError(err)
return ap, []*action.Transfer{tsf1, tsf2, tsf3, tsf4}, []*action.Vote{vote7}, []*action.Execution{}
}
t.Run("no-limit", func(t *testing.T) {
apConfig := getActPoolCfg()
ap, transfers, votes, executions := createActPool(apConfig)
pickedTsfs, pickedVotes, pickedExecutions := ap.PickActs()
require.Equal(t, transfers, pickedTsfs)
require.Equal(t, votes, pickedVotes)
require.Equal(t, executions, pickedExecutions)
})
t.Run("enough-limit", func(t *testing.T) {
apConfig := getActPoolCfg()
apConfig.MaxNumActsToPick = 10
ap, transfers, votes, executions := createActPool(apConfig)
pickedTsfs, pickedVotes, pickedExecutions := ap.PickActs()
require.Equal(t, transfers, pickedTsfs)
require.Equal(t, votes, pickedVotes)
require.Equal(t, executions, pickedExecutions)
})
t.Run("low-limit", func(t *testing.T) {
apConfig := getActPoolCfg()
apConfig.MaxNumActsToPick = 3
ap, _, _, _ := createActPool(apConfig)
pickedTsfs, pickedVotes, pickedExecutions := ap.PickActs()
require.Equal(t, 3, len(pickedTsfs)+len(pickedVotes)+len(pickedExecutions))
})
}
func TestActPool_removeConfirmedActs(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
require.NoError(bc.Start(context.Background()))
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.GetFactory().RunActions(0, nil, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
tsf2, _ := signedTransfer(addr1, addr1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
tsf3, _ := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
vote4, _ := signedVote(addr1, addr1, uint64(4), uint64(100000), big.NewInt(10))
err = ap.AddTsf(tsf1)
require.NoError(err)
err = ap.AddTsf(tsf2)
require.NoError(err)
err = ap.AddTsf(tsf3)
require.NoError(err)
err = ap.AddVote(vote4)
require.NoError(err)
require.Equal(4, len(ap.allActions))
require.NotNil(ap.accountActs[addr1.RawAddress])
_, err = bc.GetFactory().RunActions(0, []*action.Transfer{tsf1, tsf2, tsf3}, []*action.Vote{vote4}, []*action.Execution{})
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
ap.removeConfirmedActs()
require.Equal(0, len(ap.allActions))
require.Nil(ap.accountActs[addr1.RawAddress])
}
func TestActPool_Reset(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
require.NoError(bc.Start(context.Background()))
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(200))
require.NoError(err)
_, err = bc.CreateState(addr3.RawAddress, uint64(300))
require.NoError(err)
_, err = bc.GetFactory().RunActions(0, nil, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
apConfig := getActPoolCfg()
Ap1, err := NewActPool(bc, apConfig)
require.NoError(err)
ap1, ok := Ap1.(*actPool)
require.True(ok)
Ap2, err := NewActPool(bc, apConfig)
require.NoError(err)
ap2, ok := Ap2.(*actPool)
require.True(ok)
// Tsfs to be added to ap1
tsf1, _ := signedTransfer(addr1, addr2, uint64(1), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
tsf2, _ := signedTransfer(addr1, addr3, uint64(2), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
tsf3, _ := signedTransfer(addr1, addr2, uint64(3), big.NewInt(60), []byte{}, uint64(100000), big.NewInt(10))
tsf4, _ := signedTransfer(addr2, addr1, uint64(1), big.NewInt(100), []byte{}, uint64(100000), big.NewInt(10))
tsf5, _ := signedTransfer(addr2, addr3, uint64(2), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
tsf6, _ := signedTransfer(addr2, addr1, uint64(3), big.NewInt(60), []byte{}, uint64(100000), big.NewInt(10))
tsf7, _ := signedTransfer(addr3, addr1, uint64(1), big.NewInt(100), []byte{}, uint64(100000), big.NewInt(10))
tsf8, _ := signedTransfer(addr3, addr2, uint64(2), big.NewInt(100), []byte{}, uint64(100000), big.NewInt(10))
tsf9, _ := signedTransfer(addr3, addr1, uint64(4), big.NewInt(100), []byte{}, uint64(100000), big.NewInt(10))
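	// Note: tsf3 (60) and tsf6 (60) exceed the senders' remaining pending balances
	// (addr1 has 100-50-30=20 left, addr2 has 200-100-50=50 left), so AddTsf is
	// expected to reject them with ErrBalance below.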
err = ap1.AddTsf(tsf1)
require.NoError(err)
err = ap1.AddTsf(tsf2)
require.NoError(err)
err = ap1.AddTsf(tsf3)
require.Equal(ErrBalance, errors.Cause(err))
err = ap1.AddTsf(tsf4)
require.NoError(err)
err = ap1.AddTsf(tsf5)
require.NoError(err)
err = ap1.AddTsf(tsf6)
require.Equal(ErrBalance, errors.Cause(err))
err = ap1.AddTsf(tsf7)
require.NoError(err)
err = ap1.AddTsf(tsf8)
require.NoError(err)
err = ap1.AddTsf(tsf9)
require.NoError(err)
// Tsfs to be added to ap2 only
tsf10, _ := signedTransfer(addr1, addr2, uint64(3), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
tsf11, _ := signedTransfer(addr1, addr3, uint64(4), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
tsf12, _ := signedTransfer(addr2, addr3, uint64(2), big.NewInt(70), []byte{}, uint64(100000), big.NewInt(10))
tsf13, _ := signedTransfer(addr3, addr1, uint64(1), big.NewInt(200), []byte{}, uint64(100000), big.NewInt(10))
tsf14, _ := signedTransfer(addr3, addr2, uint64(2), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
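	// In ap2, tsf11 and tsf9 are expected to be rejected with ErrBalance: addr1's 100
	// is fully consumed by tsf1, tsf2 and tsf10, and addr3 has only 50 left after
	// tsf13 and tsf14.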
err = ap2.AddTsf(tsf1)
require.NoError(err)
err = ap2.AddTsf(tsf2)
require.NoError(err)
err = ap2.AddTsf(tsf10)
require.NoError(err)
err = ap2.AddTsf(tsf11)
require.Equal(ErrBalance, errors.Cause(err))
err = ap2.AddTsf(tsf4)
require.NoError(err)
err = ap2.AddTsf(tsf12)
require.NoError(err)
err = ap2.AddTsf(tsf13)
require.NoError(err)
err = ap2.AddTsf(tsf14)
require.NoError(err)
err = ap2.AddTsf(tsf9)
require.Equal(ErrBalance, errors.Cause(err))
// Check confirmed nonce, pending nonce, and pending balance after adding Tsfs above for each account
// ap1
// Addr1
ap1PNonce1, _ := ap1.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(3), ap1PNonce1)
ap1PBalance1, _ := ap1.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(20).Uint64(), ap1PBalance1.Uint64())
// Addr2
ap1PNonce2, _ := ap1.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(3), ap1PNonce2)
ap1PBalance2, _ := ap1.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(50).Uint64(), ap1PBalance2.Uint64())
// Addr3
ap1PNonce3, _ := ap1.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(3), ap1PNonce3)
ap1PBalance3, _ := ap1.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(100).Uint64(), ap1PBalance3.Uint64())
// ap2
// Addr1
ap2PNonce1, _ := ap2.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(4), ap2PNonce1)
ap2PBalance1, _ := ap2.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(0).Uint64(), ap2PBalance1.Uint64())
// Addr2
ap2PNonce2, _ := ap2.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(3), ap2PNonce2)
ap2PBalance2, _ := ap2.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(30).Uint64(), ap2PBalance2.Uint64())
// Addr3
ap2PNonce3, _ := ap2.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(3), ap2PNonce3)
ap2PBalance3, _ := ap2.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(50).Uint64(), ap2PBalance3.Uint64())
// Let ap1 be BP's actpool
pickedTsfs, pickedVotes, pickedExecutions := ap1.PickActs()
// ap1 commits update of accounts to trie
_, err = bc.GetFactory().RunActions(0, pickedTsfs, pickedVotes, pickedExecutions)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
	// Reset
ap1.Reset()
ap2.Reset()
// Check confirmed nonce, pending nonce, and pending balance after resetting actpool for each account
// ap1
// Addr1
ap1PNonce1, _ = ap1.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(3), ap1PNonce1)
ap1PBalance1, _ = ap1.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(220).Uint64(), ap1PBalance1.Uint64())
// Addr2
ap1PNonce2, _ = ap1.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(3), ap1PNonce2)
ap1PBalance2, _ = ap1.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(200).Uint64(), ap1PBalance2.Uint64())
// Addr3
ap1PNonce3, _ = ap1.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(3), ap1PNonce3)
ap1PBalance3, _ = ap1.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(180).Uint64(), ap1PBalance3.Uint64())
// ap2
// Addr1
ap2PNonce1, _ = ap2.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(4), ap2PNonce1)
ap2PBalance1, _ = ap2.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(200).Uint64(), ap2PBalance1.Uint64())
// Addr2
ap2PNonce2, _ = ap2.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(3), ap2PNonce2)
ap2PBalance2, _ = ap2.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(200).Uint64(), ap2PBalance2.Uint64())
// Addr3
ap2PNonce3, _ = ap2.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(3), ap2PNonce3)
ap2PBalance3, _ = ap2.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(180).Uint64(), ap2PBalance3.Uint64())
// Add more Tsfs after resetting
// Tsfs To be added to ap1 only
tsf15, _ := signedTransfer(addr3, addr2, uint64(3), big.NewInt(80), []byte{}, uint64(100000), big.NewInt(10))
// Tsfs To be added to ap2 only
tsf16, _ := signedTransfer(addr1, addr2, uint64(4), big.NewInt(150), []byte{}, uint64(100000), big.NewInt(10))
tsf17, _ := signedTransfer(addr2, addr1, uint64(3), big.NewInt(90), []byte{}, uint64(100000), big.NewInt(10))
tsf18, _ := signedTransfer(addr2, addr3, uint64(4), big.NewInt(100), []byte{}, uint64(100000), big.NewInt(10))
tsf19, _ := signedTransfer(addr2, addr1, uint64(5), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
tsf20, _ := signedTransfer(addr3, addr2, uint64(3), big.NewInt(200), []byte{}, uint64(100000), big.NewInt(10))
err = ap1.AddTsf(tsf15)
require.NoError(err)
err = ap2.AddTsf(tsf16)
require.NoError(err)
err = ap2.AddTsf(tsf17)
require.NoError(err)
err = ap2.AddTsf(tsf18)
require.NoError(err)
err = ap2.AddTsf(tsf19)
require.Equal(ErrBalance, errors.Cause(err))
err = ap2.AddTsf(tsf20)
require.Equal(ErrBalance, errors.Cause(err))
// Check confirmed nonce, pending nonce, and pending balance after adding Tsfs above for each account
// ap1
// Addr1
ap1PNonce1, _ = ap1.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(3), ap1PNonce1)
ap1PBalance1, _ = ap1.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(220).Uint64(), ap1PBalance1.Uint64())
// Addr2
ap1PNonce2, _ = ap1.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(3), ap1PNonce2)
ap1PBalance2, _ = ap1.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(200).Uint64(), ap1PBalance2.Uint64())
// Addr3
ap1PNonce3, _ = ap1.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(5), ap1PNonce3)
ap1PBalance3, _ = ap1.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(0).Uint64(), ap1PBalance3.Uint64())
// ap2
// Addr1
ap2PNonce1, _ = ap2.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(5), ap2PNonce1)
ap2PBalance1, _ = ap2.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(50).Uint64(), ap2PBalance1.Uint64())
// Addr2
ap2PNonce2, _ = ap2.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(5), ap2PNonce2)
ap2PBalance2, _ = ap2.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(10).Uint64(), ap2PBalance2.Uint64())
// Addr3
ap2PNonce3, _ = ap2.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(3), ap2PNonce3)
ap2PBalance3, _ = ap2.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(180).Uint64(), ap2PBalance3.Uint64())
// Let ap2 be BP's actpool
pickedTsfs, pickedVotes, pickedExecutions = ap2.PickActs()
// ap2 commits update of accounts to trie
_, err = bc.GetFactory().RunActions(0, pickedTsfs, pickedVotes, pickedExecutions)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
	// Reset
ap1.Reset()
ap2.Reset()
// Check confirmed nonce, pending nonce, and pending balance after resetting actpool for each account
// ap1
// Addr1
ap1PNonce1, _ = ap1.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(5), ap1PNonce1)
ap1PBalance1, _ = ap1.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(140).Uint64(), ap1PBalance1.Uint64())
// Addr2
ap1PNonce2, _ = ap1.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(5), ap1PNonce2)
ap1PBalance2, _ = ap1.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(180).Uint64(), ap1PBalance2.Uint64())
// Addr3
ap1PNonce3, _ = ap1.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(5), ap1PNonce3)
ap1PBalance3, _ = ap1.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(100).Uint64(), ap1PBalance3.Uint64())
// ap2
// Addr1
ap2PNonce1, _ = ap2.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(5), ap2PNonce1)
ap2PBalance1, _ = ap2.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(140).Uint64(), ap2PBalance1.Uint64())
// Addr2
ap2PNonce2, _ = ap2.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(5), ap2PNonce2)
ap2PBalance2, _ = ap2.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(180).Uint64(), ap2PBalance2.Uint64())
// Addr3
ap2PNonce3, _ = ap2.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(3), ap2PNonce3)
ap2PBalance3, _ = ap2.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(280).Uint64(), ap2PBalance3.Uint64())
// Add two more players
_, err = bc.CreateState(addr4.RawAddress, uint64(10))
require.NoError(err)
_, err = bc.CreateState(addr5.RawAddress, uint64(20))
require.NoError(err)
_, err = bc.GetFactory().RunActions(1, nil, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
tsf21, _ := signedTransfer(addr4, addr5, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
vote22, _ := signedVote(addr4, addr4, uint64(2), uint64(100000), big.NewInt(10))
vote23, _ := action.NewVote(3, addr4.RawAddress, "", uint64(100000), big.NewInt(10))
_ = action.Sign(vote23, addr4)
vote24, _ := signedVote(addr5, addr5, uint64(1), uint64(100000), big.NewInt(10))
tsf25, _ := signedTransfer(addr5, addr4, uint64(2), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
vote26, _ := action.NewVote(3, addr5.RawAddress, "", uint64(100000), big.NewInt(10))
_ = action.Sign(vote26, addr5)
err = ap1.AddTsf(tsf21)
require.NoError(err)
err = ap1.AddVote(vote22)
require.NoError(err)
err = ap1.AddVote(vote23)
require.NoError(err)
err = ap1.AddVote(vote24)
require.NoError(err)
err = ap1.AddTsf(tsf25)
require.NoError(err)
err = ap1.AddVote(vote26)
require.NoError(err)
// Check confirmed nonce, pending nonce, and pending balance after adding actions above for account4 and account5
// ap1
// Addr4
ap1PNonce4, _ := ap1.getPendingNonce(addr4.RawAddress)
require.Equal(uint64(4), ap1PNonce4)
ap1PBalance4, _ := ap1.getPendingBalance(addr4.RawAddress)
require.Equal(big.NewInt(0).Uint64(), ap1PBalance4.Uint64())
// Addr5
ap1PNonce5, _ := ap1.getPendingNonce(addr5.RawAddress)
require.Equal(uint64(4), ap1PNonce5)
ap1PBalance5, _ := ap1.getPendingBalance(addr5.RawAddress)
require.Equal(big.NewInt(10).Uint64(), ap1PBalance5.Uint64())
// Let ap1 be BP's actpool
pickedTsfs, pickedVotes, pickedExecutions = ap1.PickActs()
// ap1 commits update of accounts to trie
_, err = bc.GetFactory().RunActions(0, pickedTsfs, pickedVotes, pickedExecutions)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
	// Reset
ap1.Reset()
// Check confirmed nonce, pending nonce, and pending balance after resetting actpool for each account
// ap1
// Addr4
ap1PNonce4, _ = ap1.getPendingNonce(addr4.RawAddress)
require.Equal(uint64(4), ap1PNonce4)
ap1PBalance4, _ = ap1.getPendingBalance(addr4.RawAddress)
require.Equal(big.NewInt(10).Uint64(), ap1PBalance4.Uint64())
// Addr5
ap1PNonce5, _ = ap1.getPendingNonce(addr5.RawAddress)
require.Equal(uint64(4), ap1PNonce5)
ap1PBalance5, _ = ap1.getPendingBalance(addr5.RawAddress)
require.Equal(big.NewInt(20).Uint64(), ap1PBalance5.Uint64())
}
func TestActPool_removeInvalidActs(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
require.NoError(bc.Start(context.Background()))
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.GetFactory().RunActions(0, nil, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
tsf2, _ := signedTransfer(addr1, addr1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
tsf3, _ := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
vote4, _ := signedVote(addr1, addr1, uint64(4), uint64(100000), big.NewInt(10))
err = ap.AddTsf(tsf1)
require.NoError(err)
err = ap.AddTsf(tsf2)
require.NoError(err)
err = ap.AddTsf(tsf3)
require.NoError(err)
err = ap.AddVote(vote4)
require.NoError(err)
hash1 := tsf1.Hash()
action1 := tsf1.ConvertToActionPb()
hash2 := vote4.Hash()
action2 := vote4.ConvertToActionPb()
acts := []*iproto.ActionPb{action1, action2}
require.NotNil(ap.allActions[hash1])
require.NotNil(ap.allActions[hash2])
ap.removeInvalidActs(acts)
require.Nil(ap.allActions[hash1])
require.Nil(ap.allActions[hash2])
}
func TestActPool_GetPendingNonce(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
require.NoError(bc.Start(context.Background()))
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.GetFactory().RunActions(0, nil, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
tsf3, _ := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
vote4, _ := signedVote(addr1, addr1, uint64(4), uint64(100000), big.NewInt(10))
err = ap.AddTsf(tsf1)
require.NoError(err)
err = ap.AddTsf(tsf3)
require.NoError(err)
err = ap.AddVote(vote4)
require.NoError(err)
nonce, err := ap.GetPendingNonce(addr2.RawAddress)
require.NoError(err)
require.Equal(uint64(1), nonce)
nonce, err = ap.GetPendingNonce(addr1.RawAddress)
require.NoError(err)
require.Equal(uint64(2), nonce)
}
func TestActPool_GetUnconfirmedActs(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
require.NoError(bc.Start(context.Background()))
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.GetFactory().RunActions(0, nil, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
act1 := tsf1.ConvertToActionPb()
tsf3, _ := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
act3 := tsf3.ConvertToActionPb()
vote4, _ := signedVote(addr1, addr1, uint64(4), uint64(100000), big.NewInt(10))
act4 := vote4.ConvertToActionPb()
err = ap.AddTsf(tsf1)
require.NoError(err)
err = ap.AddTsf(tsf3)
require.NoError(err)
err = ap.AddVote(vote4)
require.NoError(err)
acts := ap.GetUnconfirmedActs(addr2.RawAddress)
require.Equal([]*iproto.ActionPb{}, acts)
acts = ap.GetUnconfirmedActs(addr1.RawAddress)
require.Equal([]*iproto.ActionPb{act1, act3, act4}, acts)
}
func TestActPool_GetActionByHash(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
require.NoError(bc.Start(context.Background()))
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(100))
require.NoError(err)
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
hash1 := tsf1.Hash()
act1 := tsf1.ConvertToActionPb()
vote2, _ := signedVote(addr1, addr1, uint64(2), uint64(100000), big.NewInt(10))
hash2 := vote2.Hash()
act2 := vote2.ConvertToActionPb()
ap.allActions[hash1] = act1
act, err := ap.GetActionByHash(hash1)
require.NoError(err)
require.Equal(act1, act)
act, err = ap.GetActionByHash(hash2)
require.Equal(ErrHash, errors.Cause(err))
require.Nil(act)
ap.allActions[hash2] = act2
act, err = ap.GetActionByHash(hash2)
require.NoError(err)
require.Equal(act2, act)
}
func TestActPool_GetCapacity(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
require.Equal(uint64(maxNumActsPerPool), ap.GetCapacity())
}
func TestActPool_GetSize(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
require.NoError(bc.Start(context.Background()))
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.GetFactory().RunActions(0, nil, nil, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
require.Zero(ap.GetSize())
tsf1, err := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
require.NoError(err)
tsf2, err := signedTransfer(addr1, addr1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
require.NoError(err)
tsf3, err := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
require.NoError(err)
vote4, err := signedVote(addr1, addr1, uint64(4), uint64(100000), big.NewInt(10))
require.NoError(err)
require.NoError(ap.AddTsf(tsf1))
require.NoError(ap.AddTsf(tsf2))
require.NoError(ap.AddTsf(tsf3))
require.NoError(ap.AddVote(vote4))
require.Equal(uint64(4), ap.GetSize())
_, err = bc.GetFactory().RunActions(0, []*action.Transfer{tsf1, tsf2, tsf3}, []*action.Vote{vote4}, nil)
require.NoError(err)
require.Nil(bc.GetFactory().Commit())
ap.removeConfirmedActs()
require.Equal(uint64(0), ap.GetSize())
}
// Helper function to return the correct pending nonce, handling the case where the account has no queue
func (ap *actPool) getPendingNonce(addr string) (uint64, error) {
if queue, ok := ap.accountActs[addr]; ok {
return queue.PendingNonce(), nil
}
committedNonce, err := ap.bc.Nonce(addr)
pendingNonce := committedNonce + 1
return pendingNonce, err
}
// Helper function to return the correct pending balance, handling the case where the account has no queue
func (ap *actPool) getPendingBalance(addr string) (*big.Int, error) {
if queue, ok := ap.accountActs[addr]; ok {
return queue.PendingBalance(), nil
}
return ap.bc.Balance(addr)
}
// Helper function to return a signed transfer
func signedTransfer(sender *iotxaddress.Address, recipient *iotxaddress.Address, nonce uint64, amount *big.Int, payload []byte, gasLimit uint64, gasPrice *big.Int) (*action.Transfer, error) {
transfer, err := action.NewTransfer(nonce, amount, sender.RawAddress, recipient.RawAddress, payload, gasLimit, gasPrice)
if err != nil {
return nil, err
}
if err := action.Sign(transfer, sender); err != nil {
return nil, err
}
return transfer, nil
}
// Helper function to return a signed vote
func signedVote(voter *iotxaddress.Address, votee *iotxaddress.Address, nonce uint64, gasLimit uint64, gasPrice *big.Int) (*action.Vote, error) {
vote, err := action.NewVote(nonce, voter.RawAddress, votee.RawAddress, gasLimit, gasPrice)
if err != nil {
return nil, err
}
if err := action.Sign(vote, voter); err != nil {
return nil, err
}
return vote, nil
}
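// Helper function to return an actpool config with the test limits for pool and per-account action counts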
func getActPoolCfg() config.ActPool {
return config.ActPool{
MaxNumActsPerPool: maxNumActsPerPool,
MaxNumActsPerAcct: maxNumActsPerAcct,
}
}
| 1 | 11,966 | line is 165 characters | iotexproject-iotex-core | go |
@@ -0,0 +1,8 @@
+package org.jkiss.dbeaver.ext.oceanbase.data;
+
+import org.jkiss.dbeaver.model.impl.jdbc.data.handlers.JDBCStandardValueHandlerProvider;
+
+public class OceanbaseValueHandlerProvider extends JDBCStandardValueHandlerProvider{
+
+
+} | 1 | 1 | 11,205 | Please add a copyright notice. Also, could you tell me please why do we need this empty provider here? | dbeaver-dbeaver | java |
|
@@ -95,8 +95,11 @@ public class EeaSendRawTransaction implements JsonRpcMethod {
maybePrivacyGroup =
privacyController.retrieveOnChainPrivacyGroup(
maybePrivacyGroupId.get(), enclavePublicKey);
- if (maybePrivacyGroup.isEmpty()
- && !privacyController.isGroupAdditionTransaction(privateTransaction)) {
+ if (maybePrivacyGroup.isEmpty()) {
+ if (!privacyController.isGroupAdditionTransaction(privateTransaction)) {
+ return new JsonRpcErrorResponse(id, JsonRpcError.ONCHAIN_PRIVACY_GROUP_DOES_NOT_EXIST);
+ }
+ } else if (!maybePrivacyGroup.get().getMembers().contains(enclavePublicKey)) {
return new JsonRpcErrorResponse(id, JsonRpcError.ONCHAIN_PRIVACY_GROUP_DOES_NOT_EXIST);
}
} else { // !onchainPirvacyGroupEnabled | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.privacy.methods.eea;
import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcEnclaveErrorConverter.convertEnclaveInvalidReason;
import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcErrorConverter.convertTransactionInvalidReason;
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError.DECODE_ERROR;
import static org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError.PRIVATE_FROM_DOES_NOT_MATCH_ENCLAVE_PUBLIC_KEY;
import org.hyperledger.besu.enclave.types.PrivacyGroup;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods.JsonRpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.privacy.methods.EnclavePublicKeyProvider;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcErrorResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.mainnet.TransactionValidator.TransactionInvalidReason;
import org.hyperledger.besu.ethereum.mainnet.ValidationResult;
import org.hyperledger.besu.ethereum.privacy.PrivacyController;
import org.hyperledger.besu.ethereum.privacy.PrivateTransaction;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.rlp.RLPException;
import java.util.Optional;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
public class EeaSendRawTransaction implements JsonRpcMethod {
private final TransactionPool transactionPool;
private final PrivacyController privacyController;
private final EnclavePublicKeyProvider enclavePublicKeyProvider;
/*
Temporarily adding this flag to this method to avoid being able to use offchain and onchain
privacy groups at the same time. Later on this check will be done in a better place.
*/
private final boolean onchainPrivacyGroupsEnabled;
public EeaSendRawTransaction(
final TransactionPool transactionPool,
final PrivacyController privacyController,
final EnclavePublicKeyProvider enclavePublicKeyProvider,
final boolean onchainPrivacyGroupsEnabled) {
this.transactionPool = transactionPool;
this.privacyController = privacyController;
this.enclavePublicKeyProvider = enclavePublicKeyProvider;
this.onchainPrivacyGroupsEnabled = onchainPrivacyGroupsEnabled;
}
@Override
public String getName() {
return RpcMethod.EEA_SEND_RAW_TRANSACTION.getMethodName();
}
@Override
public JsonRpcResponse response(final JsonRpcRequestContext requestContext) {
final Object id = requestContext.getRequest().getId();
final String rawPrivateTransaction = requestContext.getRequiredParameter(0, String.class);
try {
final PrivateTransaction privateTransaction =
PrivateTransaction.readFrom(RLP.input(Bytes.fromHexString(rawPrivateTransaction)));
final String enclavePublicKey =
enclavePublicKeyProvider.getEnclaveKey(requestContext.getUser());
if (!privateTransaction.getPrivateFrom().equals(Bytes.fromBase64String(enclavePublicKey))) {
return new JsonRpcErrorResponse(id, PRIVATE_FROM_DOES_NOT_MATCH_ENCLAVE_PUBLIC_KEY);
}
Optional<PrivacyGroup> maybePrivacyGroup = null;
final Optional<Bytes> maybePrivacyGroupId = privateTransaction.getPrivacyGroupId();
if (onchainPrivacyGroupsEnabled) {
if (!maybePrivacyGroupId.isPresent()) {
return new JsonRpcErrorResponse(id, JsonRpcError.ONCHAIN_PRIVACY_GROUP_ID_NOT_AVAILABLE);
}
maybePrivacyGroup =
privacyController.retrieveOnChainPrivacyGroup(
maybePrivacyGroupId.get(), enclavePublicKey);
if (maybePrivacyGroup.isEmpty()
&& !privacyController.isGroupAdditionTransaction(privateTransaction)) {
return new JsonRpcErrorResponse(id, JsonRpcError.ONCHAIN_PRIVACY_GROUP_DOES_NOT_EXIST);
}
      } else { // !onchainPrivacyGroupEnabled
if (maybePrivacyGroupId.isPresent()) {
maybePrivacyGroup =
privacyController.retrieveOffChainPrivacyGroup(
maybePrivacyGroupId.get().toBase64String(), enclavePublicKey);
} else {
maybePrivacyGroup = Optional.empty();
}
}
final ValidationResult<TransactionInvalidReason> validationResult =
privacyController.validatePrivateTransaction(privateTransaction, enclavePublicKey);
if (!validationResult.isValid()) {
return new JsonRpcErrorResponse(
id, convertTransactionInvalidReason(validationResult.getInvalidReason()));
}
final String enclaveKey =
privacyController.sendTransaction(
privateTransaction, enclavePublicKey, maybePrivacyGroup);
if (onchainPrivacyGroupsEnabled) {
final Bytes privacyGroupId =
maybePrivacyGroupId.orElseThrow(
() ->
new RuntimeException(
JsonRpcError.OFFCHAIN_PRIVACY_GROUP_DOES_NOT_EXIST.getMessage()));
final Optional<String> addPayloadEnclaveKey =
privacyController.buildAndSendAddPayload(
privateTransaction,
Bytes32.wrap(privacyGroupId),
enclavePublicKeyProvider.getEnclaveKey(requestContext.getUser()));
return createPMTAndAddToTxPool(
id,
privateTransaction,
buildCompoundKey(enclaveKey, addPayloadEnclaveKey),
Address.ONCHAIN_PRIVACY);
} else { // legacy or pantheon transaction
return createPMTAndAddToTxPool(id, privateTransaction, enclaveKey, Address.DEFAULT_PRIVACY);
}
} catch (final IllegalArgumentException | RLPException e) {
return new JsonRpcErrorResponse(id, DECODE_ERROR);
} catch (final Exception e) {
final String message = e.getMessage();
return new JsonRpcErrorResponse(id, convertEnclaveInvalidReason(message));
}
}
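  // Wraps the given enclave payload in a privacy marker transaction and submits it to the
  // transaction pool, returning the PMT hash on success or a JSON-RPC error response otherwise.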
JsonRpcResponse createPMTAndAddToTxPool(
final Object id,
final PrivateTransaction privateTransaction,
final String payload,
final Address privacyPrecompileAddress) {
final Transaction privacyMarkerTransaction;
privacyMarkerTransaction =
privacyController.createPrivacyMarkerTransaction(
payload, privateTransaction, privacyPrecompileAddress);
return transactionPool
.addLocalTransaction(privacyMarkerTransaction)
.either(
() -> new JsonRpcSuccessResponse(id, privacyMarkerTransaction.getHash().toString()),
errorReason -> getJsonRpcErrorResponse(id, errorReason));
}
JsonRpcErrorResponse getJsonRpcErrorResponse(
final Object id, final TransactionInvalidReason errorReason) {
if (errorReason.equals(TransactionInvalidReason.INTRINSIC_GAS_EXCEEDS_GAS_LIMIT)) {
return new JsonRpcErrorResponse(id, JsonRpcError.PMT_FAILED_INTRINSIC_GAS_EXCEEDS_LIMIT);
}
return new JsonRpcErrorResponse(id, convertTransactionInvalidReason(errorReason));
}
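  // Concatenates the enclave key of the private transaction with the enclave key of the
  // add payload (when present) into a single base64 string; otherwise returns the original key.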
private String buildCompoundKey(
final String enclaveKey, final Optional<String> addPayloadEnclaveKey) {
return addPayloadEnclaveKey.isPresent()
? Bytes.concatenate(
Bytes.fromBase64String(enclaveKey),
Bytes.fromBase64String(addPayloadEnclaveKey.get()))
.toBase64String()
: enclaveKey;
}
}
| 1 | 23,257 | I feel like this would be easier to read if we join the two if's together: `if (maybePrivacyGroup.isEmpty() && !privacyController.isGroupAdditionTransaction(privateTransaction))` | hyperledger-besu | java |
@@ -567,7 +567,7 @@ public class DBOpenHelper extends SQLiteOpenHelper {
try {
result = new JSONObject(loadSoupBlobAsString(soupTableName, soupEntryId, passcode));
- } catch (JSONException ex) {
+ } catch (Exception ex) {
Log.e("DBOpenHelper:loadSoupBlob", "Exception occurred while attempting to read external soup blob.", ex);
}
return result; | 1 | /*
* Copyright (c) 2014-2015, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartstore.store;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.json.JSONException;
import org.json.JSONObject;
import com.salesforce.androidsdk.accounts.UserAccount;
import com.salesforce.androidsdk.security.Encryptor;
import net.sqlcipher.database.SQLiteDatabase;
import net.sqlcipher.database.SQLiteDatabaseHook;
import net.sqlcipher.database.SQLiteOpenHelper;
import android.content.Context;
import android.text.TextUtils;
import android.util.Log;
/**
* Helper class to manage SmartStore's database creation and version management.
*/
public class DBOpenHelper extends SQLiteOpenHelper {
// 1 --> up until 2.3
// 2 --> starting at 2.3 (new meta data table long_operations_status)
// 3 --> starting at 4.3 (soup_names table changes to soup_attr)
public static final int DB_VERSION = 3;
public static final String DEFAULT_DB_NAME = "smartstore";
public static final String SOUP_ELEMENT_PREFIX = "soupelt_";
private static final String DB_NAME_SUFFIX = ".db";
private static final String ORG_KEY_PREFIX = "00D";
private static final String EXTERNAL_BLOBS_SUFFIX = "_external_soup_blobs/";
private static String dataDir;
private String dbName;
/*
* Cache for the helper instances
*/
private static Map<String, DBOpenHelper> openHelpers = new HashMap<String, DBOpenHelper>();
/**
* Returns a map of all DBOpenHelper instances created. The key is the
* database name and the value is the instance itself.
*
* @return Map of DBOpenHelper instances.
*/
public static synchronized Map<String, DBOpenHelper> getOpenHelpers() {
return openHelpers;
}
/**
* Returns the DBOpenHelper instance associated with this user account.
*
* @param ctx Context.
* @param account User account.
* @return DBOpenHelper instance.
*/
public static synchronized DBOpenHelper getOpenHelper(Context ctx,
UserAccount account) {
return getOpenHelper(ctx, account, null);
}
/**
* Returns the DBOpenHelper instance associated with this user and community.
*
* @param ctx Context.
* @param account User account.
* @param communityId Community ID.
* @return DBOpenHelper instance.
*/
public static synchronized DBOpenHelper getOpenHelper(Context ctx,
UserAccount account, String communityId) {
return getOpenHelper(ctx, DEFAULT_DB_NAME, account, communityId);
}
/**
* Returns the DBOpenHelper instance for the given database name.
*
* @param ctx Context.
* @param dbNamePrefix The database name. This must be a valid file name without a
* filename extension such as ".db".
* @param account User account. If this method is called before authentication,
* we will simply return the smart store DB, which is not associated
* with any user account. Otherwise, we will return a unique
* database at the community level.
* @param communityId Community ID.
* @return DBOpenHelper instance.
*/
public static DBOpenHelper getOpenHelper(Context ctx, String dbNamePrefix,
UserAccount account, String communityId) {
final StringBuffer dbName = new StringBuffer(dbNamePrefix);
// If we have account information, we will use it to create a database suffix for the user.
if (account != null) {
// Default user path for a user is 'internal', if community ID is null.
final String accountSuffix = account.getCommunityLevelFilenameSuffix(communityId);
dbName.append(accountSuffix);
}
dbName.append(DB_NAME_SUFFIX);
final String fullDBName = dbName.toString();
DBOpenHelper helper = openHelpers.get(fullDBName);
if (helper == null) {
helper = new DBOpenHelper(ctx, fullDBName);
openHelpers.put(fullDBName, helper);
}
return helper;
}
protected DBOpenHelper(Context context, String dbName) {
super(context, dbName, null, DB_VERSION, new DBHook());
this.loadLibs(context);
this.dbName = dbName;
dataDir = context.getApplicationInfo().dataDir;
}
protected void loadLibs(Context context) {
SQLiteDatabase.loadLibs(context);
}
@Override
public void onCreate(SQLiteDatabase db) {
/*
* SQLCipher manages locking on the DB at a low level. However,
* we explicitly lock on the DB as well, for all SmartStore
* operations. This can lead to deadlocks or ReentrantLock
* exceptions where a thread is waiting for itself. Hence, we
* set the default SQLCipher locking to 'false', since we
* manage locking at our level anyway.
*/
db.setLockingEnabled(false);
SmartStore.createMetaTables(db);
}
@Override
public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
/*
* SQLCipher manages locking on the DB at a low level. However,
* we explicitly lock on the DB as well, for all SmartStore
* operations. This can lead to deadlocks or ReentrantLock
* exceptions where a thread is waiting for itself. Hence, we
* set the default SQLCipher locking to 'false', since we
* manage locking at our level anyway.
*/
db.setLockingEnabled(false);
if (oldVersion == 1) {
SmartStore.createLongOperationsStatusTable(db);
}
if (oldVersion < 3) {
// DB versions before 3 used soup_names, which has changed to soup_attrs
SmartStore.updateTableNameAndAddColumns(db, SmartStore.SOUP_NAMES_TABLE, SmartStore.SOUP_ATTRS_TABLE, new String[] { SoupSpec.FEATURE_EXTERNAL_STORAGE });
}
}
@Override
@SuppressWarnings("deprecation")
public void onOpen(SQLiteDatabase db) {
(new SmartStore(db)).resumeLongOperations();
}
/**
* Deletes the underlying database for the specified user account.
*
* @param ctx Context.
* @param account User account.
*/
public static synchronized void deleteDatabase(Context ctx, UserAccount account) {
deleteDatabase(ctx, account, null);
}
/**
* Deletes the underlying database for the specified user and community.
*
* @param ctx Context.
* @param account User account.
* @param communityId Community ID.
*/
public static synchronized void deleteDatabase(Context ctx, UserAccount account,
String communityId) {
deleteDatabase(ctx, DEFAULT_DB_NAME, account, communityId);
}
/**
* Deletes the underlying database for the specified user and community.
*
* @param ctx Context.
* @param dbNamePrefix The database name. This must be a valid file name without a
* filename extension such as ".db".
* @param account User account.
* @param communityId Community ID.
*/
public static synchronized void deleteDatabase(Context ctx, String dbNamePrefix,
UserAccount account, String communityId) {
try {
final StringBuffer dbName = new StringBuffer(dbNamePrefix);
// If we have account information, we will use it to create a database suffix for the user.
if (account != null) {
// Default user path for a user is 'internal', if community ID is null.
final String accountSuffix = account.getCommunityLevelFilenameSuffix(communityId);
dbName.append(accountSuffix);
}
dbName.append(DB_NAME_SUFFIX);
final String fullDBName = dbName.toString();
// Close and remove the helper from the cache if it exists.
final DBOpenHelper helper = openHelpers.get(fullDBName);
if (helper != null) {
helper.close();
openHelpers.remove(fullDBName);
}
// Physically delete the database from disk.
ctx.deleteDatabase(fullDBName);
// If community id was not passed in, then we remove ALL databases for the account.
if (account != null && TextUtils.isEmpty(communityId)) {
StringBuffer communityDBNamePrefix = new StringBuffer(dbNamePrefix);
String accountSuffix = account.getUserLevelFilenameSuffix();
communityDBNamePrefix.append(accountSuffix);
deleteFiles(ctx, communityDBNamePrefix.toString());
}
// Delete external blobs directory
StringBuilder blobsDbPath = new StringBuilder(ctx.getApplicationInfo().dataDir);
blobsDbPath.append("/databases/").append(fullDBName).append(EXTERNAL_BLOBS_SUFFIX);
removeAllFiles(new File(blobsDbPath.toString()));
} catch (Exception e) {
Log.e("DBOpenHelper:deleteDatabase", "Exception occurred while attempting to delete database.", e);
}
}
/**
* Deletes all remaining authenticated databases. We pass in the key prefix
* for an organization here, because all authenticated DBs will have to
* go against an org, which means the org ID will be a part of the DB name.
* This prevents the global DBs from being removed.
*
* @param ctx Context.
*/
public static synchronized void deleteAllUserDatabases(Context ctx) {
deleteFiles(ctx, ORG_KEY_PREFIX);
}
/**
* Determines if a smart store currently exists for the given account and/or community id.
*
* @param ctx Context.
* @param account User account.
* @param communityId Community ID.
* @return boolean indicating if a smartstore already exists.
*/
public static boolean smartStoreExists(Context ctx, UserAccount account,
String communityId) {
return smartStoreExists(ctx, DEFAULT_DB_NAME, account, communityId);
}
/**
* Determines if a smart store currently exists for the given database name, account
* and/or community id.
*
* @param ctx Context.
* @param dbNamePrefix The database name. This must be a valid file name without a
* filename extension such as ".db".
* @param account User account.
* @param communityId Community ID.
* @return boolean indicating if a smartstore already exists.
*/
public static boolean smartStoreExists(Context ctx, String dbNamePrefix,
UserAccount account, String communityId) {
final StringBuffer dbName = new StringBuffer(dbNamePrefix);
if (account != null) {
final String dbSuffix = account.getCommunityLevelFilenameSuffix(communityId);
dbName.append(dbSuffix);
}
dbName.append(DB_NAME_SUFFIX);
return ctx.getDatabasePath(dbName.toString()).exists();
}
static class DBHook implements SQLiteDatabaseHook {
public void preKey(SQLiteDatabase database) {
database.execSQL("PRAGMA cipher_default_kdf_iter = '4000'");
// the new default for sqlcipher 3.x (64000) is too slow
// also that way we can open 2.x databases without any migration
}
public void postKey(SQLiteDatabase database) {
}
};
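	/**
	 * Deletes all database files in this app's databases directory whose names contain
	 * the given prefix, and removes their cached DBOpenHelper instances.
	 */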
private static void deleteFiles(Context ctx, String prefix) {
final String dbPath = ctx.getApplicationInfo().dataDir + "/databases";
final File dir = new File(dbPath);
if (dir != null) {
final SmartStoreFileFilter fileFilter = new SmartStoreFileFilter(prefix);
final File[] fileList = dir.listFiles();
if (fileList != null) {
for (final File file : fileList) {
if (file != null && fileFilter.accept(dir, file.getName())) {
file.delete();
openHelpers.remove(file.getName());
}
}
}
}
}
/**
* This class acts as a filter to identify only the relevant SmartStore files.
*
* @author bhariharan
*/
private static class SmartStoreFileFilter implements FilenameFilter {
private String dbNamePrefix;
/**
* Parameterized constructor.
*
* @param dbNamePrefix Database name prefix pattern.
*/
public SmartStoreFileFilter(String dbNamePrefix) {
this.dbNamePrefix = dbNamePrefix;
}
@Override
public boolean accept(File dir, String filename) {
if (filename != null && filename.contains(dbNamePrefix)) {
return true;
}
return false;
}
}
/**
* Returns the path to external blobs folder for the given soup in this db. If no soup is provided, the db folder is returned.
*
* @param soupTableName Name of the soup for which to get external blobs folder.
*
* @return Path to external blobs folder for the given soup. If no soup is provided, the parent directory is returned.
*/
public String getExternalSoupBlobsPath(String soupTableName) {
StringBuilder path = new StringBuilder(dataDir);
path.append("/databases/").append(dbName).append(EXTERNAL_BLOBS_SUFFIX);
if (soupTableName != null) {
path.append(soupTableName).append('/');
}
return path.toString();
}
/**
* Recursively determines size of all files in the given subdirectory of the soup storage.
*
* @param subDir Subdirectory to determine size of. Use null for top-level directory.
*
* @return Size of all files in all subdirectories.
*/
public int getSizeOfDir(File subDir) {
int size = 0;
if (subDir == null) {
// Top level directory
subDir = new File(getExternalSoupBlobsPath(null));
}
if (subDir.exists()) {
File[] files = subDir.listFiles();
if (files != null) {
for (File file : files) {
if (file.isFile()) {
size += file.length();
} else {
size += getSizeOfDir(file);
}
}
}
}
return size;
}
/**
* Removes all files and folders in the given directory recursively as well as removes itself.
*
* @param dir Directory to remove all files and folders recursively.
* @return True if all delete operations were successful. False otherwise.
*/
public static boolean removeAllFiles(File dir) {
if (dir != null && dir.exists()) {
boolean success = true;
File[] files = dir.listFiles();
if (files != null) {
for (File file : files) {
if (file.isFile()) {
success &= file.delete();
} else {
success &= removeAllFiles(file);
}
}
}
success &= dir.delete();
return success;
} else {
return false;
}
}
/**
* Creates the folder for external blobs for the given soup name.
*
* @param soupTableName Soup for which to create the external blobs folder
*
* @return True if directory was created, false otherwise.
*/
public boolean createExternalBlobsDirectory(String soupTableName) {
File blobsDirectory = new File(getExternalSoupBlobsPath(soupTableName));
return blobsDirectory.mkdirs();
}
/**
* Removes the folder for external blobs for the given soup name.
*
* @param soupTableName Soup for which to remove the external blobs folder
*
* @return True if directory was removed, false otherwise.
*/
public boolean removeExternalBlobsDirectory(String soupTableName) {
if (dataDir != null) {
return removeAllFiles(new File(getExternalSoupBlobsPath(soupTableName)));
} else {
return false;
}
}
/**
* Re-encrypts the files on external storage with the new key. If external storage is not enabled for any table in the db, this operation is ignored.
*
* @param db DB containing external storage (if applicable).
* @param oldKey Old key with which to decrypt the existing data.
* @param newKey New key with which to encrypt the existing data.
*/
public static void reEncryptAllFiles(SQLiteDatabase db, String oldKey, String newKey) {
StringBuilder path = new StringBuilder(db.getPath()).append(EXTERNAL_BLOBS_SUFFIX);
File dir = new File(path.toString());
if (dir.exists()) {
File[] tables = dir.listFiles();
if (tables != null) {
for (File table : tables) {
File[] blobs = table.listFiles();
if (blobs != null) {
for (File blob : blobs) {
StringBuilder json = new StringBuilder();
String result = null;
try {
BufferedReader br = new BufferedReader(new FileReader(blob));
String line;
while ((line = br.readLine()) != null) {
json.append(line).append('\n');
}
br.close();
result = Encryptor.decrypt(json.toString(), oldKey);
blob.delete();
FileOutputStream outputStream = new FileOutputStream(blob, false);
outputStream.write(Encryptor.encrypt(result, newKey).getBytes());
outputStream.close();
} catch (IOException ex) {
Log.e("DBOpenHelper:reEncryptAllFiles", "Exception occurred while rekeying external files.", ex);
}
}
}
}
}
}
}
/**
* Places the soup blob on file storage. The name and folder are determined by the soup and soup entry id.
*
* @param soupTableName Name of the soup that the blob belongs to.
* @param soupEntryId Entry id for the soup blob.
* @param soupElt Blob to store on file storage in JSON format.
* @param passcode Key with which to encrypt the data.
*
* @return True if operation was successful, false otherwise.
*/
public boolean saveSoupBlob(String soupTableName, long soupEntryId, JSONObject soupElt, String passcode) {
return saveSoupBlobFromString(soupTableName, soupEntryId, soupElt.toString(), passcode);
}
/**
* Places the soup blob on file storage. The name and folder are determined by the soup and soup entry id.
*
* @param soupTableName Name of the soup that the blob belongs to.
* @param soupEntryId Entry id for the soup blob.
* @param soupEltStr Blob to store on file storage as a String.
* @param passcode Key with which to encrypt the data.
*
* @return True if operation was successful, false otherwise.
*/
public boolean saveSoupBlobFromString(String soupTableName, long soupEntryId, String soupEltStr, String passcode) {
FileOutputStream outputStream;
File file = getSoupBlobFile(soupTableName, soupEntryId);
try {
outputStream = new FileOutputStream(file, false);
outputStream.write(Encryptor.encrypt(soupEltStr, passcode).getBytes());
outputStream.close();
return true;
} catch (IOException ex) {
Log.e("DBOpenHelper:saveSoupBlob", "Exception occurred while attempting to write external soup blob.", ex);
}
return false;
}
/**
* Retrieves the soup blob for the given soup entry id from file storage.
*
* @param soupTableName Soup name to which the blob belongs.
* @param soupEntryId Entry id for the requested soup blob.
* @param passcode Key with which to decrypt the data.
*
* @return The blob from file storage represented as JSON. Returns null if there was an error.
*/
public JSONObject loadSoupBlob(String soupTableName, long soupEntryId, String passcode) {
JSONObject result = null;
try {
result = new JSONObject(loadSoupBlobAsString(soupTableName, soupEntryId, passcode));
} catch (JSONException ex) {
Log.e("DBOpenHelper:loadSoupBlob", "Exception occurred while attempting to read external soup blob.", ex);
}
return result;
}
/**
* Retrieves the soup blob for the given soup entry id from file storage.
*
* @param soupTableName Soup name to which the blob belongs.
* @param soupEntryId Entry id for the requested soup blob.
* @param passcode Key with which to decrypt the data.
*
* @return The blob from file storage represented as String. Returns null if there was an error.
*/
public String loadSoupBlobAsString(String soupTableName, long soupEntryId, String passcode) {
File file = getSoupBlobFile(soupTableName, soupEntryId);
StringBuilder json = new StringBuilder();
String result = null;
try {
BufferedReader br = new BufferedReader(new FileReader(file));
String line;
while ((line = br.readLine()) != null) {
json.append(line).append('\n');
}
br.close();
result = Encryptor.decrypt(json.toString(), passcode);
} catch (IOException ex) {
Log.e("DBOpenHelper:loadSoupBlob", "Exception occurred while attempting to read external soup blob.", ex);
}
return result;
}
/**
* Removes the blobs represented by the given list of soup entry ids from external storage.
*
* @param soupTableName Soup name to which the blobs belong.
* @param soupEntryIds List of soup entry ids to delete.
*
* @return True if all soup entry ids were deleted, false if blob could not be found or had an error.
*/
public boolean removeSoupBlob(String soupTableName, Long[] soupEntryIds) {
File file;
boolean success = true;
for (long soupEntryId : soupEntryIds) {
file = getSoupBlobFile(soupTableName, soupEntryId);
success &= file.delete();
}
return success;
}
/**
* Returns a file that the soup data is stored in for the given soup name and entry id.
*
* @param soupTableName Soup name to which the blob belongs.
* @param soupEntryId Entry id for the requested soup blob.
*
* @return A File representing the soup blob in external storage.
*/
public File getSoupBlobFile(String soupTableName, long soupEntryId) {
return new File(getExternalSoupBlobsPath(soupTableName), SOUP_ELEMENT_PREFIX + soupEntryId);
}
}
 | 1 | 15,553 | Tests are failing because the exception type is NullPointerException here if the soupBlob is deleted (so instead of a malformed JSON, it's null) | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -439,12 +439,8 @@ Blockly.Connection.prototype.isConnectionAllowed = function(candidate) {
break;
}
case Blockly.OUTPUT_VALUE: {
- // Don't offer to connect an already connected left (male) value plug to
- // an available right (female) value plug.
- if (candidate.targetConnection || this.targetConnection) {
- return false;
- }
- break;
+ // Can't drag an input to an output--you have to move the inferior block.
+ return false;
}
case Blockly.INPUT_VALUE: {
// Offering to connect the left (male) of a value block to an already | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2011 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Components for creating connections between blocks.
* @author [email protected] (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.Connection');
goog.require('goog.asserts');
goog.require('goog.dom');
/**
* Class for a connection between blocks.
* @param {!Blockly.Block} source The block establishing this connection.
* @param {number} type The type of the connection.
* @constructor
*/
Blockly.Connection = function(source, type) {
/**
* @type {!Blockly.Block}
* @private
*/
this.sourceBlock_ = source;
/** @type {number} */
this.type = type;
// Shortcut for the databases for this connection's workspace.
if (source.workspace.connectionDBList) {
this.db_ = source.workspace.connectionDBList[type];
this.dbOpposite_ =
source.workspace.connectionDBList[Blockly.OPPOSITE_TYPE[type]];
this.hidden_ = !this.db_;
}
};
/**
* Constant for identifying connections that accept a boolean.
* @const
*/
Blockly.Connection.BOOLEAN = 1;
/**
* Constant for identifying connections that accept a string.
* @const
*/
Blockly.Connection.STRING = 2;
/**
* Constant for identifying connections that accept a number OR null.
* @const
*/
Blockly.Connection.NUMBER = 3;
/**
* Constants for checking whether two connections are compatible.
*/
Blockly.Connection.CAN_CONNECT = 0;
Blockly.Connection.REASON_SELF_CONNECTION = 1;
Blockly.Connection.REASON_WRONG_TYPE = 2;
Blockly.Connection.REASON_TARGET_NULL = 3;
Blockly.Connection.REASON_CHECKS_FAILED = 4;
Blockly.Connection.REASON_DIFFERENT_WORKSPACES = 5;
Blockly.Connection.REASON_SHADOW_PARENT = 6;
/**
* Connection this connection connects to. Null if not connected.
* @type {Blockly.Connection}
*/
Blockly.Connection.prototype.targetConnection = null;
/**
* List of compatible value types. Null if all types are compatible.
* @type {Array}
* @private
*/
Blockly.Connection.prototype.check_ = null;
/**
* DOM representation of a shadow block, or null if none.
* @type {Element}
* @private
*/
Blockly.Connection.prototype.shadowDom_ = null;
/**
* Horizontal location of this connection.
* @type {number}
* @private
*/
Blockly.Connection.prototype.x_ = 0;
/**
* Vertical location of this connection.
* @type {number}
* @private
*/
Blockly.Connection.prototype.y_ = 0;
/**
* Has this connection been added to the connection database?
* @type {boolean}
* @private
*/
Blockly.Connection.prototype.inDB_ = false;
/**
* Connection database for connections of this type on the current workspace.
* @type {Blockly.ConnectionDB}
* @private
*/
Blockly.Connection.prototype.db_ = null;
/**
* Connection database for connections compatible with this type on the
* current workspace.
* @type {Blockly.ConnectionDB}
* @private
*/
Blockly.Connection.prototype.dbOpposite_ = null;
/**
 * Whether this connection is hidden (not tracked in a database) or not.
* @type {boolean}
* @private
*/
Blockly.Connection.prototype.hidden_ = null;
/**
* Connect two connections together. This is the connection on the superior
* block.
* @param {!Blockly.Connection} childConnection Connection on inferior block.
* @private
*/
Blockly.Connection.prototype.connect_ = function(childConnection) {
var parentConnection = this;
var parentBlock = parentConnection.getSourceBlock();
var childBlock = childConnection.getSourceBlock();
var isSurroundingC = false;
if (parentConnection == parentBlock.getFirstStatementConnection()) {
isSurroundingC = true;
}
// Disconnect any existing parent on the child connection.
if (childConnection.isConnected()) {
// Scratch-specific behaviour:
// If we're using a c-shaped block to surround a stack, remember where the
// stack used to be connected.
if (isSurroundingC) {
var previousParentConnection = childConnection.targetConnection;
}
childConnection.disconnect();
}
if (parentConnection.isConnected()) {
// Other connection is already connected to something.
// Disconnect it and reattach it or bump it as needed.
var orphanBlock = parentConnection.targetBlock();
var shadowDom = parentConnection.getShadowDom();
// Temporarily set the shadow DOM to null so it does not respawn.
parentConnection.setShadowDom(null);
// Displaced shadow blocks dissolve rather than reattaching or bumping.
if (orphanBlock.isShadow()) {
// Save the shadow block so that field values are preserved.
shadowDom = Blockly.Xml.blockToDom(orphanBlock);
orphanBlock.dispose();
orphanBlock = null;
} else if (parentConnection.type == Blockly.INPUT_VALUE) {
// Value connections.
// If female block is already connected, disconnect and bump the male.
if (!orphanBlock.outputConnection) {
throw 'Orphan block does not have an output connection.';
}
// Attempt to reattach the orphan at the end of the newly inserted
// block. Since this block may be a row, walk down to the end
// or to the first (and only) shadow block.
var connection = Blockly.Connection.lastConnectionInRow_(
childBlock, orphanBlock);
if (connection) {
orphanBlock.outputConnection.connect(connection);
orphanBlock = null;
}
} else if (parentConnection.type == Blockly.NEXT_STATEMENT) {
// Statement connections.
// Statement blocks may be inserted into the middle of a stack.
// Split the stack.
if (!orphanBlock.previousConnection) {
throw 'Orphan block does not have a previous connection.';
}
// Attempt to reattach the orphan at the bottom of the newly inserted
// block. Since this block may be a stack, walk down to the end.
var newBlock = childBlock;
while (newBlock.nextConnection) {
if (newBlock.nextConnection.isConnected()) {
newBlock = newBlock.getNextBlock();
} else {
if (orphanBlock.previousConnection.checkType_(
newBlock.nextConnection)) {
newBlock.nextConnection.connect(orphanBlock.previousConnection);
orphanBlock = null;
}
break;
}
}
}
if (orphanBlock) {
// Unable to reattach orphan.
parentConnection.disconnect();
if (Blockly.Events.recordUndo) {
// Bump it off to the side after a moment.
var group = Blockly.Events.getGroup();
setTimeout(function() {
          // Verify the orphan hasn't been deleted or reconnected in the meantime.
if (orphanBlock.workspace && !orphanBlock.getParent()) {
Blockly.Events.setGroup(group);
if (orphanBlock.outputConnection) {
orphanBlock.outputConnection.bumpAwayFrom_(parentConnection);
} else if (orphanBlock.previousConnection) {
orphanBlock.previousConnection.bumpAwayFrom_(parentConnection);
}
Blockly.Events.setGroup(false);
}
}, Blockly.BUMP_DELAY);
}
}
// Restore the shadow DOM.
parentConnection.setShadowDom(shadowDom);
}
if (isSurroundingC && previousParentConnection) {
previousParentConnection.connect(parentBlock.previousConnection);
}
var event;
if (Blockly.Events.isEnabled()) {
event = new Blockly.Events.Move(childBlock);
}
// Establish the connections.
Blockly.Connection.connectReciprocally_(parentConnection, childConnection);
// Demote the inferior block so that one is a child of the superior one.
childBlock.setParent(parentBlock);
if (event) {
event.recordNew();
Blockly.Events.fire(event);
}
};
/**
* Sever all links to this connection (not including from the source object).
*/
Blockly.Connection.prototype.dispose = function() {
if (this.isConnected()) {
throw 'Disconnect connection before disposing of it.';
}
if (this.inDB_) {
this.db_.removeConnection_(this);
}
if (Blockly.highlightedConnection_ == this) {
Blockly.highlightedConnection_ = null;
}
if (Blockly.localConnection_ == this) {
Blockly.localConnection_ = null;
}
this.db_ = null;
this.dbOpposite_ = null;
};
/**
 * @return {boolean} True if this connection is connected to a block that is
 *     not an insertion marker, false otherwise.
*/
Blockly.Connection.prototype.isConnectedToNonInsertionMarker = function() {
return this.targetConnection && !this.targetBlock().isInsertionMarker();
};
/**
* Get the source block for this connection.
* @return {Blockly.Block} The source block, or null if there is none.
*/
Blockly.Connection.prototype.getSourceBlock = function() {
return this.sourceBlock_;
};
/**
* Does the connection belong to a superior block (higher in the source stack)?
* @return {boolean} True if connection faces down or right.
*/
Blockly.Connection.prototype.isSuperior = function() {
return this.type == Blockly.INPUT_VALUE ||
this.type == Blockly.NEXT_STATEMENT;
};
/**
* Is the connection connected?
* @return {boolean} True if connection is connected to another connection.
*/
Blockly.Connection.prototype.isConnected = function() {
return !!this.targetConnection;
};
/**
* Checks whether the current connection can connect with the target
* connection.
* @param {Blockly.Connection} target Connection to check compatibility with.
* @return {number} Blockly.Connection.CAN_CONNECT if the connection is legal,
* an error code otherwise.
* @private
*/
Blockly.Connection.prototype.canConnectWithReason_ = function(target) {
if (this.isSuperior()) {
var blockA = this.sourceBlock_;
var blockB = target.getSourceBlock();
} else {
var blockB = this.sourceBlock_;
var blockA = target.getSourceBlock();
}
if (!target) {
return Blockly.Connection.REASON_TARGET_NULL;
} else if (blockA && blockA == blockB) {
return Blockly.Connection.REASON_SELF_CONNECTION;
} else if (target.type != Blockly.OPPOSITE_TYPE[this.type]) {
return Blockly.Connection.REASON_WRONG_TYPE;
} else if (blockA && blockB && blockA.workspace !== blockB.workspace) {
return Blockly.Connection.REASON_DIFFERENT_WORKSPACES;
} else if (!this.checkType_(target)) {
return Blockly.Connection.REASON_CHECKS_FAILED;
} else if (blockA.isShadow() && !blockB.isShadow()) {
return Blockly.Connection.REASON_SHADOW_PARENT;
}
return Blockly.Connection.CAN_CONNECT;
};
/**
* Checks whether the current connection and target connection are compatible
* and throws an exception if they are not.
* @param {Blockly.Connection} target The connection to check compatibility
* with.
* @private
*/
Blockly.Connection.prototype.checkConnection_ = function(target) {
switch (this.canConnectWithReason_(target)) {
case Blockly.Connection.CAN_CONNECT:
break;
case Blockly.Connection.REASON_SELF_CONNECTION:
throw 'Attempted to connect a block to itself.';
case Blockly.Connection.REASON_DIFFERENT_WORKSPACES:
// Usually this means one block has been deleted.
throw 'Blocks not on same workspace.';
case Blockly.Connection.REASON_WRONG_TYPE:
throw 'Attempt to connect incompatible types.';
case Blockly.Connection.REASON_TARGET_NULL:
throw 'Target connection is null.';
case Blockly.Connection.REASON_CHECKS_FAILED:
throw 'Connection checks failed.';
case Blockly.Connection.REASON_SHADOW_PARENT:
throw 'Connecting non-shadow to shadow block.';
default:
throw 'Unknown connection failure: this should never happen!';
}
};
/**
* Check if the two connections can be dragged to connect to each other.
* This is used by the connection database when searching for the closest
* connection.
* @param {!Blockly.Connection} candidate A nearby connection to check.
* @return {boolean} True if the connection is allowed, false otherwise.
*/
Blockly.Connection.prototype.isConnectionAllowed = function(candidate) {
// Don't consider insertion markers.
if (candidate.sourceBlock_.isInsertionMarker()) {
return false;
}
// Type checking.
var canConnect = this.canConnectWithReason_(candidate);
if (canConnect != Blockly.Connection.CAN_CONNECT &&
canConnect != Blockly.Connection.REASON_MUST_DISCONNECT) {
return false;
}
var firstStatementConnection =
this.sourceBlock_.getFirstStatementConnection();
switch (candidate.type) {
case Blockly.PREVIOUS_STATEMENT: {
if (!firstStatementConnection || this != firstStatementConnection) {
if (this.targetConnection) {
return false;
}
if (candidate.targetConnection) {
// If the other side of this connection is the active insertion marker
// connection, we've obviously already decided that this is a good
// connection.
if (candidate.targetConnection ==
Blockly.insertionMarkerConnection_) {
return true;
} else {
return false;
}
}
}
// Scratch-specific behaviour:
// If this is a c-shaped block, statement blocks cannot be connected
// anywhere other than inside the first statement input.
if (firstStatementConnection) {
// Can't connect if there is already a block inside the first statement
// input.
if (this == firstStatementConnection) {
if (this.targetConnection) {
return false;
}
}
// Can't connect this block's next connection unless we're connecting
// in front of the first block on a stack.
else if (this == this.sourceBlock_.nextConnection &&
candidate.isConnectedToNonInsertionMarker()) {
return false;
}
}
break;
}
case Blockly.OUTPUT_VALUE: {
// Don't offer to connect an already connected left (male) value plug to
// an available right (female) value plug.
if (candidate.targetConnection || this.targetConnection) {
return false;
}
break;
}
case Blockly.INPUT_VALUE: {
// Offering to connect the left (male) of a value block to an already
// connected value pair is ok, we'll splice it in.
// However, don't offer to splice into an unmovable block.
if (candidate.targetConnection &&
!candidate.targetBlock().isMovable() &&
!candidate.targetBlock().isShadow()) {
return false;
}
break;
}
case Blockly.NEXT_STATEMENT: {
// Scratch-specific behaviour:
// If this is a c-block, we can't connect this block's
// previous connection unless we're connecting to the end of the last
// block on a stack or there's already a block connected inside the c.
if (firstStatementConnection &&
this == this.sourceBlock_.previousConnection &&
candidate.isConnectedToNonInsertionMarker() &&
!firstStatementConnection.targetConnection) {
return false;
}
// Don't let a block with no next connection bump other blocks out of the
// stack.
if (candidate.isConnectedToNonInsertionMarker() &&
!this.sourceBlock_.nextConnection) {
return false;
}
break;
}
default:
throw 'Unknown connection type in isConnectionAllowed';
}
// Don't let blocks try to connect to themselves or ones they nest.
if (Blockly.draggingConnections_.indexOf(candidate) != -1) {
return false;
}
return true;
};
/**
* Connect this connection to another connection.
* @param {!Blockly.Connection} otherConnection Connection to connect to.
*/
Blockly.Connection.prototype.connect = function(otherConnection) {
if (this.targetConnection == otherConnection) {
// Already connected together. NOP.
return;
}
this.checkConnection_(otherConnection);
// Determine which block is superior (higher in the source stack).
if (this.isSuperior()) {
// Superior block.
this.connect_(otherConnection);
} else {
// Inferior block.
otherConnection.connect_(this);
}
};
/**
* Update two connections to target each other.
* @param {Blockly.Connection} first The first connection to update.
 * @param {Blockly.Connection} second The second connection to update.
* @private
*/
Blockly.Connection.connectReciprocally_ = function(first, second) {
goog.asserts.assert(first && second, 'Cannot connect null connections.');
first.targetConnection = second;
second.targetConnection = first;
};
/**
* Does the given block have one and only one connection point that will accept
* an orphaned block?
* @param {!Blockly.Block} block The superior block.
* @param {!Blockly.Block} orphanBlock The inferior block.
* @return {Blockly.Connection} The suitable connection point on 'block',
* or null.
* @private
*/
Blockly.Connection.singleConnection_ = function(block, orphanBlock) {
var connection = false;
for (var i = 0; i < block.inputList.length; i++) {
var thisConnection = block.inputList[i].connection;
if (thisConnection && thisConnection.type == Blockly.INPUT_VALUE &&
orphanBlock.outputConnection.checkType_(thisConnection)) {
if (connection) {
return null; // More than one connection.
}
connection = thisConnection;
}
}
return connection;
};
/**
 * Walks down a row of blocks, at each stage checking if there are any
* connections that will accept the orphaned block. If at any point there
* are zero or multiple eligible connections, returns null. Otherwise
* returns the only input on the last block in the chain.
* Terminates early for shadow blocks.
* @param {!Blockly.Block} startBlock The block on which to start the search.
* @param {!Blockly.Block} orphanBlock The block that is looking for a home.
* @return {Blockly.Connection} The suitable connection point on the chain
* of blocks, or null.
* @private
*/
Blockly.Connection.lastConnectionInRow_ = function(startBlock, orphanBlock) {
var newBlock = startBlock;
var connection;
while (connection = Blockly.Connection.singleConnection_(
/** @type {!Blockly.Block} */ (newBlock), orphanBlock)) {
// '=' is intentional in line above.
newBlock = connection.targetBlock();
if (!newBlock || newBlock.isShadow()) {
return connection;
}
}
return null;
};
/**
* Disconnect this connection.
*/
Blockly.Connection.prototype.disconnect = function() {
var otherConnection = this.targetConnection;
goog.asserts.assert(otherConnection, 'Source connection not connected.');
goog.asserts.assert(otherConnection.targetConnection == this,
'Target connection not connected to source connection.');
var parentBlock, childBlock, parentConnection;
if (this.isSuperior()) {
// Superior block.
parentBlock = this.sourceBlock_;
childBlock = otherConnection.getSourceBlock();
parentConnection = this;
} else {
// Inferior block.
parentBlock = otherConnection.getSourceBlock();
childBlock = this.sourceBlock_;
parentConnection = otherConnection;
}
this.disconnectInternal_(parentBlock, childBlock);
parentConnection.respawnShadow_();
};
/**
* Disconnect two blocks that are connected by this connection.
* @param {!Blockly.Block} parentBlock The superior block.
* @param {!Blockly.Block} childBlock The inferior block.
* @private
*/
Blockly.Connection.prototype.disconnectInternal_ = function(parentBlock,
childBlock) {
var event;
if (Blockly.Events.isEnabled()) {
event = new Blockly.Events.Move(childBlock);
}
var otherConnection = this.targetConnection;
otherConnection.targetConnection = null;
this.targetConnection = null;
childBlock.setParent(null);
if (event) {
event.recordNew();
Blockly.Events.fire(event);
}
};
/**
 * Respawn the shadow block if there was one connected to this connection.
* @return {Blockly.Block} The newly spawned shadow block, or null if none was
* spawned.
* @private
*/
Blockly.Connection.prototype.respawnShadow_ = function() {
var parentBlock = this.getSourceBlock();
var shadow = this.getShadowDom();
if (parentBlock.workspace && shadow && Blockly.Events.recordUndo) {
var blockShadow =
Blockly.Xml.domToBlock(shadow, parentBlock.workspace);
if (blockShadow.outputConnection) {
this.connect(blockShadow.outputConnection);
} else if (blockShadow.previousConnection) {
this.connect(blockShadow.previousConnection);
} else {
throw 'Child block does not have output or previous statement.';
}
return blockShadow;
}
return null;
};
/**
* Returns the block that this connection connects to.
* @return {Blockly.Block} The connected block or null if none is connected.
*/
Blockly.Connection.prototype.targetBlock = function() {
if (this.isConnected()) {
return this.targetConnection.getSourceBlock();
}
return null;
};
/**
* Is this connection compatible with another connection with respect to the
* value type system. E.g. square_root("Hello") is not compatible.
* @param {!Blockly.Connection} otherConnection Connection to compare against.
* @return {boolean} True if the connections share a type.
* @private
*/
Blockly.Connection.prototype.checkType_ = function(otherConnection) {
if (!this.check_ || !otherConnection.check_) {
// One or both sides are promiscuous enough that anything will fit.
return true;
}
// Find any intersection in the check lists.
for (var i = 0; i < this.check_.length; i++) {
if (otherConnection.check_.indexOf(this.check_[i]) != -1) {
return true;
}
}
// No intersection.
return false;
};
/**
* Change a connection's compatibility.
* @param {*} check Compatible value type or list of value types.
* Null if all types are compatible.
* @return {!Blockly.Connection} The connection being modified
* (to allow chaining).
*/
Blockly.Connection.prototype.setCheck = function(check) {
if (check) {
// Ensure that check is in an array.
if (!goog.isArray(check)) {
check = [check];
}
this.check_ = check;
// The new value type may not be compatible with the existing connection.
if (this.isConnected() && !this.checkType_(this.targetConnection)) {
var child = this.isSuperior() ? this.targetBlock() : this.sourceBlock_;
child.unplug();
// Bump away.
this.sourceBlock_.bumpNeighbours_();
}
} else {
this.check_ = null;
}
return this;
};
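// For illustration: given some value input (call it someInput), calling
// someInput.connection.setCheck(['Number', 'String']) restricts it so that only
// blocks whose output check list shares one of those types can connect, and
// setCheck(null) makes the input accept any type again.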
/**
* Returns a shape enum for this connection.
* @return {number} Enum representing shape.
*/
Blockly.Connection.prototype.getOutputShape = function() {
if (!this.check_) return Blockly.Connection.NUMBER;
if (this.check_.indexOf('Boolean') !== -1) {
return Blockly.Connection.BOOLEAN;
}
if (this.check_.indexOf('String') !== -1) {
return Blockly.Connection.STRING;
}
return Blockly.Connection.NUMBER;
};
/**
* Change a connection's shadow block.
* @param {Element} shadow DOM representation of a block or null.
*/
Blockly.Connection.prototype.setShadowDom = function(shadow) {
this.shadowDom_ = shadow;
};
/**
* Return a connection's shadow block.
* @return {Element} shadow DOM representation of a block or null.
*/
Blockly.Connection.prototype.getShadowDom = function() {
return this.shadowDom_;
};
| 1 | 7,848 | Glad we fixed this as well. Blockly is going to be left as-is for this case, right? | LLK-scratch-blocks | js |
@@ -36,9 +36,7 @@ import java.util.Locale;
* This class represents a typical Salesforce object.
*
* @author bhariharan
- * @deprecated Will be removed in Mobile SDK 7.0.
*/
-@Deprecated
public class SalesforceObject {
protected String objectType; | 1 | /*
* Copyright (c) 2014-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartsync.model;
import com.salesforce.androidsdk.smartsync.util.Constants;
import org.json.JSONObject;
import java.util.Locale;
/**
* This class represents a typical Salesforce object.
*
* @author bhariharan
* @deprecated Will be removed in Mobile SDK 7.0.
*/
@Deprecated
public class SalesforceObject {
protected String objectType;
protected String name;
protected String objectId;
protected final JSONObject rawData;
/**
* Parameterized constructor.
*
* @param object Raw data for object.
* @deprecated Will be removed in Mobile SDK 7.0.
*/
@Deprecated
public SalesforceObject(JSONObject object) {
objectId = object.optString(Constants.ID);
if (objectId == null || Constants.EMPTY_STRING.equals(objectId)) {
objectId = object.optString(Constants.ID.toLowerCase(Locale.US));
objectType = object.optString(Constants.TYPE.toLowerCase(Locale.US));
name = object.optString(Constants.NAME.toLowerCase(Locale.US));
} else {
name = object.optString(Constants.NAME);
final JSONObject attributes = object.optJSONObject(Constants.ATTRIBUTES);
if (attributes != null) {
objectType = attributes.optString(Constants.TYPE.toLowerCase(Locale.US));
if (objectType == null || Constants.RECENTLY_VIEWED.equals(objectType)
|| Constants.NULL_STRING.equals(objectType)) {
objectType = object.optString(Constants.TYPE);
}
}
}
rawData = object;
}
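    // For illustration (assuming Constants.ID/NAME/ATTRIBUTES/TYPE map to the
    // standard "Id"/"Name"/"attributes"/"type" keys), a typical input looks like:
    //   {"Id": "001...", "Name": "Acme", "attributes": {"type": "Account"}}
    // The lowercase fallbacks above handle results that use "id"/"type"/"name" instead.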
/**
* Returns the object type.
*
* @return Object type.
* @deprecated Will be removed in Mobile SDK 7.0.
*/
@Deprecated
public String getObjectType() {
return objectType;
}
/**
* Sets the object type.
*
* @param objectType Object type.
* @deprecated Will be removed in Mobile SDK 7.0.
*/
@Deprecated
public void setObjectType(String objectType) {
this.objectType = objectType;
}
/**
* Returns the name.
*
* @return Name.
* @deprecated Will be removed in Mobile SDK 7.0.
*/
@Deprecated
public String getName() {
return name;
}
/**
* Sets the object name.
*
* @param name Object name.
* @deprecated Will be removed in Mobile SDK 7.0.
*/
@Deprecated
public void setName(String name) {
this.name = name;
}
/**
* Returns the object ID.
*
* @return Object ID.
* @deprecated Will be removed in Mobile SDK 7.0.
*/
@Deprecated
public String getObjectId() {
return objectId;
}
/**
* Returns the complete metadata.
*
* @return Complete metadata.
* @deprecated Will be removed in Mobile SDK 7.0.
*/
@Deprecated
public JSONObject getRawData() {
return rawData;
}
@Override
public String toString() {
return String.format("name: [%s], objectId: [%s], type: [%s], rawData: " +
"[%s]", name, objectId, objectType, rawData);
}
@Override
public boolean equals(Object object) {
if (object == null || !(object instanceof SalesforceObject)) {
return false;
}
final SalesforceObject obj = (SalesforceObject) object;
if (objectId == null || obj.getObjectId() == null || !objectId.equals(obj.getObjectId())) {
return false;
}
if (name == null || obj.getName() == null || !name.equals(obj.getName())) {
return false;
}
if (objectType == null || obj.getObjectType() == null || !objectType.equals(obj.getObjectType())) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = objectId.hashCode();
result ^= rawData.hashCode() + result * 37;
return result;
}
}
| 1 | 16,812 | I had accidentally deprecated this class. This is meant to stick around. Only `SalesforceObjectType` goes away. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -208,6 +208,7 @@ def rev_hex(s):
def int_to_hex(i, length=1):
assert isinstance(i, int)
s = hex(i)[2:].rstrip('L')
+ s = s.lstrip('0x')
s = "0"*(2*length - len(s)) + s
return rev_hex(s)
| 1 | # -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import base64
import hmac
import os
import json
import ecdsa
import pyaes
from .util import bfh, bh2u, to_string
from . import version
from .util import print_error, InvalidPassword, assert_bytes, to_bytes, inv_dict
from . import segwit_addr
def read_json(filename, default):
path = os.path.join(os.path.dirname(__file__), filename)
try:
with open(path, 'r') as f:
r = json.loads(f.read())
except:
r = default
return r
# Version numbers for BIP32 extended keys
# standard: xprv, xpub
# segwit in p2sh: yprv, ypub
# native segwit: zprv, zpub
XPRV_HEADERS = {
'standard': 0x0488ade4,
'p2wpkh-p2sh': 0x049d7878,
'p2wsh-p2sh': 0x295b005,
'p2wpkh': 0x4b2430c,
'p2wsh': 0x2aa7a99
}
XPUB_HEADERS = {
'standard': 0x0488b21e,
'p2wpkh-p2sh': 0x049d7cb2,
'p2wsh-p2sh': 0x295b43f,
'p2wpkh': 0x4b24746,
'p2wsh': 0x2aa7ed3
}
class NetworkConstants:
@classmethod
def set_mainnet(cls):
cls.TESTNET = False
cls.WIF_PREFIX = 0x80
cls.ADDRTYPE_P2PKH = 0
cls.ADDRTYPE_P2SH = 5
cls.SEGWIT_HRP = "bc"
cls.GENESIS = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
cls.DEFAULT_PORTS = {'t': '50001', 's': '50002'}
cls.DEFAULT_SERVERS = read_json('servers.json', {})
cls.CHECKPOINTS = read_json('checkpoints.json', [])
@classmethod
def set_testnet(cls):
cls.TESTNET = True
cls.WIF_PREFIX = 0xef
cls.ADDRTYPE_P2PKH = 111
cls.ADDRTYPE_P2SH = 196
cls.SEGWIT_HRP = "tb"
cls.GENESIS = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943"
cls.DEFAULT_PORTS = {'t':'51001', 's':'51002'}
cls.DEFAULT_SERVERS = read_json('servers_testnet.json', {})
cls.CHECKPOINTS = read_json('checkpoints_testnet.json', [])
NetworkConstants.set_mainnet()
################################## transactions
FEE_STEP = 10000
MAX_FEE_RATE = 300000
COINBASE_MATURITY = 100
COIN = 100000000
# supported types of transction outputs
TYPE_ADDRESS = 0
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2
# AES encryption
try:
from Cryptodome.Cipher import AES
except:
AES = None
class InvalidPadding(Exception):
pass
def append_PKCS7_padding(data):
assert_bytes(data)
padlen = 16 - (len(data) % 16)
return data + bytes([padlen]) * padlen
def strip_PKCS7_padding(data):
assert_bytes(data)
if len(data) % 16 != 0 or len(data) == 0:
raise InvalidPadding("invalid length")
padlen = data[-1]
if padlen > 16:
raise InvalidPadding("invalid padding byte (large)")
for i in data[-padlen:]:
if i != padlen:
raise InvalidPadding("invalid padding byte (inconsistent)")
return data[0:-padlen]
def aes_encrypt_with_iv(key, iv, data):
assert_bytes(key, iv, data)
data = append_PKCS7_padding(data)
if AES:
e = AES.new(key, AES.MODE_CBC, iv).encrypt(data)
else:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Encrypter(aes_cbc, padding=pyaes.PADDING_NONE)
e = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
return e
def aes_decrypt_with_iv(key, iv, data):
assert_bytes(key, iv, data)
if AES:
cipher = AES.new(key, AES.MODE_CBC, iv)
data = cipher.decrypt(data)
else:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Decrypter(aes_cbc, padding=pyaes.PADDING_NONE)
data = aes.feed(data) + aes.feed() # empty aes.feed() flushes buffer
try:
return strip_PKCS7_padding(data)
except InvalidPadding:
raise InvalidPassword()
def EncodeAES(secret, s):
assert_bytes(s)
iv = bytes(os.urandom(16))
ct = aes_encrypt_with_iv(secret, iv, s)
e = iv + ct
return base64.b64encode(e)
def DecodeAES(secret, e):
e = bytes(base64.b64decode(e))
iv, e = e[:16], e[16:]
s = aes_decrypt_with_iv(secret, iv, e)
return s
def pw_encode(s, password):
if password:
secret = Hash(password)
return EncodeAES(secret, to_bytes(s, "utf8")).decode('utf8')
else:
return s
def pw_decode(s, password):
if password is not None:
secret = Hash(password)
try:
d = to_string(DecodeAES(secret, s), "utf8")
except Exception:
raise InvalidPassword()
return d
else:
return s
def rev_hex(s):
return bh2u(bfh(s)[::-1])
def int_to_hex(i, length=1):
assert isinstance(i, int)
s = hex(i)[2:].rstrip('L')
s = "0"*(2*length - len(s)) + s
return rev_hex(s)
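# For illustration: int_to_hex(1, 4) == '01000000' -- the little-endian hex
# encoding of the integer, padded to the requested byte length.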
def var_int(i):
# https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
if i<0xfd:
return int_to_hex(i)
elif i<=0xffff:
return "fd"+int_to_hex(i,2)
elif i<=0xffffffff:
return "fe"+int_to_hex(i,4)
else:
return "ff"+int_to_hex(i,8)
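# For illustration:
#   var_int(0x10)    == '10'          (values below 0xfd fit in one byte)
#   var_int(0xfd)    == 'fdfd00'      ('fd' marker + 2-byte little-endian value)
#   var_int(0x12345) == 'fe45230100'  ('fe' marker + 4-byte little-endian value)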
def op_push(i):
if i<0x4c:
return int_to_hex(i)
elif i<0xff:
return '4c' + int_to_hex(i)
elif i<0xffff:
return '4d' + int_to_hex(i,2)
else:
return '4e' + int_to_hex(i,4)
def push_script(x):
return op_push(len(x)//2) + x
def sha256(x):
x = to_bytes(x, 'utf8')
return bytes(hashlib.sha256(x).digest())
def Hash(x):
x = to_bytes(x, 'utf8')
out = bytes(sha256(sha256(x)))
return out
hash_encode = lambda x: bh2u(x[::-1])
hash_decode = lambda x: bfh(x)[::-1]
hmac_sha_512 = lambda x, y: hmac.new(x, y, hashlib.sha512).digest()
def is_new_seed(x, prefix=version.SEED_PREFIX):
from . import mnemonic
x = mnemonic.normalize_text(x)
s = bh2u(hmac_sha_512(b"Seed version", x.encode('utf8')))
return s.startswith(prefix)
def is_old_seed(seed):
from . import old_mnemonic, mnemonic
seed = mnemonic.normalize_text(seed)
words = seed.split()
try:
# checks here are deliberately left weak for legacy reasons, see #3149
old_mnemonic.mn_decode(words)
uses_electrum_words = True
except Exception:
uses_electrum_words = False
try:
seed = bfh(seed)
is_hex = (len(seed) == 16 or len(seed) == 32)
except Exception:
is_hex = False
return is_hex or (uses_electrum_words and (len(words) == 12 or len(words) == 24))
def seed_type(x):
if is_old_seed(x):
return 'old'
elif is_new_seed(x):
return 'standard'
elif is_new_seed(x, version.SEED_PREFIX_SW):
return 'segwit'
elif is_new_seed(x, version.SEED_PREFIX_2FA):
return '2fa'
return ''
is_seed = lambda x: bool(seed_type(x))
# pywallet openssl private key implementation
def i2o_ECPublicKey(pubkey, compressed=False):
# public keys are 65 bytes long (520 bits)
# 0x04 + 32-byte X-coordinate + 32-byte Y-coordinate
# 0x00 = point at infinity, 0x02 and 0x03 = compressed, 0x04 = uncompressed
# compressed keys: <sign> <x> where <sign> is 0x02 if y is even and 0x03 if y is odd
if compressed:
if pubkey.point.y() & 1:
key = '03' + '%064x' % pubkey.point.x()
else:
key = '02' + '%064x' % pubkey.point.x()
else:
key = '04' + \
'%064x' % pubkey.point.x() + \
'%064x' % pubkey.point.y()
return bfh(key)
# end pywallet openssl private key implementation
############ functions from pywallet #####################
def hash_160(public_key):
try:
md = hashlib.new('ripemd160')
md.update(sha256(public_key))
return md.digest()
except BaseException:
from . import ripemd
md = ripemd.new(sha256(public_key))
return md.digest()
def hash160_to_b58_address(h160, addrtype, witness_program_version=1):
s = bytes([addrtype])
s += h160
return base_encode(s+Hash(s)[0:4], base=58)
def b58_address_to_hash160(addr):
addr = to_bytes(addr, 'ascii')
_bytes = base_decode(addr, 25, base=58)
return _bytes[0], _bytes[1:21]
def hash160_to_p2pkh(h160):
return hash160_to_b58_address(h160, NetworkConstants.ADDRTYPE_P2PKH)
def hash160_to_p2sh(h160):
return hash160_to_b58_address(h160, NetworkConstants.ADDRTYPE_P2SH)
def public_key_to_p2pkh(public_key):
return hash160_to_p2pkh(hash_160(public_key))
def hash_to_segwit_addr(h):
return segwit_addr.encode(NetworkConstants.SEGWIT_HRP, 0, h)
def public_key_to_p2wpkh(public_key):
return hash_to_segwit_addr(hash_160(public_key))
def script_to_p2wsh(script):
return hash_to_segwit_addr(sha256(bfh(script)))
def p2wpkh_nested_script(pubkey):
pkh = bh2u(hash_160(bfh(pubkey)))
return '00' + push_script(pkh)
def p2wsh_nested_script(witness_script):
wsh = bh2u(sha256(bfh(witness_script)))
return '00' + push_script(wsh)
def pubkey_to_address(txin_type, pubkey):
if txin_type == 'p2pkh':
return public_key_to_p2pkh(bfh(pubkey))
elif txin_type == 'p2wpkh':
return hash_to_segwit_addr(hash_160(bfh(pubkey)))
elif txin_type == 'p2wpkh-p2sh':
scriptSig = p2wpkh_nested_script(pubkey)
return hash160_to_p2sh(hash_160(bfh(scriptSig)))
else:
raise NotImplementedError(txin_type)
def redeem_script_to_address(txin_type, redeem_script):
if txin_type == 'p2sh':
return hash160_to_p2sh(hash_160(bfh(redeem_script)))
elif txin_type == 'p2wsh':
return script_to_p2wsh(redeem_script)
elif txin_type == 'p2wsh-p2sh':
scriptSig = p2wsh_nested_script(redeem_script)
return hash160_to_p2sh(hash_160(bfh(scriptSig)))
else:
raise NotImplementedError(txin_type)
def script_to_address(script):
from .transaction import get_address_from_output_script
t, addr = get_address_from_output_script(bfh(script))
assert t == TYPE_ADDRESS
return addr
def address_to_script(addr):
witver, witprog = segwit_addr.decode(NetworkConstants.SEGWIT_HRP, addr)
if witprog is not None:
assert (0 <= witver <= 16)
OP_n = witver + 0x50 if witver > 0 else 0
script = bh2u(bytes([OP_n]))
script += push_script(bh2u(bytes(witprog)))
return script
addrtype, hash_160 = b58_address_to_hash160(addr)
if addrtype == NetworkConstants.ADDRTYPE_P2PKH:
script = '76a9' # op_dup, op_hash_160
script += push_script(bh2u(hash_160))
script += '88ac' # op_equalverify, op_checksig
elif addrtype == NetworkConstants.ADDRTYPE_P2SH:
script = 'a9' # op_hash_160
script += push_script(bh2u(hash_160))
script += '87' # op_equal
else:
raise BaseException('unknown address type')
return script
def address_to_scripthash(addr):
script = address_to_script(addr)
return script_to_scripthash(script)
def script_to_scripthash(script):
h = sha256(bytes.fromhex(script))[0:32]
return bh2u(bytes(reversed(h)))
def public_key_to_p2pk_script(pubkey):
script = push_script(pubkey)
script += 'ac' # op_checksig
return script
__b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
__b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def base_encode(v, base):
""" encode v, which is a string of bytes, to base58."""
assert_bytes(v)
assert base in (58, 43)
chars = __b58chars
if base == 43:
chars = __b43chars
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * c
result = bytearray()
while long_value >= base:
div, mod = divmod(long_value, base)
result.append(chars[mod])
long_value = div
result.append(chars[long_value])
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
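    # e.g. base_encode(b'\x00\x00\x01', base=58) == '112': the value 1 encodes
    # to '2' and the two leading zero bytes become two leading '1' characters.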
nPad = 0
for c in v:
if c == 0x00:
nPad += 1
else:
break
result.extend([chars[0]] * nPad)
result.reverse()
return result.decode('ascii')
def base_decode(v, length, base):
""" decode v into a string of len bytes."""
# assert_bytes(v)
v = to_bytes(v, 'ascii')
assert base in (58, 43)
chars = __b58chars
if base == 43:
chars = __b43chars
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += chars.find(bytes([c])) * (base**i)
result = bytearray()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result.append(mod)
long_value = div
result.append(long_value)
nPad = 0
for c in v:
if c == chars[0]:
nPad += 1
else:
break
result.extend(b'\x00' * nPad)
if length is not None and len(result) != length:
return None
result.reverse()
return bytes(result)
def EncodeBase58Check(vchIn):
hash = Hash(vchIn)
return base_encode(vchIn + hash[0:4], base=58)
def DecodeBase58Check(psz):
vchRet = base_decode(psz, None, base=58)
key = vchRet[0:-4]
csum = vchRet[-4:]
hash = Hash(key)
cs32 = hash[0:4]
if cs32 != csum:
return None
else:
return key
# extended key export format for segwit
SCRIPT_TYPES = {
'p2pkh':0,
'p2wpkh':1,
'p2wpkh-p2sh':2,
'p2sh':5,
'p2wsh':6,
'p2wsh-p2sh':7
}
def serialize_privkey(secret, compressed, txin_type):
prefix = bytes([(SCRIPT_TYPES[txin_type]+NetworkConstants.WIF_PREFIX)&255])
suffix = b'\01' if compressed else b''
vchIn = prefix + secret + suffix
return EncodeBase58Check(vchIn)
def deserialize_privkey(key):
# whether the pubkey is compressed should be visible from the keystore
vch = DecodeBase58Check(key)
if is_minikey(key):
return 'p2pkh', minikey_to_private_key(key), True
elif vch:
txin_type = inv_dict(SCRIPT_TYPES)[vch[0] - NetworkConstants.WIF_PREFIX]
assert len(vch) in [33, 34]
compressed = len(vch) == 34
return txin_type, vch[1:33], compressed
else:
raise BaseException("cannot deserialize", key)
def regenerate_key(pk):
assert len(pk) == 32
return EC_KEY(pk)
def GetPubKey(pubkey, compressed=False):
return i2o_ECPublicKey(pubkey, compressed)
def GetSecret(pkey):
return bfh('%064x' % pkey.secret)
def is_compressed(sec):
return deserialize_privkey(sec)[2]
def public_key_from_private_key(pk, compressed):
pkey = regenerate_key(pk)
public_key = GetPubKey(pkey.pubkey, compressed)
return bh2u(public_key)
def address_from_private_key(sec):
txin_type, privkey, compressed = deserialize_privkey(sec)
public_key = public_key_from_private_key(privkey, compressed)
return pubkey_to_address(txin_type, public_key)
def is_segwit_address(addr):
try:
witver, witprog = segwit_addr.decode(NetworkConstants.SEGWIT_HRP, addr)
except Exception as e:
return False
return witprog is not None
def is_b58_address(addr):
try:
addrtype, h = b58_address_to_hash160(addr)
except Exception as e:
return False
if addrtype not in [NetworkConstants.ADDRTYPE_P2PKH, NetworkConstants.ADDRTYPE_P2SH]:
return False
return addr == hash160_to_b58_address(h, addrtype)
def is_address(addr):
return is_segwit_address(addr) or is_b58_address(addr)
def is_private_key(key):
try:
k = deserialize_privkey(key)
return k is not False
except:
return False
########### end pywallet functions #######################
def is_minikey(text):
# Minikeys are typically 22 or 30 characters, but this routine
# permits any length of 20 or more provided the minikey is valid.
# A valid minikey must begin with an 'S', be in base58, and when
# suffixed with '?' have its SHA256 hash begin with a zero byte.
# They are widely used in Casascius physical bitcoins.
return (len(text) >= 20 and text[0] == 'S'
and all(ord(c) in __b58chars for c in text)
and sha256(text + '?')[0] == 0x00)
def minikey_to_private_key(text):
return sha256(text)
from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1
from ecdsa.curves import SECP256k1
from ecdsa.ellipticcurve import Point
from ecdsa.util import string_to_number, number_to_string
def msg_magic(message):
length = bfh(var_int(len(message)))
return b"\x18Bitcoin Signed Message:\n" + length + message
def verify_message(address, sig, message):
assert_bytes(sig, message)
try:
h = Hash(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key using the address
pubkey = point_to_ser(public_key.pubkey.point, compressed)
for txin_type in ['p2pkh','p2wpkh','p2wpkh-p2sh']:
addr = pubkey_to_address(txin_type, bh2u(pubkey))
if address == addr:
break
else:
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
return True
except Exception as e:
print_error("Verification error: {0}".format(e))
return False
def encrypt_message(message, pubkey, magic=b'BIE1'):
return EC_KEY.encrypt_message(message, bfh(pubkey), magic)
def chunks(l, n):
return [l[i:i+n] for i in range(0, len(l), n)]
def ECC_YfromX(x,curved=curve_secp256k1, odd=True):
_p = curved.p()
_a = curved.a()
_b = curved.b()
for offset in range(128):
Mx = x + offset
My2 = pow(Mx, 3, _p) + _a * pow(Mx, 2, _p) + _b % _p
My = pow(My2, (_p+1)//4, _p )
if curved.contains_point(Mx,My):
if odd == bool(My&1):
return [My,offset]
return [_p-My,offset]
raise Exception('ECC_YfromX: No Y found')
def negative_point(P):
return Point( P.curve(), P.x(), -P.y(), P.order() )
def point_to_ser(P, comp=True ):
if comp:
return bfh( ('%02x'%(2+(P.y()&1)))+('%064x'%P.x()) )
return bfh( '04'+('%064x'%P.x())+('%064x'%P.y()) )
def ser_to_point(Aser):
curve = curve_secp256k1
generator = generator_secp256k1
_r = generator.order()
assert Aser[0] in [0x02, 0x03, 0x04]
if Aser[0] == 0x04:
return Point( curve, string_to_number(Aser[1:33]), string_to_number(Aser[33:]), _r )
Mx = string_to_number(Aser[1:])
return Point( curve, Mx, ECC_YfromX(Mx, curve, Aser[0] == 0x03)[0], _r )
class MyVerifyingKey(ecdsa.VerifyingKey):
@classmethod
def from_signature(klass, sig, recid, h, curve):
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
from ecdsa import util, numbertheory
from . import msqr
curveFp = curve.curve
G = curve.generator
order = G.order()
# extract r,s from signature
r, s = util.sigdecode_string(sig, order)
# 1.1
x = r + (recid//2) * order
# 1.3
alpha = ( x * x * x + curveFp.a() * x + curveFp.b() ) % curveFp.p()
beta = msqr.modular_sqrt(alpha, curveFp.p())
y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
# 1.4 the constructor checks that nR is at infinity
R = Point(curveFp, x, y, order)
# 1.5 compute e from message:
e = string_to_number(h)
minus_e = -e % order
# 1.6 compute Q = r^-1 (sR - eG)
inv_r = numbertheory.inverse_mod(r,order)
Q = inv_r * ( s * R + minus_e * G )
return klass.from_public_point( Q, curve )
def pubkey_from_signature(sig, h):
if len(sig) != 65:
raise Exception("Wrong encoding")
nV = sig[0]
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
return MyVerifyingKey.from_signature(sig[1:], recid, h, curve = SECP256k1), compressed
class MySigningKey(ecdsa.SigningKey):
"""Enforce low S values in signatures"""
def sign_number(self, number, entropy=None, k=None):
curve = SECP256k1
G = curve.generator
order = G.order()
r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
if s > order//2:
s = order - s
return r, s
class EC_KEY(object):
def __init__( self, k ):
secret = string_to_number(k)
self.pubkey = ecdsa.ecdsa.Public_key( generator_secp256k1, generator_secp256k1 * secret )
self.privkey = ecdsa.ecdsa.Private_key( self.pubkey, secret )
self.secret = secret
def get_public_key(self, compressed=True):
return bh2u(point_to_ser(self.pubkey.point, compressed))
def sign(self, msg_hash):
private_key = MySigningKey.from_secret_exponent(self.secret, curve = SECP256k1)
public_key = private_key.get_verifying_key()
signature = private_key.sign_digest_deterministic(msg_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_string)
assert public_key.verify_digest(signature, msg_hash, sigdecode = ecdsa.util.sigdecode_string)
return signature
def sign_message(self, message, is_compressed):
message = to_bytes(message, 'utf8')
signature = self.sign(Hash(msg_magic(message)))
for i in range(4):
sig = bytes([27 + i + (4 if is_compressed else 0)]) + signature
try:
self.verify_message(sig, message)
return sig
except Exception as e:
continue
else:
raise Exception("error: cannot sign message")
def verify_message(self, sig, message):
assert_bytes(message)
h = Hash(msg_magic(message))
public_key, compressed = pubkey_from_signature(sig, h)
# check public key
if point_to_ser(public_key.pubkey.point, compressed) != point_to_ser(self.pubkey.point, compressed):
raise Exception("Bad signature")
# check message
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
# ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
@classmethod
def encrypt_message(self, message, pubkey, magic=b'BIE1'):
assert_bytes(message)
pk = ser_to_point(pubkey)
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, pk.x(), pk.y()):
raise Exception('invalid pubkey')
ephemeral_exponent = number_to_string(ecdsa.util.randrange(pow(2,256)), generator_secp256k1.order())
ephemeral = EC_KEY(ephemeral_exponent)
ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = bfh(ephemeral.get_public_key(compressed=True))
encrypted = magic + ephemeral_pubkey + ciphertext
mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
return base64.b64encode(encrypted + mac)
def decrypt_message(self, encrypted, magic=b'BIE1'):
encrypted = base64.b64decode(encrypted)
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic_found = encrypted[:4]
ephemeral_pubkey = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic_found != magic:
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
except AssertionError as e:
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, ephemeral_pubkey.x(), ephemeral_pubkey.y()):
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
###################################### BIP32 ##############################
random_seed = lambda n: "%032x"%ecdsa.util.randrange( pow(2,n) )
BIP32_PRIME = 0x80000000
def get_pubkeys_from_secret(secret):
# public key
private_key = ecdsa.SigningKey.from_string( secret, curve = SECP256k1 )
public_key = private_key.get_verifying_key()
K = public_key.to_string()
K_compressed = GetPubKey(public_key.pubkey,True)
return K, K_compressed
# Child private key derivation function (from master private key)
# k = master private key (32 bytes)
# c = master chain code (extra entropy for key derivation) (32 bytes)
# n = the index of the key we want to derive. (only 32 bits will be used)
# If n is negative (i.e. the 32nd bit is set), the resulting private key's
# corresponding public key can NOT be determined without the master private key.
# However, if n is positive, the resulting private key's corresponding
# public key can be determined without the master private key.
def CKD_priv(k, c, n):
is_prime = n & BIP32_PRIME
return _CKD_priv(k, c, bfh(rev_hex(int_to_hex(n,4))), is_prime)
def _CKD_priv(k, c, s, is_prime):
order = generator_secp256k1.order()
keypair = EC_KEY(k)
cK = GetPubKey(keypair.pubkey,True)
data = bytes([0]) + k + s if is_prime else cK + s
I = hmac.new(c, data, hashlib.sha512).digest()
k_n = number_to_string( (string_to_number(I[0:32]) + string_to_number(k)) % order , order )
c_n = I[32:]
return k_n, c_n
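# For illustration (a sketch; master_k and master_c are the 32-byte key and
# chain code, e.g. as produced by bip32_root below):
#   k0, c0 = CKD_priv(master_k, master_c, 0)                  # normal child m/0
#   k0h, c0h = CKD_priv(master_k, master_c, 0 + BIP32_PRIME)  # hardened child m/0'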
# Child public key derivation function (from public key only)
# K = master public key
# c = master chain code
# n = index of key we want to derive
# This function allows us to find the nth public key, as long as n is
# non-negative. If n is negative, we need the master private key to find it.
def CKD_pub(cK, c, n):
if n & BIP32_PRIME: raise
return _CKD_pub(cK, c, bfh(rev_hex(int_to_hex(n,4))))
# helper function, callable with arbitrary string
def _CKD_pub(cK, c, s):
order = generator_secp256k1.order()
I = hmac.new(c, cK + s, hashlib.sha512).digest()
curve = SECP256k1
pubkey_point = string_to_number(I[0:32])*curve.generator + ser_to_point(cK)
public_key = ecdsa.VerifyingKey.from_public_point( pubkey_point, curve = SECP256k1 )
c_n = I[32:]
cK_n = GetPubKey(public_key.pubkey,True)
return cK_n, c_n
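# For illustration: for a non-hardened index n, public derivation mirrors the
# private one -- CKD_pub(cK, c, n) yields the public key of the child produced
# by CKD_priv(k, c, n), while CKD_pub(cK, c, n | BIP32_PRIME) raises because
# hardened children cannot be derived from the public key alone.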
def xprv_header(xtype):
return bfh("%08x" % XPRV_HEADERS[xtype])
def xpub_header(xtype):
return bfh("%08x" % XPUB_HEADERS[xtype])
def serialize_xprv(xtype, c, k, depth=0, fingerprint=b'\x00'*4, child_number=b'\x00'*4):
xprv = xprv_header(xtype) + bytes([depth]) + fingerprint + child_number + c + bytes([0]) + k
return EncodeBase58Check(xprv)
def serialize_xpub(xtype, c, cK, depth=0, fingerprint=b'\x00'*4, child_number=b'\x00'*4):
xpub = xpub_header(xtype) + bytes([depth]) + fingerprint + child_number + c + cK
return EncodeBase58Check(xpub)
def deserialize_xkey(xkey, prv):
xkey = DecodeBase58Check(xkey)
if len(xkey) != 78:
raise BaseException('Invalid length')
depth = xkey[4]
fingerprint = xkey[5:9]
child_number = xkey[9:13]
c = xkey[13:13+32]
header = int('0x' + bh2u(xkey[0:4]), 16)
headers = XPRV_HEADERS if prv else XPUB_HEADERS
if header not in headers.values():
raise BaseException('Invalid xpub format', hex(header))
xtype = list(headers.keys())[list(headers.values()).index(header)]
n = 33 if prv else 32
K_or_k = xkey[13+n:]
return xtype, depth, fingerprint, child_number, c, K_or_k
def deserialize_xpub(xkey):
return deserialize_xkey(xkey, False)
def deserialize_xprv(xkey):
return deserialize_xkey(xkey, True)
def xpub_type(x):
return deserialize_xpub(x)[0]
def is_xpub(text):
try:
deserialize_xpub(text)
return True
except:
return False
def is_xprv(text):
try:
deserialize_xprv(text)
return True
except:
return False
def xpub_from_xprv(xprv):
xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
K, cK = get_pubkeys_from_secret(k)
return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_root(seed, xtype):
I = hmac.new(b"Bitcoin seed", seed, hashlib.sha512).digest()
master_k = I[0:32]
master_c = I[32:]
K, cK = get_pubkeys_from_secret(master_k)
xprv = serialize_xprv(xtype, master_c, master_k)
xpub = serialize_xpub(xtype, master_c, cK)
return xprv, xpub
def xpub_from_pubkey(xtype, cK):
assert cK[0] in [0x02, 0x03]
return serialize_xpub(xtype, b'\x00'*32, cK)
def bip32_derivation(s):
assert s.startswith('m/')
s = s[2:]
for n in s.split('/'):
if n == '': continue
i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
yield i
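# For illustration: list(bip32_derivation("m/0'/1")) == [BIP32_PRIME, 1]
# (an apostrophe marks a hardened index).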
def is_bip32_derivation(x):
try:
[ i for i in bip32_derivation(x)]
return True
except :
return False
def bip32_private_derivation(xprv, branch, sequence):
assert sequence.startswith(branch)
if branch == sequence:
return xprv, xpub_from_xprv(xprv)
xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
parent_k = k
k, c = CKD_priv(k, c, i)
depth += 1
_, parent_cK = get_pubkeys_from_secret(parent_k)
fingerprint = hash_160(parent_cK)[0:4]
child_number = bfh("%08X"%i)
K, cK = get_pubkeys_from_secret(k)
xpub = serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
xprv = serialize_xprv(xtype, c, k, depth, fingerprint, child_number)
return xprv, xpub
def bip32_public_derivation(xpub, branch, sequence):
xtype, depth, fingerprint, child_number, c, cK = deserialize_xpub(xpub)
assert sequence.startswith(branch)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n)
parent_cK = cK
cK, c = CKD_pub(cK, c, i)
depth += 1
fingerprint = hash_160(parent_cK)[0:4]
child_number = bfh("%08X"%i)
return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_private_key(sequence, k, chain):
for i in sequence:
k, chain = CKD_priv(k, chain, i)
return k
| 1 | 12,283 | `hex(i)[2:]` is trying to do the same thing above. what is `i` in your malformed case? | spesmilo-electrum | py |
@@ -126,8 +126,8 @@ public class XML {
}
}
- /** escapes character data in val */
- public final static void writeXML(Writer out, String tag, String val, Object... attrs) throws IOException {
+ /** escapes character data in val if shouldEscape is true*/
+ public final static void writeXML(Writer out, String tag, boolean shouldEscape, String val, Object... attrs) throws IOException {
out.write('<');
out.write(tag);
for (int i=0; i<attrs.length; i++) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.common.util;
import java.io.Writer;
import java.io.IOException;
import java.util.Map;
/**
 * Utility methods for writing XML with escaping of character data and attribute values.
*/
public class XML {
//
// copied from some of my personal code... -YCS
// table created from python script.
// only have to escape quotes in attribute values, and don't really have to escape '>'
// many chars less than 0x20 are *not* valid XML, even when escaped!
  // for example, <foo>&#0;<foo> is invalid XML.
private static final String[] chardata_escapes=
{"#0;","#1;","#2;","#3;","#4;","#5;","#6;","#7;","#8;",null,null,"#11;","#12;",null,"#14;","#15;","#16;","#17;","#18;","#19;","#20;","#21;","#22;","#23;","#24;","#25;","#26;","#27;","#28;","#29;","#30;","#31;",null,null,null,null,null,null,"&",null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,"<",null,">"};
private static final String[] attribute_escapes=
{"#0;","#1;","#2;","#3;","#4;","#5;","#6;","#7;","#8;",null,null,"#11;","#12;",null,"#14;","#15;","#16;","#17;","#18;","#19;","#20;","#21;","#22;","#23;","#24;","#25;","#26;","#27;","#28;","#29;","#30;","#31;",null,null,""",null,null,null,"&",null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,"<"};
/*****************************************
#Simple python script used to generate the escape table above. -YCS
#
#use individual char arrays or one big char array for better efficiency
# or byte array?
#other={'&':'amp', '<':'lt', '>':'gt', "'":'apos', '"':'quot'}
#
other={'&':'amp', '<':'lt'}
maxi=ord(max(other.keys()))+1
table=[None] * maxi
#NOTE: invalid XML chars are "escaped" as #nn; *not* &#nn; because
#a real XML escape would cause many strict XML parsers to choke.
for i in range(0x20): table[i]='#%d;' % i
for i in '\n\r\t ': table[ord(i)]=None
for k,v in other.items():
table[ord(k)]='&%s;' % v
result=""
for i in range(maxi):
val=table[i]
if not val: val='null'
else: val='"%s"' % val
result += val + ','
print result
****************************************/
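  // For illustration, with the tables above:
  //   escapeCharData("a < b & c", w)        writes  a &lt; b &amp; c
  //   escapeAttributeValue("say \"hi\"", w) writes  say &quot;hi&quot;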
/*********
   * Escapes XML character data in str and writes the result to out.
* @throws IOException If there is a low-level I/O error.
*/
public static void escapeCharData(String str, Writer out) throws IOException {
escape(str, out, chardata_escapes);
}
public static void escapeAttributeValue(String str, Writer out) throws IOException {
escape(str, out, attribute_escapes);
}
public static void escapeAttributeValue(char [] chars, int start, int length, Writer out) throws IOException {
escape(chars, start, length, out, attribute_escapes);
}
public final static void writeXML(Writer out, String tag, String val) throws IOException {
out.write('<');
out.write(tag);
if (val == null) {
out.write('/');
out.write('>');
} else {
out.write('>');
escapeCharData(val,out);
out.write('<');
out.write('/');
out.write(tag);
out.write('>');
}
}
/** does NOT escape character data in val, must already be valid XML */
public final static void writeUnescapedXML(Writer out, String tag, String val, Object... attrs) throws IOException {
out.write('<');
out.write(tag);
for (int i=0; i<attrs.length; i++) {
out.write(' ');
out.write(attrs[i++].toString());
out.write('=');
out.write('"');
out.write(attrs[i].toString());
out.write('"');
}
if (val == null) {
out.write('/');
out.write('>');
} else {
out.write('>');
out.write(val);
out.write('<');
out.write('/');
out.write(tag);
out.write('>');
}
}
/** escapes character data in val */
public final static void writeXML(Writer out, String tag, String val, Object... attrs) throws IOException {
out.write('<');
out.write(tag);
for (int i=0; i<attrs.length; i++) {
out.write(' ');
out.write(attrs[i++].toString());
out.write('=');
out.write('"');
escapeAttributeValue(attrs[i].toString(), out);
out.write('"');
}
if (val == null) {
out.write('/');
out.write('>');
} else {
out.write('>');
escapeCharData(val,out);
out.write('<');
out.write('/');
out.write(tag);
out.write('>');
}
}
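  // For illustration: writeXML(w, "str", "a & b", "name", "id")
  // produces <str name="id">a &amp; b</str>.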
/** escapes character data in val */
public static void writeXML(Writer out, String tag, String val, Map<String, String> attrs) throws IOException {
out.write('<');
out.write(tag);
for (Map.Entry<String, String> entry : attrs.entrySet()) {
out.write(' ');
out.write(entry.getKey());
out.write('=');
out.write('"');
escapeAttributeValue(entry.getValue(), out);
out.write('"');
}
if (val == null) {
out.write('/');
out.write('>');
} else {
out.write('>');
escapeCharData(val,out);
out.write('<');
out.write('/');
out.write(tag);
out.write('>');
}
}
private static void escape(char [] chars, int offset, int length, Writer out, String [] escapes) throws IOException{
for (int i=offset; i<length; i++) {
char ch = chars[i];
if (ch<escapes.length) {
String replacement = escapes[ch];
if (replacement != null) {
out.write(replacement);
continue;
}
}
out.write(ch);
}
}
private static void escape(String str, Writer out, String[] escapes) throws IOException {
for (int i=0; i<str.length(); i++) {
char ch = str.charAt(i);
if (ch<escapes.length) {
String replacement = escapes[ch];
if (replacement != null) {
out.write(replacement);
continue;
}
}
out.write(ch);
}
}
}
| 1 | 27,508 | I think this change is redundant; see the previously defined method "writeUnescapedXML". | apache-lucene-solr | java |
@@ -0,0 +1,18 @@
+package types
+
+import cbor "gx/ipfs/QmRoARq3nkUb13HSKZGepCZSWe5GrVPwx7xURJGZ7KWv9V/go-ipld-cbor"
+
+func init() {
+ cbor.RegisterCborType(Commitments{})
+}
+
+// CommitmentLength is the length of a single commitment (in bytes).
+const CommitmentLength = 32
+
+// Commitments is a struct containing the replica and data commitments produced
+// when sealing a sector.
+type Commitments struct {
+ CommD [CommitmentLength]byte
+ CommR [CommitmentLength]byte
+ CommRStar [CommitmentLength]byte
+} | 1 | 1 | 15,793 | Ah now I have at least some idea what these are for. | filecoin-project-venus | go |
|
@@ -58,8 +58,8 @@ public class DataReader<T> implements DatumReader<T> {
}
@Override
- public void setSchema(Schema fileSchema) {
- this.fileSchema = Schema.applyAliases(fileSchema, readSchema);
+ public void setSchema(Schema schema) {
+ this.fileSchema = Schema.applyAliases(schema, readSchema);
}
@Override | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.data.avro;
import com.google.common.collect.MapMaker;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.ResolvingDecoder;
import org.apache.iceberg.avro.AvroSchemaUtil;
import org.apache.iceberg.avro.AvroSchemaVisitor;
import org.apache.iceberg.avro.LogicalMap;
import org.apache.iceberg.avro.ValueReader;
import org.apache.iceberg.avro.ValueReaders;
import org.apache.iceberg.exceptions.RuntimeIOException;
public class DataReader<T> implements DatumReader<T> {
private static final ThreadLocal<Map<Schema, Map<Schema, ResolvingDecoder>>> DECODER_CACHES =
ThreadLocal.withInitial(() -> new MapMaker().weakKeys().makeMap());
public static <D> DataReader<D> create(Schema readSchema) {
return new DataReader<>(readSchema);
}
private final Schema readSchema;
private final ValueReader<T> reader;
private Schema fileSchema = null;
@SuppressWarnings("unchecked")
private DataReader(Schema readSchema) {
this.readSchema = readSchema;
this.reader = (ValueReader<T>) AvroSchemaVisitor.visit(readSchema, new ReadBuilder());
}
@Override
public void setSchema(Schema fileSchema) {
this.fileSchema = Schema.applyAliases(fileSchema, readSchema);
}
@Override
public T read(T reuse, Decoder decoder) throws IOException {
ResolvingDecoder resolver = resolve(decoder);
T value = reader.read(resolver, reuse);
resolver.drain();
return value;
}
private ResolvingDecoder resolve(Decoder decoder) throws IOException {
Map<Schema, Map<Schema, ResolvingDecoder>> cache = DECODER_CACHES.get();
Map<Schema, ResolvingDecoder> fileSchemaToResolver = cache
.computeIfAbsent(readSchema, k -> new HashMap<>());
ResolvingDecoder resolver = fileSchemaToResolver.get(fileSchema);
if (resolver == null) {
resolver = newResolver();
fileSchemaToResolver.put(fileSchema, resolver);
}
resolver.configure(decoder);
return resolver;
}
private ResolvingDecoder newResolver() {
try {
return DecoderFactory.get().resolvingDecoder(fileSchema, readSchema, null);
} catch (IOException e) {
throw new RuntimeIOException(e);
}
}
private static class ReadBuilder extends AvroSchemaVisitor<ValueReader<?>> {
private ReadBuilder() {
}
@Override
public ValueReader<?> record(Schema record, List<String> names, List<ValueReader<?>> fields) {
return GenericReaders.struct(AvroSchemaUtil.convert(record).asStructType(), fields);
}
@Override
public ValueReader<?> union(Schema union, List<ValueReader<?>> options) {
return ValueReaders.union(options);
}
@Override
public ValueReader<?> array(Schema array, ValueReader<?> elementReader) {
if (array.getLogicalType() instanceof LogicalMap) {
ValueReaders.StructReader<?> keyValueReader = (ValueReaders.StructReader) elementReader;
ValueReader<?> keyReader = keyValueReader.reader(0);
ValueReader<?> valueReader = keyValueReader.reader(1);
return ValueReaders.arrayMap(keyReader, valueReader);
}
return ValueReaders.array(elementReader);
}
@Override
public ValueReader<?> map(Schema map, ValueReader<?> valueReader) {
return ValueReaders.map(ValueReaders.strings(), valueReader);
}
@Override
public ValueReader<?> primitive(Schema primitive) {
LogicalType logicalType = primitive.getLogicalType();
if (logicalType != null) {
switch (logicalType.getName()) {
case "date":
return GenericReaders.dates();
case "time-micros":
return GenericReaders.times();
case "timestamp-micros":
if (AvroSchemaUtil.isTimestamptz(primitive)) {
return GenericReaders.timestamptz();
}
return GenericReaders.timestamps();
case "decimal":
ValueReader<byte[]> inner;
switch (primitive.getType()) {
case FIXED:
inner = ValueReaders.fixed(primitive.getFixedSize());
break;
case BYTES:
inner = ValueReaders.bytes();
break;
default:
throw new IllegalArgumentException(
"Invalid primitive type for decimal: " + primitive.getType());
}
LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
return ValueReaders.decimal(inner, decimal.getScale());
case "uuid":
return ValueReaders.uuids();
default:
throw new IllegalArgumentException("Unknown logical type: " + logicalType);
}
}
switch (primitive.getType()) {
case NULL:
return ValueReaders.nulls();
case BOOLEAN:
return ValueReaders.booleans();
case INT:
return ValueReaders.ints();
case LONG:
return ValueReaders.longs();
case FLOAT:
return ValueReaders.floats();
case DOUBLE:
return ValueReaders.doubles();
case STRING:
// might want to use a binary-backed container like Utf8
return ValueReaders.strings();
case FIXED:
return ValueReaders.fixed(primitive.getFixedSize());
case BYTES:
return ValueReaders.byteBuffers();
default:
throw new IllegalArgumentException("Unsupported type: " + primitive);
}
}
}
}
| 1 | 13,501 | I believe in previous PRs @mccheah frequently used `fileSchema` -> `newFileSchema` type of renames to avoid hiding fields in builders. Would it make sense to make it consistent? | apache-iceberg | java |
@@ -62,9 +62,17 @@ func (manager *connectionManager) Connect(consumerID, providerID identity.Identi
}
}()
+ err = manager.startConnection(consumerID, providerID)
+ if err == utils.ErrRequestCancelled {
+ return ErrConnectionCancelled
+ }
+ return err
+}
+
+func (manager *connectionManager) startConnection(consumerID, providerID identity.Identity) (err error) {
cancelable := utils.NewCancelable()
manager.cleanConnection = utils.CallOnce(func() {
- log.Info(managerLogPrefix, "Canceling connection initiation")
+ log.Info(managerLogPrefix, "Cancelling connection initiation")
manager.status = statusDisconnecting()
cancelable.Cancel()
}) | 1 | package connection
import (
"errors"
log "github.com/cihub/seelog"
"github.com/mysterium/node/communication"
"github.com/mysterium/node/identity"
"github.com/mysterium/node/openvpn"
"github.com/mysterium/node/openvpn/middlewares/client/bytescount"
"github.com/mysterium/node/server"
"github.com/mysterium/node/service_discovery/dto"
"github.com/mysterium/node/session"
"github.com/mysterium/node/utils"
)
const managerLogPrefix = "[connection-manager] "
var (
// ErrNoConnection error indicates that action applied to manager expects active connection (i.e. disconnect)
ErrNoConnection = errors.New("no connection exists")
	// ErrAlreadyExists error indicates that action applied to manager expects no active connection (i.e. connect)
ErrAlreadyExists = errors.New("connection already exists")
// ErrConnectionCancelled indicates that connection in progress was cancelled by request of api user
ErrConnectionCancelled = errors.New("connection was cancelled")
// ErrOpenvpnProcessDied indicates that Connect method didn't reach "Connected" phase due to openvpn error
ErrOpenvpnProcessDied = errors.New("openvpn process died")
)
type connectionManager struct {
//these are passed on creation
mysteriumClient server.Client
newDialog DialogCreator
newVpnClient VpnClientCreator
statsKeeper bytescount.SessionStatsKeeper
//these are populated by Connect at runtime
status ConnectionStatus
cleanConnection func()
}
// NewManager creates connection manager with given dependencies
func NewManager(mysteriumClient server.Client, dialogCreator DialogCreator,
vpnClientCreator VpnClientCreator, statsKeeper bytescount.SessionStatsKeeper) *connectionManager {
return &connectionManager{
mysteriumClient: mysteriumClient,
newDialog: dialogCreator,
newVpnClient: vpnClientCreator,
statsKeeper: statsKeeper,
status: statusNotConnected(),
cleanConnection: warnOnClean,
}
}
func (manager *connectionManager) Connect(consumerID, providerID identity.Identity) (err error) {
if manager.status.State != NotConnected {
return ErrAlreadyExists
}
manager.status = statusConnecting()
defer func() {
if err != nil {
manager.status = statusNotConnected()
}
}()
cancelable := utils.NewCancelable()
manager.cleanConnection = utils.CallOnce(func() {
log.Info(managerLogPrefix, "Canceling connection initiation")
manager.status = statusDisconnecting()
cancelable.Cancel()
})
val, err := cancelable.
NewRequest(func() (interface{}, error) {
return manager.findProposalByProviderID(providerID)
}).
Call()
if err != nil {
return err
}
proposal := val.(*dto.ServiceProposal)
val, err = cancelable.
NewRequest(func() (interface{}, error) {
return manager.newDialog(consumerID, providerID, proposal.ProviderContacts[0])
}).
Cleanup(utils.InvokeOnSuccess(func(val interface{}) {
val.(communication.Dialog).Close()
})).
Call()
if err != nil {
return err
}
dialog := val.(communication.Dialog)
val, err = cancelable.
NewRequest(func() (interface{}, error) {
return session.RequestSessionCreate(dialog, proposal.ID)
}).
Call()
if err != nil {
dialog.Close()
return err
}
vpnSession := val.(*session.SessionDto)
stateChannel := make(chan openvpn.State, 10)
val, err = cancelable.
NewRequest(func() (interface{}, error) {
return manager.startOpenvpnClient(*vpnSession, consumerID, providerID, stateChannel)
}).
Cleanup(utils.InvokeOnSuccess(func(val interface{}) {
val.(openvpn.Client).Stop()
})).
Call()
if err != nil {
dialog.Close()
return err
}
openvpnClient := val.(openvpn.Client)
err = manager.waitForConnectedState(stateChannel, vpnSession.ID, cancelable.Cancelled)
if err != nil {
dialog.Close()
openvpnClient.Stop()
return err
}
manager.cleanConnection = func() {
log.Info(managerLogPrefix, "Closing active connection")
manager.status = statusDisconnecting()
if err := openvpnClient.Stop(); err != nil {
log.Warn(managerLogPrefix, "Openvpn client stopped with error: ", err)
} else {
log.Info(managerLogPrefix, "Openvpn client stopped")
}
}
go openvpnClientWaiter(openvpnClient, dialog)
go manager.consumeOpenvpnStates(stateChannel, vpnSession.ID)
return nil
}
func (manager *connectionManager) Status() ConnectionStatus {
return manager.status
}
func (manager *connectionManager) Disconnect() error {
if manager.status.State == NotConnected {
return ErrNoConnection
}
manager.cleanConnection()
return nil
}
func warnOnClean() {
log.Warn(managerLogPrefix, "Trying to close when there is nothing to close. Possible bug or race condition")
}
// TODO this can be extracted as a dependency later when node selection criteria become clear
func (manager *connectionManager) findProposalByProviderID(providerID identity.Identity) (*dto.ServiceProposal, error) {
proposals, err := manager.mysteriumClient.FindProposals(providerID.Address)
if err != nil {
return nil, err
}
if len(proposals) == 0 {
err = errors.New("provider has no service proposals")
return nil, err
}
return &proposals[0], nil
}
func openvpnClientWaiter(openvpnClient openvpn.Client, dialog communication.Dialog) {
err := openvpnClient.Wait()
if err != nil {
log.Warn(managerLogPrefix, "Openvpn client exited with error: ", err)
} else {
log.Info(managerLogPrefix, "Openvpn client exited")
}
dialog.Close()
}
func (manager *connectionManager) startOpenvpnClient(vpnSession session.SessionDto, consumerID, providerID identity.Identity, stateChannel chan openvpn.State) (openvpn.Client, error) {
openvpnClient, err := manager.newVpnClient(
vpnSession,
consumerID,
providerID,
channelToStateCallbackAdapter(stateChannel),
)
if err != nil {
return nil, err
}
if err = openvpnClient.Start(); err != nil {
return nil, err
}
return openvpnClient, nil
}
func (manager *connectionManager) waitForConnectedState(stateChannel <-chan openvpn.State, sessionID session.SessionID, cancelRequest utils.CancelChannel) error {
for {
select {
case state, more := <-stateChannel:
if !more {
return ErrOpenvpnProcessDied
}
switch state {
case openvpn.ConnectedState:
manager.onStateChanged(state, sessionID)
return nil
default:
manager.onStateChanged(state, sessionID)
}
case <-cancelRequest:
return ErrConnectionCancelled
}
}
}
func (manager *connectionManager) consumeOpenvpnStates(stateChannel <-chan openvpn.State, sessionID session.SessionID) {
for state := range stateChannel {
manager.onStateChanged(state, sessionID)
}
manager.status = statusNotConnected()
log.Debug(managerLogPrefix, "State updater stopped")
}
func (manager *connectionManager) onStateChanged(state openvpn.State, sessionID session.SessionID) {
switch state {
case openvpn.ConnectedState:
manager.statsKeeper.MarkSessionStart()
manager.status = statusConnected(sessionID)
case openvpn.ExitingState:
manager.statsKeeper.MarkSessionEnd()
case openvpn.ReconnectingState:
manager.status = statusReconnecting()
}
}
| 1 | 10,894 | Do we really need separate error in manager if utils.ErrRequestCancelled is the only error which indicates cancelation ? | mysteriumnetwork-node | go |
@@ -136,7 +136,7 @@ MODEL_PARAMS = {
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
- 'temporalImp': 'cpp',
+ 'temporalImp': 'tm_cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
MODEL_PARAMS = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# CPU usage encoder.
'encoders': {
'cpu': {
'fieldname': u'cpu',
'n': 200,
'name': u'cpu',
'type': 'ScalarEncoder',
'minval': 0.0,
'maxval': 100.0,
'w': 21
}
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.0001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '5',
},
'trainSPNetOnlyIfRequested': False,
},
}
| 1 | 21,191 | Leave as `cpp` since that still gives better results. | numenta-nupic | py |
@@ -27,7 +27,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
-func pausedPredicates(logger logr.Logger) predicate.Funcs {
+func PausedPredicates(logger logr.Logger) predicate.Funcs {
return predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
return processIfUnpaused(logger.WithValues("predicate", "updateEvent"), e.ObjectNew, e.MetaNew) | 1 | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"strings"
"github.com/go-logr/logr"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterutil "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
func pausedPredicates(logger logr.Logger) predicate.Funcs {
return predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
return processIfUnpaused(logger.WithValues("predicate", "updateEvent"), e.ObjectNew, e.MetaNew)
},
CreateFunc: func(e event.CreateEvent) bool {
return processIfUnpaused(logger.WithValues("predicate", "createEvent"), e.Object, e.Meta)
},
DeleteFunc: func(e event.DeleteEvent) bool {
return processIfUnpaused(logger.WithValues("predicate", "deleteEvent"), e.Object, e.Meta)
},
GenericFunc: func(e event.GenericEvent) bool {
return processIfUnpaused(logger.WithValues("predicate", "genericEvent"), e.Object, e.Meta)
},
}
}
func processIfUnpaused(logger logr.Logger, obj runtime.Object, meta metav1.Object) bool {
kind := strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind)
log := logger.WithValues("namespace", meta.GetNamespace(), kind, meta.GetName())
if clusterutil.HasPausedAnnotation(meta) {
log.V(4).Info("Resource is paused, will not attempt to map resource")
return false
}
log.V(4).Info("Resource is not paused, will attempt to map resource")
return true
}
| 1 | 16,852 | temp change so the predicate can be used by the `exp` package | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -387,6 +387,11 @@ class KeyboardInputGesture(inputCore.InputGesture):
# This could be for an event such as gyroscope movement,
# so don't report it.
return False
+ if self.vkCode in self.TOGGLE_KEYS:
+			# #5490: Don't report keys that toggle on and off.
+			# This is to avoid them being reported twice: once by the 'speak command keys' feature
+			# and once by the 'speak typed characters' feature.
+ return False
return not self.isCharacter
def _get_isCharacter(self): | 1 | # -*- coding: UTF-8 -*-
#keyboardHandler.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2006-2015 NV Access Limited, Peter Vágner, Aleksey Sadovoy
"""Keyboard support"""
import time
import re
import wx
import winUser
import vkCodes
import speech
import ui
from keyLabels import localizedKeyLabels
from logHandler import log
import queueHandler
import config
import api
import winInputHook
import inputCore
import tones
ignoreInjected=False
# Fake vk codes.
# These constants should be assigned to the name that NVDA will use for the key.
VK_WIN = "windows"
#: Keys which have been trapped by NVDA and should not be passed to the OS.
trappedKeys=set()
#: Tracks the number of keys passed through by request of the user.
#: If -1, pass through is disabled.
#: If 0 or higher then key downs and key ups will be passed straight through.
passKeyThroughCount=-1
#: The last key down passed through by request of the user.
lastPassThroughKeyDown = None
#: The last NVDA modifier key that was pressed with no subsequent key presses.
lastNVDAModifier = None
#: When the last NVDA modifier key was released.
lastNVDAModifierReleaseTime = None
#: Indicates that the NVDA modifier's special functionality should be bypassed until a key is next released.
bypassNVDAModifier = False
#: The modifiers currently being pressed.
currentModifiers = set()
#: A counter which is incremented each time a key is pressed.
#: Note that this may be removed in future, so reliance on it should generally be avoided.
#: @type: int
keyCounter = 0
#: The current sticky NVDA modifier key.
stickyNVDAModifier = None
#: Whether the sticky NVDA modifier is locked.
stickyNVDAModifierLocked = False
def passNextKeyThrough():
global passKeyThroughCount
if passKeyThroughCount==-1:
passKeyThroughCount=0
def isNVDAModifierKey(vkCode,extended):
if config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"] and vkCode==winUser.VK_INSERT and not extended:
return True
elif config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"] and vkCode==winUser.VK_INSERT and extended:
return True
elif config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"] and vkCode==winUser.VK_CAPITAL:
return True
else:
return False
def internal_keyDownEvent(vkCode,scanCode,extended,injected):
"""Event called by winInputHook when it receives a keyDown.
"""
try:
global lastNVDAModifier, lastNVDAModifierReleaseTime, bypassNVDAModifier, passKeyThroughCount, lastPassThroughKeyDown, currentModifiers, keyCounter, stickyNVDAModifier, stickyNVDAModifierLocked
# Injected keys should be ignored in some cases.
if injected and (ignoreInjected or not config.conf['keyboard']['handleInjectedKeys']):
return True
keyCode = (vkCode, extended)
if passKeyThroughCount >= 0:
# We're passing keys through.
if lastPassThroughKeyDown != keyCode:
# Increment the pass key through count.
# We only do this if this isn't a repeat of the previous key down, as we don't receive key ups for repeated key downs.
passKeyThroughCount += 1
lastPassThroughKeyDown = keyCode
return True
keyCounter += 1
stickyKeysFlags = winUser.getSystemStickyKeys().dwFlags
if stickyNVDAModifier and not stickyKeysFlags & winUser.SKF_STICKYKEYSON:
# Sticky keys has been disabled,
# so clear the sticky NVDA modifier.
currentModifiers.discard(stickyNVDAModifier)
stickyNVDAModifier = None
stickyNVDAModifierLocked = False
gesture = KeyboardInputGesture(currentModifiers, vkCode, scanCode, extended)
if not (stickyKeysFlags & winUser.SKF_STICKYKEYSON) and (bypassNVDAModifier or (keyCode == lastNVDAModifier and lastNVDAModifierReleaseTime and time.time() - lastNVDAModifierReleaseTime < 0.5)):
# The user wants the key to serve its normal function instead of acting as an NVDA modifier key.
# There may be key repeats, so ensure we do this until they stop.
bypassNVDAModifier = True
gesture.isNVDAModifierKey = False
lastNVDAModifierReleaseTime = None
if gesture.isNVDAModifierKey:
lastNVDAModifier = keyCode
if stickyKeysFlags & winUser.SKF_STICKYKEYSON:
if keyCode == stickyNVDAModifier:
if stickyKeysFlags & winUser.SKF_TRISTATE and not stickyNVDAModifierLocked:
# The NVDA modifier is being locked.
stickyNVDAModifierLocked = True
if stickyKeysFlags & winUser.SKF_AUDIBLEFEEDBACK:
tones.beep(1984, 60)
return False
else:
# The NVDA modifier is being unlatched/unlocked.
stickyNVDAModifier = None
stickyNVDAModifierLocked = False
if stickyKeysFlags & winUser.SKF_AUDIBLEFEEDBACK:
tones.beep(496, 60)
return False
else:
# The NVDA modifier is being latched.
if stickyNVDAModifier:
# Clear the previous sticky NVDA modifier.
currentModifiers.discard(stickyNVDAModifier)
stickyNVDAModifierLocked = False
stickyNVDAModifier = keyCode
if stickyKeysFlags & winUser.SKF_AUDIBLEFEEDBACK:
tones.beep(1984, 60)
else:
# Another key was pressed after the last NVDA modifier key, so it should not be passed through on the next press.
lastNVDAModifier = None
if gesture.isModifier:
if gesture.speechEffectWhenExecuted in (gesture.SPEECHEFFECT_PAUSE, gesture.SPEECHEFFECT_RESUME) and keyCode in currentModifiers:
# Ignore key repeats for the pause speech key to avoid speech stuttering as it continually pauses and resumes.
return True
currentModifiers.add(keyCode)
elif stickyNVDAModifier and not stickyNVDAModifierLocked:
# A non-modifier was pressed, so unlatch the NVDA modifier.
currentModifiers.discard(stickyNVDAModifier)
stickyNVDAModifier = None
try:
inputCore.manager.executeGesture(gesture)
trappedKeys.add(keyCode)
if canModifiersPerformAction(gesture.generalizedModifiers):
# #3472: These modifiers can perform an action if pressed alone
# and we've just consumed the main key.
				# Send special reserved vkcode (0xff) to at least notify the app's key state that something happened.
# This allows alt and windows to be bound to scripts and
# stops control+shift from switching keyboard layouts in cursorManager selection scripts.
KeyboardInputGesture((),0xff,0,False).send()
return False
except inputCore.NoInputGestureAction:
if gesture.isNVDAModifierKey:
# Never pass the NVDA modifier key to the OS.
trappedKeys.add(keyCode)
return False
except:
log.error("internal_keyDownEvent", exc_info=True)
return True
def internal_keyUpEvent(vkCode,scanCode,extended,injected):
"""Event called by winInputHook when it receives a keyUp.
"""
try:
global lastNVDAModifier, lastNVDAModifierReleaseTime, bypassNVDAModifier, passKeyThroughCount, lastPassThroughKeyDown, currentModifiers
# Injected keys should be ignored in some cases.
if injected and (ignoreInjected or not config.conf['keyboard']['handleInjectedKeys']):
return True
keyCode = (vkCode, extended)
if passKeyThroughCount >= 1:
if lastPassThroughKeyDown == keyCode:
# This key has been released.
lastPassThroughKeyDown = None
passKeyThroughCount -= 1
if passKeyThroughCount == 0:
passKeyThroughCount = -1
return True
if lastNVDAModifier and keyCode == lastNVDAModifier:
# The last pressed NVDA modifier key is being released and there were no key presses in between.
# The user may want to press it again quickly to pass it through.
lastNVDAModifierReleaseTime = time.time()
# If we were bypassing the NVDA modifier, stop doing so now, as there will be no more repeats.
bypassNVDAModifier = False
if keyCode != stickyNVDAModifier:
currentModifiers.discard(keyCode)
# help inputCore manage its sayAll state for keyboard modifiers -- inputCore itself has no concept of key releases
if not currentModifiers:
inputCore.manager.lastModifierWasInSayAll=False
if keyCode in trappedKeys:
trappedKeys.remove(keyCode)
return False
except:
log.error("", exc_info=True)
return True
#Register internal key press event with operating system
def initialize():
"""Initialises keyboard support."""
winInputHook.initialize()
winInputHook.setCallbacks(keyDown=internal_keyDownEvent,keyUp=internal_keyUpEvent)
def terminate():
winInputHook.terminate()
def getInputHkl():
"""Obtain the hkl currently being used for input.
This retrieves the hkl from the thread of the focused window.
"""
focus = api.getFocusObject()
if focus:
thread = focus.windowThreadID
else:
thread = 0
return winUser.user32.GetKeyboardLayout(thread)
def canModifiersPerformAction(modifiers):
"""Determine whether given generalized modifiers can perform an action if pressed alone.
For example, alt activates the menu bar if it isn't modifying another key.
"""
if inputCore.manager.isInputHelpActive:
return False
control = shift = other = False
for vk, ext in modifiers:
if vk in (winUser.VK_MENU, VK_WIN):
# Alt activates the menu bar.
# Windows activates the Start Menu.
return True
elif vk == winUser.VK_CONTROL:
control = True
elif vk == winUser.VK_SHIFT:
shift = True
elif (vk, ext) not in trappedKeys :
# Trapped modifiers aren't relevant.
other = True
if control and shift and not other:
# Shift+control switches keyboard layouts.
return True
return False
class KeyboardInputGesture(inputCore.InputGesture):
"""A key pressed on the traditional system keyboard.
"""
#: All normal modifier keys, where modifier vk codes are mapped to a more general modifier vk code or C{None} if not applicable.
#: @type: dict
NORMAL_MODIFIER_KEYS = {
winUser.VK_LCONTROL: winUser.VK_CONTROL,
winUser.VK_RCONTROL: winUser.VK_CONTROL,
winUser.VK_CONTROL: None,
winUser.VK_LSHIFT: winUser.VK_SHIFT,
winUser.VK_RSHIFT: winUser.VK_SHIFT,
winUser.VK_SHIFT: None,
winUser.VK_LMENU: winUser.VK_MENU,
winUser.VK_RMENU: winUser.VK_MENU,
winUser.VK_MENU: None,
winUser.VK_LWIN: VK_WIN,
winUser.VK_RWIN: VK_WIN,
VK_WIN: None,
}
#: All possible toggle key vk codes.
#: @type: frozenset
TOGGLE_KEYS = frozenset((winUser.VK_CAPITAL, winUser.VK_NUMLOCK, winUser.VK_SCROLL))
#: All possible keyboard layouts, where layout names are mapped to localised layout names.
#: @type: dict
LAYOUTS = {
# Translators: One of the keyboard layouts for NVDA.
"desktop": _("desktop"),
# Translators: One of the keyboard layouts for NVDA.
"laptop": _("laptop"),
}
@classmethod
def getVkName(cls, vkCode, isExtended):
if isinstance(vkCode, str):
return vkCode
name = vkCodes.byCode.get((vkCode, isExtended))
if not name and isExtended is not None:
# Whether the key is extended doesn't matter for many keys, so try None.
name = vkCodes.byCode.get((vkCode, None))
return name if name else ""
def __init__(self, modifiers, vkCode, scanCode, isExtended):
#: The keyboard layout in which this gesture was created.
#: @type: str
self.layout = config.conf["keyboard"]["keyboardLayout"]
self.modifiers = modifiers = set(modifiers)
# Don't double up if this is a modifier key repeat.
modifiers.discard((vkCode, isExtended))
if vkCode in (winUser.VK_DIVIDE, winUser.VK_MULTIPLY, winUser.VK_SUBTRACT, winUser.VK_ADD) and winUser.getKeyState(winUser.VK_NUMLOCK) & 1:
# Some numpad keys have the same vkCode regardless of numlock.
# For these keys, treat numlock as a modifier.
modifiers.add((winUser.VK_NUMLOCK, False))
self.generalizedModifiers = set((self.NORMAL_MODIFIER_KEYS.get(mod) or mod, extended) for mod, extended in modifiers)
self.vkCode = vkCode
self.scanCode = scanCode
self.isExtended = isExtended
super(KeyboardInputGesture, self).__init__()
def _get_bypassInputHelp(self):
		# #4226: Numlock must always be handled normally otherwise the Keyboard controller and Windows can get out of sync with each other in regard to this key state.
return self.vkCode==winUser.VK_NUMLOCK
def _get_isNVDAModifierKey(self):
return isNVDAModifierKey(self.vkCode, self.isExtended)
def _get_isModifier(self):
return self.vkCode in self.NORMAL_MODIFIER_KEYS or self.isNVDAModifierKey
def _get_mainKeyName(self):
if self.isNVDAModifierKey:
return "NVDA"
name = self.getVkName(self.vkCode, self.isExtended)
if name:
return name
if 32 < self.vkCode < 128:
return unichr(self.vkCode).lower()
vkChar = winUser.user32.MapVirtualKeyExW(self.vkCode, winUser.MAPVK_VK_TO_CHAR, getInputHkl())
if vkChar>0:
if vkChar == 43: # "+"
# A gesture identifier can't include "+" except as a separator.
return "plus"
return unichr(vkChar).lower()
if self.vkCode == 0xFF:
# #3468: This key is unknown to Windows.
# GetKeyNameText often returns something inappropriate in these cases
# due to disregarding the extended flag.
return "unknown_%02x" % self.scanCode
return winUser.getKeyNameText(self.scanCode, self.isExtended)
def _get_modifierNames(self):
modTexts = set()
for modVk, modExt in self.generalizedModifiers:
if isNVDAModifierKey(modVk, modExt):
modTexts.add("NVDA")
else:
modTexts.add(self.getVkName(modVk, None))
return modTexts
def _get__keyNamesInDisplayOrder(self):
return tuple(self.modifierNames) + (self.mainKeyName,)
def _get_logIdentifier(self):
return u"kb({layout}):{key}".format(layout=self.layout,
key="+".join(self._keyNamesInDisplayOrder))
def _get_displayName(self):
return "+".join(
# Translators: Reported for an unknown key press.
# %s will be replaced with the key code.
_("unknown %s") % key[8:] if key.startswith("unknown_")
else localizedKeyLabels.get(key.lower(), key) for key in self._keyNamesInDisplayOrder)
def _get_identifiers(self):
keyNames = set(self.modifierNames)
keyNames.add(self.mainKeyName)
keyName = "+".join(keyNames).lower()
return (
u"kb({layout}):{key}".format(layout=self.layout, key=keyName),
u"kb:{key}".format(key=keyName)
)
def _get_shouldReportAsCommand(self):
if self.isExtended and winUser.VK_VOLUME_MUTE <= self.vkCode <= winUser.VK_VOLUME_UP:
# Don't report volume controlling keys.
return False
if self.vkCode == 0xFF:
# #3468: This key is unknown to Windows.
# This could be for an event such as gyroscope movement,
# so don't report it.
return False
return not self.isCharacter
def _get_isCharacter(self):
# Aside from space, a key name of more than 1 character is a potential command and therefore is not a character.
if self.vkCode != winUser.VK_SPACE and len(self.mainKeyName) > 1:
return False
# If this key has modifiers other than shift, it is a command and not a character; e.g. shift+f is a character, but control+f is a command.
modifiers = self.generalizedModifiers
if modifiers and (len(modifiers) > 1 or tuple(modifiers)[0][0] != winUser.VK_SHIFT):
return False
return True
def _get_speechEffectWhenExecuted(self):
if inputCore.manager.isInputHelpActive:
return self.SPEECHEFFECT_CANCEL
if self.isExtended and winUser.VK_VOLUME_MUTE <= self.vkCode <= winUser.VK_VOLUME_UP:
return None
if self.vkCode == 0xFF:
# #3468: This key is unknown to Windows.
# This could be for an event such as gyroscope movement,
# so don't interrupt speech.
return None
if not config.conf['keyboard']['speechInterruptForCharacters'] and (not self.shouldReportAsCommand or self.vkCode in (winUser.VK_SHIFT, winUser.VK_LSHIFT, winUser.VK_RSHIFT)):
return None
if self.vkCode==winUser.VK_RETURN and not config.conf['keyboard']['speechInterruptForEnter']:
return None
if self.vkCode in (winUser.VK_SHIFT, winUser.VK_LSHIFT, winUser.VK_RSHIFT):
return self.SPEECHEFFECT_RESUME if speech.isPaused else self.SPEECHEFFECT_PAUSE
return self.SPEECHEFFECT_CANCEL
def reportExtra(self):
if self.vkCode in self.TOGGLE_KEYS:
wx.CallLater(30, self._reportToggleKey)
def _reportToggleKey(self):
toggleState = winUser.getKeyState(self.vkCode) & 1
key = self.mainKeyName
ui.message(u"{key} {state}".format(
key=localizedKeyLabels.get(key.lower(), key),
state=_("on") if toggleState else _("off")))
def send(self):
global ignoreInjected
keys = []
for vk, ext in self.generalizedModifiers:
if vk == VK_WIN:
if winUser.getKeyState(winUser.VK_LWIN) & 32768 or winUser.getKeyState(winUser.VK_RWIN) & 32768:
# Already down.
continue
vk = winUser.VK_LWIN
elif winUser.getKeyState(vk) & 32768:
# Already down.
continue
keys.append((vk, 0, ext))
keys.append((self.vkCode, self.scanCode, self.isExtended))
try:
ignoreInjected=True
if winUser.getKeyState(self.vkCode) & 32768:
# This key is already down, so send a key up for it first.
winUser.keybd_event(self.vkCode, self.scanCode, self.isExtended + 2, 0)
# Send key down events for these keys.
for vk, scan, ext in keys:
winUser.keybd_event(vk, scan, ext, 0)
# Send key up events for the keys in reverse order.
for vk, scan, ext in reversed(keys):
winUser.keybd_event(vk, scan, ext + 2, 0)
if not queueHandler.isPendingItems(queueHandler.eventQueue):
time.sleep(0.01)
wx.Yield()
finally:
ignoreInjected=False
@classmethod
def fromName(cls, name):
"""Create an instance given a key name.
@param name: The key name.
@type name: str
@return: A gesture for the specified key.
@rtype: L{KeyboardInputGesture}
"""
keyNames = name.split("+")
keys = []
for keyName in keyNames:
if keyName == "plus":
# A key name can't include "+" except as a separator.
keyName = "+"
if keyName == VK_WIN:
vk = winUser.VK_LWIN
ext = False
elif len(keyName) == 1:
ext = False
requiredMods, vk = winUser.VkKeyScanEx(keyName, getInputHkl())
if requiredMods & 1:
keys.append((winUser.VK_SHIFT, False))
if requiredMods & 2:
keys.append((winUser.VK_CONTROL, False))
if requiredMods & 4:
keys.append((winUser.VK_MENU, False))
# Not sure whether we need to support the Hankaku modifier (& 8).
else:
vk, ext = vkCodes.byName[keyName.lower()]
if ext is None:
ext = False
keys.append((vk, ext))
if not keys:
raise ValueError
return cls(keys[:-1], vk, 0, ext)
RE_IDENTIFIER = re.compile(r"^kb(?:\((.+?)\))?:(.*)$")
@classmethod
def getDisplayTextForIdentifier(cls, identifier):
layout, keys = cls.RE_IDENTIFIER.match(identifier).groups()
dispSource = None
if layout:
try:
# Translators: Used when describing keys on the system keyboard with a particular layout.
# %s is replaced with the layout name.
# For example, in English, this might produce "laptop keyboard".
dispSource = _("%s keyboard") % cls.LAYOUTS[layout]
except KeyError:
pass
if not dispSource:
# Translators: Used when describing keys on the system keyboard applying to all layouts.
dispSource = _("keyboard, all layouts")
keys = set(keys.split("+"))
names = []
main = None
try:
# If present, the NVDA key should appear first.
keys.remove("nvda")
names.append("NVDA")
except KeyError:
pass
for key in keys:
try:
# vkCodes.byName values are (vk, ext)
vk = vkCodes.byName[key][0]
except KeyError:
# This could be a fake vk.
vk = key
label = localizedKeyLabels.get(key, key)
if vk in cls.NORMAL_MODIFIER_KEYS:
names.append(label)
else:
# The main key must be last, so handle that outside the loop.
main = label
names.append(main)
return dispSource, "+".join(names)
inputCore.registerGestureSource("kb", KeyboardInputGesture)
def injectRawKeyboardInput(isPress, code, isExtended):
	"""Inject raw input from a system keyboard that is not handled natively by Windows.
For example, this might be used for input from a QWERTY keyboard on a braille display.
NVDA will treat the key as if it had been pressed on a normal system keyboard.
If it is not handled by NVDA, it will be sent to the operating system.
@param isPress: Whether the key is being pressed.
@type isPress: bool
@param code: The scan code (PC set 1) of the key.
@type code: int
@param isExtended: Whether this is an extended key.
@type isExtended: bool
"""
mapScan = code
if isExtended:
# Change what we pass to MapVirtualKeyEx, but don't change what NVDA gets.
mapScan |= 0xE000
vkCode = winUser.user32.MapVirtualKeyExW(mapScan, winUser.MAPVK_VSC_TO_VK_EX, getInputHkl())
if isPress:
shouldSend = internal_keyDownEvent(vkCode, code, isExtended, False)
else:
shouldSend = internal_keyUpEvent(vkCode, code, isExtended, False)
if shouldSend:
flags = 0
if not isPress:
flags |= 2
if isExtended:
flags |= 1
global ignoreInjected
ignoreInjected = True
try:
winUser.keybd_event(vkCode, code, flags, None)
wx.Yield()
finally:
ignoreInjected = False
| 1 | 17,890 | Just as a tiny clarification, this isn't affected in any way by "speak typed characters". That is, "caps lock on", etc. is always spoken, even if speak typed characters is off. | nvaccess-nvda | py |
@@ -61,10 +61,11 @@ func (r *ReconcileHiveConfig) deployHive(hLog log.FieldLogger, h *resource.Helpe
}
hLog.Info("deployment applied (%s)", result)
- // Deploy the desired ClusterImageSets representing installable releases of OpenShift.
- // TODO: in future this should be pipelined somehow.
applyAssets := []string{
"config/manager/service.yaml",
+
+ // Deploy the desired ClusterImageSets representing installable releases of OpenShift.
+ // TODO: in future this should be pipelined somehow.
"config/clusterimagesets/openshift-4.0-latest.yaml",
"config/clusterimagesets/openshift-4.0-beta3.yaml",
"config/clusterimagesets/openshift-4.0-beta4.yaml", | 1 | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hive
import (
"context"
log "github.com/sirupsen/logrus"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1alpha1"
"github.com/openshift/hive/pkg/controller/images"
"github.com/openshift/hive/pkg/operator/assets"
"github.com/openshift/hive/pkg/operator/util"
"github.com/openshift/hive/pkg/resource"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceread"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
)
func (r *ReconcileHiveConfig) deployHive(hLog log.FieldLogger, h *resource.Helper, instance *hivev1.HiveConfig, recorder events.Recorder) error {
asset := assets.MustAsset("config/manager/deployment.yaml")
hLog.Debug("reading deployment")
hiveDeployment := resourceread.ReadDeploymentV1OrDie(asset)
if r.hiveImage != "" {
hiveDeployment.Spec.Template.Spec.Containers[0].Image = r.hiveImage
// NOTE: overwriting all environment vars here, there are no others at the time of
// writing:
hiveImageEnvVar := corev1.EnvVar{
Name: images.HiveImageEnvVar,
Value: r.hiveImage,
}
hiveDeployment.Spec.Template.Spec.Containers[0].Env = append(hiveDeployment.Spec.Template.Spec.Containers[0].Env, hiveImageEnvVar)
}
result, err := h.ApplyRuntimeObject(hiveDeployment, scheme.Scheme)
if err != nil {
hLog.WithError(err).Error("error applying deployment")
return err
}
hLog.Info("deployment applied (%s)", result)
// Deploy the desired ClusterImageSets representing installable releases of OpenShift.
// TODO: in future this should be pipelined somehow.
applyAssets := []string{
"config/manager/service.yaml",
"config/clusterimagesets/openshift-4.0-latest.yaml",
"config/clusterimagesets/openshift-4.0-beta3.yaml",
"config/clusterimagesets/openshift-4.0-beta4.yaml",
"config/rbac/hive_admin_role.yaml",
"config/rbac/hive_admin_role_binding.yaml",
"config/rbac/hive_reader_role.yaml",
"config/rbac/hive_reader_role_binding.yaml",
}
for _, a := range applyAssets {
err = util.ApplyAsset(h, a, hLog)
if err != nil {
return err
}
}
// Remove legacy ClusterImageSets we do not want installable anymore.
removeImageSets := []string{
"openshift-v4.0-beta2",
"openshift-v4.0.0-0.8",
}
for _, isName := range removeImageSets {
clusterImageSet := &hivev1.ClusterImageSet{}
err := r.Get(context.Background(), types.NamespacedName{Name: isName}, clusterImageSet)
if err != nil && !errors.IsNotFound(err) {
hLog.WithError(err).Error("error looking for obsolete ClusterImageSet")
return err
} else if err != nil {
hLog.WithField("clusterImageSet", isName).Debug("legacy ClusterImageSet does not exist")
} else {
err = r.Delete(context.Background(), clusterImageSet)
if err != nil {
hLog.WithError(err).WithField("clusterImageSet", clusterImageSet).Error(
"error deleting outdated ClusterImageSet")
return err
}
hLog.WithField("clusterImageSet", isName).Info("deleted outdated ClusterImageSet")
}
}
hLog.Info("all hive components successfully reconciled")
return nil
}
| 1 | 6,296 | @dgoodwin Will it cause an issue when the issue get fixed in OLM? | openshift-hive | go |
@@ -29,9 +29,8 @@ namespace Nethermind.Blockchain.Processing
IgnoreParentNotOnMainChain = 16,
DoNotVerifyNonce = 32,
DoNotUpdateHead = 64,
- DumpParityTraces = 128,
- DumpGetTraces = 256,
- All = 511,
+ RerunWithTraceOnFailure = 128,
+ All = 255,
ProducingBlock = NoValidation | ReadOnlyChain,
Trace = ForceProcessing | ReadOnlyChain | DoNotVerifyNonce | NoValidation,
Beam = IgnoreParentNotOnMainChain | DoNotUpdateHead | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
namespace Nethermind.Blockchain.Processing
{
[Flags]
public enum ProcessingOptions
{
None = 0,
ReadOnlyChain = 1,
ForceProcessing = 2,
StoreReceipts = 4,
NoValidation = 8,
IgnoreParentNotOnMainChain = 16,
DoNotVerifyNonce = 32,
DoNotUpdateHead = 64,
DumpParityTraces = 128,
DumpGetTraces = 256,
All = 511,
ProducingBlock = NoValidation | ReadOnlyChain,
Trace = ForceProcessing | ReadOnlyChain | DoNotVerifyNonce | NoValidation,
Beam = IgnoreParentNotOnMainChain | DoNotUpdateHead
}
public static class ProcessingOptionsExtensions
{
public static bool IsReadOnly(this ProcessingOptions processingOptions) => (processingOptions & ProcessingOptions.ReadOnlyChain) == ProcessingOptions.ReadOnlyChain;
public static bool IsNotReadOnly(this ProcessingOptions processingOptions) => (processingOptions & ProcessingOptions.ReadOnlyChain) != ProcessingOptions.ReadOnlyChain;
public static bool IsProducingBlock(this ProcessingOptions processingOptions) => (processingOptions & ProcessingOptions.ProducingBlock) == ProcessingOptions.ProducingBlock;
}
} | 1 | 24,263 | This option is now unnecessary | NethermindEth-nethermind | .cs |
@@ -360,10 +360,7 @@ func TestWriter(t *testing.T) {
t.Fatal(err)
}
- b, err := NewBucket(subdir)
- if err != nil {
- t.Fatal(err)
- }
+ b := &bucket{dir: subdir}
ctx := context.Background()
names := []string{
// Backslashes not allowed. | 1 | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileblob
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/google/go-x-cloud/blob"
)
func TestNewBucket(t *testing.T) {
t.Run("DirMissing", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
_, err = NewBucket(filepath.Join(dir, "notfound"))
if err == nil {
t.Error("NewBucket did not return error")
}
})
t.Run("File", func(t *testing.T) {
f, err := ioutil.TempFile("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.Remove(f.Name())
_, err = NewBucket(f.Name())
if err == nil {
t.Error("NewBucket did not return error")
}
})
t.Run("DirExists", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
_, err = NewBucket(dir)
if err != nil {
t.Fatal(err)
}
})
}
func TestReader(t *testing.T) {
t.Run("MetadataOnly", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
const fileContent = "Hello, World!\n"
err = ioutil.WriteFile(filepath.Join(dir, "foo.txt"), []byte(fileContent), 0666)
if err != nil {
t.Fatal(err)
}
b, err := NewBucket(dir)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
r, err := b.NewRangeReader(ctx, "foo.txt", 0, 0)
if err != nil {
t.Fatal("NewRangeReader:", err)
}
defer func() {
if err := r.Close(); err != nil {
t.Error("Close:", err)
}
}()
if got := r.Size(); got != int64(len(fileContent)) {
t.Errorf("r.Attrs().Size at beginning = %d; want %d", got, len(fileContent))
}
if got, err := ioutil.ReadAll(r); err != nil {
t.Errorf("Read error: %v", err)
} else if len(got) > 0 {
t.Errorf("ioutil.ReadAll(r) = %q; fileContent \"\"", got)
}
if got := r.Size(); got != int64(len(fileContent)) {
t.Errorf("r.Attrs().Size at end = %d; want %d", got, len(fileContent))
}
})
t.Run("WholeBlob", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
const want = "Hello, World!\n"
err = ioutil.WriteFile(filepath.Join(dir, "foo.txt"), []byte(want), 0666)
if err != nil {
t.Fatal(err)
}
b, err := NewBucket(dir)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
r, err := b.NewReader(ctx, "foo.txt")
if err != nil {
t.Fatal("NewReader:", err)
}
defer func() {
if err := r.Close(); err != nil {
t.Error("Close:", err)
}
}()
if got := r.Size(); got != int64(len(want)) {
t.Errorf("r.Attrs().Size at beginning = %d; want %d", got, len(want))
}
if got, err := ioutil.ReadAll(r); err != nil {
t.Errorf("Read error: %v", err)
} else if string(got) != want {
t.Errorf("ioutil.ReadAll(r) = %q; want %q", got, want)
}
if got := r.Size(); got != int64(len(want)) {
t.Errorf("r.Attrs().Size at end = %d; want %d", got, len(want))
}
})
t.Run("WithSlashSep", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
const want = "Hello, World!\n"
if err := os.Mkdir(filepath.Join(dir, "foo"), 0777); err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(dir, "foo", "bar.txt"), []byte(want), 0666)
if err != nil {
t.Fatal(err)
}
b, err := NewBucket(dir)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
r, err := b.NewReader(ctx, "foo/bar.txt")
if err != nil {
t.Fatal("NewReader:", err)
}
defer func() {
if err := r.Close(); err != nil {
t.Error("Close:", err)
}
}()
if got, err := ioutil.ReadAll(r); err != nil {
t.Errorf("Read error: %v", err)
} else if string(got) != want {
t.Errorf("ioutil.ReadAll(r) = %q; want %q", got, want)
}
})
t.Run("BadNames", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
subdir := filepath.Join(dir, "root")
if err := os.Mkdir(subdir, 0777); err != nil {
t.Fatal(err)
}
const want = "Hello, World!\n"
if err := os.Mkdir(filepath.Join(subdir, "foo"), 0777); err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(subdir, "foo", "bar.txt"), []byte(want), 0666)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(subdir, "baz.txt"), []byte(want), 0666)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(dir, "passwd.txt"), []byte(want), 0666)
if err != nil {
t.Fatal(err)
}
b, err := NewBucket(subdir)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
names := []string{
// Backslashes not allowed.
"foo\\bar.txt",
// Aliasing problems with unclean paths.
"./baz.txt",
"foo//bar.txt",
"foo/../baz.txt",
// Reaching outside directory.
"../passwd.txt",
"foo/../../passwd.txt",
"/baz.txt",
"C:\\baz.txt",
"C:/baz.txt",
}
for _, name := range names {
r, err := b.NewReader(ctx, name)
if err == nil {
r.Close()
t.Errorf("b.NewReader(ctx, %q) did not return error", name)
}
}
})
t.Run("Range", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
const wholeFile = "Hello, World!\n"
const offset, rangeLen = 1, 4
const want = "ello"
err = ioutil.WriteFile(filepath.Join(dir, "foo.txt"), []byte(wholeFile), 0666)
if err != nil {
t.Fatal(err)
}
b, err := NewBucket(dir)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
r, err := b.NewRangeReader(ctx, "foo.txt", offset, rangeLen)
if err != nil {
t.Fatal("NewRangeReader:", err)
}
defer func() {
if err := r.Close(); err != nil {
t.Error("Close:", err)
}
}()
if got := r.Size(); got != int64(len(wholeFile)) {
t.Errorf("r.Attrs().Size at beginning = %d; want %d", got, len(want))
}
if got, err := ioutil.ReadAll(r); err != nil {
t.Errorf("Read error: %v", err)
} else if string(got) != want {
t.Errorf("ioutil.ReadAll(r) = %q; want %q", got, want)
}
if got := r.Size(); got != int64(len(wholeFile)) {
t.Errorf("r.Attrs().Size at end = %d; want %d", got, len(want))
}
})
t.Run("ObjectDoesNotExist", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
b, err := NewBucket(dir)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
if _, err := b.NewRangeReader(ctx, "foo_not_exist.txt", 0, 0); err == nil || !blob.IsNotExist(err) {
t.Errorf("NewReader: got %#v, want not exist error", err)
}
})
// TODO(light): For sake of conformance test completionism, this should also
// test range that goes past the end of the blob, but then we're just testing
// the OS for fileblob.
}
func TestWriter(t *testing.T) {
t.Run("Basic", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
b, err := NewBucket(dir)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
w, err := b.NewWriter(ctx, "foo.txt", nil)
if err != nil {
t.Fatal("NewWriter:", err)
}
const want = "Hello, World!\n"
if n, err := w.Write([]byte(want)); n != len(want) || err != nil {
t.Errorf("w.Write(%q) = %d, %v; want %d, <nil>", want, n, err, len(want))
}
if err := w.Close(); err != nil {
t.Errorf("w.Close() = %v", err)
}
if got, err := ioutil.ReadFile(filepath.Join(dir, "foo.txt")); err != nil {
t.Errorf("Read foo.txt: %v", err)
} else if string(got) != want {
t.Errorf("ioutil.ReadFile(\".../foo.txt\") = %q; want %q", got, want)
}
})
t.Run("WithSlashSep", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
b, err := NewBucket(dir)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
w, err := b.NewWriter(ctx, "foo/bar.txt", nil)
if err != nil {
t.Fatal("NewWriter:", err)
}
const want = "Hello, World!\n"
if n, err := w.Write([]byte(want)); n != len(want) || err != nil {
t.Errorf("w.Write(%q) = %d, %v; want %d, <nil>", want, n, err, len(want))
}
if err := w.Close(); err != nil {
t.Errorf("w.Close() = %v", err)
}
fpath := filepath.Join("foo", "bar.txt")
if got, err := ioutil.ReadFile(filepath.Join(dir, fpath)); err != nil {
t.Errorf("Read %s: %v", fpath, err)
} else if string(got) != want {
t.Errorf("ioutil.ReadFile(%q) = %q; want %q", filepath.Join("...", fpath), got, want)
}
})
t.Run("BadNames", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
subdir := filepath.Join(dir, "foo")
if err := os.Mkdir(subdir, 0777); err != nil {
t.Fatal(err)
}
b, err := NewBucket(subdir)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
names := []string{
// Backslashes not allowed.
"foo\\bar.txt",
// Aliasing problems with unclean paths.
"./baz.txt",
"foo//bar.txt",
"foo/../baz.txt",
// Reaching outside directory.
"../passwd.txt",
"foo/../../passwd.txt",
"/baz.txt",
"C:\\baz.txt",
"C:/baz.txt",
}
for _, name := range names {
w, err := b.NewWriter(ctx, name, nil)
if err == nil {
w.Close()
t.Errorf("b.NewWriter(ctx, %q, nil) did not return an error", name)
}
}
})
}
func TestDelete(t *testing.T) {
t.Run("Exists", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
const fileContent = "Hello, World!\n"
err = ioutil.WriteFile(filepath.Join(dir, "foo.txt"), []byte(fileContent), 0666)
if err != nil {
t.Fatal(err)
}
b, err := NewBucket(dir)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
if err := b.Delete(ctx, "foo.txt"); err != nil {
t.Error("Delete:", err)
}
if _, err := os.Stat(filepath.Join(dir, "foo.txt")); !os.IsNotExist(err) {
t.Errorf("os.Stat(\".../foo.txt\") = _, %v; want not exist", err)
}
})
t.Run("DoesNotExistError", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
b, err := NewBucket(dir)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
if err := b.Delete(ctx, "foo.txt"); err == nil || !blob.IsNotExist(err) {
t.Errorf("Delete: got %#v, want not exist error", err)
}
if _, err := os.Stat(filepath.Join(dir, "foo.txt")); !os.IsNotExist(err) {
t.Errorf("os.Stat(\".../foo.txt\") = _, %v; want not exist", err)
}
})
t.Run("BadNames", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
subdir := filepath.Join(dir, "root")
if err := os.Mkdir(subdir, 0777); err != nil {
t.Fatal(err)
}
const want = "Hello, World!\n"
if err := os.Mkdir(filepath.Join(subdir, "foo"), 0777); err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(subdir, "foo", "bar.txt"), []byte(want), 0666)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(subdir, "baz.txt"), []byte(want), 0666)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(dir, "passwd.txt"), []byte(want), 0666)
if err != nil {
t.Fatal(err)
}
b, err := NewBucket(subdir)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
names := []string{
// Backslashes not allowed.
"foo\\bar.txt",
// Aliasing problems with unclean paths.
"./baz.txt",
"foo//bar.txt",
"foo/../baz.txt",
// Reaching outside directory.
"../passwd.txt",
"foo/../../passwd.txt",
"/baz.txt",
"C:\\baz.txt",
"C:/baz.txt",
}
for _, name := range names {
err := b.Delete(ctx, name)
if err == nil {
t.Errorf("b.Delete(ctx, %q) did not return error", name)
}
}
mustExist := []string{
filepath.Join(subdir, "foo", "bar.txt"),
filepath.Join(subdir, "baz.txt"),
filepath.Join(dir, "passwd.txt"),
}
for _, name := range mustExist {
if _, err := os.Stat(name); err != nil {
t.Errorf("os.Stat(%q): %v", name, err)
}
}
})
}
| 1 | 10,180 | Don't jump down to the unexported interface: just set the content type to `"application/octet-stream"` explicitly when creating the `Writer`. | google-go-cloud | go |
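A rough sketch of the reviewer's suggestion, for illustration only: set the content type through the public options when the writer is created, instead of reaching into an unexported interface. It reuses the `b`, `ctx`, and `t` already set up in these tests and assumes the blob package's `WriterOptions` exposes a `ContentType` field; treat that field name as an assumption rather than something shown in this file.

	w, err := b.NewWriter(ctx, "foo.txt", &blob.WriterOptions{
		ContentType: "application/octet-stream", // assumed WriterOptions field
	})
	if err != nil {
		t.Fatal("NewWriter:", err)
	}
	if _, err := w.Write([]byte("Hello, World!\n")); err != nil {
		t.Error("Write:", err)
	}
	if err := w.Close(); err != nil {
		t.Errorf("w.Close() = %v", err)
	}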
@@ -176,8 +176,11 @@ class BufferedUpdates implements Accountable {
}
void clearDeleteTerms() {
- deleteTerms.clear();
numTermDeletes.set(0);
+ deleteTerms.forEach((term, docIDUpto) -> {
+ bytesUsed.addAndGet(-(BYTES_PER_DEL_TERM + term.bytes.length + (Character.BYTES * term.field().length())));
+ });
+ deleteTerms.clear();
}
void clear() { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.lucene.index.DocValuesUpdate.BinaryDocValuesUpdate;
import org.apache.lucene.index.DocValuesUpdate.NumericDocValuesUpdate;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.RamUsageEstimator;
/** Holds buffered deletes and updates, by docID, term or query for a
* single segment. This is used to hold buffered pending
* deletes and updates against the to-be-flushed segment. Once the
* deletes and updates are pushed (on flush in DocumentsWriter), they
* are converted to a {@link FrozenBufferedUpdates} instance and
* pushed to the {@link BufferedUpdatesStream}. */
// NOTE: instances of this class are accessed either via a private
// instance on DocumentWriterPerThread, or via sync'd code by
// DocumentsWriterDeleteQueue
class BufferedUpdates implements Accountable {
/* Rough logic: HashMap has an array[Entry] w/ varying
load factor (say 2 * POINTER). Entry is object w/ Term
key, Integer val, int hash, Entry next
(OBJ_HEADER + 3*POINTER + INT). Term is object w/
String field and String text (OBJ_HEADER + 2*POINTER).
Term's field is String (OBJ_HEADER + 4*INT + POINTER +
OBJ_HEADER + string.length*CHAR).
Term's text is String (OBJ_HEADER + 4*INT + POINTER +
OBJ_HEADER + string.length*CHAR). Integer is
OBJ_HEADER + INT. */
final static int BYTES_PER_DEL_TERM = 9*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 7*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 10*Integer.BYTES;
/* Rough logic: del docIDs are List<Integer>. Say list
allocates ~2X size (2*POINTER). Integer is OBJ_HEADER
+ int */
final static int BYTES_PER_DEL_DOCID = 2*RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + Integer.BYTES;
/* Rough logic: HashMap has an array[Entry] w/ varying
load factor (say 2 * POINTER). Entry is object w/
Query key, Integer val, int hash, Entry next
(OBJ_HEADER + 3*POINTER + INT). Query we often
undercount (say 24 bytes). Integer is OBJ_HEADER + INT. */
final static int BYTES_PER_DEL_QUERY = 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 2*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2*Integer.BYTES + 24;
final AtomicInteger numTermDeletes = new AtomicInteger();
final AtomicInteger numFieldUpdates = new AtomicInteger();
final Map<Term,Integer> deleteTerms = new HashMap<>(); // TODO cut this over to FieldUpdatesBuffer
final Map<Query,Integer> deleteQueries = new HashMap<>();
final List<Integer> deleteDocIDs = new ArrayList<>();
final Map<String, FieldUpdatesBuffer> fieldUpdates = new HashMap<>();
public static final Integer MAX_INT = Integer.valueOf(Integer.MAX_VALUE);
private final Counter bytesUsed = Counter.newCounter(true);
final Counter fieldUpdatesBytesUsed = Counter.newCounter(true);
private final static boolean VERBOSE_DELETES = false;
long gen;
final String segmentName;
public BufferedUpdates(String segmentName) {
this.segmentName = segmentName;
}
@Override
public String toString() {
if (VERBOSE_DELETES) {
return "gen=" + gen + " numTerms=" + numTermDeletes + ", deleteTerms=" + deleteTerms
+ ", deleteQueries=" + deleteQueries + ", deleteDocIDs=" + deleteDocIDs + ", fieldUpdates=" + fieldUpdates
+ ", bytesUsed=" + bytesUsed;
} else {
String s = "gen=" + gen;
if (numTermDeletes.get() != 0) {
s += " " + numTermDeletes.get() + " deleted terms (unique count=" + deleteTerms.size() + ")";
}
if (deleteQueries.size() != 0) {
s += " " + deleteQueries.size() + " deleted queries";
}
if (deleteDocIDs.size() != 0) {
s += " " + deleteDocIDs.size() + " deleted docIDs";
}
if (numFieldUpdates.get() != 0) {
s += " " + numFieldUpdates.get() + " field updates";
}
if (bytesUsed.get() != 0) {
s += " bytesUsed=" + bytesUsed.get();
}
return s;
}
}
public void addQuery(Query query, int docIDUpto) {
Integer current = deleteQueries.put(query, docIDUpto);
// increment bytes used only if the query wasn't added so far.
if (current == null) {
bytesUsed.addAndGet(BYTES_PER_DEL_QUERY);
}
}
public void addDocID(int docID) {
deleteDocIDs.add(Integer.valueOf(docID));
bytesUsed.addAndGet(BYTES_PER_DEL_DOCID);
}
public void addTerm(Term term, int docIDUpto) {
Integer current = deleteTerms.get(term);
if (current != null && docIDUpto < current) {
// Only record the new number if it's greater than the
// current one. This is important because if multiple
// threads are replacing the same doc at nearly the
// same time, it's possible that one thread that got a
// higher docID is scheduled before the other
// threads. If we blindly replace than we can
// incorrectly get both docs indexed.
return;
}
deleteTerms.put(term, Integer.valueOf(docIDUpto));
// note that if current != null then it means there's already a buffered
// delete on that term, therefore we seem to over-count. this over-counting
// is done to respect IndexWriterConfig.setMaxBufferedDeleteTerms.
numTermDeletes.incrementAndGet();
if (current == null) {
bytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.bytes.length + (Character.BYTES * term.field().length()));
}
}
void addNumericUpdate(NumericDocValuesUpdate update, int docIDUpto) {
FieldUpdatesBuffer buffer = fieldUpdates.computeIfAbsent(update.field, k -> new FieldUpdatesBuffer(fieldUpdatesBytesUsed, update, docIDUpto));
if (update.hasValue) {
buffer.addUpdate(update.term, update.getValue(), docIDUpto);
} else {
buffer.addNoValue(update.term, docIDUpto);
}
numFieldUpdates.incrementAndGet();
}
void addBinaryUpdate(BinaryDocValuesUpdate update, int docIDUpto) {
FieldUpdatesBuffer buffer = fieldUpdates.computeIfAbsent(update.field, k -> new FieldUpdatesBuffer(fieldUpdatesBytesUsed, update, docIDUpto));
if (update.hasValue) {
buffer.addUpdate(update.term, update.getValue(), docIDUpto);
} else {
buffer.addNoValue(update.term, docIDUpto);
}
numFieldUpdates.incrementAndGet();
}
void clearDeleteTerms() {
deleteTerms.clear();
numTermDeletes.set(0);
}
void clear() {
deleteTerms.clear();
deleteQueries.clear();
deleteDocIDs.clear();
numTermDeletes.set(0);
numFieldUpdates.set(0);
fieldUpdates.clear();
bytesUsed.addAndGet(-bytesUsed.get());
fieldUpdatesBytesUsed.addAndGet(-fieldUpdatesBytesUsed.get());
}
boolean any() {
return deleteTerms.size() > 0 || deleteDocIDs.size() > 0 || deleteQueries.size() > 0 || numFieldUpdates.get() > 0;
}
@Override
public long ramBytesUsed() {
return bytesUsed.get() + fieldUpdatesBytesUsed.get();
}
  void clearDeletedDocIds() {
    // Compute the delta before clearing; otherwise the size is already zero
    // and the accounting never decreases.
    bytesUsed.addAndGet(-deleteDocIDs.size() * BufferedUpdates.BYTES_PER_DEL_DOCID);
    deleteDocIDs.clear();
  }
}
| 1 | 33,478 | Instead of counting this here on clear, can we use a second counter for the deleteTerms next to `bytesUsed`? This would be great. It doesn't need to be thread safe IMO | apache-lucene-solr | java |
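A rough sketch of the counter idea from the comment above, illustrative rather than the committed patch: give the deleteTerms accounting its own counter next to `bytesUsed`, so clearing becomes a single subtraction instead of a walk over the map. These members would sit inside BufferedUpdates alongside the existing fields; the field name is invented here.

  // Non-thread-safe counter is fine per the review comment.
  final Counter deleteTermsBytesUsed = Counter.newCounter();

  public void addTerm(Term term, int docIDUpto) {
    Integer current = deleteTerms.get(term);
    if (current != null && docIDUpto < current) {
      return; // keep the existing docIDUpto guard
    }
    deleteTerms.put(term, Integer.valueOf(docIDUpto));
    numTermDeletes.incrementAndGet();
    if (current == null) {
      long termBytes = BYTES_PER_DEL_TERM + term.bytes.length + (Character.BYTES * term.field().length());
      bytesUsed.addAndGet(termBytes);
      deleteTermsBytesUsed.addAndGet(termBytes);
    }
  }

  void clearDeleteTerms() {
    deleteTerms.clear();
    numTermDeletes.set(0);
    bytesUsed.addAndGet(-deleteTermsBytesUsed.get());
    deleteTermsBytesUsed.addAndGet(-deleteTermsBytesUsed.get());
  }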
@@ -24,10 +24,10 @@ class User extends UserBase
* Validation rules
*/
public $rules = [
- 'email' => 'required|between:6,255|email|unique:backend_users',
- 'login' => 'required|between:2,255|unique:backend_users',
- 'password' => 'required:create|between:4,255|confirmed',
- 'password_confirmation' => 'required_with:password|between:4,255'
+ 'email' => 'required|between:6,191|email|unique:backend_users',
+ 'login' => 'required|between:2,191|unique:backend_users',
+ 'password' => 'required:create|between:8,191|confirmed',
+ 'password_confirmation' => 'required_with:password|between:8,191'
];
/** | 1 | <?php namespace Backend\Models;
use Mail;
use Event;
use Backend;
use October\Rain\Auth\Models\User as UserBase;
/**
* Administrator user model
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class User extends UserBase
{
use \October\Rain\Database\Traits\SoftDelete;
/**
* @var string The database table used by the model.
*/
protected $table = 'backend_users';
/**
* Validation rules
*/
public $rules = [
'email' => 'required|between:6,255|email|unique:backend_users',
'login' => 'required|between:2,255|unique:backend_users',
'password' => 'required:create|between:4,255|confirmed',
'password_confirmation' => 'required_with:password|between:4,255'
];
/**
* @var array Attributes that should be cast to dates
*/
protected $dates = [
'activated_at',
'last_login',
'created_at',
'updated_at',
'deleted_at',
];
/**
* Relations
*/
public $belongsToMany = [
'groups' => [UserGroup::class, 'table' => 'backend_users_groups']
];
public $belongsTo = [
'role' => UserRole::class
];
public $attachOne = [
'avatar' => \System\Models\File::class
];
/**
* Purge attributes from data set.
*/
protected $purgeable = ['password_confirmation', 'send_invite'];
/**
* @var string Login attribute
*/
public static $loginAttribute = 'login';
/**
* @return string Returns the user's full name.
*/
public function getFullNameAttribute()
{
return trim($this->first_name . ' ' . $this->last_name);
}
/**
* Gets a code for when the user is persisted to a cookie or session which identifies the user.
* @return string
*/
public function getPersistCode()
{
// Option A: @todo config
// return parent::getPersistCode();
// Option B:
if (!$this->persist_code) {
return parent::getPersistCode();
}
return $this->persist_code;
}
/**
* Returns the public image file path to this user's avatar.
*/
public function getAvatarThumb($size = 25, $options = null)
{
if (is_string($options)) {
$options = ['default' => $options];
}
elseif (!is_array($options)) {
$options = [];
}
// Default is "mm" (Mystery man)
$default = array_get($options, 'default', 'mm');
if ($this->avatar) {
return $this->avatar->getThumb($size, $size, $options);
}
return '//www.gravatar.com/avatar/' .
md5(strtolower(trim($this->email))) .
'?s='. $size .
'&d='. urlencode($default);
}
/**
* After create event
* @return void
*/
public function afterCreate()
{
$this->restorePurgedValues();
if ($this->send_invite) {
$this->sendInvitation();
}
}
/**
* After login event
* @return void
*/
public function afterLogin()
{
parent::afterLogin();
Event::fire('backend.user.login', [$this]);
}
/**
* Sends an invitation to the user using template "backend::mail.invite".
* @return void
*/
public function sendInvitation()
{
$data = [
'name' => $this->full_name,
'login' => $this->login,
'password' => $this->getOriginalHashValue('password'),
'link' => Backend::url('backend'),
];
Mail::send('backend::mail.invite', $data, function ($message) {
$message->to($this->email, $this->full_name);
});
}
public function getGroupsOptions()
{
$result = [];
foreach (UserGroup::all() as $group) {
$result[$group->id] = [$group->name, $group->description];
}
return $result;
}
public function getRoleOptions()
{
$result = [];
foreach (UserRole::all() as $role) {
$result[$role->id] = [$role->name, $role->description];
}
return $result;
}
}
| 1 | 17,908 | @daftspunk @bennothommo I wonder if we need to go as deep as detecting what the default varchar length is with a special character to be parsed by the validation trait since we've introduced the config for it. | octobercms-october | php |
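One lightweight reading of the question above, sketched purely for illustration: instead of detecting the driver's varchar default, keep a single configured maximum and build the length rules from it. The config key below is invented for the example and is not an actual October CMS setting; 191 is only the conservative utf8mb4 fallback.

    public function __construct(array $attributes = [])
    {
        // Hypothetical key; shown only to illustrate driving the rule lengths from config.
        $max = (int) config('backend.maxStringLength', 191);

        $this->rules['email'] = "required|between:6,{$max}|email|unique:backend_users";
        $this->rules['login'] = "required|between:2,{$max}|unique:backend_users";

        parent::__construct($attributes);
    }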
@@ -2186,7 +2186,7 @@ class WebElement {
if (!this.driver_.fileDetector_) {
return this.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
- setParameter('text', keys).
+ setParameter('text', keys.then(keys => keys.join(''))).
setParameter('value', keys),
'WebElement.sendKeys()');
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview The heart of the WebDriver JavaScript API.
*/
'use strict';
const actions = require('./actions');
const by = require('./by');
const Capabilities = require('./capabilities').Capabilities;
const command = require('./command');
const error = require('./error');
const input = require('./input');
const logging = require('./logging');
const {Session} = require('./session');
const Symbols = require('./symbols');
const promise = require('./promise');
/**
* Defines a condition for use with WebDriver's {@linkplain WebDriver#wait wait
* command}.
*
* @template OUT
*/
class Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): OUT} fn The condition function to
* evaluate on each iteration of the wait loop.
*/
constructor(message, fn) {
/** @private {string} */
this.description_ = 'Waiting ' + message;
/** @type {function(!WebDriver): OUT} */
this.fn = fn;
}
/** @return {string} A description of this condition. */
description() {
return this.description_;
}
}
/**
* Defines a condition that will result in a {@link WebElement}.
*
* @extends {Condition<!(WebElement|IThenable<!WebElement>)>}
*/
class WebElementCondition extends Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): !(WebElement|IThenable<!WebElement>)}
* fn The condition function to evaluate on each iteration of the wait
* loop.
*/
constructor(message, fn) {
super(message, fn);
}
}
//////////////////////////////////////////////////////////////////////////////
//
// WebDriver
//
//////////////////////////////////////////////////////////////////////////////
/**
* Translates a command to its wire-protocol representation before passing it
* to the given `executor` for execution.
* @param {!command.Executor} executor The executor to use.
* @param {!command.Command} command The command to execute.
* @return {!Promise} A promise that will resolve with the command response.
*/
function executeCommand(executor, command) {
return toWireValue(command.getParameters()).
then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
});
}
/**
* Converts an object to its JSON representation in the WebDriver wire protocol.
* When converting values of type object, the following steps will be taken:
* <ol>
* <li>if the object is a WebElement, the return value will be the element's
* server ID
* <li>if the object defines a {@link Symbols.serialize} method, this algorithm
* will be recursively applied to the object's serialized representation
* <li>if the object provides a "toJSON" function, this algorithm will
* recursively be applied to the result of that function
* <li>otherwise, the value of each key will be recursively converted according
* to the rules above.
* </ol>
*
* @param {*} obj The object to convert.
* @return {!Promise<?>} A promise that will resolve to the input value's JSON
* representation.
*/
function toWireValue(obj) {
if (promise.isPromise(obj)) {
return Promise.resolve(obj).then(toWireValue);
}
return Promise.resolve(convertValue(obj));
}
function convertValue(value) {
if (value === void 0 || value === null) {
return value;
}
if (typeof value === 'boolean'
|| typeof value === 'number'
|| typeof value === 'string') {
return value;
}
if (Array.isArray(value)) {
return convertKeys(value);
}
if (typeof value === 'function') {
return '' + value;
}
if (typeof value[Symbols.serialize] === 'function') {
return toWireValue(value[Symbols.serialize]());
} else if (typeof value.toJSON === 'function') {
return toWireValue(value.toJSON());
}
return convertKeys(value);
}
function convertKeys(obj) {
const isArray = Array.isArray(obj);
const numKeys = isArray ? obj.length : Object.keys(obj).length;
const ret = isArray ? new Array(numKeys) : {};
if (!numKeys) {
return Promise.resolve(ret);
}
let numResolved = 0;
function forEachKey(obj, fn) {
if (Array.isArray(obj)) {
for (let i = 0, n = obj.length; i < n; i++) {
fn(obj[i], i);
}
} else {
for (let key in obj) {
fn(obj[key], key);
}
}
}
return new Promise(function(done, reject) {
forEachKey(obj, function(value, key) {
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
value = convertValue(value);
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
setValue(value);
}
}
function setValue(value) {
ret[key] = value;
maybeFulfill();
}
});
function maybeFulfill() {
if (++numResolved === numKeys) {
done(ret);
}
}
});
}
/**
* Converts a value from its JSON representation according to the WebDriver wire
* protocol. Any JSON object that defines a WebElement ID will be decoded to a
* {@link WebElement} object. All other values will be passed through as is.
*
* @param {!WebDriver} driver The driver to use as the parent of any unwrapped
* {@link WebElement} values.
* @param {*} value The value to convert.
* @return {*} The converted value.
*/
function fromWireValue(driver, value) {
if (Array.isArray(value)) {
value = value.map(v => fromWireValue(driver, v));
} else if (WebElement.isId(value)) {
let id = WebElement.extractId(value);
value = new WebElement(driver, id);
} else if (value && typeof value === 'object') {
let result = {};
for (let key in value) {
if (value.hasOwnProperty(key)) {
result[key] = fromWireValue(driver, value[key]);
}
}
value = result;
}
return value;
}
/**
* Structural interface for a WebDriver client.
*
* @record
*/
class IWebDriver {
/** @return {!promise.ControlFlow} The control flow used by this instance. */
controlFlow() {}
/**
* Schedules a {@link command.Command} to be executed by this driver's
* {@link command.Executor}.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Thenable<T>} A promise that will be resolved
* with the command result.
* @template T
*/
schedule(command, description) {}
/**
* Sets the {@linkplain input.FileDetector file detector} that should be
* used with this instance.
* @param {input.FileDetector} detector The detector to use or {@code null}.
*/
setFileDetector(detector) {}
/**
* @return {!command.Executor} The command executor used by this instance.
*/
getExecutor() {}
/**
* @return {!promise.Thenable<!Session>} A promise for this client's session.
*/
getSession() {}
/**
* @return {!promise.Thenable<!Capabilities>} A promise that will resolve with
* the this instance's capabilities.
*/
getCapabilities() {}
/**
* Terminates the browser session. After calling quit, this instance will be
* invalidated and may no longer be used to issue commands against the
* browser.
*
* @return {!promise.Thenable<void>} A promise that will be resolved when the
* command has completed.
*/
quit() {}
/**
* Creates a new action sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.ActionSequence#perform} is
* called. Example:
*
* driver.actions().
* mouseDown(element1).
* mouseMove(element2).
* mouseUp().
* perform();
*
* @return {!actions.ActionSequence} A new action sequence for this instance.
*/
actions() {}
/**
* Creates a new touch sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.TouchSequence#perform} is
* called. Example:
*
* driver.touchActions().
* tap(element1).
* doubleTap(element2).
* perform();
*
* @return {!actions.TouchSequence} A new touch sequence for this instance.
*/
touchActions() {}
/**
* Schedules a command to execute JavaScript in the context of the currently
* selected frame or window. The script fragment will be executed as the body
* of an anonymous function. If the script is provided as a function object,
* that function will be converted to a string for injection into the target
* window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@linkplain WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* The script may refer to any variables accessible from the current window.
* Furthermore, the script will execute in the window's context, thus
* {@code document} may be used to refer to the current document. Any local
* variables will not be available once the script has finished executing,
* though global variables will persist.
*
* If the script has a return value (i.e. if the script contains a return
* statement), then the following steps will be taken for resolving this
   * function's return value:
*
   * - For an HTML element, the value will resolve to a {@linkplain WebElement}
   * - Null and undefined return values will resolve to null
   * - Booleans, numbers, and strings will resolve as is
   * - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
   * @return {!promise.Thenable<T>} A promise that will resolve to the
   *     script's return value.
* @template T
*/
executeScript(script, var_args) {}
/**
* Schedules a command to execute asynchronous JavaScript in the context of the
* currently selected frame or window. The script fragment will be executed as
* the body of an anonymous function. If the script is provided as a function
* object, that function will be converted to a string for injection into the
* target window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@code WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* Unlike executing synchronous JavaScript with {@link #executeScript},
* scripts executed with this function must explicitly signal they are finished
* by invoking the provided callback. This callback will always be injected
* into the executed function as the last argument, and thus may be referenced
* with {@code arguments[arguments.length - 1]}. The following steps will be
   * taken for resolving this function's return value against the first argument
* to the script's callback function:
*
* - For a HTML element, the value will resolve to a
* {@link WebElement}
* - Null and undefined return values will resolve to null
* - Booleans, numbers, and strings will resolve as is
* - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* __Example #1:__ Performing a sleep that is synchronized with the currently
* selected window:
*
* var start = new Date().getTime();
* driver.executeAsyncScript(
* 'window.setTimeout(arguments[arguments.length - 1], 500);').
* then(function() {
* console.log(
* 'Elapsed time: ' + (new Date().getTime() - start) + ' ms');
* });
*
* __Example #2:__ Synchronizing a test with an AJAX application:
*
* var button = driver.findElement(By.id('compose-button'));
* button.click();
* driver.executeAsyncScript(
* 'var callback = arguments[arguments.length - 1];' +
* 'mailClient.getComposeWindowWidget().onload(callback);');
* driver.switchTo().frame('composeWidget');
* driver.findElement(By.id('to')).sendKeys('[email protected]');
*
* __Example #3:__ Injecting a XMLHttpRequest and waiting for the result. In
* this example, the inject script is specified with a function literal. When
* using this format, the function is converted to a string for injection, so it
* should not reference any symbols not defined in the scope of the page under
* test.
*
* driver.executeAsyncScript(function() {
* var callback = arguments[arguments.length - 1];
* var xhr = new XMLHttpRequest();
* xhr.open("GET", "/resource/data.json", true);
* xhr.onreadystatechange = function() {
* if (xhr.readyState == 4) {
* callback(xhr.responseText);
* }
* };
* xhr.send('');
* }).then(function(str) {
* console.log(JSON.parse(str)['food']);
* });
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
   * @return {!promise.Thenable<T>} A promise that will resolve to the
   *     script's return value.
* @template T
*/
executeAsyncScript(script, var_args) {}
/**
* Schedules a command to execute a custom function.
* @param {function(...): (T|IThenable<T>)} fn The function to execute.
* @param {Object=} opt_scope The object in whose scope to execute the function.
* @param {...*} var_args Any arguments to pass to the function.
   * @return {!promise.Thenable<T>} A promise that will be resolved
* with the function's result.
* @template T
*/
call(fn, opt_scope, var_args) {}
/**
* Schedules a command to wait for a condition to hold. The condition may be
* specified by a {@link Condition}, as a custom function, or as any
* promise-like thenable.
*
* For a {@link Condition} or function, the wait will repeatedly
* evaluate the condition until it returns a truthy value. If any errors occur
* while evaluating the condition, they will be allowed to propagate. In the
* event a condition returns a {@link promise.Promise promise}, the polling
* loop will wait for it to be resolved and use the resolved value for whether
* the condition has been satisfied. Note the resolution time for a promise
* is factored into whether a wait has timed out.
*
* Note, if the provided condition is a {@link WebElementCondition}, then
* the wait will return a {@link WebElementPromise} that will resolve to the
* element that satisfied the condition.
*
* _Example:_ waiting up to 10 seconds for an element to be present on the
* page.
*
* var button = driver.wait(until.elementLocated(By.id('foo')), 10000);
* button.click();
*
* This function may also be used to block the command flow on the resolution
* of any thenable promise object. When given a promise, the command will
* simply wait for its resolution before completing. A timeout may be provided
* to fail the command if the promise does not resolve before the timeout
* expires.
*
* _Example:_ Suppose you have a function, `startTestServer`, that returns a
* promise for when a server is ready for requests. You can block a WebDriver
* client on this promise with:
*
* var started = startTestServer();
* driver.wait(started, 5 * 1000, 'Server should start within 5 seconds');
* driver.get(getServerUrl());
*
* @param {!(IThenable<T>|
* Condition<T>|
* function(!WebDriver): T)} condition The condition to
* wait on, defined as a promise, condition object, or a function to
* evaluate as a condition.
* @param {number=} opt_timeout How long to wait for the condition to be true.
* @param {string=} opt_message An optional message to use if the wait times
* out.
   * @return {!(promise.Thenable<T>|WebElementPromise)} A promise that will be
   *     resolved with the first truthy value returned by the condition
   *     function, or rejected if the condition times out. If the input
   *     condition is an instance of a {@link WebElementCondition},
   *     the returned value will be a {@link WebElementPromise}.
* @throws {TypeError} if the provided `condition` is not a valid type.
* @template T
*/
wait(condition, opt_timeout, opt_message) {}
/**
* Schedules a command to make the driver sleep for the given amount of time.
* @param {number} ms The amount of time, in milliseconds, to sleep.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the sleep has finished.
*/
sleep(ms) {}
/**
* Schedules a command to retrieve the current window handle.
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the current window handle.
*/
getWindowHandle() {}
/**
* Schedules a command to retrieve the current list of available window handles.
* @return {!promise.Thenable<!Array<string>>} A promise that will
* be resolved with an array of window handles.
*/
getAllWindowHandles() {}
/**
* Schedules a command to retrieve the current page's source. The page source
* returned is a representation of the underlying DOM: do not expect it to be
* formatted or escaped in the same way as the response sent from the web
* server.
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the current page source.
*/
getPageSource() {}
/**
* Schedules a command to close the current window.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when this command has completed.
*/
close() {}
/**
* Schedules a command to navigate to the given URL.
* @param {string} url The fully qualified URL to open.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the document has finished loading.
*/
get(url) {}
/**
* Schedules a command to retrieve the URL of the current page.
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the current URL.
*/
getCurrentUrl() {}
/**
* Schedules a command to retrieve the current page's title.
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the current page's title.
*/
getTitle() {}
/**
* Schedule a command to find an element on the page. If the element cannot be
* found, a {@link bot.ErrorCode.NO_SUCH_ELEMENT} result will be returned
* by the driver. Unlike other commands, this error cannot be suppressed. In
* other words, scheduling a command to find an element doubles as an assert
* that the element is present on the page. To test whether an element is
* present on the page, use {@link #findElements}:
*
* driver.findElements(By.id('foo'))
* .then(found => console.log('Element found? %s', !!found.length));
*
* The search criteria for an element may be defined using one of the
* factories in the {@link webdriver.By} namespace, or as a short-hand
* {@link webdriver.By.Hash} object. For example, the following two statements
* are equivalent:
*
* var e1 = driver.findElement(By.id('foo'));
* var e2 = driver.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = driver.findElement(firstVisibleLink);
*
* function firstVisibleLink(driver) {
* var links = driver.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {}
/**
* Schedule a command to search for multiple elements on the page.
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!promise.Thenable<!Array<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {}
/**
* Schedule a command to take a screenshot. The driver makes a best effort to
* return a screenshot of the following, in order of preference:
*
* 1. Entire page
* 2. Current window
* 3. Visible portion of the current frame
* 4. The entire display containing the browser
*
* @return {!promise.Thenable<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot() {}
/**
* @return {!Options} The options interface for this instance.
*/
manage() {}
/**
* @return {!Navigation} The navigation interface for this instance.
*/
navigate() {}
/**
* @return {!TargetLocator} The target locator interface for this
* instance.
*/
switchTo() {}
}
/**
* Each WebDriver instance provides automated control over a browser session.
*
* @implements {IWebDriver}
*/
class WebDriver {
/**
* @param {!(Session|IThenable<!Session>)} session Either a known session or a
* promise that will be resolved to a session.
* @param {!command.Executor} executor The executor to use when sending
* commands to the browser.
* @param {promise.ControlFlow=} opt_flow The flow to
* schedule commands through. Defaults to the active flow object.
* @param {(function(this: void): ?)=} opt_onQuit A function to call, if any,
* when the session is terminated.
*/
constructor(session, executor, opt_flow, opt_onQuit) {
/** @private {!promise.ControlFlow} */
this.flow_ = opt_flow || promise.controlFlow();
/** @private {!promise.Thenable<!Session>} */
this.session_ = this.flow_.promise(resolve => resolve(session));
/** @private {!command.Executor} */
this.executor_ = executor;
/** @private {input.FileDetector} */
this.fileDetector_ = null;
/** @private @const {(function(this: void): ?|undefined)} */
this.onQuit_ = opt_onQuit;
}
/**
* Creates a new WebDriver client for an existing session.
* @param {!command.Executor} executor Command executor to use when querying
* for session details.
* @param {string} sessionId ID of the session to attach to.
* @param {promise.ControlFlow=} opt_flow The control flow all
* driver commands should execute under. Defaults to the
* {@link promise.controlFlow() currently active} control flow.
* @return {!WebDriver} A new client for the specified session.
*/
static attachToSession(executor, sessionId, opt_flow) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.DESCRIBE_SESSION)
.setParameter('sessionId', sessionId);
let session = flow.execute(
() => executeCommand(executor, cmd).catch(err => {
// The DESCRIBE_SESSION command is not supported by the W3C spec, so
// if we get back an unknown command, just return a session with
// unknown capabilities.
if (err instanceof error.UnknownCommandError) {
return new Session(sessionId, new Capabilities);
}
throw err;
}),
'WebDriver.attachToSession()');
return new WebDriver(session, executor, flow);
}
/**
* Creates a new WebDriver session.
*
* By default, the requested session `capabilities` are merely "desired" and
* the remote end will still create a new session even if it cannot satisfy
* all of the requested capabilities. You can query which capabilities a
* session actually has using the
* {@linkplain #getCapabilities() getCapabilities()} method on the returned
* WebDriver instance.
*
* To define _required capabilities_, provide the `capabilities` as an object
* literal with `required` and `desired` keys. The `desired` key may be
* omitted if all capabilities are required, and vice versa. If the server
* cannot create a session with all of the required capabilities, it will
* return an {@linkplain error.SessionNotCreatedError}.
*
* let required = new Capabilities().set('browserName', 'firefox');
* let desired = new Capabilities().set('version', '45');
* let driver = WebDriver.createSession(executor, {required, desired});
*
* This function will always return a WebDriver instance. If there is an error
* creating the session, such as the aforementioned SessionNotCreatedError,
* the driver will have a rejected {@linkplain #getSession session} promise.
* It is recommended that this promise is left _unhandled_ so it will
* propagate through the {@linkplain promise.ControlFlow control flow} and
* cause subsequent commands to fail.
*
* let required = Capabilities.firefox();
* let driver = WebDriver.createSession(executor, {required});
*
* // If the createSession operation failed, then this command will also
* // also fail, propagating the creation failure.
* driver.get('http://www.google.com').catch(e => console.log(e));
*
* @param {!command.Executor} executor The executor to create the new session
* with.
* @param {(!Capabilities|
* {desired: (Capabilities|undefined),
* required: (Capabilities|undefined)})} capabilities The desired
* capabilities for the new session.
* @param {promise.ControlFlow=} opt_flow The control flow all driver
* commands should execute under, including the initial session creation.
* Defaults to the {@link promise.controlFlow() currently active}
* control flow.
* @param {(function(new: WebDriver,
* !IThenable<!Session>,
* !command.Executor,
* promise.ControlFlow=))=} opt_ctor
* A reference to the constructor of the specific type of WebDriver client
* to instantiate. Will create a vanilla {@linkplain WebDriver} instance
* if a constructor is not provided.
* @param {(function(this: void): ?)=} opt_onQuit A callback to invoke when
* the newly created session is terminated. This should be used to clean
* up any resources associated with the session.
* @return {!WebDriver} The driver for the newly created session.
*/
static createSession(
executor, capabilities, opt_flow, opt_ctor, opt_onQuit) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.NEW_SESSION);
if (capabilities && (capabilities.desired || capabilities.required)) {
cmd.setParameter('desiredCapabilities', capabilities.desired);
cmd.setParameter('requiredCapabilities', capabilities.required);
} else {
cmd.setParameter('desiredCapabilities', capabilities);
}
let session = flow.execute(
() => executeCommand(executor, cmd),
'WebDriver.createSession()');
if (typeof opt_onQuit === 'function') {
session = session.catch(err => {
return Promise.resolve(opt_onQuit.call(void 0)).then(_ => {throw err;});
});
}
const ctor = opt_ctor || WebDriver;
return new ctor(session, executor, flow, opt_onQuit);
}
/** @override */
controlFlow() {
return this.flow_;
}
/** @override */
schedule(command, description) {
command.setParameter('sessionId', this.session_);
// If any of the command parameters are rejected promises, those
// rejections may be reported as unhandled before the control flow
// attempts to execute the command. To ensure parameters errors
// propagate through the command itself, we resolve all of the
// command parameters now, but suppress any errors until the ControlFlow
// actually executes the command. This addresses scenarios like catching
// an element not found error in:
//
// driver.findElement(By.id('foo')).click().catch(function(e) {
// if (e instanceof NoSuchElementError) {
// // Do something.
// }
// });
var prepCommand = toWireValue(command.getParameters());
prepCommand.catch(function() {});
var flow = this.flow_;
var executor = this.executor_;
return flow.execute(() => {
// Retrieve resolved command parameters; any previously suppressed errors
// will now propagate up through the control flow as part of the command
// execution.
return prepCommand.then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
}).then(value => fromWireValue(this, value));
}, description);
}
/** @override */
setFileDetector(detector) {
this.fileDetector_ = detector;
}
/** @override */
getExecutor() {
return this.executor_;
}
/** @override */
getSession() {
return this.session_;
}
/** @override */
getCapabilities() {
return this.session_.then(s => s.getCapabilities());
}
/** @override */
quit() {
var result = this.schedule(
new command.Command(command.Name.QUIT),
'WebDriver.quit()');
// Delete our session ID when the quit command finishes; this will allow us
// to throw an error when attempting to use a driver post-quit.
return /** @type {!promise.Thenable} */(promise.finally(result, () => {
this.session_ = this.flow_.promise((_, reject) => {
reject(new error.NoSuchSessionError(
'This driver instance does not have a valid session ID ' +
'(did you call WebDriver.quit()?) and may no longer be used.'));
});
// Only want the session rejection to bubble if accessed.
this.session_.catch(function() {});
if (this.onQuit_) {
return this.onQuit_.call(void 0);
}
}));
}
/** @override */
actions() {
return new actions.ActionSequence(this);
}
/** @override */
touchActions() {
return new actions.TouchSequence(this);
}
/** @override */
executeScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args =
arguments.length > 1 ? Array.prototype.slice.call(arguments, 1) : [];
return this.schedule(
new command.Command(command.Name.EXECUTE_SCRIPT).
setParameter('script', script).
setParameter('args', args),
'WebDriver.executeScript()');
}
/** @override */
executeAsyncScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args = Array.prototype.slice.call(arguments, 1);
return this.schedule(
new command.Command(command.Name.EXECUTE_ASYNC_SCRIPT).
setParameter('script', script).
setParameter('args', args),
        'WebDriver.executeAsyncScript()');
}
/** @override */
call(fn, opt_scope, var_args) {
let args = Array.prototype.slice.call(arguments, 2);
return this.flow_.execute(function() {
return promise.fullyResolved(args).then(function(args) {
if (promise.isGenerator(fn)) {
args.unshift(fn, opt_scope);
return promise.consume.apply(null, args);
}
return fn.apply(opt_scope, args);
});
}, 'WebDriver.call(' + (fn.name || 'function') + ')');
}
/** @override */
wait(condition, opt_timeout, opt_message) {
if (promise.isPromise(condition)) {
return this.flow_.wait(
/** @type {!IThenable} */(condition),
opt_timeout, opt_message);
}
var message = opt_message;
var fn = /** @type {!Function} */(condition);
if (condition instanceof Condition) {
message = message || condition.description();
fn = condition.fn;
}
if (typeof fn !== 'function') {
throw TypeError(
'Wait condition must be a promise-like object, function, or a '
+ 'Condition object');
}
var driver = this;
var result = this.flow_.wait(function() {
if (promise.isGenerator(fn)) {
return promise.consume(fn, null, [driver]);
}
return fn(driver);
}, opt_timeout, message);
if (condition instanceof WebElementCondition) {
result = new WebElementPromise(this, result.then(function(value) {
if (!(value instanceof WebElement)) {
throw TypeError(
'WebElementCondition did not resolve to a WebElement: '
+ Object.prototype.toString.call(value));
}
return value;
}));
}
return result;
}
/** @override */
sleep(ms) {
return this.flow_.timeout(ms, 'WebDriver.sleep(' + ms + ')');
}
/** @override */
getWindowHandle() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_WINDOW_HANDLE),
'WebDriver.getWindowHandle()');
}
/** @override */
getAllWindowHandles() {
return this.schedule(
new command.Command(command.Name.GET_WINDOW_HANDLES),
'WebDriver.getAllWindowHandles()');
}
/** @override */
getPageSource() {
return this.schedule(
new command.Command(command.Name.GET_PAGE_SOURCE),
'WebDriver.getPageSource()');
}
/** @override */
close() {
return this.schedule(new command.Command(command.Name.CLOSE),
'WebDriver.close()');
}
/** @override */
get(url) {
return this.navigate().to(url);
}
/** @override */
getCurrentUrl() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_URL),
'WebDriver.getCurrentUrl()');
}
/** @override */
getTitle() {
return this.schedule(new command.Command(command.Name.GET_TITLE),
'WebDriver.getTitle()');
}
/** @override */
findElement(locator) {
let id;
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
id = this.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule(cmd, 'WebDriver.findElement(' + locator + ')');
}
return new WebElementPromise(this, id);
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search
* context.
* @return {!promise.Thenable<!WebElement>} A
* promise that will resolve to a list of WebElements.
* @private
*/
findElementInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (Array.isArray(result)) {
result = result[0];
}
if (!(result instanceof WebElement)) {
throw new TypeError('Custom locator did not return a WebElement');
}
return result;
});
}
/** @override */
findElements(locator) {
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
return this.findElementsInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
let res = this.schedule(cmd, 'WebDriver.findElements(' + locator + ')');
return res.catch(function(e) {
if (e instanceof error.NoSuchElementError) {
return [];
}
throw e;
});
}
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search context.
* @return {!promise.Thenable<!Array<!WebElement>>} A promise that
* will resolve to an array of WebElements.
* @private
*/
findElementsInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (result instanceof WebElement) {
return [result];
}
if (!Array.isArray(result)) {
return [];
}
return result.filter(function(item) {
return item instanceof WebElement;
});
});
}
/** @override */
takeScreenshot() {
return this.schedule(new command.Command(command.Name.SCREENSHOT),
'WebDriver.takeScreenshot()');
}
/** @override */
manage() {
return new Options(this);
}
/** @override */
navigate() {
return new Navigation(this);
}
/** @override */
switchTo() {
return new TargetLocator(this);
}
}
/**
* Interface for navigating back and forth in the browser history.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.navigate()
*
* @see WebDriver#navigate()
*/
class Navigation {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to navigate to a new URL.
* @param {string} url The URL to navigate to.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the URL has been loaded.
*/
to(url) {
return this.driver_.schedule(
new command.Command(command.Name.GET).
setParameter('url', url),
'WebDriver.navigate().to(' + url + ')');
}
/**
* Schedules a command to move backwards in the browser history.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the navigation event has completed.
*/
back() {
return this.driver_.schedule(
new command.Command(command.Name.GO_BACK),
'WebDriver.navigate().back()');
}
/**
* Schedules a command to move forwards in the browser history.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the navigation event has completed.
*/
forward() {
return this.driver_.schedule(
new command.Command(command.Name.GO_FORWARD),
'WebDriver.navigate().forward()');
}
/**
* Schedules a command to refresh the current page.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the navigation event has completed.
*/
refresh() {
return this.driver_.schedule(
new command.Command(command.Name.REFRESH),
'WebDriver.navigate().refresh()');
}
}
/**
* Provides methods for managing browser and driver state.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with {@linkplain WebDriver#manage() webdriver.manage()}.
*/
class Options {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to add a cookie.
*
* __Sample Usage:__
*
* // Set a basic cookie.
   *     driver.manage().addCookie({name: 'foo', value: 'bar'});
*
* // Set a cookie that expires in 10 minutes.
* let expiry = new Date(Date.now() + (10 * 60 * 1000));
   *     driver.manage().addCookie({name: 'foo', value: 'bar', expiry});
*
* // The cookie expiration may also be specified in seconds since epoch.
   *     driver.manage().addCookie({
* name: 'foo',
* value: 'bar',
* expiry: Math.floor(Date.now() / 1000)
* });
*
* @param {!Options.Cookie} spec Defines the cookie to add.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the cookie has been added to the page.
* @throws {error.InvalidArgumentError} if any of the cookie parameters are
* invalid.
* @throws {TypeError} if `spec` is not a cookie object.
*/
addCookie(spec) {
if (!spec || typeof spec !== 'object') {
throw TypeError('addCookie called with non-cookie parameter');
}
// We do not allow '=' or ';' in the name.
let name = spec.name;
if (/[;=]/.test(name)) {
throw new error.InvalidArgumentError(
'Invalid cookie name "' + name + '"');
}
// We do not allow ';' in value.
let value = spec.value;
if (/;/.test(value)) {
throw new error.InvalidArgumentError(
'Invalid cookie value "' + value + '"');
}
let cookieString = name + '=' + value +
(spec.domain ? ';domain=' + spec.domain : '') +
(spec.path ? ';path=' + spec.path : '') +
(spec.secure ? ';secure' : '');
let expiry;
if (typeof spec.expiry === 'number') {
expiry = Math.floor(spec.expiry);
cookieString += ';expires=' + new Date(spec.expiry * 1000).toUTCString();
} else if (spec.expiry instanceof Date) {
let date = /** @type {!Date} */(spec.expiry);
expiry = Math.floor(date.getTime() / 1000);
cookieString += ';expires=' + date.toUTCString();
}
return this.driver_.schedule(
new command.Command(command.Name.ADD_COOKIE).
setParameter('cookie', {
'name': name,
'value': value,
'path': spec.path,
'domain': spec.domain,
'secure': !!spec.secure,
'expiry': expiry
}),
'WebDriver.manage().addCookie(' + cookieString + ')');
}
/**
* Schedules a command to delete all cookies visible to the current page.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when all cookies have been deleted.
*/
deleteAllCookies() {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_ALL_COOKIES),
'WebDriver.manage().deleteAllCookies()');
}
/**
* Schedules a command to delete the cookie with the given name. This command
* is a no-op if there is no cookie with the given name visible to the current
* page.
* @param {string} name The name of the cookie to delete.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the cookie has been deleted.
*/
deleteCookie(name) {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_COOKIE).
setParameter('name', name),
'WebDriver.manage().deleteCookie(' + name + ')');
}
/**
* Schedules a command to retrieve all cookies visible to the current page.
* Each cookie will be returned as a JSON object as described by the WebDriver
* wire protocol.
* @return {!promise.Thenable<!Array<!Options.Cookie>>} A promise that will be
* resolved with the cookies visible to the current browsing context.
*/
getCookies() {
return this.driver_.schedule(
new command.Command(command.Name.GET_ALL_COOKIES),
'WebDriver.manage().getCookies()');
}
/**
* Schedules a command to retrieve the cookie with the given name. Returns null
* if there is no such cookie. The cookie will be returned as a JSON object as
* described by the WebDriver wire protocol.
*
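   * __Sample Usage__ (an illustrative sketch; the cookie name is a
   * placeholder):
   *
   *     driver.manage().getCookie('session-id').then(cookie => {
   *       console.log(cookie ? cookie.value : 'no such cookie');
   *     });
   *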
* @param {string} name The name of the cookie to retrieve.
* @return {!promise.Thenable<?Options.Cookie>} A promise that will be resolved
* with the named cookie, or `null` if there is no such cookie.
*/
getCookie(name) {
return this.getCookies().then(function(cookies) {
for (let cookie of cookies) {
if (cookie && cookie['name'] === name) {
return cookie;
}
}
return null;
});
}
/**
* Schedules a command to fetch the timeouts currently configured for the
* current session.
*
* @return {!promise.Thenable<{script: number,
* pageLoad: number,
* implicit: number}>} A promise that will be
* resolved with the timeouts currently configured for the current
* session.
* @see #setTimeouts()
*/
getTimeouts() {
return this.driver_.schedule(
new command.Command(command.Name.GET_TIMEOUT),
`WebDriver.manage().getTimeouts()`)
}
/**
* Schedules a command to set timeout durations associated with the current
* session.
*
* The following timeouts are supported (all timeouts are specified in
* milliseconds):
*
* - `implicit` specifies the maximum amount of time to wait for an element
* locator to succeed when {@linkplain WebDriver#findElement locating}
* {@linkplain WebDriver#findElements elements} on the page.
* Defaults to 0 milliseconds.
*
   * - `pageLoad` specifies the maximum amount of time to wait for a page to
   *   finish loading. Defaults to 300000 milliseconds.
*
* - `script` specifies the maximum amount of time to wait for an
* {@linkplain WebDriver#executeScript evaluated script} to run. If set to
* `null`, the script timeout will be indefinite.
* Defaults to 30000 milliseconds.
*
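   * __Sample Usage__ (an illustrative sketch; the millisecond values below
   * are arbitrary placeholders, not recommendations):
   *
   *     // Wait up to 10 seconds when locating elements and 30 seconds for
   *     // page loads.
   *     driver.manage().setTimeouts({implicit: 10000, pageLoad: 30000});
   *
   *     // Let asynchronous scripts run indefinitely.
   *     driver.manage().setTimeouts({script: null});
   *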
* @param {{script: (number|null|undefined),
* pageLoad: (number|null|undefined),
* implicit: (number|null|undefined)}} conf
* The desired timeout configuration.
* @return {!promise.Thenable<void>} A promise that will be resolved when the
* timeouts have been set.
* @throws {!TypeError} if an invalid options object is provided.
* @see #getTimeouts()
* @see <https://w3c.github.io/webdriver/webdriver-spec.html#dfn-set-timeouts>
*/
setTimeouts({script, pageLoad, implicit} = {}) {
let cmd = new command.Command(command.Name.SET_TIMEOUT);
let valid = false;
function setParam(key, value) {
if (value === null || typeof value === 'number') {
valid = true;
cmd.setParameter(key, value);
} else if (typeof value !== 'undefined') {
throw TypeError(
'invalid timeouts configuration:'
+ ` expected "${key}" to be a number, got ${typeof value}`);
}
}
setParam('implicit', implicit);
setParam('pageLoad', pageLoad);
setParam('script', script);
if (valid) {
return this.driver_.schedule(cmd, `WebDriver.manage().setTimeouts()`)
.catch(() => {
// Fallback to the legacy method.
let cmds = [];
if (typeof script === 'number') {
cmds.push(legacyTimeout(this.driver_, 'script', script));
}
if (typeof implicit === 'number') {
cmds.push(legacyTimeout(this.driver_, 'implicit', implicit));
}
if (typeof pageLoad === 'number') {
cmds.push(legacyTimeout(this.driver_, 'page load', pageLoad));
}
return Promise.all(cmds);
});
}
throw TypeError('no timeouts specified');
}
/**
* @return {!Logs} The interface for managing driver
* logs.
*/
logs() {
return new Logs(this.driver_);
}
/**
* @return {!Timeouts} The interface for managing driver timeouts.
* @deprecated Use {@link #setTimeouts()} instead.
*/
timeouts() {
return new Timeouts(this.driver_);
}
/**
* @return {!Window} The interface for managing the current window.
*/
window() {
return new Window(this.driver_);
}
}
/**
* @param {!WebDriver} driver
* @param {string} type
* @param {number} ms
* @return {!promise.Thenable<void>}
*/
function legacyTimeout(driver, type, ms) {
return driver.schedule(
new command.Command(command.Name.SET_TIMEOUT)
.setParameter('type', type)
.setParameter('ms', ms),
`WebDriver.manage().setTimeouts({${type}: ${ms}})`);
}
/**
* A record object describing a browser cookie.
*
* @record
*/
Options.Cookie = function() {};
/**
* The name of the cookie.
*
* @type {string}
*/
Options.Cookie.prototype.name;
/**
* The cookie value.
*
* @type {string}
*/
Options.Cookie.prototype.value;
/**
* The cookie path. Defaults to "/" when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.path;
/**
* The domain the cookie is visible to. Defaults to the current browsing
* context's document's URL when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.domain;
/**
* Whether the cookie is a secure cookie. Defaults to false when adding a new
* cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.secure;
/**
* Whether the cookie is an HTTP only cookie. Defaults to false when adding a
* new cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.httpOnly;
/**
* When the cookie expires.
*
* When {@linkplain Options#addCookie() adding a cookie}, this may be specified
* in _seconds_ since Unix epoch (January 1, 1970). The expiry will default to
* 20 years in the future if omitted.
*
* The expiry is always returned in seconds since epoch when
* {@linkplain Options#getCookies() retrieving cookies} from the browser.
*
* @type {(!Date|number|undefined)}
*/
Options.Cookie.prototype.expiry;
/**
* An interface for managing timeout behavior for WebDriver instances.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().timeouts()
*
* @deprecated This has been deprecated in favor of
* {@link Options#setTimeouts()}, which supports setting multiple timeouts
* at once.
* @see WebDriver#manage()
* @see Options#timeouts()
*/
class Timeouts {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Specifies the amount of time the driver should wait when searching for an
* element if it is not immediately present.
*
* When searching for a single element, the driver should poll the page
* until the element has been found, or this timeout expires before failing
* with a {@link bot.ErrorCode.NO_SUCH_ELEMENT} error. When searching
* for multiple elements, the driver should poll the page until at least one
* element has been found or this timeout has expired.
*
   * Setting the wait timeout to 0 (its default value) disables implicit
* waiting.
*
* Increasing the implicit wait timeout should be used judiciously as it
* will have an adverse effect on test run time, especially when used with
* slower location strategies like XPath.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the implicit wait timeout has been set.
* @deprecated Use {@link Options#setTimeouts()
* driver.manage().setTimeouts({implicit: ms})}.
*/
implicitlyWait(ms) {
return this.driver_.manage().setTimeouts({implicit: ms});
}
/**
* Sets the amount of time to wait, in milliseconds, for an asynchronous
* script to finish execution before returning an error. If the timeout is
* less than or equal to 0, the script will be allowed to run indefinitely.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the script timeout has been set.
* @deprecated Use {@link Options#setTimeouts()
* driver.manage().setTimeouts({script: ms})}.
*/
setScriptTimeout(ms) {
return this.driver_.manage().setTimeouts({script: ms});
}
/**
* Sets the amount of time to wait for a page load to complete before
* returning an error. If the timeout is negative, page loads may be
* indefinite.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the timeout has been set.
* @deprecated Use {@link Options#setTimeouts()
* driver.manage().setTimeouts({pageLoad: ms})}.
*/
pageLoadTimeout(ms) {
return this.driver_.manage().setTimeouts({pageLoad: ms});
}
}
/**
* An interface for managing the current window.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().window()
*
* @see WebDriver#manage()
* @see Options#window()
*/
class Window {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Retrieves the window's current position, relative to the top left corner of
* the screen.
* @return {!promise.Thenable<{x: number, y: number}>} A promise
* that will be resolved with the window's position in the form of a
* {x:number, y:number} object literal.
*/
getPosition() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_POSITION).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getPosition()');
}
/**
* Repositions the current window.
* @param {number} x The desired horizontal position, relative to the left
* side of the screen.
   * @param {number} y The desired vertical position, relative to the top
   *     of the screen.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the command has completed.
*/
setPosition(x, y) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_POSITION).
setParameter('windowHandle', 'current').
setParameter('x', x).
setParameter('y', y),
'WebDriver.manage().window().setPosition(' + x + ', ' + y + ')');
}
/**
* Retrieves the window's current size.
* @return {!promise.Thenable<{width: number, height: number}>} A
* promise that will be resolved with the window's size in the form of a
* {width:number, height:number} object literal.
*/
getSize() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_SIZE).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getSize()');
}
/**
* Resizes the current window.
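   *
   * __Sample Usage__ (a sketch; the dimensions are arbitrary placeholders):
   *
   *     driver.manage().window().setSize(1024, 768);
   *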
* @param {number} width The desired window width.
* @param {number} height The desired window height.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the command has completed.
*/
setSize(width, height) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_SIZE).
setParameter('windowHandle', 'current').
setParameter('width', width).
setParameter('height', height),
'WebDriver.manage().window().setSize(' + width + ', ' + height + ')');
}
/**
* Maximizes the current window.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the command has completed.
*/
maximize() {
return this.driver_.schedule(
new command.Command(command.Name.MAXIMIZE_WINDOW).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().maximize()');
}
}
/**
* Interface for managing WebDriver log records.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.manage().logs()
*
* @see WebDriver#manage()
* @see Options#logs()
*/
class Logs {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Fetches available log entries for the given type.
*
* Note that log buffers are reset after each call, meaning that available
* log entries correspond to those entries not yet returned for a given log
* type. In practice, this means that this call will return the available log
* entries since the last call, or from the start of the session.
*
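   * __Sample Usage__ (a sketch; assumes the browser exposes the 'browser'
   * log type):
   *
   *     driver.manage().logs().get(logging.Type.BROWSER).then(entries => {
   *       entries.forEach(entry => console.log(entry.message));
   *     });
   *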
* @param {!logging.Type} type The desired log type.
* @return {!promise.Thenable<!Array.<!logging.Entry>>} A
* promise that will resolve to a list of log entries for the specified
* type.
*/
get(type) {
let cmd = new command.Command(command.Name.GET_LOG).
setParameter('type', type);
return this.driver_.schedule(
cmd, 'WebDriver.manage().logs().get(' + type + ')').
then(function(entries) {
return entries.map(function(entry) {
if (!(entry instanceof logging.Entry)) {
return new logging.Entry(
entry['level'], entry['message'], entry['timestamp'],
entry['type']);
}
return entry;
});
});
}
/**
* Retrieves the log types available to this driver.
* @return {!promise.Thenable<!Array<!logging.Type>>} A
* promise that will resolve to a list of available log types.
*/
getAvailableLogTypes() {
return this.driver_.schedule(
new command.Command(command.Name.GET_AVAILABLE_LOG_TYPES),
'WebDriver.manage().logs().getAvailableLogTypes()');
}
}
/**
* An interface for changing the focus of the driver to another frame or window.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.switchTo()
*
* @see WebDriver#switchTo()
*/
class TargetLocator {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
   * Schedules a command to retrieve the {@code document.activeElement} element on
* the current document, or {@code document.body} if activeElement is not
* available.
* @return {!WebElementPromise} The active element.
*/
activeElement() {
var id = this.driver_.schedule(
new command.Command(command.Name.GET_ACTIVE_ELEMENT),
'WebDriver.switchTo().activeElement()');
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to switch focus of all future commands to the topmost
* frame on the page.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the driver has changed focus to the default content.
*/
defaultContent() {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', null),
'WebDriver.switchTo().defaultContent()');
}
/**
* Schedules a command to switch the focus of all future commands to another
* frame on the page. The target frame may be specified as one of the
* following:
*
* - A number that specifies a (zero-based) index into [window.frames](
* https://developer.mozilla.org/en-US/docs/Web/API/Window.frames).
   * - A {@link WebElement} reference, which corresponds to a `frame` or `iframe`
* DOM element.
* - The `null` value, to select the topmost frame on the page. Passing `null`
* is the same as calling {@link #defaultContent defaultContent()}.
*
* If the specified frame can not be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchFrameError}.
*
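   * __Sample Usage__ (a sketch; the frame index is a placeholder):
   *
   *     // Focus the first frame on the page, then return to the top-level
   *     // document.
   *     driver.switchTo().frame(0);
   *     // ... interact with elements inside the frame ...
   *     driver.switchTo().defaultContent();
   *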
* @param {(number|WebElement|null)} id The frame locator.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the driver has changed focus to the specified frame.
*/
frame(id) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', id),
'WebDriver.switchTo().frame(' + id + ')');
}
/**
* Schedules a command to switch the focus of all future commands to another
   * window. Windows may be specified by their {@code window.name} attribute or
   * by their handle (as returned by {@link WebDriver#getAllWindowHandles}).
*
* If the specified window cannot be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchWindowError}.
*
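   * __Sample Usage__ (a sketch; `handle` is assumed to hold a window handle
   * obtained earlier from the driver):
   *
   *     driver.switchTo().window(handle);
   *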
* @param {string} nameOrHandle The name or window handle of the window to
* switch focus to.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the driver has changed focus to the specified window.
*/
window(nameOrHandle) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_WINDOW).
// "name" supports the legacy drivers. "handle" is the W3C
// compliant parameter.
setParameter('name', nameOrHandle).
setParameter('handle', nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
}
/**
* Schedules a command to change focus to the active modal dialog, such as
* those opened by `window.alert()`, `window.confirm()`, and
* `window.prompt()`. The returned promise will be rejected with a
* {@linkplain error.NoSuchAlertError} if there are no open alerts.
*
* @return {!AlertPromise} The open alert.
*/
alert() {
var text = this.driver_.schedule(
new command.Command(command.Name.GET_ALERT_TEXT),
'WebDriver.switchTo().alert()');
var driver = this.driver_;
return new AlertPromise(driver, text.then(function(text) {
return new Alert(driver, text);
}));
}
}
//////////////////////////////////////////////////////////////////////////////
//
// WebElement
//
//////////////////////////////////////////////////////////////////////////////
const LEGACY_ELEMENT_ID_KEY = 'ELEMENT';
const ELEMENT_ID_KEY = 'element-6066-11e4-a52e-4f735466cecf';
/**
* Represents a DOM element. WebElements can be found by searching from the
* document root using a {@link WebDriver} instance, or by searching
* under another WebElement:
*
* driver.get('http://www.google.com');
* var searchForm = driver.findElement(By.tagName('form'));
* var searchBox = searchForm.findElement(By.name('q'));
* searchBox.sendKeys('webdriver');
*/
class WebElement {
/**
* @param {!WebDriver} driver the parent WebDriver instance for this element.
* @param {(!IThenable<string>|string)} id The server-assigned opaque ID for
* the underlying DOM element.
*/
constructor(driver, id) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Thenable<string>} */
this.id_ = driver.controlFlow().promise(resolve => resolve(id));
}
/**
* @param {string} id The raw ID.
* @param {boolean=} opt_noLegacy Whether to exclude the legacy element key.
* @return {!Object} The element ID for use with WebDriver's wire protocol.
*/
static buildId(id, opt_noLegacy) {
return opt_noLegacy
? {[ELEMENT_ID_KEY]: id}
: {[ELEMENT_ID_KEY]: id, [LEGACY_ELEMENT_ID_KEY]: id};
}
/**
* Extracts the encoded WebElement ID from the object.
*
* @param {?} obj The object to extract the ID from.
* @return {string} the extracted ID.
* @throws {TypeError} if the object is not a valid encoded ID.
*/
static extractId(obj) {
if (obj && typeof obj === 'object') {
if (typeof obj[ELEMENT_ID_KEY] === 'string') {
return obj[ELEMENT_ID_KEY];
} else if (typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string') {
return obj[LEGACY_ELEMENT_ID_KEY];
}
}
throw new TypeError('object is not a WebElement ID');
}
/**
* @param {?} obj the object to test.
* @return {boolean} whether the object is a valid encoded WebElement ID.
*/
static isId(obj) {
return obj && typeof obj === 'object'
&& (typeof obj[ELEMENT_ID_KEY] === 'string'
|| typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string');
}
/**
* Compares two WebElements for equality.
*
* @param {!WebElement} a A WebElement.
* @param {!WebElement} b A WebElement.
* @return {!promise.Thenable<boolean>} A promise that will be
* resolved to whether the two WebElements are equal.
*/
static equals(a, b) {
if (a === b) {
return a.driver_.controlFlow().promise(resolve => resolve(true));
}
let ids = [a.getId(), b.getId()];
return promise.all(ids).then(function(ids) {
      // If the two elements have the same ID, they should be considered
// equal. Otherwise, they may still be equivalent, but we'll need to
// ask the server to check for us.
if (ids[0] === ids[1]) {
return true;
}
let cmd = new command.Command(command.Name.ELEMENT_EQUALS);
cmd.setParameter('id', ids[0]);
cmd.setParameter('other', ids[1]);
return a.driver_.schedule(cmd, 'WebElement.equals()');
});
}
/** @return {!WebDriver} The parent driver for this instance. */
getDriver() {
return this.driver_;
}
/**
* @return {!promise.Thenable<string>} A promise that resolves to
* the server-assigned opaque ID assigned to this element.
*/
getId() {
return this.id_;
}
/**
* @return {!Object} Returns the serialized representation of this WebElement.
*/
[Symbols.serialize]() {
return this.getId().then(WebElement.buildId);
}
/**
* Schedules a command that targets this element with the parent WebDriver
* instance. Will ensure this element's ID is included in the command
* parameters under the "id" key.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Thenable<T>} A promise that will be resolved
* with the command result.
* @template T
* @see WebDriver#schedule
* @private
*/
schedule_(command, description) {
command.setParameter('id', this);
return this.driver_.schedule(command, description);
}
/**
* Schedule a command to find a descendant of this element. If the element
* cannot be found, the returned promise will be rejected with a
* {@linkplain error.NoSuchElementError NoSuchElementError}.
*
* The search criteria for an element may be defined using one of the static
* factories on the {@link by.By} class, or as a short-hand
* {@link ./by.ByHash} object. For example, the following two statements
* are equivalent:
*
* var e1 = element.findElement(By.id('foo'));
* var e2 = element.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = element.findElement(firstVisibleLink);
*
* function firstVisibleLink(element) {
* var links = element.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
id = this.driver_.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule_(cmd, 'WebElement.findElement(' + locator + ')');
}
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to find all of the descendants of this element that
* match the given search criteria.
*
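   * __Sample Usage__ (a sketch; the CSS selector is a placeholder):
   *
   *     element.findElements(By.css('li')).then(items => {
   *       console.log('found ' + items.length + ' list items');
   *     });
   *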
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!promise.Thenable<!Array<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
return this.driver_.findElementsInternal_(locator, this);
} else {
var cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
return this.schedule_(cmd, 'WebElement.findElements(' + locator + ')');
}
}
/**
* Schedules a command to click on this element.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the click command has completed.
*/
click() {
return this.schedule_(
new command.Command(command.Name.CLICK_ELEMENT),
'WebElement.click()');
}
/**
* Schedules a command to type a sequence on the DOM element represented by
* this instance.
*
* Modifier keys (SHIFT, CONTROL, ALT, META) are stateful; once a modifier is
* processed in the key sequence, that key state is toggled until one of the
* following occurs:
*
* - The modifier key is encountered again in the sequence. At this point the
* state of the key is toggled (along with the appropriate keyup/down
* events).
* - The {@link input.Key.NULL} key is encountered in the sequence. When
* this key is encountered, all modifier keys current in the down state are
* released (with accompanying keyup events). The NULL key can be used to
* simulate common keyboard shortcuts:
*
* element.sendKeys("text was",
* Key.CONTROL, "a", Key.NULL,
* "now text is");
* // Alternatively:
* element.sendKeys("text was",
* Key.chord(Key.CONTROL, "a"),
* "now text is");
*
* - The end of the key sequence is encountered. When there are no more keys
* to type, all depressed modifier keys are released (with accompanying
* keyup events).
*
* If this element is a file input ({@code <input type="file">}), the
* specified key sequence should specify the path to the file to attach to
* the element. This is analogous to the user clicking "Browse..." and entering
* the path into the file select dialog.
*
* var form = driver.findElement(By.css('form'));
* var element = form.findElement(By.css('input[type=file]'));
* element.sendKeys('/path/to/file.txt');
* form.submit();
*
* For uploads to function correctly, the entered path must reference a file
* on the _browser's_ machine, not the local machine running this script. When
* running against a remote Selenium server, a {@link input.FileDetector}
* may be used to transparently copy files to the remote machine before
* attempting to upload them in the browser.
*
* __Note:__ On browsers where native keyboard events are not supported
* (e.g. Firefox on OS X), key events will be synthesized. Special
* punctuation keys will be synthesized according to a standard QWERTY en-us
* keyboard layout.
*
* @param {...(number|string|!IThenable<(number|string)>)} var_args The
* sequence of keys to type. Number keys may be referenced numerically or
* by string (1 or '1'). All arguments will be joined into a single
* sequence.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when all keys have been typed.
*/
sendKeys(var_args) {
let keys = Promise.all(Array.prototype.slice.call(arguments, 0)).
then(keys => {
let ret = [];
keys.forEach(key => {
let type = typeof key;
if (type === 'number') {
key = String(key);
} else if (type !== 'string') {
throw TypeError(
                'each key must be a number or string; got ' + type);
}
// The W3C protocol requires keys to be specified as an array where
// each element is a single key.
ret.push.apply(ret, key.split(''));
});
return ret;
});
if (!this.driver_.fileDetector_) {
return this.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('text', keys).
setParameter('value', keys),
'WebElement.sendKeys()');
}
// Suppress unhandled rejection errors until the flow executes the command.
keys.catch(function() {});
var element = this;
return this.getDriver().controlFlow().execute(function() {
return keys.then(function(keys) {
return element.driver_.fileDetector_
.handleFile(element.driver_, keys.join(''));
}).then(function(keys) {
return element.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('text', keys).
setParameter('value', keys.split('')),
'WebElement.sendKeys()');
});
}, 'WebElement.sendKeys()');
}
/**
* Schedules a command to query for the tag/node name of this element.
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the element's tag name.
*/
getTagName() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TAG_NAME),
'WebElement.getTagName()');
}
/**
* Schedules a command to query for the computed style of the element
* represented by this instance. If the element inherits the named style from
* its parent, the parent will be queried for its value. Where possible, color
* values will be converted to their hex representation (e.g. #00ff00 instead
* of rgb(0, 255, 0)).
*
* _Warning:_ the value returned will be as the browser interprets it, so
* it may be tricky to form a proper assertion.
*
* @param {string} cssStyleProperty The name of the CSS style property to look
* up.
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the requested CSS value.
*/
getCssValue(cssStyleProperty) {
var name = command.Name.GET_ELEMENT_VALUE_OF_CSS_PROPERTY;
return this.schedule_(
new command.Command(name).
setParameter('propertyName', cssStyleProperty),
'WebElement.getCssValue(' + cssStyleProperty + ')');
}
/**
* Schedules a command to query for the value of the given attribute of the
* element. Will return the current value, even if it has been modified after
* the page has been loaded. More exactly, this method will return the value
* of the given attribute, unless that attribute is not present, in which case
* the value of the property with the same name is returned. If neither value
* is set, null is returned (for example, the "value" property of a textarea
   * element). The "style" attribute is converted, as best as possible, to a
* text representation with a trailing semi-colon. The following are deemed to
* be "boolean" attributes and will return either "true" or null:
*
* async, autofocus, autoplay, checked, compact, complete, controls, declare,
* defaultchecked, defaultselected, defer, disabled, draggable, ended,
* formnovalidate, hidden, indeterminate, iscontenteditable, ismap, itemscope,
* loop, multiple, muted, nohref, noresize, noshade, novalidate, nowrap, open,
* paused, pubdate, readonly, required, reversed, scoped, seamless, seeking,
* selected, spellcheck, truespeed, willvalidate
*
* Finally, the following commonly mis-capitalized attribute/property names
* are evaluated as expected:
*
* - "class"
* - "readonly"
*
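   * __Sample Usage__ (a sketch; the attribute name is a placeholder):
   *
   *     element.getAttribute('class').then(value => console.log(value));
   *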
* @param {string} attributeName The name of the attribute to query.
* @return {!promise.Thenable<?string>} A promise that will be
* resolved with the attribute's value. The returned value will always be
* either a string or null.
*/
getAttribute(attributeName) {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_ATTRIBUTE).
setParameter('name', attributeName),
'WebElement.getAttribute(' + attributeName + ')');
}
/**
* Get the visible (i.e. not hidden by CSS) innerText of this element,
* including sub-elements, without any leading or trailing whitespace.
*
* @return {!promise.Thenable<string>} A promise that will be
* resolved with the element's visible text.
*/
getText() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TEXT),
'WebElement.getText()');
}
/**
* Schedules a command to compute the size of this element's bounding box, in
* pixels.
* @return {!promise.Thenable<{width: number, height: number}>} A
* promise that will be resolved with the element's size as a
* {@code {width:number, height:number}} object.
*/
getSize() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_SIZE),
'WebElement.getSize()');
}
/**
* Schedules a command to compute the location of this element in page space.
* @return {!promise.Thenable<{x: number, y: number}>} A promise that
* will be resolved to the element's location as a
* {@code {x:number, y:number}} object.
*/
getLocation() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_LOCATION),
'WebElement.getLocation()');
}
/**
* Schedules a command to query whether the DOM element represented by this
* instance is enabled, as dictated by the {@code disabled} attribute.
* @return {!promise.Thenable<boolean>} A promise that will be
* resolved with whether this element is currently enabled.
*/
isEnabled() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_ENABLED),
'WebElement.isEnabled()');
}
/**
* Schedules a command to query whether this element is selected.
* @return {!promise.Thenable<boolean>} A promise that will be
* resolved with whether this element is currently selected.
*/
isSelected() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_SELECTED),
'WebElement.isSelected()');
}
/**
* Schedules a command to submit the form containing this element (or this
* element if it is a FORM element). This command is a no-op if the element is
* not contained in a form.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the form has been submitted.
*/
submit() {
return this.schedule_(
new command.Command(command.Name.SUBMIT_ELEMENT),
'WebElement.submit()');
}
/**
* Schedules a command to clear the `value` of this element. This command has
* no effect if the underlying DOM element is neither a text INPUT element
* nor a TEXTAREA element.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when the element has been cleared.
*/
clear() {
return this.schedule_(
new command.Command(command.Name.CLEAR_ELEMENT),
'WebElement.clear()');
}
/**
* Schedules a command to test whether this element is currently displayed.
* @return {!promise.Thenable<boolean>} A promise that will be
* resolved with whether this element is currently visible on the page.
*/
isDisplayed() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_DISPLAYED),
'WebElement.isDisplayed()');
}
/**
* Take a screenshot of the visible region encompassed by this element's
* bounding rectangle.
*
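   * __Sample Usage__ (a sketch; the output path is a placeholder and the
   * base-64 file write is only one way to consume the result):
   *
   *     element.takeScreenshot().then(png => {
   *       require('fs').writeFileSync('element.png', png, 'base64');
   *     });
   *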
* @param {boolean=} opt_scroll Optional argument that indicates whether the
* element should be scrolled into view before taking a screenshot.
* Defaults to false.
* @return {!promise.Thenable<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot(opt_scroll) {
var scroll = !!opt_scroll;
return this.schedule_(
new command.Command(command.Name.TAKE_ELEMENT_SCREENSHOT)
.setParameter('scroll', scroll),
'WebElement.takeScreenshot(' + scroll + ')');
}
}
/**
* WebElementPromise is a promise that will be fulfilled with a WebElement.
* This serves as a forward proxy on WebElement, allowing calls to be
 * scheduled directly on this instance before the underlying
* WebElement has been fulfilled. In other words, the following two statements
* are equivalent:
*
* driver.findElement({id: 'my-button'}).click();
* driver.findElement({id: 'my-button'}).then(function(el) {
* return el.click();
* });
*
* @implements {promise.CancellableThenable<!WebElement>}
* @final
*/
class WebElementPromise extends WebElement {
/**
* @param {!WebDriver} driver The parent WebDriver instance for this
* element.
* @param {!promise.Thenable<!WebElement>} el A promise
* that will resolve to the promised element.
*/
constructor(driver, el) {
super(driver, 'unused');
/**
* Cancel operation is only supported if the wrapped thenable is also
* cancellable.
* @param {(string|Error)=} opt_reason
* @override
*/
this.cancel = function(opt_reason) {
if (promise.CancellableThenable.isImplementation(el)) {
/** @type {!promise.CancellableThenable} */(el).cancel(opt_reason);
}
};
/** @override */
this.then = el.then.bind(el);
/** @override */
this.catch = el.catch.bind(el);
/**
* Defers returning the element ID until the wrapped WebElement has been
* resolved.
* @override
*/
this.getId = function() {
return el.then(function(el) {
return el.getId();
});
};
}
}
promise.CancellableThenable.addImplementation(WebElementPromise);
//////////////////////////////////////////////////////////////////////////////
//
// Alert
//
//////////////////////////////////////////////////////////////////////////////
/**
* Represents a modal dialog such as {@code alert}, {@code confirm}, or
* {@code prompt}. Provides functions to retrieve the message displayed with
* the alert, accept or dismiss the alert, and set the response text (in the
* case of {@code prompt}).
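 *
 * __Sample Usage__ (a sketch; assumes a `window.prompt()` dialog is currently
 * open):
 *
 *     const alert = driver.switchTo().alert();
 *     alert.sendKeys('some response text');
 *     alert.accept();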
*/
class Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this alert
* is attached to.
* @param {string} text The message text displayed with this alert.
*/
constructor(driver, text) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Thenable<string>} */
this.text_ = driver.controlFlow().promise(resolve => resolve(text));
}
/**
* Retrieves the message text displayed with this alert. For instance, if the
* alert were opened with alert("hello"), then this would return "hello".
*
* @return {!promise.Thenable<string>} A promise that will be
* resolved to the text displayed with this alert.
*/
getText() {
return this.text_;
}
/**
* Sets the username and password in an alert prompting for credentials (such
* as a Basic HTTP Auth prompt). This method will implicitly
* {@linkplain #accept() submit} the dialog.
*
* @param {string} username The username to send.
* @param {string} password The password to send.
* @return {!promise.Thenable<void>} A promise that will be resolved when this
* command has completed.
*/
authenticateAs(username, password) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_CREDENTIALS),
'WebDriver.switchTo().alert()'
+ `.authenticateAs("${username}", "${password}")`);
}
/**
* Accepts this alert.
*
* @return {!promise.Thenable<void>} A promise that will be resolved
* when this command has completed.
*/
accept() {
return this.driver_.schedule(
new command.Command(command.Name.ACCEPT_ALERT),
'WebDriver.switchTo().alert().accept()');
}
/**
* Dismisses this alert.
*
* @return {!promise.Thenable<void>} A promise that will be resolved
* when this command has completed.
*/
dismiss() {
return this.driver_.schedule(
new command.Command(command.Name.DISMISS_ALERT),
'WebDriver.switchTo().alert().dismiss()');
}
/**
* Sets the response text on this alert. This command will return an error if
* the underlying alert does not support response text (e.g. window.alert and
* window.confirm).
*
* @param {string} text The text to set.
* @return {!promise.Thenable<void>} A promise that will be resolved
* when this command has completed.
*/
sendKeys(text) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_TEXT).
setParameter('text', text),
'WebDriver.switchTo().alert().sendKeys(' + text + ')');
}
}
/**
* AlertPromise is a promise that will be fulfilled with an Alert. This promise
* serves as a forward proxy on an Alert, allowing calls to be scheduled
* directly on this instance before the underlying Alert has been fulfilled. In
* other words, the following two statements are equivalent:
*
* driver.switchTo().alert().dismiss();
* driver.switchTo().alert().then(function(alert) {
* return alert.dismiss();
* });
*
* @implements {promise.CancellableThenable<!webdriver.Alert>}
* @final
*/
class AlertPromise extends Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this
* alert is attached to.
* @param {!promise.Thenable<!Alert>} alert A thenable
* that will be fulfilled with the promised alert.
*/
constructor(driver, alert) {
super(driver, 'unused');
/**
* Cancel operation is only supported if the wrapped thenable is also
* cancellable.
* @param {(string|Error)=} opt_reason
* @override
*/
this.cancel = function(opt_reason) {
if (promise.CancellableThenable.isImplementation(alert)) {
/** @type {!promise.CancellableThenable} */(alert).cancel(opt_reason);
}
};
/** @override */
this.then = alert.then.bind(alert);
/** @override */
this.catch = alert.catch.bind(alert);
/**
* Defer returning text until the promised alert has been resolved.
* @override
*/
this.getText = function() {
return alert.then(function(alert) {
return alert.getText();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.authenticateAs = function(username, password) {
return alert.then(function(alert) {
return alert.authenticateAs(username, password);
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.accept = function() {
return alert.then(function(alert) {
return alert.accept();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.dismiss = function() {
return alert.then(function(alert) {
return alert.dismiss();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.sendKeys = function(text) {
return alert.then(function(alert) {
return alert.sendKeys(text);
});
};
}
}
promise.CancellableThenable.addImplementation(AlertPromise);
// PUBLIC API
module.exports = {
Alert: Alert,
AlertPromise: AlertPromise,
Condition: Condition,
Logs: Logs,
Navigation: Navigation,
Options: Options,
TargetLocator: TargetLocator,
Timeouts: Timeouts,
IWebDriver: IWebDriver,
WebDriver: WebDriver,
WebElement: WebElement,
WebElementCondition: WebElementCondition,
WebElementPromise: WebElementPromise,
Window: Window
};
| 1 | 14,518 | Also update line 2205 below | SeleniumHQ-selenium | py |
@@ -158,7 +158,7 @@ public class MessageCompose extends K9Activity implements OnClickListener,
"com.fsck.k9.activity.MessageCompose.quotedTextFormat";
private static final String STATE_KEY_NUM_ATTACHMENTS_LOADING = "numAttachmentsLoading";
private static final String STATE_KEY_WAITING_FOR_ATTACHMENTS = "waitingForAttachments";
-
+ private static final String STATE_FIRST_TIME_EMPTY_SUBJECT = "firstTimeEmpySubject";
private static final String LOADER_ARG_ATTACHMENT = "attachment";
private static final String FRAGMENT_WAITING_FOR_ATTACHMENT = "waitingForAttachment"; | 1 | package com.fsck.k9.activity;
import java.text.DateFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import android.annotation.SuppressLint;
import android.annotation.TargetApi;
import android.app.AlertDialog;
import android.app.AlertDialog.Builder;
import android.app.Dialog;
import android.app.LoaderManager;
import android.app.PendingIntent;
import android.content.ClipData;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.content.IntentSender.SendIntentException;
import android.content.Loader;
import android.content.pm.ActivityInfo;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Build;
import android.os.Bundle;
import android.os.Handler;
import android.os.Parcelable;
import android.support.annotation.Nullable;
import android.text.TextUtils;
import android.text.TextWatcher;
import android.util.Log;
import android.util.TypedValue;
import android.view.ContextThemeWrapper;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.View.OnFocusChangeListener;
import android.view.ViewGroup;
import android.view.Window;
import android.webkit.WebView;
import android.webkit.WebViewClient;
import android.widget.BaseAdapter;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageButton;
import android.widget.LinearLayout;
import android.widget.TextView;
import android.widget.Toast;
import com.fsck.k9.Account;
import com.fsck.k9.Account.MessageFormat;
import com.fsck.k9.Account.QuoteStyle;
import com.fsck.k9.FontSizes;
import com.fsck.k9.Identity;
import com.fsck.k9.K9;
import com.fsck.k9.Preferences;
import com.fsck.k9.R;
import com.fsck.k9.activity.compose.ComposeCryptoStatus;
import com.fsck.k9.activity.compose.CryptoSettingsDialog.OnCryptoModeChangedListener;
import com.fsck.k9.activity.compose.RecipientMvpView;
import com.fsck.k9.activity.compose.RecipientPresenter;
import com.fsck.k9.activity.compose.RecipientPresenter.CryptoMode;
import com.fsck.k9.activity.loader.AttachmentContentLoader;
import com.fsck.k9.activity.loader.AttachmentInfoLoader;
import com.fsck.k9.activity.misc.Attachment;
import com.fsck.k9.controller.MessagingController;
import com.fsck.k9.controller.MessagingListener;
import com.fsck.k9.fragment.ProgressDialogFragment;
import com.fsck.k9.fragment.ProgressDialogFragment.CancelListener;
import com.fsck.k9.helper.Contacts;
import com.fsck.k9.helper.HtmlConverter;
import com.fsck.k9.helper.IdentityHelper;
import com.fsck.k9.helper.MailTo;
import com.fsck.k9.helper.SimpleTextWatcher;
import com.fsck.k9.helper.Utility;
import com.fsck.k9.mail.Address;
import com.fsck.k9.mail.Flag;
import com.fsck.k9.mail.Message;
import com.fsck.k9.mail.Message.RecipientType;
import com.fsck.k9.mail.MessagingException;
import com.fsck.k9.mail.Multipart;
import com.fsck.k9.mail.Part;
import com.fsck.k9.mail.internet.MessageExtractor;
import com.fsck.k9.mail.internet.MimeMessage;
import com.fsck.k9.mail.internet.MimeUtility;
import com.fsck.k9.mailstore.LocalBodyPart;
import com.fsck.k9.mailstore.LocalMessage;
import com.fsck.k9.message.IdentityField;
import com.fsck.k9.message.IdentityHeaderParser;
import com.fsck.k9.message.InsertableHtmlContent;
import com.fsck.k9.message.MessageBuilder;
import com.fsck.k9.message.PgpMessageBuilder;
import com.fsck.k9.message.QuotedTextMode;
import com.fsck.k9.message.SimpleMessageBuilder;
import com.fsck.k9.message.SimpleMessageFormat;
import com.fsck.k9.provider.AttachmentProvider;
import com.fsck.k9.ui.EolConvertingEditText;
import com.fsck.k9.view.MessageWebView;
import org.htmlcleaner.CleanerProperties;
import org.htmlcleaner.HtmlCleaner;
import org.htmlcleaner.SimpleHtmlSerializer;
import org.htmlcleaner.TagNode;
import org.openintents.openpgp.IOpenPgpService2;
import org.openintents.openpgp.util.OpenPgpApi;
import org.openintents.openpgp.util.OpenPgpServiceConnection;
import org.openintents.openpgp.util.OpenPgpServiceConnection.OnBound;
@SuppressWarnings("deprecation")
public class MessageCompose extends K9Activity implements OnClickListener,
CancelListener, OnFocusChangeListener, OnCryptoModeChangedListener, MessageBuilder.Callback {
private static final int DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE = 1;
private static final int DIALOG_CONFIRM_DISCARD_ON_BACK = 2;
private static final int DIALOG_CHOOSE_IDENTITY = 3;
private static final int DIALOG_CONFIRM_DISCARD = 4;
private static final long INVALID_DRAFT_ID = MessagingController.INVALID_MESSAGE_ID;
private static final String ACTION_COMPOSE = "com.fsck.k9.intent.action.COMPOSE";
private static final String ACTION_REPLY = "com.fsck.k9.intent.action.REPLY";
private static final String ACTION_REPLY_ALL = "com.fsck.k9.intent.action.REPLY_ALL";
private static final String ACTION_FORWARD = "com.fsck.k9.intent.action.FORWARD";
private static final String ACTION_EDIT_DRAFT = "com.fsck.k9.intent.action.EDIT_DRAFT";
private static final String EXTRA_ACCOUNT = "account";
private static final String EXTRA_MESSAGE_BODY = "messageBody";
private static final String EXTRA_MESSAGE_REFERENCE = "message_reference";
private static final String STATE_KEY_ATTACHMENTS =
"com.fsck.k9.activity.MessageCompose.attachments";
private static final String STATE_KEY_QUOTED_TEXT_MODE =
"com.fsck.k9.activity.MessageCompose.QuotedTextShown";
private static final String STATE_KEY_SOURCE_MESSAGE_PROCED =
"com.fsck.k9.activity.MessageCompose.stateKeySourceMessageProced";
private static final String STATE_KEY_DRAFT_ID = "com.fsck.k9.activity.MessageCompose.draftId";
private static final String STATE_KEY_HTML_QUOTE = "com.fsck.k9.activity.MessageCompose.HTMLQuote";
private static final String STATE_IDENTITY_CHANGED =
"com.fsck.k9.activity.MessageCompose.identityChanged";
private static final String STATE_IDENTITY =
"com.fsck.k9.activity.MessageCompose.identity";
private static final String STATE_IN_REPLY_TO = "com.fsck.k9.activity.MessageCompose.inReplyTo";
private static final String STATE_REFERENCES = "com.fsck.k9.activity.MessageCompose.references";
private static final String STATE_KEY_READ_RECEIPT = "com.fsck.k9.activity.MessageCompose.messageReadReceipt";
private static final String STATE_KEY_DRAFT_NEEDS_SAVING = "com.fsck.k9.activity.MessageCompose.draftNeedsSaving";
private static final String STATE_KEY_FORCE_PLAIN_TEXT =
"com.fsck.k9.activity.MessageCompose.forcePlainText";
private static final String STATE_KEY_QUOTED_TEXT_FORMAT =
"com.fsck.k9.activity.MessageCompose.quotedTextFormat";
private static final String STATE_KEY_NUM_ATTACHMENTS_LOADING = "numAttachmentsLoading";
private static final String STATE_KEY_WAITING_FOR_ATTACHMENTS = "waitingForAttachments";
private static final String LOADER_ARG_ATTACHMENT = "attachment";
private static final String FRAGMENT_WAITING_FOR_ATTACHMENT = "waitingForAttachment";
private static final int MSG_PROGRESS_ON = 1;
private static final int MSG_PROGRESS_OFF = 2;
private static final int MSG_SKIPPED_ATTACHMENTS = 3;
private static final int MSG_SAVED_DRAFT = 4;
private static final int MSG_DISCARDED_DRAFT = 5;
private static final int MSG_PERFORM_STALLED_ACTION = 6;
private static final int ACTIVITY_REQUEST_PICK_ATTACHMENT = 1;
private static final int REQUEST_MASK_RECIPIENT_PRESENTER = (1<<8);
private static final int REQUEST_MASK_MESSAGE_BUILDER = (2<<8);
/**
* Regular expression to remove the first localized "Re:" prefix in subjects.
*
* Currently:
* - "Aw:" (german: abbreviation for "Antwort")
*/
private static final Pattern PREFIX = Pattern.compile("^AW[:\\s]\\s*", Pattern.CASE_INSENSITIVE);
/**
* The account used for message composition.
*/
private Account mAccount;
private Contacts mContacts;
/**
* This identity's settings are used for message composition.
* Note: This has to be an identity of the account {@link #mAccount}.
*/
private Identity mIdentity;
private boolean mIdentityChanged = false;
private boolean mSignatureChanged = false;
/**
* Reference to the source message (in case of reply, forward, or edit
* draft actions).
*/
private MessageReference mMessageReference;
private Message mSourceMessage;
/**
* "Original" message body
*
* <p>
* The contents of this string will be used instead of the body of a referenced message when
* replying to or forwarding a message.<br>
* Right now this is only used when replying to a signed or encrypted message. It then contains
* the stripped/decrypted body of that message.
* </p>
* <p><strong>Note:</strong>
* When this field is not {@code null} we assume that the message we are composing right now
* should be encrypted.
* </p>
*/
private String mSourceMessageBody;
/**
* Indicates that the source message has been processed at least once and should not
* be processed on any subsequent loads. This protects us from adding attachments that
* have already been added from the restore of the view state.
*/
private boolean mSourceMessageProcessed = false;
private int mMaxLoaderId = 0;
private RecipientPresenter recipientPresenter;
private MessageBuilder currentMessageBuilder;
private boolean mFinishAfterDraftSaved;
@Override
public void onFocusChange(View v, boolean hasFocus) {
switch(v.getId()) {
case R.id.message_content:
case R.id.subject:
if (hasFocus) {
recipientPresenter.onNonRecipientFieldFocused();
}
break;
}
}
@Override
public void onCryptoModeChanged(CryptoMode cryptoMode) {
recipientPresenter.onCryptoModeChanged(cryptoMode);
}
enum Action {
COMPOSE,
REPLY,
REPLY_ALL,
FORWARD,
EDIT_DRAFT
}
/**
* Contains the action we're currently performing (e.g. replying to a message)
*/
private Action mAction;
private boolean mReadReceipt = false;
private QuotedTextMode mQuotedTextMode = QuotedTextMode.NONE;
/**
* Contains the format of the quoted text (text vs. HTML).
*/
private SimpleMessageFormat mQuotedTextFormat;
/**
     * When this is {@code true} the message format setting is ignored and we're always sending
* a text/plain message.
*/
private boolean mForcePlainText = false;
private TextView mChooseIdentityButton;
private EditText mSubjectView;
private EolConvertingEditText mSignatureView;
private EolConvertingEditText mMessageContentView;
private LinearLayout mAttachments;
private Button mQuotedTextShow;
private View mQuotedTextBar;
private ImageButton mQuotedTextEdit;
private EolConvertingEditText mQuotedText;
private MessageWebView mQuotedHTML;
private InsertableHtmlContent mQuotedHtmlContent; // Container for HTML reply as it's being built.
private String mOpenPgpProvider;
private OpenPgpServiceConnection mOpenPgpServiceConnection;
private String mReferences;
private String mInReplyTo;
private boolean mSourceProcessed = false;
/**
* The currently used message format.
*
* <p>
* <strong>Note:</strong>
* Don't modify this field directly. Use {@link #updateMessageFormat()}.
* </p>
*/
private SimpleMessageFormat mMessageFormat;
private QuoteStyle mQuoteStyle;
private boolean draftNeedsSaving = false;
private boolean isInSubActivity = false;
/**
* The database ID of this message's draft. This is used when saving drafts so the message in
* the database is updated instead of being created anew. This property is INVALID_DRAFT_ID
* until the first save.
*/
private long mDraftId = INVALID_DRAFT_ID;
/**
* Number of attachments currently being fetched.
*/
private int mNumAttachmentsLoading = 0;
private enum WaitingAction {
NONE,
SEND,
SAVE
}
/**
* Specifies what action to perform once attachments have been fetched.
*/
private WaitingAction mWaitingForAttachments = WaitingAction.NONE;
private Handler mHandler = new Handler() {
@Override
public void handleMessage(android.os.Message msg) {
switch (msg.what) {
case MSG_PROGRESS_ON:
setProgressBarIndeterminateVisibility(true);
break;
case MSG_PROGRESS_OFF:
setProgressBarIndeterminateVisibility(false);
break;
case MSG_SKIPPED_ATTACHMENTS:
Toast.makeText(
MessageCompose.this,
getString(R.string.message_compose_attachments_skipped_toast),
Toast.LENGTH_LONG).show();
break;
case MSG_SAVED_DRAFT:
mDraftId = (Long) msg.obj;
Toast.makeText(
MessageCompose.this,
getString(R.string.message_saved_toast),
Toast.LENGTH_LONG).show();
break;
case MSG_DISCARDED_DRAFT:
Toast.makeText(
MessageCompose.this,
getString(R.string.message_discarded_toast),
Toast.LENGTH_LONG).show();
break;
case MSG_PERFORM_STALLED_ACTION:
performStalledAction();
break;
default:
super.handleMessage(msg);
break;
}
}
};
private Listener mListener = new Listener();
private FontSizes mFontSizes = K9.getFontSizes();
/**
* Compose a new message using the given account. If account is null the default account
* will be used.
*/
public static void actionCompose(Context context, Account account) {
String accountUuid = (account == null) ?
Preferences.getPreferences(context).getDefaultAccount().getUuid() :
account.getUuid();
Intent i = new Intent(context, MessageCompose.class);
i.putExtra(EXTRA_ACCOUNT, accountUuid);
i.setAction(ACTION_COMPOSE);
context.startActivity(i);
}
/**
* Get intent for composing a new message as a reply to the given message. If replyAll is true
     * the composed message will be a reply-all instead of a simple reply.
* @param messageBody optional, for decrypted messages, null if it should be grabbed from the given message
*/
public static Intent getActionReplyIntent(
Context context,
LocalMessage message,
boolean replyAll,
String messageBody) {
Intent i = new Intent(context, MessageCompose.class);
i.putExtra(EXTRA_MESSAGE_BODY, messageBody);
i.putExtra(EXTRA_MESSAGE_REFERENCE, message.makeMessageReference());
if (replyAll) {
i.setAction(ACTION_REPLY_ALL);
} else {
i.setAction(ACTION_REPLY);
}
return i;
}
public static Intent getActionReplyIntent(Context context, MessageReference messageReference) {
Intent intent = new Intent(context, MessageCompose.class);
intent.setAction(ACTION_REPLY);
intent.putExtra(EXTRA_MESSAGE_REFERENCE, messageReference);
intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
return intent;
}
/**
     * Compose a new message as a reply to the given message. If replyAll is true, a reply-all
     * is composed instead of a simple reply.
* @param messageBody optional, for decrypted messages, null if it should be grabbed from the given message
*/
public static void actionReply(
Context context,
LocalMessage message,
boolean replyAll,
String messageBody) {
context.startActivity(getActionReplyIntent(context, message, replyAll, messageBody));
}
/**
* Compose a new message as a forward of the given message.
* @param messageBody optional, for decrypted messages, null if it should be grabbed from the given message
*/
public static void actionForward(
Context context,
LocalMessage message,
String messageBody) {
Intent i = new Intent(context, MessageCompose.class);
i.putExtra(EXTRA_MESSAGE_BODY, messageBody);
i.putExtra(EXTRA_MESSAGE_REFERENCE, message.makeMessageReference());
i.setAction(ACTION_FORWARD);
context.startActivity(i);
}
/**
* Continue composition of the given message. This action modifies the way this Activity
* handles certain actions.
* Save will attempt to replace the message in the given folder with the updated version.
* Discard will delete the message from the given folder.
*/
public static void actionEditDraft(Context context, MessageReference messageReference) {
Intent i = new Intent(context, MessageCompose.class);
i.putExtra(EXTRA_MESSAGE_REFERENCE, messageReference);
i.setAction(ACTION_EDIT_DRAFT);
context.startActivity(i);
}
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
if (UpgradeDatabases.actionUpgradeDatabases(this, getIntent())) {
finish();
return;
}
requestWindowFeature(Window.FEATURE_INDETERMINATE_PROGRESS);
if (K9.getK9ComposerThemeSetting() != K9.Theme.USE_GLOBAL) {
// theme the whole content according to the theme (except the action bar)
ContextThemeWrapper themeContext = new ContextThemeWrapper(this,
K9.getK9ThemeResourceId(K9.getK9ComposerTheme()));
@SuppressLint("InflateParams") // this is the top level activity element, it has no root
View v = LayoutInflater.from(themeContext).inflate(R.layout.message_compose, null);
TypedValue outValue = new TypedValue();
// background color needs to be forced
themeContext.getTheme().resolveAttribute(R.attr.messageViewBackgroundColor, outValue, true);
v.setBackgroundColor(outValue.data);
setContentView(v);
} else {
setContentView(R.layout.message_compose);
}
final Intent intent = getIntent();
mMessageReference = intent.getParcelableExtra(EXTRA_MESSAGE_REFERENCE);
mSourceMessageBody = intent.getStringExtra(EXTRA_MESSAGE_BODY);
if (K9.DEBUG && mSourceMessageBody != null) {
Log.d(K9.LOG_TAG, "Composing message with explicitly specified message body.");
}
final String accountUuid = (mMessageReference != null) ?
mMessageReference.getAccountUuid() :
intent.getStringExtra(EXTRA_ACCOUNT);
mAccount = Preferences.getPreferences(this).getAccount(accountUuid);
if (mAccount == null) {
mAccount = Preferences.getPreferences(this).getDefaultAccount();
}
if (mAccount == null) {
/*
* There are no accounts set up. This should not have happened. Prompt the
* user to set up an account as an acceptable bailout.
*/
startActivity(new Intent(this, Accounts.class));
draftNeedsSaving = false;
finish();
return;
}
mContacts = Contacts.getInstance(MessageCompose.this);
mChooseIdentityButton = (TextView) findViewById(R.id.identity);
mChooseIdentityButton.setOnClickListener(this);
RecipientMvpView recipientMvpView = new RecipientMvpView(this);
recipientPresenter = new RecipientPresenter(this, recipientMvpView, mAccount);
mSubjectView = (EditText) findViewById(R.id.subject);
mSubjectView.getInputExtras(true).putBoolean("allowEmoji", true);
EolConvertingEditText upperSignature = (EolConvertingEditText)findViewById(R.id.upper_signature);
EolConvertingEditText lowerSignature = (EolConvertingEditText)findViewById(R.id.lower_signature);
mMessageContentView = (EolConvertingEditText)findViewById(R.id.message_content);
mMessageContentView.getInputExtras(true).putBoolean("allowEmoji", true);
mAttachments = (LinearLayout)findViewById(R.id.attachments);
mQuotedTextShow = (Button)findViewById(R.id.quoted_text_show);
mQuotedTextBar = findViewById(R.id.quoted_text_bar);
mQuotedTextEdit = (ImageButton)findViewById(R.id.quoted_text_edit);
ImageButton mQuotedTextDelete = (ImageButton) findViewById(R.id.quoted_text_delete);
mQuotedText = (EolConvertingEditText)findViewById(R.id.quoted_text);
mQuotedText.getInputExtras(true).putBoolean("allowEmoji", true);
mQuotedHTML = (MessageWebView) findViewById(R.id.quoted_html);
mQuotedHTML.configure();
        // Disable the ability to click links in the quoted HTML page. I think this is a nice feature, but if someone
        // feels this should be a preference (or should go away altogether), I'm ok with that too. -achen 20101130
mQuotedHTML.setWebViewClient(new WebViewClient() {
@Override
public boolean shouldOverrideUrlLoading(WebView view, String url) {
return true;
}
});
TextWatcher draftNeedsChangingTextWatcher = new SimpleTextWatcher() {
@Override
public void onTextChanged(CharSequence s, int start, int before, int count) {
draftNeedsSaving = true;
}
};
TextWatcher signTextWatcher = new SimpleTextWatcher() {
@Override
public void onTextChanged(CharSequence s, int start, int before, int count) {
draftNeedsSaving = true;
mSignatureChanged = true;
}
};
recipientMvpView.addTextChangedListener(draftNeedsChangingTextWatcher);
mSubjectView.addTextChangedListener(draftNeedsChangingTextWatcher);
mMessageContentView.addTextChangedListener(draftNeedsChangingTextWatcher);
mQuotedText.addTextChangedListener(draftNeedsChangingTextWatcher);
/*
* We set this to invisible by default. Other methods will turn it back on if it's
* needed.
*/
showOrHideQuotedText(QuotedTextMode.NONE);
mSubjectView.setOnFocusChangeListener(this);
mMessageContentView.setOnFocusChangeListener(this);
mQuotedTextShow.setOnClickListener(this);
mQuotedTextEdit.setOnClickListener(this);
mQuotedTextDelete.setOnClickListener(this);
if (savedInstanceState != null) {
/*
* This data gets used in onCreate, so grab it here instead of onRestoreInstanceState
*/
mSourceMessageProcessed = savedInstanceState.getBoolean(STATE_KEY_SOURCE_MESSAGE_PROCED, false);
}
if (initFromIntent(intent)) {
mAction = Action.COMPOSE;
draftNeedsSaving = true;
} else {
String action = intent.getAction();
if (ACTION_COMPOSE.equals(action)) {
mAction = Action.COMPOSE;
} else if (ACTION_REPLY.equals(action)) {
mAction = Action.REPLY;
} else if (ACTION_REPLY_ALL.equals(action)) {
mAction = Action.REPLY_ALL;
} else if (ACTION_FORWARD.equals(action)) {
mAction = Action.FORWARD;
} else if (ACTION_EDIT_DRAFT.equals(action)) {
mAction = Action.EDIT_DRAFT;
} else {
// This shouldn't happen
Log.w(K9.LOG_TAG, "MessageCompose was started with an unsupported action");
mAction = Action.COMPOSE;
}
}
if (mIdentity == null) {
mIdentity = mAccount.getIdentity(0);
}
if (mAccount.isSignatureBeforeQuotedText()) {
mSignatureView = upperSignature;
lowerSignature.setVisibility(View.GONE);
} else {
mSignatureView = lowerSignature;
upperSignature.setVisibility(View.GONE);
}
updateSignature();
mSignatureView.addTextChangedListener(signTextWatcher);
if (!mIdentity.getSignatureUse()) {
mSignatureView.setVisibility(View.GONE);
}
mReadReceipt = mAccount.isMessageReadReceiptAlways();
mQuoteStyle = mAccount.getQuoteStyle();
updateFrom();
if (!mSourceMessageProcessed) {
if (mAction == Action.REPLY || mAction == Action.REPLY_ALL ||
mAction == Action.FORWARD || mAction == Action.EDIT_DRAFT) {
/*
                 * If we need to load the message, we add ourselves as a message listener here
                 * so we can kick it off. Normally we add the listener in onResume, but we don't
                 * want to reload the message every time the activity is resumed.
                 * There is no harm in adding it twice.
*/
MessagingController.getInstance(getApplication()).addListener(mListener);
final Account account = Preferences.getPreferences(this).getAccount(mMessageReference.getAccountUuid());
final String folderName = mMessageReference.getFolderName();
final String sourceMessageUid = mMessageReference.getUid();
MessagingController.getInstance(getApplication()).loadMessageForView(account, folderName, sourceMessageUid, null);
}
if (mAction != Action.EDIT_DRAFT) {
String alwaysBccString = mAccount.getAlwaysBcc();
if (!TextUtils.isEmpty(alwaysBccString)) {
recipientPresenter.addBccAddresses(Address.parse(alwaysBccString));
}
}
}
if (mAction == Action.REPLY || mAction == Action.REPLY_ALL) {
mMessageReference = mMessageReference.withModifiedFlag(Flag.ANSWERED);
}
if (mAction == Action.REPLY || mAction == Action.REPLY_ALL ||
mAction == Action.EDIT_DRAFT) {
//change focus to message body.
mMessageContentView.requestFocus();
} else {
// Explicitly set focus to "To:" input field (see issue 2998)
recipientMvpView.requestFocusOnToField();
}
if (mAction == Action.FORWARD) {
mMessageReference = mMessageReference.withModifiedFlag(Flag.FORWARDED);
}
mOpenPgpProvider = mAccount.getOpenPgpProvider();
if (isCryptoProviderEnabled()) {
// attachKeyCheckBox = (CheckBox) findViewById(R.id.cb_attach_key);
// attachKeyCheckBox.setEnabled(mAccount.getCryptoKey() != 0);
mOpenPgpServiceConnection = new OpenPgpServiceConnection(this, mOpenPgpProvider, new OnBound() {
@Override
public void onBound(IOpenPgpService2 service) {
recipientPresenter.onCryptoProviderBound();
}
@Override
public void onError(Exception e) {
recipientPresenter.onCryptoProviderError(e);
}
});
mOpenPgpServiceConnection.bindToService();
updateMessageFormat();
}
// Set font size of input controls
int fontSize = mFontSizes.getMessageComposeInput();
recipientMvpView.setFontSizes(mFontSizes, fontSize);
mFontSizes.setViewTextSize(mSubjectView, fontSize);
mFontSizes.setViewTextSize(mMessageContentView, fontSize);
mFontSizes.setViewTextSize(mQuotedText, fontSize);
mFontSizes.setViewTextSize(mSignatureView, fontSize);
updateMessageFormat();
setTitle();
currentMessageBuilder = (MessageBuilder) getLastNonConfigurationInstance();
if (currentMessageBuilder != null) {
setProgressBarIndeterminateVisibility(true);
currentMessageBuilder.reattachCallback(this);
}
}
@Override
public void onDestroy() {
super.onDestroy();
if (mOpenPgpServiceConnection != null) {
mOpenPgpServiceConnection.unbindFromService();
}
}
/**
* Handle external intents that trigger the message compose activity.
*
* <p>
* Supported external intents:
* <ul>
* <li>{@link Intent#ACTION_VIEW}</li>
* <li>{@link Intent#ACTION_SENDTO}</li>
* <li>{@link Intent#ACTION_SEND}</li>
* <li>{@link Intent#ACTION_SEND_MULTIPLE}</li>
* </ul>
* </p>
*
* @param intent
* The (external) intent that started the activity.
*
* @return {@code true}, if this activity was started by an external intent. {@code false},
* otherwise.
*/
private boolean initFromIntent(final Intent intent) {
boolean startedByExternalIntent = false;
final String action = intent.getAction();
if (Intent.ACTION_VIEW.equals(action) || Intent.ACTION_SENDTO.equals(action)) {
/*
* Someone has clicked a mailto: link. The address is in the URI.
*/
if (intent.getData() != null) {
Uri uri = intent.getData();
if (MailTo.isMailTo(uri)) {
MailTo mailTo = MailTo.parse(uri);
initializeFromMailto(mailTo);
}
}
/*
* Note: According to the documentation ACTION_VIEW and ACTION_SENDTO don't accept
* EXTRA_* parameters.
* And previously we didn't process these EXTRAs. But it looks like nobody bothers to
* read the official documentation and just copies wrong sample code that happens to
* work with the AOSP Email application. And because even big players get this wrong,
             * we're now finally giving in and reading the EXTRAs for those actions (below).
*/
}
if (Intent.ACTION_SEND.equals(action) || Intent.ACTION_SEND_MULTIPLE.equals(action) ||
Intent.ACTION_SENDTO.equals(action) || Intent.ACTION_VIEW.equals(action)) {
startedByExternalIntent = true;
/*
* Note: Here we allow a slight deviation from the documented behavior.
* EXTRA_TEXT is used as message body (if available) regardless of the MIME
* type of the intent. In addition one or multiple attachments can be added
* using EXTRA_STREAM.
*/
CharSequence text = intent.getCharSequenceExtra(Intent.EXTRA_TEXT);
// Only use EXTRA_TEXT if the body hasn't already been set by the mailto URI
if (text != null && mMessageContentView.getText().length() == 0) {
mMessageContentView.setCharacters(text);
}
String type = intent.getType();
if (Intent.ACTION_SEND.equals(action)) {
Uri stream = intent.getParcelableExtra(Intent.EXTRA_STREAM);
if (stream != null) {
addAttachment(stream, type);
}
} else {
List<Parcelable> list = intent.getParcelableArrayListExtra(Intent.EXTRA_STREAM);
if (list != null) {
for (Parcelable parcelable : list) {
Uri stream = (Uri) parcelable;
if (stream != null) {
addAttachment(stream, type);
}
}
}
}
String subject = intent.getStringExtra(Intent.EXTRA_SUBJECT);
// Only use EXTRA_SUBJECT if the subject hasn't already been set by the mailto URI
if (subject != null && mSubjectView.getText().length() == 0) {
mSubjectView.setText(subject);
}
recipientPresenter.initFromSendOrViewIntent(intent);
}
return startedByExternalIntent;
}
@Override
public void onResume() {
super.onResume();
MessagingController.getInstance(getApplication()).addListener(mListener);
}
@Override
public void onPause() {
super.onPause();
MessagingController.getInstance(getApplication()).removeListener(mListener);
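        // Skip the implicit draft save below when we're only rotating the screen, when a
        // MessageBuilder is still running in the background, or when we merely launched a
        // sub-activity (e.g. the attachment picker); composition continues in all of those cases.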
boolean isPausingOnConfigurationChange = (getChangingConfigurations() & ActivityInfo.CONFIG_ORIENTATION)
== ActivityInfo.CONFIG_ORIENTATION;
boolean isCurrentlyBuildingMessage = currentMessageBuilder != null;
if (isPausingOnConfigurationChange || isCurrentlyBuildingMessage || isInSubActivity) {
return;
}
checkToSaveDraftImplicitly();
}
/**
* The framework handles most of the fields, but we need to handle stuff that we
* dynamically show and hide:
* Attachment list,
* Cc field,
* Bcc field,
* Quoted text,
*/
@Override
protected void onSaveInstanceState(Bundle outState) {
super.onSaveInstanceState(outState);
outState.putInt(STATE_KEY_NUM_ATTACHMENTS_LOADING, mNumAttachmentsLoading);
outState.putString(STATE_KEY_WAITING_FOR_ATTACHMENTS, mWaitingForAttachments.name());
outState.putParcelableArrayList(STATE_KEY_ATTACHMENTS, createAttachmentList());
outState.putSerializable(STATE_KEY_QUOTED_TEXT_MODE, mQuotedTextMode);
outState.putBoolean(STATE_KEY_SOURCE_MESSAGE_PROCED, mSourceMessageProcessed);
outState.putLong(STATE_KEY_DRAFT_ID, mDraftId);
outState.putSerializable(STATE_IDENTITY, mIdentity);
outState.putBoolean(STATE_IDENTITY_CHANGED, mIdentityChanged);
outState.putString(STATE_IN_REPLY_TO, mInReplyTo);
outState.putString(STATE_REFERENCES, mReferences);
outState.putSerializable(STATE_KEY_HTML_QUOTE, mQuotedHtmlContent);
outState.putBoolean(STATE_KEY_READ_RECEIPT, mReadReceipt);
outState.putBoolean(STATE_KEY_DRAFT_NEEDS_SAVING, draftNeedsSaving);
outState.putBoolean(STATE_KEY_FORCE_PLAIN_TEXT, mForcePlainText);
outState.putSerializable(STATE_KEY_QUOTED_TEXT_FORMAT, mQuotedTextFormat);
recipientPresenter.onSaveInstanceState(outState);
}
@Override
public Object onRetainNonConfigurationInstance() {
if (currentMessageBuilder != null) {
currentMessageBuilder.detachCallback();
}
return currentMessageBuilder;
}
@Override
protected void onRestoreInstanceState(Bundle savedInstanceState) {
super.onRestoreInstanceState(savedInstanceState);
mAttachments.removeAllViews();
mMaxLoaderId = 0;
mNumAttachmentsLoading = savedInstanceState.getInt(STATE_KEY_NUM_ATTACHMENTS_LOADING);
mWaitingForAttachments = WaitingAction.NONE;
try {
String waitingFor = savedInstanceState.getString(STATE_KEY_WAITING_FOR_ATTACHMENTS);
mWaitingForAttachments = WaitingAction.valueOf(waitingFor);
} catch (Exception e) {
Log.w(K9.LOG_TAG, "Couldn't read value \" + STATE_KEY_WAITING_FOR_ATTACHMENTS +" +
"\" from saved instance state", e);
}
List<Attachment> attachments = savedInstanceState.getParcelableArrayList(STATE_KEY_ATTACHMENTS);
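        // Rebuild the attachment views and restart any loaders that hadn't finished before the
        // activity was recreated: URI_ONLY attachments still need their metadata, METADATA
        // attachments still need their content to be loaded.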
// noinspection ConstantConditions, we know this is set in onSaveInstanceState
for (Attachment attachment : attachments) {
addAttachmentView(attachment);
if (attachment.loaderId > mMaxLoaderId) {
mMaxLoaderId = attachment.loaderId;
}
if (attachment.state == Attachment.LoadingState.URI_ONLY) {
initAttachmentInfoLoader(attachment);
} else if (attachment.state == Attachment.LoadingState.METADATA) {
initAttachmentContentLoader(attachment);
}
}
mReadReceipt = savedInstanceState.getBoolean(STATE_KEY_READ_RECEIPT);
recipientPresenter.onRestoreInstanceState(savedInstanceState);
mQuotedHtmlContent =
(InsertableHtmlContent) savedInstanceState.getSerializable(STATE_KEY_HTML_QUOTE);
if (mQuotedHtmlContent != null && mQuotedHtmlContent.getQuotedContent() != null) {
mQuotedHTML.setText(mQuotedHtmlContent.getQuotedContent());
}
mDraftId = savedInstanceState.getLong(STATE_KEY_DRAFT_ID);
mIdentity = (Identity)savedInstanceState.getSerializable(STATE_IDENTITY);
mIdentityChanged = savedInstanceState.getBoolean(STATE_IDENTITY_CHANGED);
mInReplyTo = savedInstanceState.getString(STATE_IN_REPLY_TO);
mReferences = savedInstanceState.getString(STATE_REFERENCES);
draftNeedsSaving = savedInstanceState.getBoolean(STATE_KEY_DRAFT_NEEDS_SAVING);
mForcePlainText = savedInstanceState.getBoolean(STATE_KEY_FORCE_PLAIN_TEXT);
mQuotedTextFormat = (SimpleMessageFormat) savedInstanceState.getSerializable(
STATE_KEY_QUOTED_TEXT_FORMAT);
showOrHideQuotedText(
(QuotedTextMode) savedInstanceState.getSerializable(STATE_KEY_QUOTED_TEXT_MODE));
updateFrom();
updateMessageFormat();
}
private void setTitle() {
switch (mAction) {
case REPLY: {
setTitle(R.string.compose_title_reply);
break;
}
case REPLY_ALL: {
setTitle(R.string.compose_title_reply_all);
break;
}
case FORWARD: {
setTitle(R.string.compose_title_forward);
break;
}
case COMPOSE:
default: {
setTitle(R.string.compose_title_compose);
break;
}
}
}
@Nullable
private MessageBuilder createMessageBuilder(boolean isDraft) {
MessageBuilder builder;
if (!recipientPresenter.canSendOrError(isDraft)) {
return null;
}
ComposeCryptoStatus cryptoStatus = recipientPresenter.getCurrentCryptoStatus();
// TODO encrypt drafts for storage
if(!isDraft && cryptoStatus.shouldUsePgpMessageBuilder()) {
PgpMessageBuilder pgpBuilder = new PgpMessageBuilder(getApplicationContext(), getOpenPgpApi());
pgpBuilder.setCryptoStatus(cryptoStatus);
builder = pgpBuilder;
} else {
builder = new SimpleMessageBuilder(getApplicationContext());
}
builder.setSubject(mSubjectView.getText().toString())
.setTo(recipientPresenter.getToAddresses())
.setCc(recipientPresenter.getCcAddresses())
.setBcc(recipientPresenter.getBccAddresses())
.setInReplyTo(mInReplyTo)
.setReferences(mReferences)
.setRequestReadReceipt(mReadReceipt)
.setIdentity(mIdentity)
.setMessageFormat(mMessageFormat)
.setText(mMessageContentView.getCharacters())
.setAttachments(createAttachmentList())
.setSignature(mSignatureView.getCharacters())
.setQuoteStyle(mQuoteStyle)
.setQuotedTextMode(mQuotedTextMode)
.setQuotedText(mQuotedText.getCharacters())
.setQuotedHtmlContent(mQuotedHtmlContent)
.setReplyAfterQuote(mAccount.isReplyAfterQuote())
.setSignatureBeforeQuotedText(mAccount.isSignatureBeforeQuotedText())
.setIdentityChanged(mIdentityChanged)
.setSignatureChanged(mSignatureChanged)
.setCursorPosition(mMessageContentView.getSelectionStart())
.setMessageReference(mMessageReference)
.setDraft(isDraft);
return builder;
}
private void checkToSendMessage() {
if (recipientPresenter.checkRecipientsOkForSending()) {
return;
}
if (mWaitingForAttachments != WaitingAction.NONE) {
return;
}
if (mNumAttachmentsLoading > 0) {
mWaitingForAttachments = WaitingAction.SEND;
showWaitingForAttachmentDialog();
return;
}
performSendAfterChecks();
}
private void checkToSaveDraftAndSave() {
if (!mAccount.hasDraftsFolder()) {
Toast.makeText(this, R.string.compose_error_no_draft_folder, Toast.LENGTH_SHORT).show();
return;
}
if (mWaitingForAttachments != WaitingAction.NONE) {
return;
}
if (mNumAttachmentsLoading > 0) {
mWaitingForAttachments = WaitingAction.SAVE;
showWaitingForAttachmentDialog();
return;
}
mFinishAfterDraftSaved = true;
performSaveAfterChecks();
}
private void checkToSaveDraftImplicitly() {
if (!mAccount.hasDraftsFolder()) {
return;
}
if (!draftNeedsSaving) {
return;
}
mFinishAfterDraftSaved = false;
performSaveAfterChecks();
}
private void performSaveAfterChecks() {
currentMessageBuilder = createMessageBuilder(true);
if (currentMessageBuilder != null) {
setProgressBarIndeterminateVisibility(true);
currentMessageBuilder.buildAsync(this);
}
}
public void performSendAfterChecks() {
currentMessageBuilder = createMessageBuilder(false);
if (currentMessageBuilder != null) {
draftNeedsSaving = false;
setProgressBarIndeterminateVisibility(true);
currentMessageBuilder.buildAsync(this);
}
}
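    // Note: both saving and sending funnel through MessageBuilder.buildAsync(this); the builder
    // reports back to this activity when the MIME message is ready (or when it needs user
    // interaction, e.g. from the crypto provider), which is why the builder is retained across
    // configuration changes in onRetainNonConfigurationInstance() and reattached in onCreate().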
private void onDiscard() {
if (mDraftId != INVALID_DRAFT_ID) {
MessagingController.getInstance(getApplication()).deleteDraft(mAccount, mDraftId);
mDraftId = INVALID_DRAFT_ID;
}
mHandler.sendEmptyMessage(MSG_DISCARDED_DRAFT);
draftNeedsSaving = false;
finish();
}
private void onReadReceipt() {
CharSequence txt;
if (!mReadReceipt) {
txt = getString(R.string.read_receipt_enabled);
mReadReceipt = true;
} else {
txt = getString(R.string.read_receipt_disabled);
mReadReceipt = false;
}
Context context = getApplicationContext();
Toast toast = Toast.makeText(context, txt, Toast.LENGTH_SHORT);
toast.show();
}
private ArrayList<Attachment> createAttachmentList() {
ArrayList<Attachment> attachments = new ArrayList<>();
for (int i = 0, count = mAttachments.getChildCount(); i < count; i++) {
View view = mAttachments.getChildAt(i);
Attachment attachment = (Attachment) view.getTag();
attachments.add(attachment);
}
return attachments;
}
/**
     * Kick off a document picker (any MIME type) and let Android take over.
*/
@SuppressLint("InlinedApi")
private void onAddAttachment() {
Intent i = new Intent(Intent.ACTION_GET_CONTENT);
i.putExtra(Intent.EXTRA_ALLOW_MULTIPLE, true);
i.addCategory(Intent.CATEGORY_OPENABLE);
i.setType("*/*");
isInSubActivity = true;
startActivityForResult(Intent.createChooser(i, null), ACTIVITY_REQUEST_PICK_ATTACHMENT);
}
private void addAttachment(Uri uri) {
addAttachment(uri, null);
}
private void addAttachment(Uri uri, String contentType) {
Attachment attachment = new Attachment();
attachment.state = Attachment.LoadingState.URI_ONLY;
attachment.uri = uri;
attachment.contentType = contentType;
attachment.loaderId = ++mMaxLoaderId;
addAttachmentView(attachment);
initAttachmentInfoLoader(attachment);
}
private void initAttachmentInfoLoader(Attachment attachment) {
LoaderManager loaderManager = getLoaderManager();
Bundle bundle = new Bundle();
bundle.putParcelable(LOADER_ARG_ATTACHMENT, attachment);
loaderManager.initLoader(attachment.loaderId, bundle, mAttachmentInfoLoaderCallback);
}
private void initAttachmentContentLoader(Attachment attachment) {
LoaderManager loaderManager = getLoaderManager();
Bundle bundle = new Bundle();
bundle.putParcelable(LOADER_ARG_ATTACHMENT, attachment);
loaderManager.initLoader(attachment.loaderId, bundle, mAttachmentContentLoaderCallback);
}
private void addAttachmentView(Attachment attachment) {
boolean hasMetadata = (attachment.state != Attachment.LoadingState.URI_ONLY);
boolean isLoadingComplete = (attachment.state == Attachment.LoadingState.COMPLETE);
View view = getLayoutInflater().inflate(R.layout.message_compose_attachment, mAttachments, false);
TextView nameView = (TextView) view.findViewById(R.id.attachment_name);
View progressBar = view.findViewById(R.id.progressBar);
if (hasMetadata) {
nameView.setText(attachment.name);
} else {
nameView.setText(R.string.loading_attachment);
}
progressBar.setVisibility(isLoadingComplete ? View.GONE : View.VISIBLE);
ImageButton delete = (ImageButton) view.findViewById(R.id.attachment_delete);
delete.setOnClickListener(MessageCompose.this);
delete.setTag(view);
view.setTag(attachment);
mAttachments.addView(view);
}
private View getAttachmentView(int loaderId) {
for (int i = 0, childCount = mAttachments.getChildCount(); i < childCount; i++) {
View view = mAttachments.getChildAt(i);
Attachment tag = (Attachment) view.getTag();
if (tag != null && tag.loaderId == loaderId) {
return view;
}
}
return null;
}
private LoaderManager.LoaderCallbacks<Attachment> mAttachmentInfoLoaderCallback =
new LoaderManager.LoaderCallbacks<Attachment>() {
@Override
public Loader<Attachment> onCreateLoader(int id, Bundle args) {
onFetchAttachmentStarted();
Attachment attachment = args.getParcelable(LOADER_ARG_ATTACHMENT);
return new AttachmentInfoLoader(MessageCompose.this, attachment);
}
@Override
public void onLoadFinished(Loader<Attachment> loader, Attachment attachment) {
int loaderId = loader.getId();
View view = getAttachmentView(loaderId);
if (view != null) {
view.setTag(attachment);
TextView nameView = (TextView) view.findViewById(R.id.attachment_name);
nameView.setText(attachment.name);
attachment.loaderId = ++mMaxLoaderId;
initAttachmentContentLoader(attachment);
} else {
onFetchAttachmentFinished();
}
getLoaderManager().destroyLoader(loaderId);
}
@Override
public void onLoaderReset(Loader<Attachment> loader) {
onFetchAttachmentFinished();
}
};
private LoaderManager.LoaderCallbacks<Attachment> mAttachmentContentLoaderCallback =
new LoaderManager.LoaderCallbacks<Attachment>() {
@Override
public Loader<Attachment> onCreateLoader(int id, Bundle args) {
Attachment attachment = args.getParcelable(LOADER_ARG_ATTACHMENT);
return new AttachmentContentLoader(MessageCompose.this, attachment);
}
@Override
public void onLoadFinished(Loader<Attachment> loader, Attachment attachment) {
int loaderId = loader.getId();
View view = getAttachmentView(loaderId);
if (view != null) {
if (attachment.state == Attachment.LoadingState.COMPLETE) {
view.setTag(attachment);
View progressBar = view.findViewById(R.id.progressBar);
progressBar.setVisibility(View.GONE);
} else {
mAttachments.removeView(view);
}
}
onFetchAttachmentFinished();
getLoaderManager().destroyLoader(loaderId);
}
@Override
public void onLoaderReset(Loader<Attachment> loader) {
onFetchAttachmentFinished();
}
};
public OpenPgpApi getOpenPgpApi() {
return new OpenPgpApi(this, mOpenPgpServiceConnection.getService());
}
private void onFetchAttachmentStarted() {
mNumAttachmentsLoading += 1;
}
private void onFetchAttachmentFinished() {
// We're not allowed to perform fragment transactions when called from onLoadFinished().
// So we use the Handler to call performStalledAction().
mHandler.sendEmptyMessage(MSG_PERFORM_STALLED_ACTION);
}
private void performStalledAction() {
mNumAttachmentsLoading -= 1;
WaitingAction waitingFor = mWaitingForAttachments;
mWaitingForAttachments = WaitingAction.NONE;
if (waitingFor != WaitingAction.NONE) {
dismissWaitingForAttachmentDialog();
}
switch (waitingFor) {
case SEND: {
performSendAfterChecks();
break;
}
case SAVE: {
performSaveAfterChecks();
break;
}
case NONE:
break;
}
}
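    // The contact picker result is routed back to the recipient presenter: the presenter's own
    // request code is tagged with REQUEST_MASK_RECIPIENT_PRESENTER here and the mask is checked
    // (and cleared) again in onActivityResult().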
public void showContactPicker(int requestCode) {
requestCode |= REQUEST_MASK_RECIPIENT_PRESENTER;
isInSubActivity = true;
startActivityForResult(mContacts.contactPickerIntent(), requestCode);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
isInSubActivity = false;
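        // Request codes are multiplexed with bit masks: results destined for the message builder
        // or the recipient presenter are recognized by their mask bit, which is cleared before the
        // remaining bits are forwarded; everything else falls through to this activity's own codes.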
if ((requestCode & REQUEST_MASK_MESSAGE_BUILDER) == REQUEST_MASK_MESSAGE_BUILDER) {
requestCode ^= REQUEST_MASK_MESSAGE_BUILDER;
if (currentMessageBuilder == null) {
Log.e(K9.LOG_TAG, "Got a message builder activity result for no message builder, " +
"this is an illegal state!");
return;
}
currentMessageBuilder.onActivityResult(this, requestCode, resultCode, data);
return;
}
if ((requestCode & REQUEST_MASK_RECIPIENT_PRESENTER) == REQUEST_MASK_RECIPIENT_PRESENTER) {
requestCode ^= REQUEST_MASK_RECIPIENT_PRESENTER;
recipientPresenter.onActivityResult(resultCode, requestCode, data);
return;
}
if (resultCode != RESULT_OK) {
return;
}
if (data == null) {
return;
}
switch (requestCode) {
case ACTIVITY_REQUEST_PICK_ATTACHMENT:
addAttachmentsFromResultIntent(data);
draftNeedsSaving = true;
break;
}
}
@TargetApi(Build.VERSION_CODES.JELLY_BEAN)
private void addAttachmentsFromResultIntent(Intent data) {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
ClipData clipData = data.getClipData();
if (clipData != null) {
for (int i = 0, end = clipData.getItemCount(); i < end; i++) {
Uri uri = clipData.getItemAt(i).getUri();
if (uri != null) {
addAttachment(uri);
}
}
return;
}
}
Uri uri = data.getData();
if (uri != null) {
addAttachment(uri);
}
}
private void onAccountChosen(Account account, Identity identity) {
if (!mAccount.equals(account)) {
if (K9.DEBUG) {
Log.v(K9.LOG_TAG, "Switching account from " + mAccount + " to " + account);
}
// on draft edit, make sure we don't keep previous message UID
if (mAction == Action.EDIT_DRAFT) {
mMessageReference = null;
}
// test whether there is something to save
if (draftNeedsSaving || (mDraftId != INVALID_DRAFT_ID)) {
final long previousDraftId = mDraftId;
final Account previousAccount = mAccount;
// make current message appear as new
mDraftId = INVALID_DRAFT_ID;
// actual account switch
mAccount = account;
if (K9.DEBUG) {
Log.v(K9.LOG_TAG, "Account switch, saving new draft in new account");
}
checkToSaveDraftImplicitly();
if (previousDraftId != INVALID_DRAFT_ID) {
if (K9.DEBUG) {
Log.v(K9.LOG_TAG, "Account switch, deleting draft from previous account: "
+ previousDraftId);
}
MessagingController.getInstance(getApplication()).deleteDraft(previousAccount,
previousDraftId);
}
} else {
mAccount = account;
}
// Show CC/BCC text input field when switching to an account that always wants them
// displayed.
// Please note that we're not hiding the fields if the user switches back to an account
// that doesn't have this setting checked.
recipientPresenter.onSwitchAccount(mAccount);
// not sure how to handle mFolder, mSourceMessage?
}
switchToIdentity(identity);
}
private void switchToIdentity(Identity identity) {
mIdentity = identity;
mIdentityChanged = true;
draftNeedsSaving = true;
updateFrom();
updateSignature();
updateMessageFormat();
recipientPresenter.onSwitchIdentity(identity);
}
private void updateFrom() {
mChooseIdentityButton.setText(mIdentity.getEmail());
}
private void updateSignature() {
if (mIdentity.getSignatureUse()) {
mSignatureView.setCharacters(mIdentity.getSignature());
mSignatureView.setVisibility(View.VISIBLE);
} else {
mSignatureView.setVisibility(View.GONE);
}
}
@Override
public void onClick(View view) {
switch (view.getId()) {
case R.id.attachment_delete:
/*
* The view is the delete button, and we have previously set the tag of
* the delete button to the view that owns it. We don't use parent because the
* view is very complex and could change in the future.
*/
mAttachments.removeView((View) view.getTag());
draftNeedsSaving = true;
break;
case R.id.quoted_text_show:
showOrHideQuotedText(QuotedTextMode.SHOW);
updateMessageFormat();
draftNeedsSaving = true;
break;
case R.id.quoted_text_delete:
showOrHideQuotedText(QuotedTextMode.HIDE);
updateMessageFormat();
draftNeedsSaving = true;
break;
case R.id.quoted_text_edit:
mForcePlainText = true;
if (mMessageReference != null) { // shouldn't happen...
// TODO - Should we check if mSourceMessageBody is already present and bypass the MessagingController call?
MessagingController.getInstance(getApplication()).addListener(mListener);
final Account account = Preferences.getPreferences(this).getAccount(mMessageReference.getAccountUuid());
final String folderName = mMessageReference.getFolderName();
final String sourceMessageUid = mMessageReference.getUid();
MessagingController.getInstance(getApplication()).loadMessageForView(account, folderName, sourceMessageUid, null);
}
break;
case R.id.identity:
showDialog(DIALOG_CHOOSE_IDENTITY);
break;
}
}
/**
* Show or hide the quoted text.
*
* @param mode
* The value to set {@link #mQuotedTextMode} to.
*/
private void showOrHideQuotedText(QuotedTextMode mode) {
mQuotedTextMode = mode;
switch (mode) {
case NONE:
case HIDE: {
if (mode == QuotedTextMode.NONE) {
mQuotedTextShow.setVisibility(View.GONE);
} else {
mQuotedTextShow.setVisibility(View.VISIBLE);
}
mQuotedTextBar.setVisibility(View.GONE);
mQuotedText.setVisibility(View.GONE);
mQuotedHTML.setVisibility(View.GONE);
mQuotedTextEdit.setVisibility(View.GONE);
break;
}
case SHOW: {
mQuotedTextShow.setVisibility(View.GONE);
mQuotedTextBar.setVisibility(View.VISIBLE);
if (mQuotedTextFormat == SimpleMessageFormat.HTML) {
mQuotedText.setVisibility(View.GONE);
mQuotedHTML.setVisibility(View.VISIBLE);
mQuotedTextEdit.setVisibility(View.VISIBLE);
} else {
mQuotedText.setVisibility(View.VISIBLE);
mQuotedHTML.setVisibility(View.GONE);
mQuotedTextEdit.setVisibility(View.GONE);
}
break;
}
}
}
private void askBeforeDiscard(){
if (K9.confirmDiscardMessage()) {
showDialog(DIALOG_CONFIRM_DISCARD);
} else {
onDiscard();
}
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId()) {
case R.id.send:
checkToSendMessage();
break;
case R.id.save:
checkToSaveDraftAndSave();
break;
case R.id.discard:
askBeforeDiscard();
break;
case R.id.add_from_contacts:
recipientPresenter.onMenuAddFromContacts();
break;
case R.id.add_attachment:
onAddAttachment();
break;
case R.id.read_receipt:
onReadReceipt();
break;
default:
return super.onOptionsItemSelected(item);
}
return true;
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
super.onCreateOptionsMenu(menu);
getMenuInflater().inflate(R.menu.message_compose_option, menu);
// Disable the 'Save' menu option if Drafts folder is set to -NONE-
if (!mAccount.hasDraftsFolder()) {
menu.findItem(R.id.save).setEnabled(false);
}
return true;
}
@Override
public boolean onPrepareOptionsMenu(Menu menu) {
super.onPrepareOptionsMenu(menu);
recipientPresenter.onPrepareOptionsMenu(menu);
return true;
}
@Override
public void onBackPressed() {
if (draftNeedsSaving) {
if (!mAccount.hasDraftsFolder()) {
showDialog(DIALOG_CONFIRM_DISCARD_ON_BACK);
} else {
showDialog(DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE);
}
} else {
// Check if editing an existing draft.
if (mDraftId == INVALID_DRAFT_ID) {
onDiscard();
} else {
super.onBackPressed();
}
}
}
private void showWaitingForAttachmentDialog() {
String title;
switch (mWaitingForAttachments) {
case SEND: {
title = getString(R.string.fetching_attachment_dialog_title_send);
break;
}
case SAVE: {
title = getString(R.string.fetching_attachment_dialog_title_save);
break;
}
default: {
return;
}
}
ProgressDialogFragment fragment = ProgressDialogFragment.newInstance(title,
getString(R.string.fetching_attachment_dialog_message));
fragment.show(getFragmentManager(), FRAGMENT_WAITING_FOR_ATTACHMENT);
}
public void onCancel(ProgressDialogFragment fragment) {
attachmentProgressDialogCancelled();
}
void attachmentProgressDialogCancelled() {
mWaitingForAttachments = WaitingAction.NONE;
}
private void dismissWaitingForAttachmentDialog() {
ProgressDialogFragment fragment = (ProgressDialogFragment)
getFragmentManager().findFragmentByTag(FRAGMENT_WAITING_FOR_ATTACHMENT);
if (fragment != null) {
fragment.dismiss();
}
}
@Override
public Dialog onCreateDialog(int id) {
switch (id) {
case DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE:
return new AlertDialog.Builder(this)
.setTitle(R.string.save_or_discard_draft_message_dlg_title)
.setMessage(R.string.save_or_discard_draft_message_instructions_fmt)
.setPositiveButton(R.string.save_draft_action, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int whichButton) {
dismissDialog(DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE);
checkToSaveDraftAndSave();
}
})
.setNegativeButton(R.string.discard_action, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int whichButton) {
dismissDialog(DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE);
onDiscard();
}
})
.create();
case DIALOG_CONFIRM_DISCARD_ON_BACK:
return new AlertDialog.Builder(this)
.setTitle(R.string.confirm_discard_draft_message_title)
.setMessage(R.string.confirm_discard_draft_message)
.setPositiveButton(R.string.cancel_action, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int whichButton) {
dismissDialog(DIALOG_CONFIRM_DISCARD_ON_BACK);
}
})
.setNegativeButton(R.string.discard_action, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int whichButton) {
dismissDialog(DIALOG_CONFIRM_DISCARD_ON_BACK);
Toast.makeText(MessageCompose.this,
getString(R.string.message_discarded_toast),
Toast.LENGTH_LONG).show();
onDiscard();
}
})
.create();
case DIALOG_CHOOSE_IDENTITY:
Context context = new ContextThemeWrapper(this,
(K9.getK9Theme() == K9.Theme.LIGHT) ?
R.style.Theme_K9_Dialog_Light :
R.style.Theme_K9_Dialog_Dark);
Builder builder = new AlertDialog.Builder(context);
builder.setTitle(R.string.send_as);
final IdentityAdapter adapter = new IdentityAdapter(context);
builder.setAdapter(adapter, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
IdentityContainer container = (IdentityContainer) adapter.getItem(which);
onAccountChosen(container.account, container.identity);
}
});
return builder.create();
case DIALOG_CONFIRM_DISCARD: {
return new AlertDialog.Builder(this)
.setTitle(R.string.dialog_confirm_delete_title)
.setMessage(R.string.dialog_confirm_delete_message)
.setPositiveButton(R.string.dialog_confirm_delete_confirm_button,
new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
onDiscard();
}
})
.setNegativeButton(R.string.dialog_confirm_delete_cancel_button,
new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
}
})
.create();
}
}
return super.onCreateDialog(id);
}
/**
* Add all attachments of an existing message as if they were added by hand.
*
* @param part
* The message part to check for being an attachment. This method will recurse if it's
* a multipart part.
* @param depth
* The recursion depth. Currently unused.
*
* @return {@code true} if all attachments were able to be attached, {@code false} otherwise.
*
* @throws MessagingException
* In case of an error
*/
private boolean loadAttachments(Part part, int depth) throws MessagingException {
if (part.getBody() instanceof Multipart) {
Multipart mp = (Multipart) part.getBody();
boolean ret = true;
for (int i = 0, count = mp.getCount(); i < count; i++) {
if (!loadAttachments(mp.getBodyPart(i), depth + 1)) {
ret = false;
}
}
return ret;
}
String contentType = MimeUtility.unfoldAndDecode(part.getContentType());
String name = MimeUtility.getHeaderParameter(contentType, "name");
if (name != null) {
if (part instanceof LocalBodyPart) {
LocalBodyPart localBodyPart = (LocalBodyPart) part;
String accountUuid = localBodyPart.getAccountUuid();
long attachmentId = localBodyPart.getId();
Uri uri = AttachmentProvider.getAttachmentUri(accountUuid, attachmentId);
addAttachment(uri);
return true;
}
return false;
}
return true;
}
/**
* Pull out the parts of the now loaded source message and apply them to the new message
* depending on the type of message being composed.
*
* @param message
* The source message used to populate the various text fields.
*/
private void processSourceMessage(LocalMessage message) {
try {
switch (mAction) {
case REPLY:
case REPLY_ALL: {
processMessageToReplyTo(message);
break;
}
case FORWARD: {
processMessageToForward(message);
break;
}
case EDIT_DRAFT: {
processDraftMessage(message);
break;
}
default: {
Log.w(K9.LOG_TAG, "processSourceMessage() called with unsupported action");
break;
}
}
} catch (MessagingException me) {
            /*
* Let the user continue composing their message even if we have a problem processing
* the source message. Log it as an error, though.
*/
Log.e(K9.LOG_TAG, "Error while processing source message: ", me);
} finally {
mSourceMessageProcessed = true;
draftNeedsSaving = false;
}
updateMessageFormat();
}
private void processMessageToReplyTo(Message message) throws MessagingException {
if (message.getSubject() != null) {
final String subject = PREFIX.matcher(message.getSubject()).replaceFirst("");
if (!subject.toLowerCase(Locale.US).startsWith("re:")) {
mSubjectView.setText("Re: " + subject);
} else {
mSubjectView.setText(subject);
}
} else {
mSubjectView.setText("");
}
/*
* If a reply-to was included with the message use that, otherwise use the from
* or sender address.
*/
recipientPresenter.initFromReplyToMessage(message);
if (message.getMessageId() != null && message.getMessageId().length() > 0) {
mInReplyTo = message.getMessageId();
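            // Thread the reply: the replied-to Message-ID becomes In-Reply-To, and References is
            // the original References header (if any) with that Message-ID appended.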
String[] refs = message.getReferences();
if (refs != null && refs.length > 0) {
mReferences = TextUtils.join("", refs) + " " + mInReplyTo;
} else {
mReferences = mInReplyTo;
}
} else {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "could not get Message-ID.");
}
}
// Quote the message and setup the UI.
populateUIWithQuotedMessage(mAccount.isDefaultQuotedTextShown());
if (mAction == Action.REPLY || mAction == Action.REPLY_ALL) {
Identity useIdentity = IdentityHelper.getRecipientIdentityFromMessage(mAccount, message);
Identity defaultIdentity = mAccount.getIdentity(0);
if (useIdentity != defaultIdentity) {
switchToIdentity(useIdentity);
}
}
}
private void processMessageToForward(Message message) throws MessagingException {
String subject = message.getSubject();
if (subject != null && !subject.toLowerCase(Locale.US).startsWith("fwd:")) {
mSubjectView.setText("Fwd: " + subject);
} else {
mSubjectView.setText(subject);
}
mQuoteStyle = QuoteStyle.HEADER;
// "Be Like Thunderbird" - on forwarded messages, set the message ID
// of the forwarded message in the references and the reply to. TB
// only includes ID of the message being forwarded in the reference,
// even if there are multiple references.
if (!TextUtils.isEmpty(message.getMessageId())) {
mInReplyTo = message.getMessageId();
mReferences = mInReplyTo;
} else {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "could not get Message-ID.");
}
}
// Quote the message and setup the UI.
populateUIWithQuotedMessage(true);
if (!mSourceMessageProcessed) {
if (message.isSet(Flag.X_DOWNLOADED_PARTIAL) || !loadAttachments(message, 0)) {
mHandler.sendEmptyMessage(MSG_SKIPPED_ATTACHMENTS);
}
}
}
private void processDraftMessage(LocalMessage message) throws MessagingException {
String showQuotedTextMode = "NONE";
mDraftId = MessagingController.getInstance(getApplication()).getId(message);
mSubjectView.setText(message.getSubject());
recipientPresenter.initFromDraftMessage(message);
// Read In-Reply-To header from draft
final String[] inReplyTo = message.getHeader("In-Reply-To");
if (inReplyTo.length >= 1) {
mInReplyTo = inReplyTo[0];
}
// Read References header from draft
final String[] references = message.getHeader("References");
if (references.length >= 1) {
mReferences = references[0];
}
if (!mSourceMessageProcessed) {
loadAttachments(message, 0);
}
// Decode the identity header when loading a draft.
// See buildIdentityHeader(TextBody) for a detailed description of the composition of this blob.
Map<IdentityField, String> k9identity = new HashMap<>();
String[] identityHeaders = message.getHeader(K9.IDENTITY_HEADER);
if (identityHeaders.length > 0 && identityHeaders[0] != null) {
k9identity = IdentityHeaderParser.parse(identityHeaders[0]);
}
Identity newIdentity = new Identity();
if (k9identity.containsKey(IdentityField.SIGNATURE)) {
newIdentity.setSignatureUse(true);
newIdentity.setSignature(k9identity.get(IdentityField.SIGNATURE));
mSignatureChanged = true;
} else {
newIdentity.setSignatureUse(message.getFolder().getSignatureUse());
newIdentity.setSignature(mIdentity.getSignature());
}
if (k9identity.containsKey(IdentityField.NAME)) {
newIdentity.setName(k9identity.get(IdentityField.NAME));
mIdentityChanged = true;
} else {
newIdentity.setName(mIdentity.getName());
}
if (k9identity.containsKey(IdentityField.EMAIL)) {
newIdentity.setEmail(k9identity.get(IdentityField.EMAIL));
mIdentityChanged = true;
} else {
newIdentity.setEmail(mIdentity.getEmail());
}
if (k9identity.containsKey(IdentityField.ORIGINAL_MESSAGE)) {
mMessageReference = null;
try {
String originalMessage = k9identity.get(IdentityField.ORIGINAL_MESSAGE);
MessageReference messageReference = new MessageReference(originalMessage);
// Check if this is a valid account in our database
Preferences prefs = Preferences.getPreferences(getApplicationContext());
Account account = prefs.getAccount(messageReference.getAccountUuid());
if (account != null) {
mMessageReference = messageReference;
}
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Could not decode message reference in identity.", e);
}
}
int cursorPosition = 0;
if (k9identity.containsKey(IdentityField.CURSOR_POSITION)) {
try {
cursorPosition = Integer.parseInt(k9identity.get(IdentityField.CURSOR_POSITION));
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Could not parse cursor position for MessageCompose; continuing.", e);
}
}
if (k9identity.containsKey(IdentityField.QUOTED_TEXT_MODE)) {
showQuotedTextMode = k9identity.get(IdentityField.QUOTED_TEXT_MODE);
}
mIdentity = newIdentity;
updateSignature();
updateFrom();
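        // The identity header records where the user's own text sits inside the saved draft body:
        // LENGTH/OFFSET for the chosen body format, PLAIN_LENGTH/PLAIN_OFFSET for the text/plain
        // alternative of an HTML draft, and FOOTER_OFFSET for where quoted content resumes below
        // the reply.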
Integer bodyLength = k9identity.get(IdentityField.LENGTH) != null
? Integer.valueOf(k9identity.get(IdentityField.LENGTH))
: 0;
Integer bodyOffset = k9identity.get(IdentityField.OFFSET) != null
? Integer.valueOf(k9identity.get(IdentityField.OFFSET))
: 0;
Integer bodyFooterOffset = k9identity.get(IdentityField.FOOTER_OFFSET) != null
? Integer.valueOf(k9identity.get(IdentityField.FOOTER_OFFSET))
: null;
Integer bodyPlainLength = k9identity.get(IdentityField.PLAIN_LENGTH) != null
? Integer.valueOf(k9identity.get(IdentityField.PLAIN_LENGTH))
: null;
Integer bodyPlainOffset = k9identity.get(IdentityField.PLAIN_OFFSET) != null
? Integer.valueOf(k9identity.get(IdentityField.PLAIN_OFFSET))
: null;
mQuoteStyle = k9identity.get(IdentityField.QUOTE_STYLE) != null
? QuoteStyle.valueOf(k9identity.get(IdentityField.QUOTE_STYLE))
: mAccount.getQuoteStyle();
QuotedTextMode quotedMode;
try {
quotedMode = QuotedTextMode.valueOf(showQuotedTextMode);
} catch (Exception e) {
quotedMode = QuotedTextMode.NONE;
}
// Always respect the user's current composition format preference, even if the
// draft was saved in a different format.
// TODO - The current implementation doesn't allow a user in HTML mode to edit a draft that wasn't saved with K9mail.
String messageFormatString = k9identity.get(IdentityField.MESSAGE_FORMAT);
MessageFormat messageFormat = null;
if (messageFormatString != null) {
try {
messageFormat = MessageFormat.valueOf(messageFormatString);
} catch (Exception e) { /* do nothing */ }
}
if (messageFormat == null) {
// This message probably wasn't created by us. The exception is legacy
// drafts created before the advent of HTML composition. In those cases,
// we'll display the whole message (including the quoted part) in the
            // composition window. If that's the case, try to convert it to text to
            // match the behavior in text mode.
mMessageContentView.setCharacters(getBodyTextFromMessage(message, SimpleMessageFormat.TEXT));
mForcePlainText = true;
showOrHideQuotedText(quotedMode);
return;
}
if (messageFormat == MessageFormat.HTML) {
Part part = MimeUtility.findFirstPartByMimeType(message, "text/html");
            if (part != null) { // A missing text/html part shouldn't happen if we were the one who saved it.
mQuotedTextFormat = SimpleMessageFormat.HTML;
String text = MessageExtractor.getTextFromPart(part);
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Loading message with offset " + bodyOffset + ", length " + bodyLength + ". Text length is " + text.length() + ".");
}
if (bodyOffset + bodyLength > text.length()) {
// The draft was edited outside of K-9 Mail?
Log.d(K9.LOG_TAG, "The identity field from the draft contains an invalid LENGTH/OFFSET");
bodyOffset = 0;
bodyLength = 0;
}
// Grab our reply text.
String bodyText = text.substring(bodyOffset, bodyOffset + bodyLength);
mMessageContentView.setCharacters(HtmlConverter.htmlToText(bodyText));
// Regenerate the quoted html without our user content in it.
StringBuilder quotedHTML = new StringBuilder();
quotedHTML.append(text.substring(0, bodyOffset)); // stuff before the reply
quotedHTML.append(text.substring(bodyOffset + bodyLength));
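                // Worked example (assumed values): if text is "<html>AB<i>reply</i>CD</html>" and
                // OFFSET/LENGTH cover exactly "<i>reply</i>", the editor gets "reply" (converted
                // to plain text above) and quotedHTML becomes "<html>ABCD</html>".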
if (quotedHTML.length() > 0) {
mQuotedHtmlContent = new InsertableHtmlContent();
mQuotedHtmlContent.setQuotedContent(quotedHTML);
// We don't know if bodyOffset refers to the header or to the footer
mQuotedHtmlContent.setHeaderInsertionPoint(bodyOffset);
if (bodyFooterOffset != null) {
mQuotedHtmlContent.setFooterInsertionPoint(bodyFooterOffset);
} else {
mQuotedHtmlContent.setFooterInsertionPoint(bodyOffset);
}
mQuotedHTML.setText(mQuotedHtmlContent.getQuotedContent());
}
}
if (bodyPlainOffset != null && bodyPlainLength != null) {
processSourceMessageText(message, bodyPlainOffset, bodyPlainLength, false);
}
} else if (messageFormat == MessageFormat.TEXT) {
mQuotedTextFormat = SimpleMessageFormat.TEXT;
processSourceMessageText(message, bodyOffset, bodyLength, true);
} else {
Log.e(K9.LOG_TAG, "Unhandled message format.");
}
// Set the cursor position if we have it.
try {
mMessageContentView.setSelection(cursorPosition);
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Could not set cursor position in MessageCompose; ignoring.", e);
}
showOrHideQuotedText(quotedMode);
}
/**
     * Split the text body of a previously saved draft into the user-composed part and the quoted
     * part using the stored offset/length values, and populate the corresponding views.
* @param message Source message
* @param bodyOffset Insertion point for reply.
* @param bodyLength Length of reply.
* @param viewMessageContent Update mMessageContentView or not.
* @throws MessagingException
*/
private void processSourceMessageText(Message message, Integer bodyOffset, Integer bodyLength,
boolean viewMessageContent) throws MessagingException {
Part textPart = MimeUtility.findFirstPartByMimeType(message, "text/plain");
if (textPart != null) {
String text = MessageExtractor.getTextFromPart(textPart);
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Loading message with offset " + bodyOffset + ", length " + bodyLength + ". Text length is " + text.length() + ".");
}
// If we had a body length (and it was valid), separate the composition from the quoted text
// and put them in their respective places in the UI.
if (bodyLength > 0) {
try {
String bodyText = text.substring(bodyOffset, bodyOffset + bodyLength);
// Regenerate the quoted text without our user content in it nor added newlines.
StringBuilder quotedText = new StringBuilder();
if (bodyOffset == 0 && text.substring(bodyLength, bodyLength + 4).equals("\r\n\r\n")) {
// top-posting: ignore two newlines at start of quote
quotedText.append(text.substring(bodyLength + 4));
} else if (bodyOffset + bodyLength == text.length() &&
text.substring(bodyOffset - 2, bodyOffset).equals("\r\n")) {
// bottom-posting: ignore newline at end of quote
quotedText.append(text.substring(0, bodyOffset - 2));
} else {
quotedText.append(text.substring(0, bodyOffset)); // stuff before the reply
quotedText.append(text.substring(bodyOffset + bodyLength));
}
if (viewMessageContent) {
mMessageContentView.setCharacters(bodyText);
}
mQuotedText.setCharacters(quotedText);
} catch (IndexOutOfBoundsException e) {
// Invalid bodyOffset or bodyLength. The draft was edited outside of K-9 Mail?
Log.d(K9.LOG_TAG, "The identity field from the draft contains an invalid bodyOffset/bodyLength");
if (viewMessageContent) {
mMessageContentView.setCharacters(text);
}
}
} else {
if (viewMessageContent) {
mMessageContentView.setCharacters(text);
}
}
}
}
// Regexes to check for signature.
private static final Pattern DASH_SIGNATURE_PLAIN = Pattern.compile("\r\n-- \r\n.*", Pattern.DOTALL);
private static final Pattern DASH_SIGNATURE_HTML = Pattern.compile("(<br( /)?>|\r?\n)-- <br( /)?>", Pattern.CASE_INSENSITIVE);
private static final Pattern BLOCKQUOTE_START = Pattern.compile("<blockquote", Pattern.CASE_INSENSITIVE);
private static final Pattern BLOCKQUOTE_END = Pattern.compile("</blockquote>", Pattern.CASE_INSENSITIVE);
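    // For reference, DASH_SIGNATURE_PLAIN matches the conventional "-- " delimiter on its own
    // line plus everything after it; e.g. in "reply text\r\n-- \r\nAlice" it matches starting at
    // the "\r\n-- \r\n", so the signature (and anything below it) can be stripped.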
/**
* Build and populate the UI with the quoted message.
*
* @param showQuotedText
* {@code true} if the quoted text should be shown, {@code false} otherwise.
*
* @throws MessagingException
*/
private void populateUIWithQuotedMessage(boolean showQuotedText) throws MessagingException {
MessageFormat origMessageFormat = mAccount.getMessageFormat();
if (mForcePlainText || origMessageFormat == MessageFormat.TEXT) {
// Use plain text for the quoted message
mQuotedTextFormat = SimpleMessageFormat.TEXT;
} else if (origMessageFormat == MessageFormat.AUTO) {
// Figure out which message format to use for the quoted text by looking if the source
// message contains a text/html part. If it does, we use that.
mQuotedTextFormat =
(MimeUtility.findFirstPartByMimeType(mSourceMessage, "text/html") == null) ?
SimpleMessageFormat.TEXT : SimpleMessageFormat.HTML;
} else {
mQuotedTextFormat = SimpleMessageFormat.HTML;
}
// TODO -- I am assuming that mSourceMessageBody will always be a text part. Is this a safe assumption?
// Handle the original message in the reply
// If we already have mSourceMessageBody, use that. It's pre-populated if we've got crypto going on.
String content = (mSourceMessageBody != null) ?
mSourceMessageBody :
getBodyTextFromMessage(mSourceMessage, mQuotedTextFormat);
if (mQuotedTextFormat == SimpleMessageFormat.HTML) {
// Strip signature.
// closing tags such as </div>, </span>, </table>, </pre> will be cut off.
if (mAccount.isStripSignature() &&
(mAction == Action.REPLY || mAction == Action.REPLY_ALL)) {
Matcher dashSignatureHtml = DASH_SIGNATURE_HTML.matcher(content);
if (dashSignatureHtml.find()) {
Matcher blockquoteStart = BLOCKQUOTE_START.matcher(content);
Matcher blockquoteEnd = BLOCKQUOTE_END.matcher(content);
List<Integer> start = new ArrayList<>();
List<Integer> end = new ArrayList<>();
while (blockquoteStart.find()) {
start.add(blockquoteStart.start());
}
while (blockquoteEnd.find()) {
end.add(blockquoteEnd.start());
}
if (start.size() != end.size()) {
Log.d(K9.LOG_TAG, "There are " + start.size() + " <blockquote> tags, but " +
end.size() + " </blockquote> tags. Refusing to strip.");
} else if (start.size() > 0) {
// Ignore quoted signatures in blockquotes.
dashSignatureHtml.region(0, start.get(0));
if (dashSignatureHtml.find()) {
// before first <blockquote>.
content = content.substring(0, dashSignatureHtml.start());
} else {
for (int i = 0; i < start.size() - 1; i++) {
// within blockquotes.
if (end.get(i) < start.get(i + 1)) {
dashSignatureHtml.region(end.get(i), start.get(i + 1));
if (dashSignatureHtml.find()) {
content = content.substring(0, dashSignatureHtml.start());
break;
}
}
}
if (end.get(end.size() - 1) < content.length()) {
// after last </blockquote>.
dashSignatureHtml.region(end.get(end.size() - 1), content.length());
if (dashSignatureHtml.find()) {
content = content.substring(0, dashSignatureHtml.start());
}
}
}
} else {
// No blockquotes found.
content = content.substring(0, dashSignatureHtml.start());
}
}
// Fix the stripping off of closing tags if a signature was stripped,
// as well as clean up the HTML of the quoted message.
HtmlCleaner cleaner = new HtmlCleaner();
CleanerProperties properties = cleaner.getProperties();
// see http://htmlcleaner.sourceforge.net/parameters.php for descriptions
properties.setNamespacesAware(false);
properties.setAdvancedXmlEscape(false);
properties.setOmitXmlDeclaration(true);
properties.setOmitDoctypeDeclaration(false);
properties.setTranslateSpecialEntities(false);
properties.setRecognizeUnicodeChars(false);
TagNode node = cleaner.clean(content);
SimpleHtmlSerializer htmlSerialized = new SimpleHtmlSerializer(properties);
content = htmlSerialized.getAsString(node, "UTF8");
}
// Add the HTML reply header to the top of the content.
mQuotedHtmlContent = quoteOriginalHtmlMessage(mSourceMessage, content, mQuoteStyle);
// Load the message with the reply header.
mQuotedHTML.setText(mQuotedHtmlContent.getQuotedContent());
// TODO: Also strip the signature from the text/plain part
mQuotedText.setCharacters(quoteOriginalTextMessage(mSourceMessage,
getBodyTextFromMessage(mSourceMessage, SimpleMessageFormat.TEXT), mQuoteStyle));
} else if (mQuotedTextFormat == SimpleMessageFormat.TEXT) {
if (mAccount.isStripSignature() &&
(mAction == Action.REPLY || mAction == Action.REPLY_ALL)) {
if (DASH_SIGNATURE_PLAIN.matcher(content).find()) {
content = DASH_SIGNATURE_PLAIN.matcher(content).replaceFirst("\r\n");
}
}
mQuotedText.setCharacters(quoteOriginalTextMessage(mSourceMessage, content, mQuoteStyle));
}
if (showQuotedText) {
showOrHideQuotedText(QuotedTextMode.SHOW);
} else {
showOrHideQuotedText(QuotedTextMode.HIDE);
}
}
/**
* Fetch the body text from a message in the desired message format. This method handles
* conversions between formats (html to text and vice versa) if necessary.
* @param message Message to analyze for body part.
* @param format Desired format.
* @return Text in desired format.
* @throws MessagingException
*/
private String getBodyTextFromMessage(final Message message, final SimpleMessageFormat format)
throws MessagingException {
Part part;
if (format == SimpleMessageFormat.HTML) {
// HTML takes precedence, then text.
part = MimeUtility.findFirstPartByMimeType(message, "text/html");
if (part != null) {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "getBodyTextFromMessage: HTML requested, HTML found.");
}
return MessageExtractor.getTextFromPart(part);
}
part = MimeUtility.findFirstPartByMimeType(message, "text/plain");
if (part != null) {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "getBodyTextFromMessage: HTML requested, text found.");
}
String text = MessageExtractor.getTextFromPart(part);
return HtmlConverter.textToHtml(text);
}
} else if (format == SimpleMessageFormat.TEXT) {
// Text takes precedence, then html.
part = MimeUtility.findFirstPartByMimeType(message, "text/plain");
if (part != null) {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "getBodyTextFromMessage: Text requested, text found.");
}
return MessageExtractor.getTextFromPart(part);
}
part = MimeUtility.findFirstPartByMimeType(message, "text/html");
if (part != null) {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "getBodyTextFromMessage: Text requested, HTML found.");
}
String text = MessageExtractor.getTextFromPart(part);
return HtmlConverter.htmlToText(text);
}
}
// If we had nothing interesting, return an empty string.
return "";
}
// Regular expressions to look for various HTML tags. This is no HTML::Parser, but hopefully it's good enough for
// our purposes.
private static final Pattern FIND_INSERTION_POINT_HTML = Pattern.compile("(?si:.*?(<html(?:>|\\s+[^>]*>)).*)");
private static final Pattern FIND_INSERTION_POINT_HEAD = Pattern.compile("(?si:.*?(<head(?:>|\\s+[^>]*>)).*)");
private static final Pattern FIND_INSERTION_POINT_BODY = Pattern.compile("(?si:.*?(<body(?:>|\\s+[^>]*>)).*)");
private static final Pattern FIND_INSERTION_POINT_HTML_END = Pattern.compile("(?si:.*(</html>).*?)");
private static final Pattern FIND_INSERTION_POINT_BODY_END = Pattern.compile("(?si:.*(</body>).*?)");
// The first group in a Matcher contains the first capture group. We capture the tag found in the above REs so that
// we can locate the *end* of that tag.
private static final int FIND_INSERTION_POINT_FIRST_GROUP = 1;
// HTML bits to insert as appropriate
// TODO is it safe to assume utf-8 here?
private static final String FIND_INSERTION_POINT_HTML_CONTENT = "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\">\r\n<html>";
private static final String FIND_INSERTION_POINT_HTML_END_CONTENT = "</html>";
private static final String FIND_INSERTION_POINT_HEAD_CONTENT = "<head><meta content=\"text/html; charset=utf-8\" http-equiv=\"Content-Type\"></head>";
// Index of the start of the beginning of a String.
private static final int FIND_INSERTION_POINT_START_OF_STRING = 0;
/**
* <p>Find the start and end positions of the HTML in the string. This should be the very top
     * and bottom of the displayable message. It returns an {@link InsertableHtmlContent}, which
* contains both the insertion points and potentially modified HTML. The modified HTML should be
* used in place of the HTML in the original message.</p>
*
* <p>This method loosely mimics the HTML forward/reply behavior of BlackBerry OS 4.5/BIS 2.5, which in turn mimics
* Outlook 2003 (as best I can tell).</p>
*
* @param content Content to examine for HTML insertion points
* @return Insertion points and HTML to use for insertion.
*/
private InsertableHtmlContent findInsertionPoints(final String content) {
InsertableHtmlContent insertable = new InsertableHtmlContent();
// If there is no content, don't bother doing any of the regex dancing.
if (content == null || content.equals("")) {
return insertable;
}
// Search for opening tags.
boolean hasHtmlTag = false;
boolean hasHeadTag = false;
boolean hasBodyTag = false;
// First see if we have an opening HTML tag. If we don't find one, we'll add one later.
Matcher htmlMatcher = FIND_INSERTION_POINT_HTML.matcher(content);
if (htmlMatcher.matches()) {
hasHtmlTag = true;
}
// Look for a HEAD tag. If we're missing a BODY tag, we'll use the close of the HEAD to start our content.
Matcher headMatcher = FIND_INSERTION_POINT_HEAD.matcher(content);
if (headMatcher.matches()) {
hasHeadTag = true;
}
// Look for a BODY tag. This is the ideal place for us to start our content.
Matcher bodyMatcher = FIND_INSERTION_POINT_BODY.matcher(content);
if (bodyMatcher.matches()) {
hasBodyTag = true;
}
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Open: hasHtmlTag:" + hasHtmlTag + " hasHeadTag:" + hasHeadTag + " hasBodyTag:" + hasBodyTag);
}
// Given our inspections, let's figure out where to start our content.
// This is the ideal case -- there's a BODY tag and we insert ourselves just after it.
if (hasBodyTag) {
insertable.setQuotedContent(new StringBuilder(content));
insertable.setHeaderInsertionPoint(bodyMatcher.end(FIND_INSERTION_POINT_FIRST_GROUP));
} else if (hasHeadTag) {
// Now search for a HEAD tag. We can insert after there.
// If BlackBerry sees a HEAD tag, it inserts right after that, so long as there is no BODY tag. It doesn't
// try to add BODY, either. Right or wrong, it seems to work fine.
insertable.setQuotedContent(new StringBuilder(content));
insertable.setHeaderInsertionPoint(headMatcher.end(FIND_INSERTION_POINT_FIRST_GROUP));
} else if (hasHtmlTag) {
// Lastly, check for an HTML tag.
// In this case, it will add a HEAD, but no BODY.
StringBuilder newContent = new StringBuilder(content);
// Insert the HEAD content just after the HTML tag.
newContent.insert(htmlMatcher.end(FIND_INSERTION_POINT_FIRST_GROUP), FIND_INSERTION_POINT_HEAD_CONTENT);
insertable.setQuotedContent(newContent);
// The new insertion point is the end of the HTML tag, plus the length of the HEAD content.
insertable.setHeaderInsertionPoint(htmlMatcher.end(FIND_INSERTION_POINT_FIRST_GROUP) + FIND_INSERTION_POINT_HEAD_CONTENT.length());
} else {
// If we have none of the above, we probably have a fragment of HTML. Yahoo! and Gmail both do this.
// Again, we add a HEAD, but not BODY.
StringBuilder newContent = new StringBuilder(content);
// Add the HTML and HEAD tags.
newContent.insert(FIND_INSERTION_POINT_START_OF_STRING, FIND_INSERTION_POINT_HEAD_CONTENT);
newContent.insert(FIND_INSERTION_POINT_START_OF_STRING, FIND_INSERTION_POINT_HTML_CONTENT);
// Append the </HTML> tag.
newContent.append(FIND_INSERTION_POINT_HTML_END_CONTENT);
insertable.setQuotedContent(newContent);
insertable.setHeaderInsertionPoint(FIND_INSERTION_POINT_HTML_CONTENT.length() + FIND_INSERTION_POINT_HEAD_CONTENT.length());
}
// Search for closing tags. We have to do this after we deal with opening tags since it may
// have modified the message.
boolean hasHtmlEndTag = false;
boolean hasBodyEndTag = false;
        // First see if we have a closing HTML tag.
Matcher htmlEndMatcher = FIND_INSERTION_POINT_HTML_END.matcher(insertable.getQuotedContent());
if (htmlEndMatcher.matches()) {
hasHtmlEndTag = true;
}
        // Look for a closing BODY tag. This is the ideal place for us to place our footer.
Matcher bodyEndMatcher = FIND_INSERTION_POINT_BODY_END.matcher(insertable.getQuotedContent());
if (bodyEndMatcher.matches()) {
hasBodyEndTag = true;
}
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Close: hasHtmlEndTag:" + hasHtmlEndTag + " hasBodyEndTag:" + hasBodyEndTag);
}
// Now figure out where to put our footer.
// This is the ideal case -- there's a BODY tag and we insert ourselves just before it.
if (hasBodyEndTag) {
insertable.setFooterInsertionPoint(bodyEndMatcher.start(FIND_INSERTION_POINT_FIRST_GROUP));
} else if (hasHtmlEndTag) {
// Check for an HTML tag. Add ourselves just before it.
insertable.setFooterInsertionPoint(htmlEndMatcher.start(FIND_INSERTION_POINT_FIRST_GROUP));
} else {
// If we have none of the above, we probably have a fragment of HTML.
// Set our footer insertion point as the end of the string.
insertable.setFooterInsertionPoint(insertable.getQuotedContent().length());
}
return insertable;
}
static class SendMessageTask extends AsyncTask<Void, Void, Void> {
Context context;
Account account;
Contacts contacts;
Message message;
Long draftId;
SendMessageTask(Context context, Account account, Contacts contacts, Message message, Long draftId) {
this.context = context;
this.account = account;
this.contacts = contacts;
this.message = message;
this.draftId = draftId;
}
@Override
protected Void doInBackground(Void... params) {
try {
contacts.markAsContacted(message.getRecipients(RecipientType.TO));
contacts.markAsContacted(message.getRecipients(RecipientType.CC));
contacts.markAsContacted(message.getRecipients(RecipientType.BCC));
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Failed to mark contact as contacted.", e);
}
MessagingController.getInstance(context).sendMessage(account, message, null);
if (draftId != null) {
// TODO set draft id to invalid in MessageCompose!
MessagingController.getInstance(context).deleteDraft(account, draftId);
}
return null;
}
}
class Listener extends MessagingListener {
@Override
public void loadMessageForViewStarted(Account account, String folder, String uid) {
if ((mMessageReference == null) || !mMessageReference.getUid().equals(uid)) {
return;
}
mHandler.sendEmptyMessage(MSG_PROGRESS_ON);
}
@Override
public void loadMessageForViewFinished(Account account, String folder, String uid, LocalMessage message) {
if ((mMessageReference == null) || !mMessageReference.getUid().equals(uid)) {
return;
}
mHandler.sendEmptyMessage(MSG_PROGRESS_OFF);
}
@Override
public void loadMessageForViewBodyAvailable(Account account, String folder, String uid, final Message message) {
if ((mMessageReference == null) || !mMessageReference.getUid().equals(uid)) {
return;
}
mSourceMessage = message;
runOnUiThread(new Runnable() {
@Override
public void run() {
loadLocalMessageForDisplay((LocalMessage) message);
}
});
}
@Override
public void loadMessageForViewFailed(Account account, String folder, String uid, Throwable t) {
if ((mMessageReference == null) || !mMessageReference.getUid().equals(uid)) {
return;
}
mHandler.sendEmptyMessage(MSG_PROGRESS_OFF);
// TODO show network error
}
@Override
public void messageUidChanged(Account account, String folder, String oldUid, String newUid) {
// Track UID changes of the source message
if (mMessageReference != null) {
final Account sourceAccount = Preferences.getPreferences(MessageCompose.this).getAccount(mMessageReference.getAccountUuid());
final String sourceFolder = mMessageReference.getFolderName();
final String sourceMessageUid = mMessageReference.getUid();
if (account.equals(sourceAccount) && (folder.equals(sourceFolder))) {
if (oldUid.equals(sourceMessageUid)) {
mMessageReference = mMessageReference.withModifiedUid(newUid);
}
if ((mSourceMessage != null) && (oldUid.equals(mSourceMessage.getUid()))) {
mSourceMessage.setUid(newUid);
}
}
}
}
}
private void loadLocalMessageForDisplay(LocalMessage message) {
// We check to see if we've previously processed the source message since this
// could be called when switching from HTML to text replies. If that happens, we
// only want to update the UI with quoted text (which picks the appropriate
// part).
if (mSourceProcessed) {
try {
populateUIWithQuotedMessage(true);
} catch (MessagingException e) {
// Hm, if we couldn't populate the UI after source reprocessing, let's just delete it?
showOrHideQuotedText(QuotedTextMode.HIDE);
Log.e(K9.LOG_TAG, "Could not re-process source message; deleting quoted text to be safe.", e);
}
updateMessageFormat();
} else {
processSourceMessage(message);
mSourceProcessed = true;
}
}
/**
* When we are launched with an intent that includes a mailto: URI, we can actually
* gather quite a few of our message fields from it.
*
* @param mailTo
     *         The MailTo object we use to initialize message fields
*/
private void initializeFromMailto(MailTo mailTo) {
recipientPresenter.initFromMailto(mailTo);
String subject = mailTo.getSubject();
if (subject != null && !subject.isEmpty()) {
mSubjectView.setText(subject);
}
String body = mailTo.getBody();
        if (body != null && !body.isEmpty()) {
mMessageContentView.setCharacters(body);
}
}
private static class SaveMessageTask extends AsyncTask<Void, Void, Void> {
Context context;
Account account;
Contacts contacts;
Handler handler;
Message message;
long draftId;
boolean saveRemotely;
SaveMessageTask(Context context, Account account, Contacts contacts,
Handler handler, Message message, long draftId, boolean saveRemotely) {
this.context = context;
this.account = account;
this.contacts = contacts;
this.handler = handler;
this.message = message;
this.draftId = draftId;
this.saveRemotely = saveRemotely;
}
@Override
protected Void doInBackground(Void... params) {
final MessagingController messagingController = MessagingController.getInstance(context);
Message draftMessage = messagingController.saveDraft(account, message, draftId, saveRemotely);
draftId = messagingController.getId(draftMessage);
android.os.Message msg = android.os.Message.obtain(handler, MSG_SAVED_DRAFT, draftId);
handler.sendMessage(msg);
return null;
}
}
private static final int REPLY_WRAP_LINE_WIDTH = 72;
private static final int QUOTE_BUFFER_LENGTH = 512; // amount of extra buffer to allocate to accommodate quoting headers or prefixes
/**
* Add quoting markup to a text message.
* @param originalMessage Metadata for message being quoted.
* @param messageBody Text of the message to be quoted.
* @param quoteStyle Style of quoting.
* @return Quoted text.
* @throws MessagingException
*/
private String quoteOriginalTextMessage(final Message originalMessage, final String messageBody, final QuoteStyle quoteStyle) throws MessagingException {
String body = messageBody == null ? "" : messageBody;
String sentDate = getSentDateText(originalMessage);
if (quoteStyle == QuoteStyle.PREFIX) {
StringBuilder quotedText = new StringBuilder(body.length() + QUOTE_BUFFER_LENGTH);
if (sentDate.length() != 0) {
quotedText.append(String.format(
getString(R.string.message_compose_reply_header_fmt_with_date) + "\r\n",
sentDate,
Address.toString(originalMessage.getFrom())));
} else {
quotedText.append(String.format(
getString(R.string.message_compose_reply_header_fmt) + "\r\n",
Address.toString(originalMessage.getFrom()))
);
}
final String prefix = mAccount.getQuotePrefix();
final String wrappedText = Utility.wrap(body, REPLY_WRAP_LINE_WIDTH - prefix.length());
// "$" and "\" in the quote prefix have to be escaped for
// the replaceAll() invocation.
final String escapedPrefix = prefix.replaceAll("(\\\\|\\$)", "\\\\$1");
quotedText.append(wrappedText.replaceAll("(?m)^", escapedPrefix));
return quotedText.toString().replaceAll("\\\r", "");
} else if (quoteStyle == QuoteStyle.HEADER) {
StringBuilder quotedText = new StringBuilder(body.length() + QUOTE_BUFFER_LENGTH);
quotedText.append("\r\n");
quotedText.append(getString(R.string.message_compose_quote_header_separator)).append("\r\n");
if (originalMessage.getFrom() != null && Address.toString(originalMessage.getFrom()).length() != 0) {
quotedText.append(getString(R.string.message_compose_quote_header_from)).append(" ").append(Address.toString(originalMessage.getFrom())).append("\r\n");
}
if (sentDate.length() != 0) {
quotedText.append(getString(R.string.message_compose_quote_header_send_date)).append(" ").append(sentDate).append("\r\n");
}
if (originalMessage.getRecipients(RecipientType.TO) != null && originalMessage.getRecipients(RecipientType.TO).length != 0) {
quotedText.append(getString(R.string.message_compose_quote_header_to)).append(" ").append(Address.toString(originalMessage.getRecipients(RecipientType.TO))).append("\r\n");
}
if (originalMessage.getRecipients(RecipientType.CC) != null && originalMessage.getRecipients(RecipientType.CC).length != 0) {
quotedText.append(getString(R.string.message_compose_quote_header_cc)).append(" ").append(Address.toString(originalMessage.getRecipients(RecipientType.CC))).append("\r\n");
}
if (originalMessage.getSubject() != null) {
quotedText.append(getString(R.string.message_compose_quote_header_subject)).append(" ").append(originalMessage.getSubject()).append("\r\n");
}
quotedText.append("\r\n");
quotedText.append(body);
return quotedText.toString();
} else {
// Shouldn't ever happen.
return body;
}
}
/**
     * Add quoting markup to an HTML message.
* @param originalMessage Metadata for message being quoted.
* @param messageBody Text of the message to be quoted.
* @param quoteStyle Style of quoting.
* @return Modified insertable message.
* @throws MessagingException
*/
private InsertableHtmlContent quoteOriginalHtmlMessage(final Message originalMessage, final String messageBody, final QuoteStyle quoteStyle) throws MessagingException {
InsertableHtmlContent insertable = findInsertionPoints(messageBody);
String sentDate = getSentDateText(originalMessage);
if (quoteStyle == QuoteStyle.PREFIX) {
StringBuilder header = new StringBuilder(QUOTE_BUFFER_LENGTH);
header.append("<div class=\"gmail_quote\">");
if (sentDate.length() != 0) {
header.append(HtmlConverter.textToHtmlFragment(String.format(
getString(R.string.message_compose_reply_header_fmt_with_date),
sentDate,
Address.toString(originalMessage.getFrom()))
));
} else {
header.append(HtmlConverter.textToHtmlFragment(String.format(
getString(R.string.message_compose_reply_header_fmt),
Address.toString(originalMessage.getFrom()))
));
}
header.append("<blockquote class=\"gmail_quote\" " +
"style=\"margin: 0pt 0pt 0pt 0.8ex; border-left: 1px solid rgb(204, 204, 204); padding-left: 1ex;\">\r\n");
String footer = "</blockquote></div>";
insertable.insertIntoQuotedHeader(header.toString());
insertable.insertIntoQuotedFooter(footer);
} else if (quoteStyle == QuoteStyle.HEADER) {
StringBuilder header = new StringBuilder();
header.append("<div style='font-size:10.0pt;font-family:\"Tahoma\",\"sans-serif\";padding:3.0pt 0in 0in 0in'>\r\n");
header.append("<hr style='border:none;border-top:solid #E1E1E1 1.0pt'>\r\n"); // This gets converted into a horizontal line during html to text conversion.
if (originalMessage.getFrom() != null && Address.toString(originalMessage.getFrom()).length() != 0) {
header.append("<b>").append(getString(R.string.message_compose_quote_header_from)).append("</b> ")
.append(HtmlConverter.textToHtmlFragment(Address.toString(originalMessage.getFrom())))
.append("<br>\r\n");
}
if (sentDate.length() != 0) {
header.append("<b>").append(getString(R.string.message_compose_quote_header_send_date)).append("</b> ")
.append(sentDate)
.append("<br>\r\n");
}
if (originalMessage.getRecipients(RecipientType.TO) != null && originalMessage.getRecipients(RecipientType.TO).length != 0) {
header.append("<b>").append(getString(R.string.message_compose_quote_header_to)).append("</b> ")
.append(HtmlConverter.textToHtmlFragment(Address.toString(originalMessage.getRecipients(RecipientType.TO))))
.append("<br>\r\n");
}
if (originalMessage.getRecipients(RecipientType.CC) != null && originalMessage.getRecipients(RecipientType.CC).length != 0) {
header.append("<b>").append(getString(R.string.message_compose_quote_header_cc)).append("</b> ")
.append(HtmlConverter.textToHtmlFragment(Address.toString(originalMessage.getRecipients(RecipientType.CC))))
.append("<br>\r\n");
}
if (originalMessage.getSubject() != null) {
header.append("<b>").append(getString(R.string.message_compose_quote_header_subject)).append("</b> ")
.append(HtmlConverter.textToHtmlFragment(originalMessage.getSubject()))
.append("<br>\r\n");
}
header.append("</div>\r\n");
header.append("<br>\r\n");
insertable.insertIntoQuotedHeader(header.toString());
}
return insertable;
}
/**
* Used to store an {@link Identity} instance together with the {@link Account} it belongs to.
*
* @see IdentityAdapter
*/
static class IdentityContainer {
public final Identity identity;
public final Account account;
IdentityContainer(Identity identity, Account account) {
this.identity = identity;
this.account = account;
}
}
/**
* Adapter for the <em>Choose identity</em> list view.
*
* <p>
* Account names are displayed as section headers, identities as selectable list items.
* </p>
*/
static class IdentityAdapter extends BaseAdapter {
private LayoutInflater mLayoutInflater;
private List<Object> mItems;
public IdentityAdapter(Context context) {
mLayoutInflater = (LayoutInflater) context.getSystemService(
Context.LAYOUT_INFLATER_SERVICE);
List<Object> items = new ArrayList<>();
Preferences prefs = Preferences.getPreferences(context.getApplicationContext());
Collection<Account> accounts = prefs.getAvailableAccounts();
for (Account account : accounts) {
items.add(account);
List<Identity> identities = account.getIdentities();
for (Identity identity : identities) {
items.add(new IdentityContainer(identity, account));
}
}
mItems = items;
}
@Override
public int getCount() {
return mItems.size();
}
@Override
public int getViewTypeCount() {
return 2;
}
@Override
public int getItemViewType(int position) {
return (mItems.get(position) instanceof Account) ? 0 : 1;
}
@Override
public boolean isEnabled(int position) {
return (mItems.get(position) instanceof IdentityContainer);
}
@Override
public Object getItem(int position) {
return mItems.get(position);
}
@Override
public long getItemId(int position) {
return position;
}
@Override
public boolean hasStableIds() {
return false;
}
@Override
public View getView(int position, View convertView, ViewGroup parent) {
Object item = mItems.get(position);
View view = null;
if (item instanceof Account) {
if (convertView != null && convertView.getTag() instanceof AccountHolder) {
view = convertView;
} else {
view = mLayoutInflater.inflate(R.layout.choose_account_item, parent, false);
AccountHolder holder = new AccountHolder();
holder.name = (TextView) view.findViewById(R.id.name);
holder.chip = view.findViewById(R.id.chip);
view.setTag(holder);
}
Account account = (Account) item;
AccountHolder holder = (AccountHolder) view.getTag();
holder.name.setText(account.getDescription());
holder.chip.setBackgroundColor(account.getChipColor());
} else if (item instanceof IdentityContainer) {
if (convertView != null && convertView.getTag() instanceof IdentityHolder) {
view = convertView;
} else {
view = mLayoutInflater.inflate(R.layout.choose_identity_item, parent, false);
IdentityHolder holder = new IdentityHolder();
holder.name = (TextView) view.findViewById(R.id.name);
holder.description = (TextView) view.findViewById(R.id.description);
view.setTag(holder);
}
IdentityContainer identityContainer = (IdentityContainer) item;
Identity identity = identityContainer.identity;
IdentityHolder holder = (IdentityHolder) view.getTag();
holder.name.setText(identity.getDescription());
holder.description.setText(getIdentityDescription(identity));
}
return view;
}
static class AccountHolder {
public TextView name;
public View chip;
}
static class IdentityHolder {
public TextView name;
public TextView description;
}
}
private static String getIdentityDescription(Identity identity) {
return String.format("%s <%s>", identity.getName(), identity.getEmail());
}
private void setMessageFormat(SimpleMessageFormat format) {
// This method will later be used to enable/disable the rich text editing mode.
mMessageFormat = format;
}
private void updateMessageFormat() {
MessageFormat origMessageFormat = mAccount.getMessageFormat();
SimpleMessageFormat messageFormat;
if (origMessageFormat == MessageFormat.TEXT) {
// The user wants to send text/plain messages. We don't override that choice under
// any circumstances.
messageFormat = SimpleMessageFormat.TEXT;
} else if (mForcePlainText && includeQuotedText()) {
// Right now we send a text/plain-only message when the quoted text was edited, no
// matter what the user selected for the message format.
messageFormat = SimpleMessageFormat.TEXT;
} else if (recipientPresenter.isForceTextMessageFormat()) {
// Right now we only support PGP inline which doesn't play well with HTML. So force
// plain text in those cases.
messageFormat = SimpleMessageFormat.TEXT;
} else if (origMessageFormat == MessageFormat.AUTO) {
if (mAction == Action.COMPOSE || mQuotedTextFormat == SimpleMessageFormat.TEXT ||
!includeQuotedText()) {
// If the message format is set to "AUTO" we use text/plain whenever possible. That
// is, when composing new messages and replying to or forwarding text/plain
// messages.
messageFormat = SimpleMessageFormat.TEXT;
} else {
messageFormat = SimpleMessageFormat.HTML;
}
} else {
// In all other cases use HTML
messageFormat = SimpleMessageFormat.HTML;
}
setMessageFormat(messageFormat);
}
private boolean includeQuotedText() {
return (mQuotedTextMode == QuotedTextMode.SHOW);
}
/**
* Extract the date from a message and convert it into a locale-specific
* date string suitable for use in a header for a quoted message.
*
* @return A string with the formatted date/time
*/
private String getSentDateText(Message message) {
try {
final int dateStyle = DateFormat.LONG;
final int timeStyle = DateFormat.LONG;
Date date = message.getSentDate();
Locale locale = getResources().getConfiguration().locale;
return DateFormat.getDateTimeInstance(dateStyle, timeStyle, locale)
.format(date);
} catch (Exception e) {
return "";
}
}
private boolean isCryptoProviderEnabled() {
return mOpenPgpProvider != null;
}
@Override
public void onMessageBuildSuccess(MimeMessage message, boolean isDraft) {
if (isDraft) {
draftNeedsSaving = false;
currentMessageBuilder = null;
if (mAction == Action.EDIT_DRAFT && mMessageReference != null) {
message.setUid(mMessageReference.getUid());
}
boolean saveRemotely = recipientPresenter.isAllowSavingDraftRemotely();
new SaveMessageTask(getApplicationContext(), mAccount, mContacts, mHandler,
message, mDraftId, saveRemotely).execute();
if (mFinishAfterDraftSaved) {
finish();
} else {
setProgressBarIndeterminateVisibility(false);
}
} else {
currentMessageBuilder = null;
new SendMessageTask(getApplicationContext(), mAccount, mContacts, message,
mDraftId != INVALID_DRAFT_ID ? mDraftId : null).execute();
finish();
}
}
@Override
public void onMessageBuildCancel() {
currentMessageBuilder = null;
setProgressBarIndeterminateVisibility(false);
}
@Override
public void onMessageBuildException(MessagingException me) {
Log.e(K9.LOG_TAG, "Error sending message", me);
Toast.makeText(MessageCompose.this,
getString(R.string.send_aborted, me.getLocalizedMessage()),
Toast.LENGTH_LONG).show();
currentMessageBuilder = null;
setProgressBarIndeterminateVisibility(false);
}
@Override
public void onMessageBuildReturnPendingIntent(PendingIntent pendingIntent, int requestCode) {
requestCode |= REQUEST_MASK_MESSAGE_BUILDER;
try {
startIntentSenderForResult(pendingIntent.getIntentSender(), requestCode, null, 0, 0, 0);
} catch (SendIntentException e) {
Log.e(K9.LOG_TAG, "Error starting pending intent from builder!", e);
}
}
public void launchUserInteractionPendingIntent(PendingIntent pendingIntent, int requestCode) {
requestCode |= REQUEST_MASK_RECIPIENT_PRESENTER;
try {
startIntentSenderForResult(pendingIntent.getIntentSender(), requestCode, null, 0, 0, 0);
} catch (SendIntentException e) {
e.printStackTrace();
}
}
}
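// Editorial sketch (not part of the original K-9 sources): a minimal, standalone
// illustration of the quote-prefix escaping used in quoteOriginalTextMessage() above.
// The class name is made up purely for demonstration.
class QuotePrefixEscapeDemo {
    public static void main(String[] args) {
        String prefix = "$> ";                      // a user-configured quote prefix
        String body = "first line\r\nsecond line";
        // A raw "$" in a replaceAll() replacement string would be treated as a
        // group reference and rejected; escaping "$" and "\" first keeps them literal.
        String escapedPrefix = prefix.replaceAll("(\\\\|\\$)", "\\\\$1");
        // Prefix every line of the body, as the PREFIX quote style does above.
        System.out.println(body.replaceAll("(?m)^", escapedPrefix));
    }
}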
| 1 | 13,427 | Typo, should read `firstTimeEmptySubject` | k9mail-k-9 | java |
@@ -22,7 +22,7 @@ package transport
import "golang.org/x/net/context"
-// Handler handles a single transport-level request.
+// Handler handles a single, transport-level, unary request.
type Handler interface {
// Handle the given request, writing the response to the given
// ResponseWriter. | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package transport
import "golang.org/x/net/context"
// Handler handles a single transport-level request.
type Handler interface {
// Handle the given request, writing the response to the given
// ResponseWriter.
//
// An error may be returned in case of failures. BadRequestError must be
// returned for invalid requests. All other failures are treated as
// UnexpectedErrors.
Handle(
ctx context.Context,
opts Options,
req *Request,
resw ResponseWriter,
) error
}
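// Editorial sketch (not part of the original yarpc sources): roughly what the
// interface above would look like under the "UnaryHandler" name discussed in the
// review note below. The method set is copied unchanged from Handler.
type UnaryHandler interface {
	// Handle the given unary request, writing the response to the given
	// ResponseWriter.
	Handle(
		ctx context.Context,
		opts Options,
		req *Request,
		resw ResponseWriter,
	) error
}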
| 1 | 10,858 | The more I see this the more I think the transport layer _should_ actually refer to this as `UnaryHandler`. | yarpc-yarpc-go | go |
@@ -267,6 +267,7 @@ module Beaker
logger.warn "#{e.class} error in scp'ing. Forcing the connection to close, which should " <<
"raise an error."
close
+ raise "#{e}\n#{e.backtrace}"
end
| 1 | require 'socket'
require 'timeout'
require 'net/scp'
module Beaker
class SshConnection
attr_accessor :logger
attr_accessor :ip, :vmhostname, :hostname, :ssh_connection_preference
SUPPORTED_CONNECTION_METHODS = [:ip, :vmhostname, :hostname]
RETRYABLE_EXCEPTIONS = [
SocketError,
Timeout::Error,
Errno::ETIMEDOUT,
Errno::EHOSTDOWN,
Errno::EHOSTUNREACH,
Errno::ECONNREFUSED,
Errno::ECONNRESET,
Errno::ENETUNREACH,
Net::SSH::Exception,
Net::SSH::Disconnect,
Net::SSH::AuthenticationFailed,
Net::SSH::ChannelRequestFailed,
Net::SSH::ChannelOpenFailed,
IOError,
]
def initialize name_hash, user = nil, ssh_opts = {}, options = {}
@vmhostname = name_hash[:vmhostname]
@ip = name_hash[:ip]
@hostname = name_hash[:hostname]
@user = user
@ssh_opts = ssh_opts
@logger = options[:logger]
@options = options
@ssh_connection_preference = @options[:ssh_connection_preference]
end
def self.connect name_hash, user = 'root', ssh_opts = {}, options = {}
connection = new name_hash, user, ssh_opts, options
connection.connect
connection
end
def connect_block host, user, ssh_opts
try = 1
last_wait = 2
wait = 3
begin
@logger.debug "Attempting ssh connection to #{host}, user: #{user}, opts: #{ssh_opts}"
Net::SSH.start(host, user, ssh_opts)
rescue *RETRYABLE_EXCEPTIONS => e
if try <= 11
@logger.warn "Try #{try} -- Host #{host} unreachable: #{e.class.name} - #{e.message}"
@logger.warn "Trying again in #{wait} seconds"
sleep wait
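          # Back off on a Fibonacci-like schedule (3, 5, 8, 13, ... seconds) between retries.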
(last_wait, wait) = wait, last_wait + wait
try += 1
retry
else
@logger.warn "Failed to connect to #{host}, after #{try} attempts"
nil
end
end
end
# connect to the host
def connect
# Try three ways to connect to host (vmhostname, ip, hostname)
# Try each method in turn until we succeed
methods = @ssh_connection_preference.dup
while (not @ssh) && (not methods.empty?) do
unless instance_variable_get("@#{methods[0]}").nil?
if SUPPORTED_CONNECTION_METHODS.include?(methods[0])
@ssh = connect_block(instance_variable_get("@#{methods[0].to_s}"), @user, @ssh_opts)
else
@logger.warn "Beaker does not support #{methods[0]} to SSH to host, trying next available method."
@ssh_connection_preference.delete(methods[0])
end
else
@logger.warn "Skipping #{methods[0]} method to ssh to host as its value is not set. Refer to https://github.com/puppetlabs/beaker/tree/master/docs/how_to/ssh_connection_preference.md to remove this warning"
end
methods.shift
end
unless @ssh
@logger.error "Failed to connect to #{@hostname}, attempted #{@ssh_connection_preference.join(', ')}"
raise RuntimeError, "Cannot connect to #{@hostname}"
end
@ssh
end
# closes this SshConnection
def close
begin
if @ssh and not @ssh.closed?
@ssh.close
else
@logger.warn("ssh.close: connection is already closed, no action needed")
end
rescue *RETRYABLE_EXCEPTIONS => e
@logger.warn "Attemped ssh.close, (caught #{e.class.name} - #{e.message})."
rescue => e
@logger.warn "ssh.close threw unexpected Error: #{e.class.name} - #{e.message}. Shutting down, and re-raising error below"
@ssh.shutdown!
raise e
ensure
@ssh = nil
@logger.debug("ssh connection to #{@hostname} has been terminated")
end
end
# Wait for the ssh connection to fail, returns true on connection failure and false otherwise
# @param [Hash{Symbol=>String}] options Options hash to control method conditionals
# @option options [Boolean] :pty Should we request a terminal when attempting
# to send a command over this connection?
# @option options [String] :stdin Any input to be sent along with the command
# @param [IO] stdout_callback An IO stream to send connection stdout to, defaults to nil
# @param [IO] stderr_callback An IO stream to send connection stderr to, defaults to nil
# @return [Boolean] true if connection failed, false otherwise
def wait_for_connection_failure options = {}, stdout_callback = nil, stderr_callback = stdout_callback
try = 1
last_wait = 2
wait = 3
command = 'echo echo' #can be run on all platforms (I'm looking at you, windows)
while try < 11
result = Result.new(@hostname, command)
begin
@logger.notify "Waiting for connection failure on #{@hostname} (attempt #{try}, try again in #{wait} second(s))"
@logger.debug("\n#{@hostname} #{Time.new.strftime('%H:%M:%S')}$ #{command}")
@ssh.open_channel do |channel|
request_terminal_for( channel, command ) if options[:pty]
channel.exec(command) do |terminal, success|
raise Net::SSH::Exception.new("FAILED: to execute command on a new channel on #{@hostname}") unless success
register_stdout_for terminal, result, stdout_callback
register_stderr_for terminal, result, stderr_callback
register_exit_code_for terminal, result
process_stdin_for( terminal, options[:stdin] ) if options[:stdin]
end
end
loop_tries = 0
#loop is actually loop_forever, so let it try 3 times and then quit instead of endless blocking
@ssh.loop { loop_tries += 1 ; loop_tries < 4 }
rescue *RETRYABLE_EXCEPTIONS => e
@logger.debug "Connection on #{@hostname} failed as expected (#{e.class.name} - #{e.message})"
close #this connection is bad, shut it down
return true
end
slept = 0
stdout_callback.call("sleep #{wait} second(s): ")
while slept < wait
sleep slept
stdout_callback.call('.')
slept += 1
end
stdout_callback.call("\n")
(last_wait, wait) = wait, last_wait + wait
try += 1
end
false
end
def try_to_execute command, options = {}, stdout_callback = nil,
stderr_callback = stdout_callback
result = Result.new(@hostname, command)
@ssh.open_channel do |channel|
request_terminal_for( channel, command ) if options[:pty]
channel.exec(command) do |terminal, success|
raise Net::SSH::Exception.new("FAILED: to execute command on a new channel on #{@hostname}") unless success
register_stdout_for terminal, result, stdout_callback
register_stderr_for terminal, result, stderr_callback
register_exit_code_for terminal, result
process_stdin_for( terminal, options[:stdin] ) if options[:stdin]
end
end
# Process SSH activity until we stop doing that - which is when our
# channel is finished with...
begin
@ssh.loop
rescue *RETRYABLE_EXCEPTIONS => e
# this would indicate that the connection failed post execution, since the channel exec was successful
@logger.warn "ssh channel on #{@hostname} received exception post command execution #{e.class.name} - #{e.message}"
close
end
result.finalize!
@logger.last_result = result
result
end
def execute command, options = {}, stdout_callback = nil,
stderr_callback = stdout_callback
# ensure that we have a current connection object
connect
try_to_execute(command, options, stdout_callback, stderr_callback)
end
def request_terminal_for channel, command
channel.request_pty do |ch, success|
if success
@logger.debug "Allocated a PTY on #{@hostname} for #{command.inspect}"
else
raise Net::SSH::Exception.new("FAILED: could not allocate a pty when requested on " +
"#{@hostname} for #{command.inspect}")
end
end
end
def register_stdout_for channel, output, callback = nil
channel.on_data do |ch, data|
callback[data] if callback
output.stdout << data
output.output << data
end
end
def register_stderr_for channel, output, callback = nil
channel.on_extended_data do |ch, type, data|
if type == 1
callback[data] if callback
output.stderr << data
output.output << data
end
end
end
def register_exit_code_for channel, output
channel.on_request("exit-status") do |ch, data|
output.exit_code = data.read_long
end
end
def process_stdin_for channel, stdin
# queue stdin data, force it to packets, and signal eof: this
# triggers action in many remote commands, notably including
# 'puppet apply'. It must be sent at some point before the rest
# of the action.
channel.send_data stdin.to_s
channel.process
channel.eof!
end
def scp_to source, target, options = {}
local_opts = options.dup
if local_opts[:recursive].nil?
local_opts[:recursive] = File.directory?(source)
end
local_opts[:chunk_size] ||= 16384
result = Result.new(@hostname, [source, target])
result.stdout = "\n"
begin
@ssh.scp.upload! source, target, local_opts do |ch, name, sent, total|
result.stdout << "\tcopying %s: %10d/%d\n" % [name, sent, total]
end
rescue => e
logger.warn "#{e.class} error in scp'ing. Forcing the connection to close, which should " <<
"raise an error."
close
end
# Setting these values allows reporting via result.log(test_name)
result.stdout << " SCP'ed file #{source} to #{@hostname}:#{target}"
# Net::Scp always returns 0, so just set the return code to 0.
result.exit_code = 0
result.finalize!
return result
end
def scp_from source, target, options = {}
local_opts = options.dup
if local_opts[:recursive].nil?
local_opts[:recursive] = true
end
local_opts[:chunk_size] ||= 16384
result = Result.new(@hostname, [source, target])
result.stdout = "\n"
begin
@ssh.scp.download! source, target, local_opts do |ch, name, sent, total|
result.stdout << "\tcopying %s: %10d/%d\n" % [name, sent, total]
end
rescue => e
logger.warn "#{e.class} error in scp'ing. Forcing the connection to close, which should " <<
"raise an error."
close
end
# Setting these values allows reporting via result.log(test_name)
result.stdout << " SCP'ed file #{@hostname}:#{source} to #{target}"
# Net::Scp always returns 0, so just set the return code to 0.
result.exit_code = 0
result.finalize!
result
end
end
end
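# Editorial sketch (not part of the Beaker sources): a minimal illustration of the
# review question below -- without the re-raise added in the patch above, the rescue
# block logs a warning, closes the connection, and execution simply continues, so the
# scp failure never surfaces to the caller. Method name is made up for demonstration.
def scp_like_copy(reraise)
  begin
    raise IOError, 'scp failed'     # stand-in for @ssh.scp.upload! raising
  rescue => e
    warn "#{e.class} error in scp'ing. Forcing the connection to close."
    # the connection would be closed here; close by itself does not raise
    raise "#{e}\n#{e.backtrace}" if reraise
  end
  'result built as if the copy succeeded'
end
puts scp_like_copy(false)           # error swallowed; a "successful" result is returned
begin
  scp_like_copy(true)               # with the patch's re-raise, the failure propagates
rescue RuntimeError => e
  puts "re-raised: #{e.message.lines.first}"
end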
| 1 | 16,250 | The `warn` message here seems to indicate that the forced closure of the SSH connection should raise an error; is that getting swallowed up somewhere and not raising? | voxpupuli-beaker | rb |
@@ -616,6 +616,8 @@ class FilenamePrompt(_BasePrompt):
self._init_texts(question)
self._init_key_label()
+ self._expands_user = False
+
self._lineedit = LineEdit(self)
if question.default:
self._lineedit.setText(question.default) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2021 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Showing prompts above the statusbar."""
import os.path
import html
import collections
import functools
import dataclasses
from typing import Deque, MutableSequence, Optional, cast
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QTimer, QDir, QModelIndex,
QItemSelectionModel, QObject, QEventLoop)
from PyQt5.QtWidgets import (QWidget, QGridLayout, QVBoxLayout, QLineEdit,
QLabel, QFileSystemModel, QTreeView, QSizePolicy,
QSpacerItem)
from qutebrowser.browser import downloads
from qutebrowser.config import config, configtypes, configexc, stylesheet
from qutebrowser.utils import usertypes, log, utils, qtutils, objreg, message
from qutebrowser.keyinput import modeman
from qutebrowser.api import cmdutils
from qutebrowser.utils import urlmatch
prompt_queue = cast('PromptQueue', None)
@dataclasses.dataclass
class AuthInfo:
"""Authentication info returned by a prompt."""
user: str
password: str
class Error(Exception):
"""Base class for errors in this module."""
class UnsupportedOperationError(Error):
"""Raised when the prompt class doesn't support the requested operation."""
class PromptQueue(QObject):
"""Global manager and queue for upcoming prompts.
The way in which multiple questions are handled deserves some explanation.
If a question is blocking, we *need* to ask it immediately, and can't wait
for previous questions to finish. We could theoretically ask a blocking
question inside of another blocking one, so in ask_question we simply save
the current question on the stack, let the user answer the *most recent*
question, and then restore the previous state.
With a non-blocking question, things are a bit easier. We simply add it to
self._queue if we're still busy handling another question, since it can be
answered at any time.
In either case, as soon as we finished handling a question, we call
_pop_later() which schedules a _pop to ask the next question in _queue. We
schedule it rather than doing it immediately because then the order of how
things happen is clear, e.g. on_mode_left can't happen after we already set
up the *new* question.
Attributes:
_shutting_down: Whether we're currently shutting down the prompter and
should ignore future questions to avoid segfaults.
_loops: A list of local EventLoops to spin in when blocking.
_queue: A deque of waiting questions.
_question: The current Question object if we're handling a question.
Signals:
show_prompts: Emitted with a Question object when prompts should be
shown.
"""
show_prompts = pyqtSignal(usertypes.Question)
def __init__(self, parent=None):
super().__init__(parent)
self._question = None
self._shutting_down = False
self._loops: MutableSequence[qtutils.EventLoop] = []
self._queue: Deque[usertypes.Question] = collections.deque()
message.global_bridge.mode_left.connect(self._on_mode_left)
def __repr__(self):
return utils.get_repr(self, loops=len(self._loops),
queue=len(self._queue), question=self._question)
def _pop_later(self):
"""Helper to call self._pop as soon as everything else is done."""
QTimer.singleShot(0, self._pop)
def _pop(self):
"""Pop a question from the queue and ask it, if there are any."""
log.prompt.debug("Popping from queue {}".format(self._queue))
if self._queue:
question = self._queue.popleft()
if not question.is_aborted:
# the question could already be aborted, e.g. by a cancelled
# download. See
# https://github.com/qutebrowser/qutebrowser/issues/415 and
# https://github.com/qutebrowser/qutebrowser/issues/1249
self.ask_question(question, blocking=False)
def shutdown(self):
"""Cancel all blocking questions.
Quits and removes all running event loops.
Return:
True if loops needed to be aborted,
False otherwise.
"""
log.prompt.debug("Shutting down with loops {}".format(self._loops))
self._shutting_down = True
if self._loops:
for loop in self._loops:
loop.quit()
loop.deleteLater()
return True
else:
return False
@pyqtSlot(usertypes.Question, bool)
def ask_question(self, question, blocking):
"""Display a prompt for a given question.
Args:
question: The Question object to ask.
blocking: If True, this function blocks and returns the result.
Return:
The answer of the user when blocking=True.
None if blocking=False.
"""
log.prompt.debug("Asking question {}, blocking {}, loops {}, queue "
"{}".format(question, blocking, self._loops,
self._queue))
if self._shutting_down:
# If we're currently shutting down we have to ignore this question
# to avoid segfaults - see
# https://github.com/qutebrowser/qutebrowser/issues/95
log.prompt.debug("Ignoring question because we're shutting down.")
question.abort()
return None
if self._question is not None and not blocking:
# We got an async question, but we're already busy with one, so we
# just queue it up for later.
log.prompt.debug("Adding {} to queue.".format(question))
self._queue.append(question)
return None
if blocking:
# If we're blocking we save the old question on the stack, so we
# can restore it after exec, if exec gets called multiple times.
log.prompt.debug("New question is blocking, saving {}".format(
self._question))
old_question = self._question
if old_question is not None:
old_question.interrupted = True
self._question = question
self.show_prompts.emit(question)
if blocking:
loop = qtutils.EventLoop()
self._loops.append(loop)
loop.destroyed.connect(lambda: self._loops.remove(loop))
question.completed.connect(loop.quit)
question.completed.connect(loop.deleteLater)
log.prompt.debug("Starting loop.exec() for {}".format(question))
flags = cast(QEventLoop.ProcessEventsFlags,
QEventLoop.ExcludeSocketNotifiers)
loop.exec(flags)
log.prompt.debug("Ending loop.exec() for {}".format(question))
log.prompt.debug("Restoring old question {}".format(old_question))
self._question = old_question
self.show_prompts.emit(old_question)
if old_question is None:
# Nothing left to restore, so we can go back to popping async
# questions.
if self._queue:
self._pop_later()
return question.answer
else:
question.completed.connect(self._pop_later)
return None
@pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self, mode):
"""Abort question when a prompt mode was left."""
if mode not in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]:
return
if self._question is None:
return
log.prompt.debug("Left mode {}, hiding {}".format(
mode, self._question))
self.show_prompts.emit(None)
if self._question.answer is None and not self._question.is_aborted:
log.prompt.debug("Cancelling {} because {} was left".format(
self._question, mode))
self._question.cancel()
self._question = None
class PromptContainer(QWidget):
"""Container for prompts to be shown above the statusbar.
This is a per-window object, however each window shows the same prompt.
Attributes:
_layout: The layout used to show prompts in.
_win_id: The window ID this object is associated with.
Signals:
update_geometry: Emitted when the geometry should be updated.
"""
STYLESHEET = """
QWidget#PromptContainer {
{% if conf.statusbar.position == 'top' %}
border-bottom-left-radius: {{ conf.prompt.radius }}px;
border-bottom-right-radius: {{ conf.prompt.radius }}px;
{% else %}
border-top-left-radius: {{ conf.prompt.radius }}px;
border-top-right-radius: {{ conf.prompt.radius }}px;
{% endif %}
}
QWidget {
font: {{ conf.fonts.prompts }};
color: {{ conf.colors.prompts.fg }};
background-color: {{ conf.colors.prompts.bg }};
}
QLineEdit {
border: {{ conf.colors.prompts.border }};
}
QTreeView {
selection-color: {{ conf.colors.prompts.selected.fg }};
selection-background-color: {{ conf.colors.prompts.selected.bg }};
border: {{ conf.colors.prompts.border }};
}
QTreeView::branch {
background-color: {{ conf.colors.prompts.bg }};
}
QTreeView::item:selected, QTreeView::item:selected:hover,
QTreeView::branch:selected {
color: {{ conf.colors.prompts.selected.fg }};
background-color: {{ conf.colors.prompts.selected.bg }};
}
"""
update_geometry = pyqtSignal()
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._layout = QVBoxLayout(self)
self._layout.setContentsMargins(10, 10, 10, 10)
self._win_id = win_id
self._prompt: Optional[_BasePrompt] = None
self.setObjectName('PromptContainer')
self.setAttribute(Qt.WA_StyledBackground, True)
stylesheet.set_register(self)
message.global_bridge.prompt_done.connect(self._on_prompt_done)
prompt_queue.show_prompts.connect(self._on_show_prompts)
message.global_bridge.mode_left.connect(self._on_global_mode_left)
def __repr__(self):
return utils.get_repr(self, win_id=self._win_id)
@pyqtSlot(usertypes.Question)
def _on_show_prompts(self, question):
"""Show a prompt for the given question.
Args:
question: A Question object or None.
"""
item = self._layout.takeAt(0)
if item is not None:
widget = item.widget()
log.prompt.debug("Deleting old prompt {}".format(widget))
widget.hide()
widget.deleteLater()
if question is None:
log.prompt.debug("No prompts left, hiding prompt container.")
self._prompt = None
self.hide()
return
classes = {
usertypes.PromptMode.yesno: YesNoPrompt,
usertypes.PromptMode.text: LineEditPrompt,
usertypes.PromptMode.user_pwd: AuthenticationPrompt,
usertypes.PromptMode.download: DownloadFilenamePrompt,
usertypes.PromptMode.alert: AlertPrompt,
}
klass = classes[question.mode]
prompt = klass(question)
log.prompt.debug("Displaying prompt {}".format(prompt))
self._prompt = prompt
# If this question was interrupted, we already connected the signal
if not question.interrupted:
question.aborted.connect(
functools.partial(self._on_aborted, prompt.KEY_MODE))
modeman.enter(self._win_id, prompt.KEY_MODE, 'question asked')
self.setSizePolicy(prompt.sizePolicy())
self._layout.addWidget(prompt)
prompt.show()
self.show()
prompt.setFocus()
self.update_geometry.emit()
@pyqtSlot()
def _on_aborted(self, key_mode):
"""Leave KEY_MODE whenever a prompt is aborted."""
try:
modeman.leave(self._win_id, key_mode, 'aborted', maybe=True)
except objreg.RegistryUnavailableError:
# window was deleted: ignore
pass
@pyqtSlot(usertypes.KeyMode)
def _on_prompt_done(self, key_mode):
"""Leave the prompt mode in this window if a question was answered."""
modeman.leave(self._win_id, key_mode, ':prompt-accept', maybe=True)
@pyqtSlot(usertypes.KeyMode)
def _on_global_mode_left(self, mode):
"""Leave prompt/yesno mode in this window if it was left elsewhere.
This ensures no matter where a prompt was answered, we leave the prompt
mode and dispose of the prompt object in every window.
"""
if mode not in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]:
return
modeman.leave(self._win_id, mode, 'left in other window', maybe=True)
item = self._layout.takeAt(0)
if item is not None:
widget = item.widget()
log.prompt.debug("Deleting prompt {}".format(widget))
widget.hide()
widget.deleteLater()
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno])
def prompt_accept(self, value=None, *, save=False):
"""Accept the current prompt.
//
This executes the next action depending on the question mode, e.g. asks
for the password or leaves the mode.
Args:
value: If given, uses this value instead of the entered one.
For boolean prompts, "yes"/"no" are accepted as value.
save: Save the value to the config.
"""
assert self._prompt is not None
question = self._prompt.question
try:
done = self._prompt.accept(value, save=save)
except Error as e:
raise cmdutils.CommandError(str(e))
if done:
message.global_bridge.prompt_done.emit(self._prompt.KEY_MODE)
question.done()
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt], maxsplit=0)
def prompt_open_download(self, cmdline: str = None,
pdfjs: bool = False) -> None:
"""Immediately open a download.
If no specific command is given, this will use the system's default
application to open the file.
Args:
cmdline: The command which should be used to open the file. A `{}`
is expanded to the temporary file name. If no `{}` is
present, the filename is automatically appended to the
cmdline.
pdfjs: Open the download via PDF.js.
"""
assert self._prompt is not None
try:
self._prompt.download_open(cmdline, pdfjs=pdfjs)
except UnsupportedOperationError:
pass
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt])
@cmdutils.argument('which', choices=['next', 'prev'])
def prompt_item_focus(self, which):
"""Shift the focus of the prompt file completion menu to another item.
Args:
which: 'next', 'prev'
"""
assert self._prompt is not None
try:
self._prompt.item_focus(which)
except UnsupportedOperationError:
pass
@cmdutils.register(
instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt, usertypes.KeyMode.yesno])
def prompt_yank(self, sel=False):
"""Yank URL to clipboard or primary selection.
Args:
sel: Use the primary selection instead of the clipboard.
"""
assert self._prompt is not None
question = self._prompt.question
if question.url is None:
message.error('No URL found.')
return
if sel and utils.supports_selection():
target = 'primary selection'
else:
sel = False
target = 'clipboard'
utils.set_clipboard(question.url, sel)
message.info("Yanked to {}: {}".format(target, question.url))
class LineEdit(QLineEdit):
"""A line edit used in prompts."""
def __init__(self, parent=None):
super().__init__(parent)
self.setStyleSheet("""
QLineEdit {
background-color: transparent;
}
""")
self.setAttribute(Qt.WA_MacShowFocusRect, False)
def keyPressEvent(self, e):
"""Override keyPressEvent to paste primary selection on Shift + Ins."""
if e.key() == Qt.Key_Insert and e.modifiers() == Qt.ShiftModifier:
try:
text = utils.get_clipboard(selection=True, fallback=True)
except utils.ClipboardError: # pragma: no cover
e.ignore()
else:
e.accept()
self.insert(text)
return
super().keyPressEvent(e)
def __repr__(self):
return utils.get_repr(self)
class _BasePrompt(QWidget):
"""Base class for all prompts."""
KEY_MODE = usertypes.KeyMode.prompt
def __init__(self, question, parent=None):
super().__init__(parent)
self.question = question
self._vbox = QVBoxLayout(self)
self._vbox.setSpacing(15)
self._key_grid = None
def __repr__(self):
return utils.get_repr(self, question=self.question, constructor=True)
def _init_texts(self, question):
assert question.title is not None, question
title = '<font size="4"><b>{}</b></font>'.format(
html.escape(question.title))
title_label = QLabel(title, self)
self._vbox.addWidget(title_label)
if question.text is not None:
# Not doing any HTML escaping here as the text can be formatted
text_label = QLabel(question.text)
text_label.setWordWrap(True)
text_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self._vbox.addWidget(text_label)
def _init_key_label(self):
assert self._key_grid is None, self._key_grid
self._key_grid = QGridLayout()
self._key_grid.setVerticalSpacing(0)
all_bindings = config.key_instance.get_reverse_bindings_for(
self.KEY_MODE.name)
labels = []
for cmd, text in self._allowed_commands():
bindings = all_bindings.get(cmd, [])
if bindings:
binding = None
preferred = ['<enter>', '<escape>']
for pref in preferred:
if pref in bindings:
binding = pref
if binding is None:
binding = bindings[0]
key_label = QLabel('<b>{}</b>'.format(html.escape(binding)))
text_label = QLabel(text)
labels.append((key_label, text_label))
for i, (key_label, text_label) in enumerate(labels):
self._key_grid.addWidget(key_label, i, 0)
self._key_grid.addWidget(text_label, i, 1)
spacer = QSpacerItem(0, 0, QSizePolicy.Expanding)
self._key_grid.addItem(spacer, 0, 2)
self._vbox.addLayout(self._key_grid)
def _check_save_support(self, save):
if save:
raise UnsupportedOperationError("Saving answers is only possible "
"with yes/no prompts.")
def accept(self, value=None, save=False):
raise NotImplementedError
def download_open(self, cmdline, pdfjs):
"""Open the download directly if this is a download prompt."""
utils.unused(cmdline)
utils.unused(pdfjs)
raise UnsupportedOperationError
def item_focus(self, _which):
"""Switch to next file item if this is a filename prompt.."""
raise UnsupportedOperationError
def _allowed_commands(self):
"""Get the commands we could run as response to this message."""
raise NotImplementedError
class LineEditPrompt(_BasePrompt):
"""A prompt for a single text value."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._lineedit = LineEdit(self)
self._init_texts(question)
self._vbox.addWidget(self._lineedit)
if question.default:
self._lineedit.setText(question.default)
self._lineedit.selectAll()
self.setFocusProxy(self._lineedit)
self._init_key_label()
def accept(self, value=None, save=False):
self._check_save_support(save)
text = value if value is not None else self._lineedit.text()
self.question.answer = text
return True
def _allowed_commands(self):
return [('prompt-accept', 'Accept'), ('mode-leave', 'Abort')]
class FilenamePrompt(_BasePrompt):
"""A prompt for a filename."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
self._lineedit = LineEdit(self)
if question.default:
self._lineedit.setText(question.default)
self._lineedit.textEdited.connect(self._set_fileview_root)
self._vbox.addWidget(self._lineedit)
self.setFocusProxy(self._lineedit)
self._init_fileview()
self._set_fileview_root(question.default)
if config.val.prompt.filebrowser:
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
self._to_complete = ''
self._root_index = QModelIndex()
def _directories_hide_show_model(self):
"""Get rid of non-matching directories."""
num_rows = self._file_model.rowCount(self._root_index)
for row in range(num_rows):
index = self._file_model.index(row, 0, self._root_index)
filename = index.data()
hidden = self._to_complete not in filename and filename != '..'
self._file_view.setRowHidden(index.row(), index.parent(), hidden)
@pyqtSlot(str)
def _set_fileview_root(self, path, *, tabbed=False):
"""Set the root path for the file display."""
separators = os.sep
if os.altsep is not None:
separators += os.altsep
dirname = os.path.dirname(path)
basename = os.path.basename(path)
if not tabbed:
self._to_complete = ''
try:
if not path:
pass
elif path in separators and os.path.isdir(path):
# Input "/" -> don't strip anything
pass
elif path[-1] in separators and os.path.isdir(path):
# Input like /foo/bar/ -> show /foo/bar/ contents
path = path.rstrip(separators)
elif os.path.isdir(dirname) and not tabbed:
# Input like /foo/ba -> show /foo contents
path = dirname
self._to_complete = basename
else:
return
except OSError:
log.prompt.exception("Failed to get directory information")
return
self._root_index = self._file_model.setRootPath(path)
self._file_view.setRootIndex(self._root_index)
self._directories_hide_show_model()
@pyqtSlot(QModelIndex)
def _insert_path(self, index, *, clicked=True):
"""Handle an element selection.
Args:
index: The QModelIndex of the selected element.
clicked: Whether the element was clicked.
"""
if index == QModelIndex():
path = os.path.join(self._file_model.rootPath(), self._to_complete)
else:
path = os.path.normpath(self._file_model.filePath(index))
if clicked:
path += os.sep
else:
# On Windows, when we have C:\foo and tab over .., we get C:\
path = path.rstrip(os.sep)
log.prompt.debug('Inserting path {}'.format(path))
self._lineedit.setText(path)
self._lineedit.setFocus()
self._set_fileview_root(path, tabbed=True)
if clicked:
# Avoid having a ..-subtree highlighted
self._file_view.setCurrentIndex(QModelIndex())
def _init_fileview(self):
self._file_view = QTreeView(self)
self._file_model = QFileSystemModel(self)
self._file_view.setModel(self._file_model)
self._file_view.clicked.connect(self._insert_path)
if config.val.prompt.filebrowser:
self._vbox.addWidget(self._file_view)
else:
self._file_view.hide()
# Only show name
self._file_view.setHeaderHidden(True)
for col in range(1, 4):
self._file_view.setColumnHidden(col, True)
# Nothing selected initially
self._file_view.setCurrentIndex(QModelIndex())
# The model needs to be sorted so we get the correct first/last index
self._file_model.directoryLoaded.connect(
lambda: self._file_model.sort(0))
def accept(self, value=None, save=False):
self._check_save_support(save)
text = value if value is not None else self._lineedit.text()
text = downloads.transform_path(text)
if text is None:
message.error("Invalid filename")
return False
self.question.answer = text
return True
def item_focus(self, which):
# This duplicates some completion code, but I don't see a nicer way...
assert which in ['prev', 'next'], which
selmodel = self._file_view.selectionModel()
parent = self._file_view.rootIndex()
first_index = self._file_model.index(0, 0, parent)
row = self._file_model.rowCount(parent) - 1
last_index = self._file_model.index(row, 0, parent)
if not first_index.isValid():
# No entries
return
assert last_index.isValid()
idx = selmodel.currentIndex()
if not idx.isValid():
# No item selected yet
idx = last_index if which == 'prev' else first_index
elif which == 'prev':
idx = self._file_view.indexAbove(idx)
else:
assert which == 'next', which
idx = self._file_view.indexBelow(idx)
# wrap around if we arrived at beginning/end
if not idx.isValid():
idx = last_index if which == 'prev' else first_index
idx = self._do_completion(idx, which)
selmodel.setCurrentIndex(
idx,
QItemSelectionModel.ClearAndSelect | # type: ignore[arg-type]
QItemSelectionModel.Rows)
self._insert_path(idx, clicked=False)
def _do_completion(self, idx, which):
while idx.isValid() and self._file_view.isIndexHidden(idx):
if which == 'prev':
idx = self._file_view.indexAbove(idx)
else:
assert which == 'next', which
idx = self._file_view.indexBelow(idx)
return idx
def _allowed_commands(self):
return [('prompt-accept', 'Accept'), ('mode-leave', 'Abort')]
class DownloadFilenamePrompt(FilenamePrompt):
"""A prompt for a filename for downloads."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._file_model.setFilter(
QDir.AllDirs | QDir.Drives | QDir.NoDot) # type: ignore[arg-type]
def accept(self, value=None, save=False):
done = super().accept(value, save)
answer = self.question.answer
if answer is not None:
self.question.answer = downloads.FileDownloadTarget(answer)
return done
def download_open(self, cmdline, pdfjs):
if pdfjs:
target: 'downloads._DownloadTarget' = downloads.PDFJSDownloadTarget()
else:
target = downloads.OpenFileDownloadTarget(cmdline)
self.question.answer = target
self.question.done()
message.global_bridge.prompt_done.emit(self.KEY_MODE)
def _allowed_commands(self):
cmds = [
('prompt-accept', 'Accept'),
('mode-leave', 'Abort'),
('prompt-open-download', "Open download"),
('prompt-open-download --pdfjs', "Open download via PDF.js"),
('prompt-yank', "Yank URL"),
]
return cmds
class AuthenticationPrompt(_BasePrompt):
"""A prompt for username/password."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
user_label = QLabel("Username:", self)
self._user_lineedit = LineEdit(self)
password_label = QLabel("Password:", self)
self._password_lineedit = LineEdit(self)
self._password_lineedit.setEchoMode(QLineEdit.Password)
grid = QGridLayout()
grid.addWidget(user_label, 1, 0)
grid.addWidget(self._user_lineedit, 1, 1)
grid.addWidget(password_label, 2, 0)
grid.addWidget(self._password_lineedit, 2, 1)
self._vbox.addLayout(grid)
self._init_key_label()
assert not question.default, question.default
self.setFocusProxy(self._user_lineedit)
def accept(self, value=None, save=False):
self._check_save_support(save)
if value is not None:
if ':' not in value:
raise Error("Value needs to be in the format "
"username:password, but {} was given".format(
value))
username, password = value.split(':', maxsplit=1)
self.question.answer = AuthInfo(username, password)
return True
elif self._user_lineedit.hasFocus():
# Earlier, tab was bound to :prompt-accept, so to still support
# that we simply switch the focus when tab was pressed.
self._password_lineedit.setFocus()
return False
else:
self.question.answer = AuthInfo(self._user_lineedit.text(),
self._password_lineedit.text())
return True
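    # Illustrative only (assuming ``:prompt-accept`` is invoked with an explicit value, as
    # the error message above implies): ``:prompt-accept myuser:s3cret`` answers both fields
    # at once and is split on the first ':' into AuthInfo('myuser', 's3cret').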
def item_focus(self, which):
"""Support switching between fields with tab."""
assert which in ['prev', 'next'], which
if which == 'next' and self._user_lineedit.hasFocus():
self._password_lineedit.setFocus()
elif which == 'prev' and self._password_lineedit.hasFocus():
self._user_lineedit.setFocus()
def _allowed_commands(self):
return [('prompt-accept', "Accept"),
('mode-leave', "Abort")]
class YesNoPrompt(_BasePrompt):
"""A prompt with yes/no answers."""
KEY_MODE = usertypes.KeyMode.yesno
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
def _check_save_support(self, save):
if save and self.question.option is None:
raise Error("No setting available to save the answer for this "
"question.")
def accept(self, value=None, save=False):
self._check_save_support(save)
if value is None:
if self.question.default is None:
raise Error("No default value was set for this question!")
self.question.answer = self.question.default
elif value == 'yes':
self.question.answer = True
elif value == 'no':
self.question.answer = False
else:
raise Error("Invalid value {} - expected yes/no!".format(value))
if save:
opt = config.instance.get_opt(self.question.option)
assert isinstance(opt.typ, configtypes.Bool)
pattern = urlmatch.UrlPattern(self.question.url)
try:
config.instance.set_obj(opt.name, self.question.answer,
pattern=pattern, save_yaml=True)
except configexc.Error as e:
raise Error(str(e))
return True
def _allowed_commands(self):
cmds = []
cmds.append(('prompt-accept yes', "Yes"))
if self.question.option is not None:
cmds.append(('prompt-accept --save yes', "Always"))
cmds.append(('prompt-accept no', "No"))
if self.question.option is not None:
cmds.append(('prompt-accept --save no', "Never"))
if self.question.default is not None:
assert self.question.default in [True, False]
default = 'yes' if self.question.default else 'no'
cmds.append(('prompt-accept', "Use default ({})".format(default)))
cmds.append(('mode-leave', "Abort"))
cmds.append(('prompt-yank', "Yank URL"))
return cmds
class AlertPrompt(_BasePrompt):
"""A prompt without any answer possibility."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
def accept(self, value=None, save=False):
self._check_save_support(save)
if value is not None:
raise Error("No value is permitted with alert prompts!")
# Simply mark prompt as done without setting self.question.answer
return True
def _allowed_commands(self):
return [('prompt-accept', "Hide")]
def init():
"""Initialize global prompt objects."""
global prompt_queue
prompt_queue = PromptQueue()
message.global_bridge.ask_question.connect( # type: ignore[call-arg]
prompt_queue.ask_question, Qt.DirectConnection)
| 1 | 26,360 | I wonder if this is a fitting name. Why "expands user"? Wouldn't be `user_expanded` or so be more fitting? | qutebrowser-qutebrowser | py |
@@ -28,10 +28,11 @@ var axe = axe || { utils: {} };
*/
function virtualDOMfromNode(node, shadowId) {
const vNodeCache = {};
- return {
+ const vNode = {
shadowId: shadowId,
children: [],
actualNode: node,
+ _isHidden: null, // will be populated by axe.utils.isHidden
get isFocusable() {
if (!vNodeCache._isFocusable) {
vNodeCache._isFocusable = axe.commons.dom.isFocusable(node); | 1 | /*eslint no-use-before-define: 0*/
var axe = axe || { utils: {} };
/**
 * This implements the flatten-tree algorithm specified:
* Originally here https://drafts.csswg.org/css-scoping/#flat-tree
* Hopefully soon published here: https://www.w3.org/TR/css-scoping-1/#flat-tree
*
* Some notable information:
******* NOTE: as of Chrome 59, this is broken in Chrome so that tests fail completely
******* removed functionality for now
* 1. <slot> elements do not have boxes by default (i.e. they do not get rendered and
* their CSS properties are ignored)
* 2. <slot> elements can be made to have a box by overriding the display property
* which is 'contents' by default
* 3. Even boxed <slot> elements do not show up in the accessibility tree until
* they have a tabindex applied to them OR they have a role applied to them AND
* they have a box (this is observed behavior in Safari on OS X, I cannot find
* the spec for this)
*/
/**
* Wrap the real node and provide list of the flattened children
*
* @param node {Node} - the node in question
* @param shadowId {String} - the ID of the shadow DOM to which this node belongs
* @return {Object} - the wrapped node
*/
function virtualDOMfromNode(node, shadowId) {
const vNodeCache = {};
return {
shadowId: shadowId,
children: [],
actualNode: node,
get isFocusable() {
if (!vNodeCache._isFocusable) {
vNodeCache._isFocusable = axe.commons.dom.isFocusable(node);
}
return vNodeCache._isFocusable;
},
get tabbableElements() {
if (!vNodeCache._tabbableElements) {
vNodeCache._tabbableElements = axe.commons.dom.getTabbableElements(
this
);
}
return vNodeCache._tabbableElements;
}
};
}
/**
* find all the fallback content for a <slot> and return these as an array
* this array will also include any #text nodes
*
* @param node {Node} - the slot Node
* @return Array{Nodes}
*/
function getSlotChildren(node) {
var retVal = [];
node = node.firstChild;
while (node) {
retVal.push(node);
node = node.nextSibling;
}
return retVal;
}
/**
 * Recursively returns an array of the virtual DOM nodes at this level
* excluding comment nodes and the shadow DOM nodes <content> and <slot>
*
* @param {Node} node the current node
* @param {String} shadowId, optional ID of the shadow DOM that is the closest shadow
* ancestor of the node
*/
axe.utils.getFlattenedTree = function(node, shadowId) {
	// using a closure here and therefore cannot easily refactor to reduce the statements
/*eslint max-statements: ["error", 31] */
var retVal, realArray, nodeName;
function reduceShadowDOM(res, child) {
var replacements = axe.utils.getFlattenedTree(child, shadowId);
if (replacements) {
res = res.concat(replacements);
}
return res;
}
if (node.documentElement) {
// document
node = node.documentElement;
}
nodeName = node.nodeName.toLowerCase();
if (axe.utils.isShadowRoot(node)) {
// generate an ID for this shadow root and overwrite the current
// closure shadowId with this value so that it cascades down the tree
retVal = virtualDOMfromNode(node, shadowId);
shadowId =
'a' +
Math.random()
.toString()
.substring(2);
realArray = Array.from(node.shadowRoot.childNodes);
retVal.children = realArray.reduce(reduceShadowDOM, []);
return [retVal];
} else {
if (nodeName === 'content') {
realArray = Array.from(node.getDistributedNodes());
return realArray.reduce(reduceShadowDOM, []);
} else if (
nodeName === 'slot' &&
typeof node.assignedNodes === 'function'
) {
realArray = Array.from(node.assignedNodes());
if (!realArray.length) {
// fallback content
realArray = getSlotChildren(node);
}
var styl = window.getComputedStyle(node);
// check the display property
if (false && styl.display !== 'contents') {
// intentionally commented out
// has a box
retVal = virtualDOMfromNode(node, shadowId);
retVal.children = realArray.reduce(reduceShadowDOM, []);
return [retVal];
} else {
return realArray.reduce(reduceShadowDOM, []);
}
} else {
if (node.nodeType === 1) {
retVal = virtualDOMfromNode(node, shadowId);
realArray = Array.from(node.childNodes);
retVal.children = realArray.reduce(reduceShadowDOM, []);
return [retVal];
} else if (node.nodeType === 3) {
// text
return [virtualDOMfromNode(node)];
}
return undefined;
}
}
};
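// Illustrative sketch (the markup is assumed, not part of axe-core): given
//   <div id="app">hi<span></span></div>
// axe.utils.getFlattenedTree(document.getElementById('app')) returns roughly
//   [{ shadowId: undefined, actualNode: div#app, children: [
//       { actualNode: #text "hi", children: [] },
//       { actualNode: span, children: [] } ] }]
// i.e. a one-element array wrapping the element and its flattened (shadow-aware) children.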
/**
* Recursively return a single node from a virtual dom tree
*
* @param {Object} vNode The flattened, virtual DOM tree
* @param {Node} node The HTML DOM node
*/
axe.utils.getNodeFromTree = function(vNode, node) {
var found;
if (vNode.actualNode === node) {
return vNode;
}
vNode.children.forEach(candidate => {
if (found) {
return;
}
if (candidate.actualNode === node) {
found = candidate;
} else {
found = axe.utils.getNodeFromTree(candidate, node);
}
});
return found;
};
| 1 | 14,375 | I see what you are doing here, but to stay with the `getter/setter` pattern, should we introduce `set isHidden(value)` & `get isHidden()` which them maintains `_isHidden` with in `vNodeCache`. This will avoid what looks like accessing an internal property like `_isHidden` from `axe.utils.isHidden` & keeps things neat. | dequelabs-axe-core | js |
@@ -41,6 +41,7 @@ public class CSharpBasicPackageTransformer implements ModelToViewTransformer<Pro
"csharp/gapic_snippets_csproj.snip";
private static final String UNITTEST_CSPROJ_TEMPLATE_FILENAME =
"csharp/gapic_unittest_csproj.snip";
+ private static final String SAMPLE_CSPROJ_TEMPLATE_FILENAME = "csharp/gapic_samples_csproj.snip";
private static final CSharpAliasMode ALIAS_MODE = CSharpAliasMode.MessagesOnly;
| 1 | /* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer.csharp;
import com.google.api.codegen.config.GapicInterfaceContext;
import com.google.api.codegen.config.GapicProductConfig;
import com.google.api.codegen.config.InterfaceModel;
import com.google.api.codegen.config.ProtoApiModel;
import com.google.api.codegen.gapic.GapicCodePathMapper;
import com.google.api.codegen.transformer.FileHeaderTransformer;
import com.google.api.codegen.transformer.ModelToViewTransformer;
import com.google.api.codegen.transformer.StandardImportSectionTransformer;
import com.google.api.codegen.transformer.SurfaceNamer;
import com.google.api.codegen.util.csharp.CSharpAliasMode;
import com.google.api.codegen.viewmodel.ViewModel;
import com.google.api.codegen.viewmodel.metadata.SimpleInitFileView;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;
/* Transforms a ProtoApiModel into the smoke tests of an API for C#. */
public class CSharpBasicPackageTransformer implements ModelToViewTransformer<ProtoApiModel> {
private static final String SMOKETEST_CSPROJ_TEMPLATE_FILENAME =
"csharp/gapic_smoketest_csproj.snip";
private static final String SNIPPETS_CSPROJ_TEMPLATE_FILENAME =
"csharp/gapic_snippets_csproj.snip";
private static final String UNITTEST_CSPROJ_TEMPLATE_FILENAME =
"csharp/gapic_unittest_csproj.snip";
private static final CSharpAliasMode ALIAS_MODE = CSharpAliasMode.MessagesOnly;
private final GapicCodePathMapper pathMapper;
private final String templateFilename;
private final String fileBaseSuffix;
private final Predicate<GapicInterfaceContext> shouldGenerateFn;
private final CSharpCommonTransformer csharpCommonTransformer = new CSharpCommonTransformer();
private final FileHeaderTransformer fileHeaderTransformer =
new FileHeaderTransformer(new StandardImportSectionTransformer());
private CSharpBasicPackageTransformer(
GapicCodePathMapper pathMapper,
String templateFilename,
String fileBaseSuffix,
Predicate<GapicInterfaceContext> shouldGenerateFn) {
this.pathMapper = pathMapper;
this.templateFilename = templateFilename;
this.fileBaseSuffix = fileBaseSuffix;
this.shouldGenerateFn = shouldGenerateFn;
}
public static CSharpBasicPackageTransformer forSmokeTests(GapicCodePathMapper pathMapper) {
return new CSharpBasicPackageTransformer(
pathMapper,
SMOKETEST_CSPROJ_TEMPLATE_FILENAME,
".SmokeTests.csproj",
shouldGenSmokeTestPackage());
}
public static CSharpBasicPackageTransformer forSnippets(GapicCodePathMapper pathMapper) {
return new CSharpBasicPackageTransformer(
pathMapper, SNIPPETS_CSPROJ_TEMPLATE_FILENAME, ".Snippets.csproj", parameter -> true);
}
public static CSharpBasicPackageTransformer forUnitTests(GapicCodePathMapper pathMapper) {
return new CSharpBasicPackageTransformer(
pathMapper, UNITTEST_CSPROJ_TEMPLATE_FILENAME, ".Tests.csproj", parameter -> true);
}
private static Predicate<GapicInterfaceContext> shouldGenSmokeTestPackage() {
return parameter -> parameter.getInterfaceConfig().getSmokeTestConfig() != null;
}
@Override
public List<ViewModel> transform(ProtoApiModel model, GapicProductConfig productConfig) {
List<ViewModel> surfaceDocs = new ArrayList<>();
SurfaceNamer namer = new CSharpSurfaceNamer(productConfig.getPackageName(), ALIAS_MODE);
for (InterfaceModel apiInterface : model.getInterfaces(productConfig)) {
if (!productConfig.hasInterfaceConfig(apiInterface)) {
continue;
}
GapicInterfaceContext context =
GapicInterfaceContext.create(
apiInterface,
productConfig,
csharpCommonTransformer.createTypeTable(namer.getPackageName(), ALIAS_MODE),
namer,
new CSharpFeatureConfig());
if (shouldGenerateFn.test(context)) {
surfaceDocs.add(generateCsproj(context));
}
}
return surfaceDocs;
}
@Override
public List<String> getTemplateFileNames() {
return Arrays.asList(templateFilename);
}
private SimpleInitFileView generateCsproj(GapicInterfaceContext context) {
GapicProductConfig productConfig = context.getProductConfig();
String outputPath =
pathMapper.getOutputPath(context.getInterface().getFullName(), productConfig);
return SimpleInitFileView.create(
templateFilename,
outputPath + File.separator + productConfig.getPackageName() + fileBaseSuffix,
fileHeaderTransformer.generateFileHeader(context));
}
}
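// Illustrative usage only (the pathMapper instance is assumed): each static factory pairs a
// snippet template with a file suffix, e.g.
//   CSharpBasicPackageTransformer.forUnitTests(pathMapper)
// emits "<package name>.Tests.csproj" views from gapic_unittest_csproj.snip for every
// configured interface, while forSmokeTests(pathMapper) only emits output for interfaces
// whose config defines a smoke test.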
| 1 | 29,985 | nit: for consistency, have this line and the previous formatted similarly | googleapis-gapic-generator | java |
@@ -165,6 +165,9 @@ public class PrivacyParameters {
private PrivacyStorageProvider storageProvider;
private EnclaveFactory enclaveFactory;
private boolean multiTenancyEnabled;
+ private Path orionKeyStoreFile;
+ private Path orionKeyStorePasswordFile;
+ private Path orionClientWhitelistFile;
public Builder setPrivacyAddress(final Integer privacyAddress) {
this.privacyAddress = privacyAddress; | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.core;
import static java.nio.charset.StandardCharsets.UTF_8;
import org.hyperledger.besu.crypto.KeyPairUtil;
import org.hyperledger.besu.crypto.SECP256K1;
import org.hyperledger.besu.enclave.Enclave;
import org.hyperledger.besu.enclave.EnclaveFactory;
import org.hyperledger.besu.ethereum.privacy.storage.PrivacyStorageProvider;
import org.hyperledger.besu.ethereum.privacy.storage.PrivateStateStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.nio.file.Path;
import java.util.Base64;
import java.util.Optional;
import com.google.common.io.Files;
public class PrivacyParameters {
public static final URI DEFAULT_ENCLAVE_URL = URI.create("http://localhost:8888");
public static final PrivacyParameters DEFAULT = new PrivacyParameters();
private Integer privacyAddress = Address.PRIVACY;
private boolean enabled;
private URI enclaveUri;
private String enclavePublicKey;
private File enclavePublicKeyFile;
private Optional<SECP256K1.KeyPair> signingKeyPair = Optional.empty();
private Enclave enclave;
private PrivacyStorageProvider privateStorageProvider;
private WorldStateArchive privateWorldStateArchive;
private PrivateStateStorage privateStateStorage;
private boolean multiTenancyEnabled;
public Integer getPrivacyAddress() {
return privacyAddress;
}
public void setPrivacyAddress(final Integer privacyAddress) {
this.privacyAddress = privacyAddress;
}
public Boolean isEnabled() {
return enabled;
}
public void setEnabled(final boolean enabled) {
this.enabled = enabled;
}
public URI getEnclaveUri() {
return enclaveUri;
}
public void setEnclaveUri(final URI enclaveUri) {
this.enclaveUri = enclaveUri;
}
public String getEnclavePublicKey() {
return enclavePublicKey;
}
public void setEnclavePublicKey(final String enclavePublicKey) {
this.enclavePublicKey = enclavePublicKey;
}
public File getEnclavePublicKeyFile() {
return enclavePublicKeyFile;
}
public void setEnclavePublicKeyFile(final File enclavePublicKeyFile) {
this.enclavePublicKeyFile = enclavePublicKeyFile;
}
public Optional<SECP256K1.KeyPair> getSigningKeyPair() {
return signingKeyPair;
}
public void setSigningKeyPair(final SECP256K1.KeyPair signingKeyPair) {
this.signingKeyPair = Optional.ofNullable(signingKeyPair);
}
public WorldStateArchive getPrivateWorldStateArchive() {
return privateWorldStateArchive;
}
public void setPrivateWorldStateArchive(final WorldStateArchive privateWorldStateArchive) {
this.privateWorldStateArchive = privateWorldStateArchive;
}
public PrivacyStorageProvider getPrivateStorageProvider() {
return privateStorageProvider;
}
public void setPrivateStorageProvider(final PrivacyStorageProvider privateStorageProvider) {
this.privateStorageProvider = privateStorageProvider;
}
public PrivateStateStorage getPrivateStateStorage() {
return privateStateStorage;
}
public void setPrivateStateStorage(final PrivateStateStorage privateStateStorage) {
this.privateStateStorage = privateStateStorage;
}
public Enclave getEnclave() {
return enclave;
}
public void setEnclave(final Enclave enclave) {
this.enclave = enclave;
}
private void setMultiTenancyEnabled(final boolean multiTenancyEnabled) {
this.multiTenancyEnabled = multiTenancyEnabled;
}
public boolean isMultiTenancyEnabled() {
return multiTenancyEnabled;
}
@Override
public String toString() {
return "PrivacyParameters{"
+ "enabled="
+ enabled
+ ", multiTenancyEnabled = "
+ multiTenancyEnabled
+ ", enclaveUri='"
+ enclaveUri
+ '\''
+ '}';
}
public static class Builder {
private boolean enabled;
private URI enclaveUrl;
private Integer privacyAddress = Address.PRIVACY;
private File enclavePublicKeyFile;
private String enclavePublicKey;
private Path privateKeyPath;
private PrivacyStorageProvider storageProvider;
private EnclaveFactory enclaveFactory;
private boolean multiTenancyEnabled;
public Builder setPrivacyAddress(final Integer privacyAddress) {
this.privacyAddress = privacyAddress;
return this;
}
public Builder setEnclaveUrl(final URI enclaveUrl) {
this.enclaveUrl = enclaveUrl;
return this;
}
public Builder setEnabled(final boolean enabled) {
this.enabled = enabled;
return this;
}
public Builder setStorageProvider(final PrivacyStorageProvider privateStorageProvider) {
this.storageProvider = privateStorageProvider;
return this;
}
public Builder setPrivateKeyPath(final Path privateKeyPath) {
this.privateKeyPath = privateKeyPath;
return this;
}
public Builder setEnclaveFactory(final EnclaveFactory enclaveFactory) {
this.enclaveFactory = enclaveFactory;
return this;
}
public Builder setMultiTenancyEnabled(final boolean multiTenancyEnabled) {
this.multiTenancyEnabled = multiTenancyEnabled;
return this;
}
public PrivacyParameters build() {
final PrivacyParameters config = new PrivacyParameters();
if (enabled) {
final WorldStateStorage privateWorldStateStorage =
storageProvider.createWorldStateStorage();
final WorldStatePreimageStorage privatePreimageStorage =
storageProvider.createWorldStatePreimageStorage();
final WorldStateArchive privateWorldStateArchive =
new WorldStateArchive(privateWorldStateStorage, privatePreimageStorage);
final PrivateStateStorage privateStateStorage = storageProvider.createPrivateStateStorage();
config.setPrivateWorldStateArchive(privateWorldStateArchive);
config.setEnclavePublicKey(enclavePublicKey);
config.setEnclavePublicKeyFile(enclavePublicKeyFile);
config.setPrivateStorageProvider(storageProvider);
config.setPrivateStateStorage(privateStateStorage);
config.setEnclave(enclaveFactory.createVertxEnclave(enclaveUrl));
if (privateKeyPath != null) {
config.setSigningKeyPair(KeyPairUtil.load(privateKeyPath.toFile()));
}
}
config.setEnabled(enabled);
config.setEnclaveUri(enclaveUrl);
config.setPrivacyAddress(privacyAddress);
config.setMultiTenancyEnabled(multiTenancyEnabled);
return config;
}
public Builder setEnclavePublicKeyUsingFile(final File publicKeyFile) throws IOException {
this.enclavePublicKeyFile = publicKeyFile;
this.enclavePublicKey = Files.asCharSource(publicKeyFile, UTF_8).read();
validatePublicKey(publicKeyFile);
return this;
}
private void validatePublicKey(final File publicKeyFile) {
if (publicKeyFile.length() != 44) {
throw new IllegalArgumentException(
"Contents of enclave public key file needs to be 44 characters long to decode to a valid 32 byte public key.");
}
// throws exception if invalid base 64
Base64.getDecoder().decode(this.enclavePublicKey);
}
}
}
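// Illustrative builder usage (the URI, key file, storageProvider and enclaveFactory values
// are assumed, not defaults):
//   PrivacyParameters privacyParameters =
//       new PrivacyParameters.Builder()
//           .setEnabled(true)
//           .setEnclaveUrl(URI.create("http://127.0.0.1:8888"))
//           .setEnclavePublicKeyUsingFile(new File("enclave_key.pub"))
//           .setStorageProvider(storageProvider)
//           .setEnclaveFactory(enclaveFactory)
//           .build();
// Note that setEnclavePublicKeyUsingFile declares IOException.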
| 1 | 21,228 | As before, shouldn't mention these as being orion options I don't think. Perhaps just enclaveKeyStoreFile etc. | hyperledger-besu | java |
@@ -152,10 +152,8 @@ async function observeRestResponse( res ) {
// The response may fail to resolve if the test ends before it completes.
try {
args.push( await res.text() );
+ console.log( ...args ); // eslint-disable-line no-console
} catch ( err ) {} // eslint-disable-line no-empty
-
- // eslint-disable-next-line no-console
- console.log( ...args );
}
}
| 1 | /**
* External dependencies
*/
import { get } from 'lodash';
/**
* WordPress dependencies
*/
import {
clearLocalStorage,
enablePageDialogAccept,
setBrowserViewport,
} from '@wordpress/e2e-test-utils';
/**
* Internal dependencies
*/
import {
clearSessionStorage,
deactivateAllOtherPlugins,
resetSiteKit,
} from '../utils';
/**
* Environment variables
*/
const { PUPPETEER_TIMEOUT } = process.env;
/**
* Set of console logging types observed to protect against unexpected yet
* handled (i.e. not catastrophic) errors or warnings. Each key corresponds
* to the Puppeteer ConsoleMessage type, its value the corresponding function
* on the console global object.
*
* @type {Object<string,string>}
*/
const OBSERVED_CONSOLE_MESSAGE_TYPES = {
warning: 'warn',
error: 'error',
};
/**
* Array of page event tuples of [ eventName, handler ].
*
* @type {Array}
*/
const pageEvents = [];
// The Jest timeout is increased because these tests are a bit slow
jest.setTimeout( PUPPETEER_TIMEOUT || 100000 );
/**
* Adds an event listener to the page to handle additions of page event
* handlers, to assure that they are removed at test teardown.
*/
function capturePageEventsForTearDown() {
page.on( 'newListener', ( eventName, listener ) => {
pageEvents.push( [ eventName, listener ] );
} );
}
/**
* Removes all bound page event handlers.
*/
function removePageEvents() {
while ( pageEvents.length ) {
const [ eventName, handler ] = pageEvents.pop();
page.removeListener( eventName, handler );
}
}
/**
* Adds a page event handler to emit uncaught exception to process if one of
* the observed console logging types is encountered.
*/
function observeConsoleLogging() {
page.on( 'console', ( message ) => {
const type = message.type();
if ( ! OBSERVED_CONSOLE_MESSAGE_TYPES.hasOwnProperty( type ) ) {
return;
}
let text = message.text();
// An exception is made for _blanket_ deprecation warnings: Those
// which log regardless of whether a deprecated feature is in use.
if ( text.includes( 'This is a global warning' ) ) {
return;
}
// Viewing posts on the front end can result in this error, which
// has nothing to do with Gutenberg.
if ( text.includes( 'net::ERR_UNKNOWN_URL_SCHEME' ) ) {
return;
}
// A bug present in WordPress 5.2 will produce console warnings when
// loading the Dashicons font. These can be safely ignored, as they do
// not otherwise regress on application behavior. This logic should be
// removed once the associated ticket has been closed.
//
// See: https://core.trac.wordpress.org/ticket/47183
if (
text.startsWith( 'Failed to decode downloaded font:' ) ||
text.startsWith( 'OTS parsing error:' )
) {
return;
}
const logFunction = OBSERVED_CONSOLE_MESSAGE_TYPES[ type ];
// As of Puppeteer 1.6.1, `message.text()` wrongly returns an object of
// type JSHandle for error logging, instead of the expected string.
//
// See: https://github.com/GoogleChrome/puppeteer/issues/3397
//
// The recommendation there to asynchronously resolve the error value
// upon a console event may be prone to a race condition with the test
// completion, leaving a possibility of an error not being surfaced
// correctly. Instead, the logic here synchronously inspects the
// internal object shape of the JSHandle to find the error text. If it
// cannot be found, the default text value is used instead.
text = get( message.args(), [ 0, '_remoteObject', 'description' ], text );
// Disable reason: We intentionally bubble up the console message
// which, unless the test explicitly anticipates the logging via
// @wordpress/jest-console matchers, will cause the intended test
// failure.
// eslint-disable-next-line no-console
console[ logFunction ]( text );
} );
}
/**
* Observe the given REST request.
*/
function observeRestRequest( req ) {
if ( req.url().match( 'wp-json' ) ) {
// eslint-disable-next-line no-console
console.log( '>>>', req.method(), req.url(), req.postData() );
}
}
/**
* Observe the given REST response.
*/
async function observeRestResponse( res ) {
if ( res.url().match( 'wp-json' ) ) {
const args = [ res.status(), res.request().method(), res.url() ];
// The response may fail to resolve if the test ends before it completes.
try {
args.push( await res.text() );
} catch ( err ) {} // eslint-disable-line no-empty
// eslint-disable-next-line no-console
console.log( ...args );
}
}
// Before every test suite run, delete all content created by the test. This ensures
// other posts/comments/etc. aren't dirtying tests and tests don't depend on
// each other's side-effects.
beforeAll( async() => {
capturePageEventsForTearDown();
enablePageDialogAccept();
observeConsoleLogging();
if ( '1' === process.env.DEBUG_REST ) {
page.on( 'request', observeRestRequest );
page.on( 'response', observeRestResponse );
}
await setBrowserViewport( 'large' );
await deactivateAllOtherPlugins();
await resetSiteKit();
} );
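// To see the REST traffic captured by observeRestRequest/observeRestResponse above, set
// DEBUG_REST=1 in the environment before running the e2e suite, e.g.
//   DEBUG_REST=1 npm run test:e2e
// (the exact script name is assumed; it depends on this repository's package.json).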
afterEach( async() => {
await clearLocalStorage();
await clearSessionStorage();
await setBrowserViewport( 'large' );
} );
afterAll( async() => {
await deactivateAllOtherPlugins();
await resetSiteKit();
removePageEvents();
await page.setRequestInterception( false );
} );
| 1 | 24,621 | What's the thinking here? Only logging when the test has not ended yet? | google-site-kit-wp | js |
@@ -57,6 +57,12 @@ def _column_op(f):
args = [arg._scol if isinstance(arg, IndexOpsMixin) else arg for arg in args]
scol = f(self._scol, *args)
+ # If f is a logistic operator, fill NULL with False
+ log_ops = ['eq', 'ne', 'lt', 'le', 'ge', 'gt']
+ is_log_op = any(f == getattr(spark.Column, f'__{log_op}__') for log_op in log_ops)
+ if is_log_op:
+ scol = F.when(scol.isNull(), False).otherwise(scol)
+
return self._with_new_scol(scol)
else:
# Different DataFrame anchors | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base and utility classes for Koalas objects.
"""
from functools import wraps
from typing import Union
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like
from pyspark import sql as spark
from pyspark.sql import functions as F, Window
from pyspark.sql.types import DoubleType, FloatType, LongType, StringType, TimestampType
from pyspark.sql.functions import monotonically_increasing_id
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.internal import _InternalFrame
from databricks.koalas.typedef import pandas_wraps, spark_type_to_pandas_dtype
from databricks.koalas.utils import align_diff_series, scol_for
def _column_op(f):
"""
A decorator that wraps APIs taking/returning Spark Column so that Koalas Series can be
supported too. If this decorator is used for the `f` function that takes Spark Column and
returns Spark Column, decorated `f` takes Koalas Series as well and returns Koalas
Series.
:param f: a function that takes Spark Column and returns Spark Column.
:param self: Koalas Series
:param args: arguments that the function `f` takes.
"""
@wraps(f)
def wrapper(self, *args):
        # It is possible for the function `f` to take arguments other than Spark Column.
        # To cover this case, explicitly check if the argument is Koalas Series and
        # extract Spark Column. For other arguments, they are used as-is.
cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]
if all(self._kdf is col._kdf for col in cols):
# Same DataFrame anchors
args = [arg._scol if isinstance(arg, IndexOpsMixin) else arg for arg in args]
scol = f(self._scol, *args)
return self._with_new_scol(scol)
else:
# Different DataFrame anchors
def apply_func(this_column, *that_columns):
return f(this_column, *that_columns)
return align_diff_series(apply_func, self, *args, how="full")
return wrapper
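# Illustrative behaviour of the decorator above (doctest-style; the Series values are assumed):
#   >>> s = ks.Series([1, 2, 3])
#   >>> s + 1   # __add__ below is built with _column_op(spark.Column.__add__)
#   0    2
#   1    3
#   2    4
#   Name: 0, dtype: int64
# i.e. a function written against Spark Columns transparently accepts and returns Koalas Series.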
def _numpy_column_op(f):
@wraps(f)
def wrapper(self, *args):
# PySpark does not support NumPy type out of the box. For now, we convert NumPy types
# into some primitive types understandable in PySpark.
new_args = []
for arg in args:
# TODO: This is a quick hack to support NumPy type. We should revisit this.
if isinstance(self.spark_type, LongType) and isinstance(arg, np.timedelta64):
new_args.append(float(arg / np.timedelta64(1, 's')))
else:
new_args.append(arg)
return _column_op(f)(self, *new_args)
return wrapper
def _wrap_accessor_spark(accessor, fn, return_type=None):
"""
Wrap an accessor property or method, e.g., Series.dt.date with a spark function.
"""
if return_type:
return _column_op(
lambda col: fn(col).cast(return_type)
)(accessor._data)
else:
return _column_op(fn)(accessor._data)
def _wrap_accessor_pandas(accessor, fn, return_type):
"""
    Wrap an accessor property or method, e.g., Series.dt.date with a pandas function.
"""
return pandas_wraps(fn, return_col=return_type)(accessor._data)
class IndexOpsMixin(object):
"""common ops mixin to support a unified interface / docs for Series / Index
Assuming there are following attributes or properties and function.
:ivar _scol: Spark Column instance
:type _scol: pyspark.Column
:ivar _kdf: Parent's Koalas DataFrame
:type _kdf: ks.DataFrame
:ivar spark_type: Spark data type
:type spark_type: spark.types.DataType
def _with_new_scol(self, scol: spark.Column) -> IndexOpsMixin
Creates new object with the new column
"""
def __init__(self, internal: _InternalFrame, kdf):
self._internal = internal # type: _InternalFrame
self._kdf = kdf
@property
def _scol(self):
return self._internal.scol
# arithmetic operators
__neg__ = _column_op(spark.Column.__neg__)
def __add__(self, other):
if isinstance(self.spark_type, StringType):
# Concatenate string columns
if isinstance(other, IndexOpsMixin) and isinstance(other.spark_type, StringType):
return _column_op(F.concat)(self, other)
# Handle df['col'] + 'literal'
elif isinstance(other, str):
return _column_op(F.concat)(self, F.lit(other))
else:
raise TypeError('string addition can only be applied to string series or literals.')
else:
return _column_op(spark.Column.__add__)(self, other)
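    # For example (values assumed): ks.Series(['a', 'b']) + '!' concatenates element-wise via
    # F.concat, yielding ['a!', 'b!'], while numeric Series fall through to Spark's __add__.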
def __sub__(self, other):
# Note that timestamp subtraction casts arguments to integer. This is to mimic Pandas's
# behaviors. Pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.
if isinstance(other, IndexOpsMixin) and isinstance(self.spark_type, TimestampType):
if not isinstance(other.spark_type, TimestampType):
raise TypeError('datetime subtraction can only be applied to datetime series.')
return self.astype('bigint') - other.astype('bigint')
else:
return _column_op(spark.Column.__sub__)(self, other)
__mul__ = _column_op(spark.Column.__mul__)
__div__ = _numpy_column_op(spark.Column.__div__)
__truediv__ = _numpy_column_op(spark.Column.__truediv__)
__mod__ = _column_op(spark.Column.__mod__)
def __radd__(self, other):
# Handle 'literal' + df['col']
if isinstance(self.spark_type, StringType) and isinstance(other, str):
return self._with_new_scol(F.concat(F.lit(other), self._scol))
else:
return _column_op(spark.Column.__radd__)(self, other)
__rsub__ = _column_op(spark.Column.__rsub__)
__rmul__ = _column_op(spark.Column.__rmul__)
__rdiv__ = _numpy_column_op(spark.Column.__rdiv__)
__rtruediv__ = _numpy_column_op(spark.Column.__rtruediv__)
def __floordiv__(self, other):
return self._with_new_scol(
F.floor(_numpy_column_op(spark.Column.__div__)(self, other)._scol))
def __rfloordiv__(self, other):
return self._with_new_scol(
F.floor(_numpy_column_op(spark.Column.__rdiv__)(self, other)._scol))
__rmod__ = _column_op(spark.Column.__rmod__)
__pow__ = _column_op(spark.Column.__pow__)
__rpow__ = _column_op(spark.Column.__rpow__)
# logistic operators
__eq__ = _column_op(spark.Column.__eq__)
__ne__ = _column_op(spark.Column.__ne__)
__lt__ = _column_op(spark.Column.__lt__)
__le__ = _column_op(spark.Column.__le__)
__ge__ = _column_op(spark.Column.__ge__)
__gt__ = _column_op(spark.Column.__gt__)
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
__and__ = _column_op(spark.Column.__and__)
__or__ = _column_op(spark.Column.__or__)
__invert__ = _column_op(spark.Column.__invert__)
__rand__ = _column_op(spark.Column.__rand__)
__ror__ = _column_op(spark.Column.__ror__)
@property
def dtype(self):
"""Return the dtype object of the underlying data.
Examples
--------
>>> s = ks.Series([1, 2, 3])
>>> s.dtype
dtype('int64')
>>> s = ks.Series(list('abc'))
>>> s.dtype
dtype('O')
>>> s = ks.Series(pd.date_range('20130101', periods=3))
>>> s.dtype
dtype('<M8[ns]')
>>> s.rename("a").to_frame().set_index("a").index.dtype
dtype('<M8[ns]')
"""
return spark_type_to_pandas_dtype(self.spark_type)
@property
def empty(self):
"""
Returns true if the current object is empty. Otherwise, returns false.
>>> ks.range(10).id.empty
False
>>> ks.range(0).id.empty
True
>>> ks.DataFrame({}, index=list('abc')).index.empty
False
"""
return self._internal._sdf.rdd.isEmpty()
@property
def hasnans(self):
"""
Return True if it has any missing values. Otherwise, it returns False.
>>> ks.DataFrame({}, index=list('abc')).index.hasnans
False
>>> ks.Series(['a', None]).hasnans
True
>>> ks.Series([1.0, 2.0, np.nan]).hasnans
True
>>> ks.Series([1, 2, 3]).hasnans
False
>>> ks.Series([1, 2, 3]).rename("a").to_frame().set_index("a").index.hasnans
False
"""
sdf = self._internal._sdf.select(self._scol)
col = self._scol
ret = sdf.select(F.max(col.isNull() | F.isnan(col))).collect()[0][0]
return ret
@property
def is_monotonic(self):
"""
Return boolean if values in the object are monotonically increasing.
.. note:: the current implementation of is_monotonic_increasing uses Spark's
Window without specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
is_monotonic : boolean
Examples
--------
>>> ser = ks.Series(['1/1/2018', '3/1/2018', '4/1/2018'])
>>> ser.is_monotonic
True
>>> df = ks.DataFrame({'dates': [None, '1/1/2018', '2/1/2018', '3/1/2018']})
>>> df.dates.is_monotonic
False
>>> df.index.is_monotonic
True
>>> ser = ks.Series([1])
>>> ser.is_monotonic
True
>>> ser = ks.Series([])
>>> ser.is_monotonic
True
>>> ser.rename("a").to_frame().set_index("a").index.is_monotonic
True
>>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])
>>> ser.is_monotonic
False
>>> ser.index.is_monotonic
True
"""
col = self._scol
window = Window.orderBy(monotonically_increasing_id()).rowsBetween(-1, -1)
return self._with_new_scol((col >= F.lag(col, 1).over(window)) & col.isNotNull()).all()
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
"""
Return boolean if values in the object are monotonically decreasing.
.. note:: the current implementation of is_monotonic_decreasing uses Spark's
Window without specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
is_monotonic : boolean
Examples
--------
>>> ser = ks.Series(['4/1/2018', '3/1/2018', '1/1/2018'])
>>> ser.is_monotonic_decreasing
True
>>> df = ks.DataFrame({'dates': [None, '3/1/2018', '2/1/2018', '1/1/2018']})
>>> df.dates.is_monotonic_decreasing
False
>>> df.index.is_monotonic_decreasing
False
>>> ser = ks.Series([1])
>>> ser.is_monotonic_decreasing
True
>>> ser = ks.Series([])
>>> ser.is_monotonic_decreasing
True
>>> ser.rename("a").to_frame().set_index("a").index.is_monotonic_decreasing
True
>>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])
>>> ser.is_monotonic_decreasing
True
>>> ser.index.is_monotonic_decreasing
False
"""
col = self._scol
window = Window.orderBy(monotonically_increasing_id()).rowsBetween(-1, -1)
return self._with_new_scol((col <= F.lag(col, 1).over(window)) & col.isNotNull()).all()
@property
def ndim(self):
"""
Return an int representing the number of array dimensions.
Return 1 for Series / Index / MultiIndex.
Examples
--------
For Series
>>> s = ks.Series([None, 1, 2, 3, 4], index=[4, 5, 2, 1, 8])
>>> s.ndim
1
For Index
>>> s.index.ndim
1
For MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [1, 1, 1, 1, 1, 2, 1, 2, 2]])
>>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s.index.ndim
1
"""
return 1
def astype(self, dtype):
"""
Cast a Koalas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> ser = ks.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
Name: 0, dtype: int32
>>> ser.astype('int64')
0 1
1 2
Name: 0, dtype: int64
>>> ser.rename("a").to_frame().set_index("a").index.astype('int64')
Int64Index([1, 2], dtype='int64', name='a')
"""
from databricks.koalas.typedef import as_spark_type
spark_type = as_spark_type(dtype)
if not spark_type:
raise ValueError("Type {} not understood".format(dtype))
return self._with_new_scol(self._scol.cast(spark_type))
def isin(self, values):
"""
Check whether `values` are contained in Series.
Return a boolean Series showing whether each element in the Series
matches an element in the passed sequence of `values` exactly.
Parameters
----------
values : list or set
The sequence of values to test.
Returns
-------
isin : Series (bool dtype)
Examples
--------
>>> s = ks.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'lama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
>>> s.rename("a").to_frame().set_index("a").index.isin(['lama'])
Index([True, False, True, False, True, False], dtype='object', name='a')
"""
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
return self._with_new_scol(self._scol.isin(list(values))).rename(self.name)
def isnull(self):
"""
        Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
        NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values. Characters such as empty strings '' or
numpy.inf are not considered NA values
(unless you set pandas.options.mode.use_inf_as_na = True).
Returns
-------
Series : Mask of bool values for each element in Series
            that indicates whether an element is an NA value.
Examples
--------
>>> ser = ks.Series([5, 6, np.NaN])
>>> ser.isna() # doctest: +NORMALIZE_WHITESPACE
0 False
1 False
2 True
Name: 0, dtype: bool
>>> ser.rename("a").to_frame().set_index("a").index.isna()
Index([False, False, True], dtype='object', name='a')
"""
if isinstance(self.spark_type, (FloatType, DoubleType)):
return self._with_new_scol(self._scol.isNull() | F.isnan(self._scol)).rename(self.name)
else:
return self._with_new_scol(self._scol.isNull()).rename(self.name)
isna = isnull
def notnull(self):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True.
Characters such as empty strings '' or numpy.inf are not considered NA values
(unless you set pandas.options.mode.use_inf_as_na = True).
NA values, such as None or numpy.NaN, get mapped to False values.
Returns
-------
Series : Mask of bool values for each element in Series
that indicates whether an element is not an NA value.
Examples
--------
Show which entries in a Series are not NA.
>>> ser = ks.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
Name: 0, dtype: float64
>>> ser.notna()
0 True
1 True
2 False
Name: 0, dtype: bool
>>> ser.rename("a").to_frame().set_index("a").index.notna()
Index([True, True, False], dtype='object', name='a')
"""
return (~self.isnull()).rename(self.name)
notna = notnull
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Union[int, str] = 0) -> bool:
"""
Return whether all elements are True.
Returns True unless there at least one element within a series that is
False or equivalent (e.g. zero or empty)
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
>>> ks.Series([True, True]).all()
True
>>> ks.Series([True, False]).all()
False
>>> ks.Series([0, 1]).all()
False
>>> ks.Series([1, 2, 3]).all()
True
>>> ks.Series([True, True, None]).all()
True
>>> ks.Series([True, False, None]).all()
False
>>> ks.Series([]).all()
True
>>> ks.Series([np.nan]).all()
True
>>> df = ks.Series([True, False, None]).rename("a").to_frame()
>>> df.set_index("a").index.all()
False
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
sdf = self._internal._sdf.select(self._scol)
col = scol_for(sdf, sdf.columns[0])
# Note that we're ignoring `None`s here for now.
        # any and every were added as of Spark 3.0
# ret = sdf.select(F.expr("every(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
# Here we use min as its alternative:
ret = sdf.select(F.min(F.coalesce(col.cast('boolean'), F.lit(True)))).collect()[0][0]
if ret is None:
return True
else:
return ret
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Union[int, str] = 0) -> bool:
"""
Return whether any element is True.
Returns False unless there at least one element within a series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
>>> ks.Series([False, False]).any()
False
>>> ks.Series([True, False]).any()
True
>>> ks.Series([0, 0]).any()
False
>>> ks.Series([0, 1, 2]).any()
True
>>> ks.Series([False, False, None]).any()
False
>>> ks.Series([True, False, None]).any()
True
>>> ks.Series([]).any()
False
>>> ks.Series([np.nan]).any()
False
>>> df = ks.Series([True, False, None]).rename("a").to_frame()
>>> df.set_index("a").index.any()
True
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
sdf = self._internal._sdf.select(self._scol)
col = scol_for(sdf, sdf.columns[0])
# Note that we're ignoring `None`s here for now.
        # any and every were added as of Spark 3.0
# ret = sdf.select(F.expr("any(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
# Here we use max as its alternative:
ret = sdf.select(F.max(F.coalesce(col.cast('boolean'), F.lit(False)))).collect()[0][0]
if ret is None:
return False
else:
return ret
# TODO: add frep and axis parameter
def shift(self, periods=1, fill_value=None):
"""
Shift Series/Index by desired number of periods.
.. note:: the current implementation of shift uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input Series/Index, shifted.
Examples
--------
>>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.Col1.shift(periods=3)
0 NaN
1 NaN
2 NaN
3 10.0
4 20.0
Name: Col1, dtype: float64
>>> df.Col2.shift(periods=3, fill_value=0)
0 0
1 0
2 0
3 13
4 23
Name: Col2, dtype: int64
>>> df.index.shift(periods=3, fill_value=0)
Int64Index([0, 0, 0, 0, 1], dtype='int64')
"""
return self._shift(periods, fill_value)
def _shift(self, periods, fill_value, part_cols=()):
if not isinstance(periods, int):
raise ValueError('periods should be an int; however, got [%s]' % type(periods))
col = self._scol
window = Window.partitionBy(*part_cols).orderBy(self._internal.index_scols)\
.rowsBetween(-periods, -periods)
lag_col = F.lag(col, periods).over(window)
col = F.when(lag_col.isNull() | F.isnan(lag_col), fill_value).otherwise(lag_col)
return self._with_new_scol(col).rename(self.name)
| 1 | 12,847 | @HyukjinKwon @ueshin (cc @itholic @charlesdong1991 ) Not sure if this is the right implementation ... | databricks-koalas | py |
@@ -10,7 +10,13 @@ module.exports = function(url, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
- let result = parser.parse(url, true);
+ let result;
+ try {
+ result = parser.parse(url, true);
+ } catch (e) {
+ return callback(new Error('URL malformed, cannot be parsed'));
+ }
+
if (result.protocol !== 'mongodb:' && result.protocol !== 'mongodb+srv:') {
return callback(new Error('Invalid schema, expected `mongodb` or `mongodb+srv`'));
} | 1 | 'use strict';
const ReadPreference = require('mongodb-core').ReadPreference,
parser = require('url'),
f = require('util').format,
Logger = require('mongodb-core').Logger,
dns = require('dns');
module.exports = function(url, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
let result = parser.parse(url, true);
if (result.protocol !== 'mongodb:' && result.protocol !== 'mongodb+srv:') {
return callback(new Error('Invalid schema, expected `mongodb` or `mongodb+srv`'));
}
if (result.protocol === 'mongodb:') {
return parseHandler(url, options, callback);
}
// Otherwise parse this as an SRV record
if (result.hostname.split('.').length < 3) {
return callback(new Error('URI does not have hostname, domain name and tld'));
}
result.domainLength = result.hostname.split('.').length;
if (result.pathname && result.pathname.match(',')) {
return callback(new Error('Invalid URI, cannot contain multiple hostnames'));
}
if (result.port) {
return callback(new Error('Ports not accepted with `mongodb+srv` URIs'));
}
let srvAddress = `_mongodb._tcp.${result.host}`;
dns.resolveSrv(srvAddress, function(err, addresses) {
if (err) return callback(err);
if (addresses.length === 0) {
return callback(new Error('No addresses found at host'));
}
for (let i = 0; i < addresses.length; i++) {
if (!matchesParentDomain(addresses[i].name, result.hostname, result.domainLength)) {
return callback(new Error('Server record does not share hostname with parent URI'));
}
}
let base = result.auth ? `mongodb://${result.auth}@` : `mongodb://`;
let connectionStrings = addresses.map(function(address, i) {
if (i === 0) return `${base}${address.name}:${address.port}`;
else return `${address.name}:${address.port}`;
});
let connectionString = connectionStrings.join(',') + '/';
let connectionStringOptions = [];
// Default to SSL true
if (!options.ssl && !result.search) {
connectionStringOptions.push('ssl=true');
} else if (!options.ssl && result.search && !result.search.match('ssl')) {
connectionStringOptions.push('ssl=true');
}
// Keep original uri options
if (result.search) {
connectionStringOptions.push(result.search.replace('?', ''));
}
dns.resolveTxt(result.host, function(err, record) {
if (err && err.code !== 'ENODATA') return callback(err);
if (err && err.code === 'ENODATA') record = null;
if (record) {
if (record.length > 1) {
return callback(new Error('Multiple text records not allowed'));
}
record = record[0];
if (record.length > 1) record = record.join('');
else record = record[0];
if (!record.includes('authSource') && !record.includes('replicaSet')) {
return callback(new Error('Text record must only set `authSource` or `replicaSet`'));
}
connectionStringOptions.push(record);
}
// Add any options to the connection string
if (connectionStringOptions.length) {
connectionString += `?${connectionStringOptions.join('&')}`;
}
parseHandler(connectionString, options, callback);
});
});
};
function matchesParentDomain(srvAddress, parentDomain) {
let regex = /^.*?\./;
let srv = `.${srvAddress.replace(regex, '')}`;
let parent = `.${parentDomain.replace(regex, '')}`;
if (srv.endsWith(parent)) return true;
else return false;
}
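// Illustrative example (hostnames assumed): matchesParentDomain('server-1.example.com',
// 'cluster0.example.com') strips everything up to the first '.' on both sides and compares
// '.example.com' with '.example.com', returning true; SRV targets outside the parent domain
// return false and are rejected by the caller above.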
function parseHandler(address, options, callback) {
let result, err;
try {
result = parseConnectionString(address, options);
} catch (e) {
err = e;
}
return err ? callback(err, null) : callback(null, result);
}
function parseConnectionString(url, options) {
// Variables
let connection_part = '';
let auth_part = '';
let query_string_part = '';
let dbName = 'admin';
// Url parser result
let result = parser.parse(url, true);
if ((result.hostname == null || result.hostname === '') && url.indexOf('.sock') === -1) {
throw new Error('No hostname or hostnames provided in connection string');
}
if (result.port === '0') {
throw new Error('Invalid port (zero) with hostname');
}
if (!isNaN(parseInt(result.port, 10)) && parseInt(result.port, 10) > 65535) {
throw new Error('Invalid port (larger than 65535) with hostname');
}
if (
result.path &&
result.path.length > 0 &&
result.path[0] !== '/' &&
url.indexOf('.sock') === -1
) {
throw new Error('Missing delimiting slash between hosts and options');
}
if (result.query) {
for (let name in result.query) {
if (name.indexOf('::') !== -1) {
throw new Error('Double colon in host identifier');
}
if (result.query[name] === '') {
throw new Error('Query parameter ' + name + ' is an incomplete value pair');
}
}
}
if (result.auth) {
let parts = result.auth.split(':');
if (url.indexOf(result.auth) !== -1 && parts.length > 2) {
throw new Error('Username with password containing an unescaped colon');
}
if (url.indexOf(result.auth) !== -1 && result.auth.indexOf('@') !== -1) {
throw new Error('Username containing an unescaped at-sign');
}
}
// Remove query
let clean = url.split('?').shift();
// Extract the list of hosts
let strings = clean.split(',');
let hosts = [];
for (let i = 0; i < strings.length; i++) {
let hostString = strings[i];
if (hostString.indexOf('mongodb') !== -1) {
if (hostString.indexOf('@') !== -1) {
hosts.push(hostString.split('@').pop());
} else {
hosts.push(hostString.substr('mongodb://'.length));
}
} else if (hostString.indexOf('/') !== -1) {
hosts.push(hostString.split('/').shift());
} else if (hostString.indexOf('/') === -1) {
hosts.push(hostString.trim());
}
}
for (let i = 0; i < hosts.length; i++) {
let r = parser.parse(f('mongodb://%s', hosts[i].trim()));
if (r.path && r.path.indexOf(':') !== -1) {
// Not connecting to a socket so check for an extra slash in the hostname.
// Using String#split as perf is better than match.
if (r.path.split('/').length > 1 && r.path.indexOf('::') === -1) {
throw new Error('Slash in host identifier');
} else {
throw new Error('Double colon in host identifier');
}
}
}
// If we have a ? mark cut the query elements off
if (url.indexOf('?') !== -1) {
query_string_part = url.substr(url.indexOf('?') + 1);
connection_part = url.substring('mongodb://'.length, url.indexOf('?'));
} else {
connection_part = url.substring('mongodb://'.length);
}
// Check if we have auth params
if (connection_part.indexOf('@') !== -1) {
auth_part = connection_part.split('@')[0];
connection_part = connection_part.split('@')[1];
}
// Check there is not more than one unescaped slash
if (connection_part.split('/').length > 2) {
throw new Error(
"Unsupported host '" +
connection_part.split('?')[0] +
"', hosts must be URL encoded and contain at most one unencoded slash"
);
}
// Check if the connection string has a db
if (connection_part.indexOf('.sock') !== -1) {
if (connection_part.indexOf('.sock/') !== -1) {
dbName = connection_part.split('.sock/')[1];
// Check if multiple database names provided, or just an illegal trailing backslash
if (dbName.indexOf('/') !== -1) {
if (dbName.split('/').length === 2 && dbName.split('/')[1].length === 0) {
throw new Error('Illegal trailing backslash after database name');
}
throw new Error('More than 1 database name in URL');
}
connection_part = connection_part.split(
'/',
connection_part.indexOf('.sock') + '.sock'.length
);
}
} else if (connection_part.indexOf('/') !== -1) {
// Check if multiple database names provided, or just an illegal trailing backslash
if (connection_part.split('/').length > 2) {
if (connection_part.split('/')[2].length === 0) {
throw new Error('Illegal trailing backslash after database name');
}
throw new Error('More than 1 database name in URL');
}
dbName = connection_part.split('/')[1];
connection_part = connection_part.split('/')[0];
}
// URI decode the host information
connection_part = decodeURIComponent(connection_part);
// Result object
let object = {};
// Pick apart the authentication part of the string
let authPart = auth_part || '';
let auth = authPart.split(':', 2);
// Decode the authentication URI components and verify integrity
let user = decodeURIComponent(auth[0]);
if (auth[0] !== encodeURIComponent(user)) {
throw new Error('Username contains an illegal unescaped character');
}
auth[0] = user;
if (auth[1]) {
let pass = decodeURIComponent(auth[1]);
if (auth[1] !== encodeURIComponent(pass)) {
throw new Error('Password contains an illegal unescaped character');
}
auth[1] = pass;
}
// Add auth to final object if we have 2 elements
if (auth.length === 2) object.auth = { user: auth[0], password: auth[1] };
// if user provided auth options, use that
if (options && options.auth != null) object.auth = options.auth;
// Variables used for temporary storage
let hostPart;
let urlOptions;
let servers;
let compression;
let serverOptions = { socketOptions: {} };
let dbOptions = { read_preference_tags: [] };
let replSetServersOptions = { socketOptions: {} };
let mongosOptions = { socketOptions: {} };
// Add server options to final object
object.server_options = serverOptions;
object.db_options = dbOptions;
object.rs_options = replSetServersOptions;
object.mongos_options = mongosOptions;
// Let's check if we are using a domain socket
if (url.match(/\.sock/)) {
// Split out the socket part
let domainSocket = url.substring(
url.indexOf('mongodb://') + 'mongodb://'.length,
url.lastIndexOf('.sock') + '.sock'.length
);
// Clean out any auth stuff if any
if (domainSocket.indexOf('@') !== -1) domainSocket = domainSocket.split('@')[1];
domainSocket = decodeURIComponent(domainSocket);
servers = [{ domain_socket: domainSocket }];
} else {
// Split up the db
hostPart = connection_part;
// Deduplicate servers
let deduplicatedServers = {};
// Parse all server results
servers = hostPart
.split(',')
.map(function(h) {
let _host, _port, ipv6match;
// Check if it matches [IPv6]:port, where the port number is optional
if ((ipv6match = /\[([^\]]+)\](?::(.+))?/.exec(h))) {
_host = ipv6match[1];
_port = parseInt(ipv6match[2], 10) || 27017;
} else {
// Otherwise assume it's IPv4, or a plain hostname
let hostPort = h.split(':', 2);
_host = hostPort[0] || 'localhost';
_port = hostPort[1] != null ? parseInt(hostPort[1], 10) : 27017;
// Check for localhost?safe=true style case
if (_host.indexOf('?') !== -1) _host = _host.split(/\?/)[0];
}
// No entry returned for duplicate server
if (deduplicatedServers[_host + '_' + _port]) return null;
deduplicatedServers[_host + '_' + _port] = 1;
// Return the mapped object
return { host: _host, port: _port };
})
.filter(function(x) {
return x != null;
});
}
// Get the db name
object.dbName = dbName || 'admin';
// Split up all the options
urlOptions = (query_string_part || '').split(/[&;]/);
// Ugh, we have to figure out which options go to which constructor manually.
urlOptions.forEach(function(opt) {
if (!opt) return;
var splitOpt = opt.split('='),
name = splitOpt[0],
value = splitOpt[1];
// Options implementations
switch (name) {
case 'slaveOk':
case 'slave_ok':
serverOptions.slave_ok = value === 'true';
dbOptions.slaveOk = value === 'true';
break;
case 'maxPoolSize':
case 'poolSize':
serverOptions.poolSize = parseInt(value, 10);
replSetServersOptions.poolSize = parseInt(value, 10);
break;
case 'appname':
object.appname = decodeURIComponent(value);
break;
case 'autoReconnect':
case 'auto_reconnect':
serverOptions.auto_reconnect = value === 'true';
break;
case 'ssl':
if (value === 'prefer') {
serverOptions.ssl = value;
replSetServersOptions.ssl = value;
mongosOptions.ssl = value;
break;
}
serverOptions.ssl = value === 'true';
replSetServersOptions.ssl = value === 'true';
mongosOptions.ssl = value === 'true';
break;
case 'sslValidate':
serverOptions.sslValidate = value === 'true';
replSetServersOptions.sslValidate = value === 'true';
mongosOptions.sslValidate = value === 'true';
break;
case 'replicaSet':
case 'rs_name':
replSetServersOptions.rs_name = value;
break;
case 'reconnectWait':
replSetServersOptions.reconnectWait = parseInt(value, 10);
break;
case 'retries':
replSetServersOptions.retries = parseInt(value, 10);
break;
case 'readSecondary':
case 'read_secondary':
replSetServersOptions.read_secondary = value === 'true';
break;
case 'fsync':
dbOptions.fsync = value === 'true';
break;
case 'journal':
dbOptions.j = value === 'true';
break;
case 'safe':
dbOptions.safe = value === 'true';
break;
case 'nativeParser':
case 'native_parser':
dbOptions.native_parser = value === 'true';
break;
case 'readConcernLevel':
dbOptions.readConcern = { level: value };
break;
case 'connectTimeoutMS':
serverOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
replSetServersOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
mongosOptions.socketOptions.connectTimeoutMS = parseInt(value, 10);
break;
case 'socketTimeoutMS':
serverOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
replSetServersOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
mongosOptions.socketOptions.socketTimeoutMS = parseInt(value, 10);
break;
case 'w':
dbOptions.w = parseInt(value, 10);
if (isNaN(dbOptions.w)) dbOptions.w = value;
break;
case 'authSource':
dbOptions.authSource = value;
break;
case 'gssapiServiceName':
dbOptions.gssapiServiceName = value;
break;
case 'authMechanism':
if (value === 'GSSAPI') {
// If no password provided decode only the principal
if (object.auth == null) {
let urlDecodeAuthPart = decodeURIComponent(authPart);
if (urlDecodeAuthPart.indexOf('@') === -1)
throw new Error('GSSAPI requires a provided principal');
object.auth = { user: urlDecodeAuthPart, password: null };
} else {
object.auth.user = decodeURIComponent(object.auth.user);
}
} else if (value === 'MONGODB-X509') {
object.auth = { user: decodeURIComponent(authPart) };
}
// Only support DEFAULT, GSSAPI, PLAIN, MONGODB-X509, SCRAM-SHA-1 or MONGODB-CR for now
if (
value !== 'GSSAPI' &&
value !== 'MONGODB-X509' &&
value !== 'MONGODB-CR' &&
value !== 'DEFAULT' &&
value !== 'SCRAM-SHA-1' &&
value !== 'PLAIN'
)
throw new Error(
'Only DEFAULT, GSSAPI, PLAIN, MONGODB-X509, SCRAM-SHA-1 or MONGODB-CR is supported by authMechanism'
);
// Authentication mechanism
dbOptions.authMechanism = value;
break;
case 'authMechanismProperties':
{
// Split up into key, value pairs
let values = value.split(',');
let o = {};
// For each value split into key, value
values.forEach(function(x) {
let v = x.split(':');
o[v[0]] = v[1];
});
// Set all authMechanismProperties
dbOptions.authMechanismProperties = o;
// Set the service name value
if (typeof o.SERVICE_NAME === 'string') dbOptions.gssapiServiceName = o.SERVICE_NAME;
if (typeof o.SERVICE_REALM === 'string') dbOptions.gssapiServiceRealm = o.SERVICE_REALM;
if (typeof o.CANONICALIZE_HOST_NAME === 'string')
dbOptions.gssapiCanonicalizeHostName =
o.CANONICALIZE_HOST_NAME === 'true' ? true : false;
}
break;
case 'wtimeoutMS':
dbOptions.wtimeout = parseInt(value, 10);
break;
case 'readPreference':
if (!ReadPreference.isValid(value))
throw new Error(
'readPreference must be either primary/primaryPreferred/secondary/secondaryPreferred/nearest'
);
dbOptions.readPreference = value;
break;
case 'maxStalenessSeconds':
dbOptions.maxStalenessSeconds = parseInt(value, 10);
break;
case 'readPreferenceTags':
{
// Decode the value
value = decodeURIComponent(value);
// Contains the tag object
let tagObject = {};
if (value == null || value === '') {
dbOptions.read_preference_tags.push(tagObject);
break;
}
// Split up the tags
let tags = value.split(/,/);
for (let i = 0; i < tags.length; i++) {
let parts = tags[i].trim().split(/:/);
tagObject[parts[0]] = parts[1];
}
// Set the preferences tags
dbOptions.read_preference_tags.push(tagObject);
}
break;
case 'compressors':
{
compression = serverOptions.compression || {};
let compressors = value.split(',');
if (
!compressors.every(function(compressor) {
return compressor === 'snappy' || compressor === 'zlib';
})
) {
throw new Error('Compressors must be at least one of snappy or zlib');
}
compression.compressors = compressors;
serverOptions.compression = compression;
}
break;
case 'zlibCompressionLevel':
{
compression = serverOptions.compression || {};
let zlibCompressionLevel = parseInt(value, 10);
if (zlibCompressionLevel < -1 || zlibCompressionLevel > 9) {
throw new Error('zlibCompressionLevel must be an integer between -1 and 9');
}
compression.zlibCompressionLevel = zlibCompressionLevel;
serverOptions.compression = compression;
}
break;
case 'retryWrites':
dbOptions.retryWrites = value === 'true';
break;
case 'minSize':
dbOptions.minSize = parseInt(value, 10);
break;
default:
{
let logger = Logger('URL Parser');
logger.warn(`${name} is not supported as a connection string option`);
}
break;
}
});
// No tags: should be null (not [])
if (dbOptions.read_preference_tags.length === 0) {
dbOptions.read_preference_tags = null;
}
// Validate that there are no invalid write concern combinations
if (
(dbOptions.w === -1 || dbOptions.w === 0) &&
(dbOptions.journal === true || dbOptions.fsync === true || dbOptions.safe === true)
)
throw new Error('w set to -1 or 0 cannot be combined with safe/w/journal/fsync');
// If no read preference set it to primary
if (!dbOptions.readPreference) {
dbOptions.readPreference = 'primary';
}
// make sure that user-provided options are applied with priority
dbOptions = Object.assign(dbOptions, options);
// Add servers to result
object.servers = servers;
// Returned parsed object
return object;
}
| 1 | 14,166 | Do we want to add any specific error on how the url is malformed? | mongodb-node-mongodb-native | js |
@@ -190,6 +190,10 @@ void Host::appendLogsInternal(folly::EventBase* eb,
{
std::lock_guard<std::mutex> g(self->lock_);
self->setResponse(r);
+ self->lastLogIdSent_++;
+ if (self->lastLogIdSent_ < self->logIdToSend_) {
+ ++self->lastLogIdSent_;
+ }
}
self->noMoreRequestCV_.notify_all();
return; | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "kvstore/raftex/Host.h"
#include "kvstore/raftex/RaftPart.h"
#include "kvstore/wal/FileBasedWal.h"
#include "network/NetworkUtils.h"
#include <folly/io/async/EventBase.h>
#include <folly/executors/IOThreadPoolExecutor.h>
DEFINE_uint32(max_appendlog_batch_size, 128,
"The max number of logs in each appendLog request batch");
DEFINE_uint32(max_outstanding_requests, 1024,
"The max number of outstanding appendLog requests");
DEFINE_int32(raft_rpc_timeout_ms, 500, "rpc timeout for raft client");
namespace nebula {
namespace raftex {
using nebula::network::NetworkUtils;
Host::Host(const HostAddr& addr, std::shared_ptr<RaftPart> part, bool isLearner)
: part_(std::move(part))
, addr_(addr)
, isLearner_(isLearner)
, idStr_(folly::stringPrintf(
"%s[Host: %s:%d] ",
part_->idStr_.c_str(),
NetworkUtils::intToIPv4(addr_.first).c_str(),
addr_.second))
, cachingPromise_(folly::SharedPromise<cpp2::AppendLogResponse>()) {
}
void Host::waitForStop() {
std::unique_lock<std::mutex> g(lock_);
CHECK(stopped_);
noMoreRequestCV_.wait(g, [this] {
return !requestOnGoing_;
});
LOG(INFO) << idStr_ << "The host has been stopped!";
}
cpp2::ErrorCode Host::checkStatus() const {
CHECK(!lock_.try_lock());
if (stopped_) {
VLOG(2) << idStr_ << "The host is stopped, just return";
return cpp2::ErrorCode::E_HOST_STOPPED;
}
if (paused_) {
VLOG(2) << idStr_
<< "The host is paused, due to losing leadership";
return cpp2::ErrorCode::E_NOT_A_LEADER;
}
return cpp2::ErrorCode::SUCCEEDED;
}
folly::Future<cpp2::AskForVoteResponse> Host::askForVote(
const cpp2::AskForVoteRequest& req) {
{
std::lock_guard<std::mutex> g(lock_);
auto res = checkStatus();
if (res != cpp2::ErrorCode::SUCCEEDED) {
VLOG(2) << idStr_
<< "The Host is not in a proper status, do not send";
cpp2::AskForVoteResponse resp;
resp.set_error_code(res);
return resp;
}
}
auto client = tcManager().client(addr_);
return client->future_askForVote(req);
}
folly::Future<cpp2::AppendLogResponse> Host::appendLogs(
folly::EventBase* eb,
TermID term,
LogID logId,
LogID committedLogId,
TermID prevLogTerm,
LogID prevLogId) {
VLOG(3) << idStr_ << "Entering Host::appendLogs()";
VLOG(2) << idStr_
<< "Append logs to the host [term = " << term
<< ", logId = " << logId
<< ", committedLogId = " << committedLogId
<< ", lastLogTermSent = " << prevLogTerm
<< ", lastLogIdSent = " << prevLogId
<< "]";
auto ret = folly::Future<cpp2::AppendLogResponse>::makeEmpty();
std::shared_ptr<cpp2::AppendLogRequest> req;
{
std::lock_guard<std::mutex> g(lock_);
auto res = checkStatus();
if (logId <= lastLogIdSent_) {
LOG(INFO) << idStr_ << "The log " << logId << " has been sent"
<< ", lastLogIdSent " << lastLogIdSent_;
cpp2::AppendLogResponse r;
r.set_error_code(cpp2::ErrorCode::SUCCEEDED);
return r;
}
if (requestOnGoing_ && res == cpp2::ErrorCode::SUCCEEDED) {
if (cachingPromise_.size() <= FLAGS_max_outstanding_requests) {
pendingReq_ = std::make_tuple(term,
logId,
committedLogId);
return cachingPromise_.getFuture();
} else {
PLOG_EVERY_N(INFO, 200) << idStr_
<< "Too many requests are waiting, return error";
cpp2::AppendLogResponse r;
r.set_error_code(cpp2::ErrorCode::E_TOO_MANY_REQUESTS);
return r;
}
}
if (res != cpp2::ErrorCode::SUCCEEDED) {
VLOG(2) << idStr_
<< "The host is not in a proper status, just return";
cpp2::AppendLogResponse r;
r.set_error_code(res);
return r;
}
VLOG(2) << idStr_ << "About to send the AppendLog request";
// No request is ongoing, let's send a new request
if (UNLIKELY(lastLogIdSent_ == 0 && lastLogTermSent_ == 0)) {
LOG(INFO) << idStr_ << "This is the first time to send the logs to this host";
lastLogIdSent_ = prevLogId;
lastLogTermSent_ = prevLogTerm;
}
if (prevLogTerm < lastLogTermSent_ || prevLogId < lastLogIdSent_) {
LOG(INFO) << idStr_ << "We have sent this log, so go on from id " << lastLogIdSent_
<< ", term " << lastLogTermSent_ << "; current prev log id " << prevLogId
<< ", current prev log term " << prevLogTerm;
}
logTermToSend_ = term;
logIdToSend_ = logId;
committedLogId_ = committedLogId;
pendingReq_ = std::make_tuple(0, 0, 0);
promise_ = std::move(cachingPromise_);
cachingPromise_ = folly::SharedPromise<cpp2::AppendLogResponse>();
ret = promise_.getFuture();
requestOnGoing_ = true;
req = prepareAppendLogRequest();
}
// Get a new promise
appendLogsInternal(eb, std::move(req));
return ret;
}
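// Fulfill both the outstanding promise and the cached promise with the given
// response and clear the pending request state. The caller must hold lock_.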
void Host::setResponse(const cpp2::AppendLogResponse& r) {
CHECK(!lock_.try_lock());
promise_.setValue(r);
cachingPromise_.setValue(r);
cachingPromise_ = folly::SharedPromise<cpp2::AppendLogResponse>();
pendingReq_ = std::make_tuple(0, 0, 0);
requestOnGoing_ = false;
}
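// Send the prepared AppendLog request on the given EventBase and handle the
// response asynchronously, chaining follow-up requests until this host has
// caught up to logIdToSend_ or an error response ends the exchange.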
void Host::appendLogsInternal(folly::EventBase* eb,
std::shared_ptr<cpp2::AppendLogRequest> req) {
sendAppendLogRequest(eb, std::move(req)).via(eb).then(
[eb, self = shared_from_this()] (folly::Try<cpp2::AppendLogResponse>&& t) {
VLOG(3) << self->idStr_ << "appendLogs() call got response";
if (t.hasException()) {
VLOG(2) << self->idStr_ << t.exception().what();
cpp2::AppendLogResponse r;
r.set_error_code(cpp2::ErrorCode::E_EXCEPTION);
{
std::lock_guard<std::mutex> g(self->lock_);
self->setResponse(r);
}
self->noMoreRequestCV_.notify_all();
return;
}
cpp2::AppendLogResponse resp = std::move(t).value();
VLOG(3) << self->idStr_ << "AppendLogResponse "
<< "code " << static_cast<int32_t>(resp.get_error_code())
<< ", currTerm " << resp.get_current_term()
<< ", lastLogId " << resp.get_last_log_id()
<< ", lastLogTerm " << resp.get_last_log_term()
<< ", commitLogId " << resp.get_committed_log_id();
switch (resp.get_error_code()) {
case cpp2::ErrorCode::SUCCEEDED: {
VLOG(2) << self->idStr_
<< "AppendLog request sent successfully";
std::shared_ptr<cpp2::AppendLogRequest> newReq;
{
std::lock_guard<std::mutex> g(self->lock_);
auto res = self->checkStatus();
if (res != cpp2::ErrorCode::SUCCEEDED) {
VLOG(2) << self->idStr_
<< "The host is not in a proper status,"
" just return";
cpp2::AppendLogResponse r;
r.set_error_code(res);
self->setResponse(r);
} else {
self->lastLogIdSent_ = resp.get_last_log_id();
self->lastLogTermSent_ = resp.get_last_log_term();
if (self->lastLogIdSent_ < self->logIdToSend_) {
// More to send
VLOG(2) << self->idStr_
<< "There are more logs to send";
newReq = self->prepareAppendLogRequest();
} else {
VLOG(2) << self->idStr_
<< "Fulfill the promise, size = " << self->promise_.size();
// Fulfill the promise
self->promise_.setValue(resp);
if (self->noRequest()) {
VLOG(2) << self->idStr_ << "No request any more!";
self->requestOnGoing_ = false;
} else {
auto& tup = self->pendingReq_;
self->logTermToSend_ = std::get<0>(tup);
self->logIdToSend_ = std::get<1>(tup);
self->committedLogId_ = std::get<2>(tup);
VLOG(2) << self->idStr_
<< "Sending the pending request in the queue"
<< ", from " << self->lastLogIdSent_ + 1
<< " to " << self->logIdToSend_;
newReq = self->prepareAppendLogRequest();
self->promise_ = std::move(self->cachingPromise_);
self->cachingPromise_
= folly::SharedPromise<cpp2::AppendLogResponse>();
self->pendingReq_ = std::make_tuple(0, 0, 0);
}
}
}
}
if (newReq) {
self->appendLogsInternal(eb, newReq);
} else {
self->noMoreRequestCV_.notify_all();
}
return;
}
case cpp2::ErrorCode::E_LOG_GAP: {
VLOG(2) << self->idStr_
<< "The host's log is behind, need to catch up";
std::shared_ptr<cpp2::AppendLogRequest> newReq;
{
std::lock_guard<std::mutex> g(self->lock_);
auto res = self->checkStatus();
if (res != cpp2::ErrorCode::SUCCEEDED) {
VLOG(2) << self->idStr_
<< "The host is not in a proper status,"
" skip catching up the gap";
cpp2::AppendLogResponse r;
r.set_error_code(res);
self->setResponse(r);
} else {
self->lastLogIdSent_ = resp.get_last_log_id();
self->lastLogTermSent_ = resp.get_last_log_term();
newReq = self->prepareAppendLogRequest();
}
}
if (newReq) {
self->appendLogsInternal(eb, newReq);
} else {
self->noMoreRequestCV_.notify_all();
}
return;
}
case cpp2::ErrorCode::E_WAITING_SNAPSHOT: {
VLOG(2) << self->idStr_
<< "The host is waiting for the snapshot, so we need to send logs from "
<< "the current committedLogId " << self->committedLogId_;
std::shared_ptr<cpp2::AppendLogRequest> newReq;
{
std::lock_guard<std::mutex> g(self->lock_);
auto res = self->checkStatus();
if (res != cpp2::ErrorCode::SUCCEEDED) {
VLOG(2) << self->idStr_
<< "The host is not in a proper status,"
" skip waiting the snapshot";
cpp2::AppendLogResponse r;
r.set_error_code(res);
self->setResponse(r);
} else {
self->lastLogIdSent_ = self->committedLogId_;
self->lastLogTermSent_ = self->logTermToSend_;
newReq = self->prepareAppendLogRequest();
}
}
if (newReq) {
self->appendLogsInternal(eb, newReq);
} else {
self->noMoreRequestCV_.notify_all();
}
return;
}
case cpp2::ErrorCode::E_LOG_STALE: {
VLOG(2) << self->idStr_ << "Log stale, reset lastLogIdSent " << self->lastLogIdSent_
<< " to the follower's lastLogId " << resp.get_last_log_id();
{
std::lock_guard<std::mutex> g(self->lock_);
auto res = self->checkStatus();
if (res != cpp2::ErrorCode::SUCCEEDED) {
VLOG(2) << self->idStr_
<< "The host is not in a proper status,"
" skip waiting the snapshot";
cpp2::AppendLogResponse r;
r.set_error_code(res);
self->setResponse(r);
} else {
self->lastLogIdSent_ = resp.get_last_log_id();
self->lastLogTermSent_ = resp.get_last_log_term();
// For log stale, we consider the request to have succeeded
cpp2::AppendLogResponse r;
r.set_error_code(cpp2::ErrorCode::SUCCEEDED);
self->setResponse(r);
}
}
self->noMoreRequestCV_.notify_all();
return;
}
default: {
PLOG_EVERY_N(ERROR, 100)
<< self->idStr_
<< "Failed to append logs to the host (Err: "
<< static_cast<int32_t>(resp.get_error_code())
<< ")";
{
std::lock_guard<std::mutex> g(self->lock_);
self->setResponse(resp);
}
self->noMoreRequestCV_.notify_all();
return;
}
}
});
}
std::shared_ptr<cpp2::AppendLogRequest>
Host::prepareAppendLogRequest() {
CHECK(!lock_.try_lock());
auto req = std::make_shared<cpp2::AppendLogRequest>();
req->set_space(part_->spaceId());
req->set_part(part_->partitionId());
req->set_current_term(logTermToSend_);
req->set_last_log_id(logIdToSend_);
req->set_leader_ip(part_->address().first);
req->set_leader_port(part_->address().second);
req->set_committed_log_id(committedLogId_);
req->set_last_log_term_sent(lastLogTermSent_);
req->set_last_log_id_sent(lastLogIdSent_);
VLOG(2) << idStr_ << "Prepare AppendLogs request from Log "
<< lastLogIdSent_ + 1 << " to " << logIdToSend_;
auto it = part_->wal()->iterator(lastLogIdSent_ + 1, logIdToSend_);
if (it->valid()) {
VLOG(2) << idStr_ << "Prepare the list of log entries to send";
auto term = it->logTerm();
req->set_log_term(term);
std::vector<cpp2::LogEntry> logs;
for (size_t cnt = 0;
it->valid()
&& it->logTerm() == term
&& cnt < FLAGS_max_appendlog_batch_size;
++(*it), ++cnt) {
cpp2::LogEntry le;
le.set_cluster(it->logSource());
le.set_log_str(it->logMsg().toString());
logs.emplace_back(std::move(le));
}
req->set_log_str_list(std::move(logs));
req->set_sending_snapshot(false);
} else {
req->set_sending_snapshot(true);
if (!sendingSnapshot_) {
LOG(INFO) << idStr_ << "Can't find log " << lastLogIdSent_ + 1
<< " in wal, send the snapshot";
sendingSnapshot_ = true;
part_->snapshot_->sendSnapshot(part_, addr_).then([this] (Status&& status) {
if (status.ok()) {
LOG(INFO) << idStr_ << "Send snapshot succeeded!";
} else {
LOG(INFO) << idStr_ << "Send snapshot failed!";
// TODO(heng): we should tell the follower that we failed.
}
sendingSnapshot_ = false;
});
} else {
LOG(INFO) << idStr_ << "The snapshot request is already in the queue, please wait for a moment";
}
}
return req;
}
folly::Future<cpp2::AppendLogResponse> Host::sendAppendLogRequest(
folly::EventBase* eb,
std::shared_ptr<cpp2::AppendLogRequest> req) {
VLOG(2) << idStr_ << "Entering Host::sendAppendLogRequest()";
{
std::lock_guard<std::mutex> g(lock_);
auto res = checkStatus();
if (res != cpp2::ErrorCode::SUCCEEDED) {
LOG(WARNING) << idStr_
<< "The Host is not in a proper status, do not send";
cpp2::AppendLogResponse resp;
resp.set_error_code(res);
return resp;
}
}
VLOG(1) << idStr_ << "Sending request space " << req->get_space()
<< ", part " << req->get_part()
<< ", current term " << req->get_current_term()
<< ", last_log_id " << req->get_last_log_id()
<< ", committed_id " << req->get_committed_log_id()
<< ", last_log_term_sent" << req->get_last_log_term_sent()
<< ", last_log_id_sent " << req->get_last_log_id_sent();
// Get client connection
auto client = tcManager().client(addr_, eb, false, FLAGS_raft_rpc_timeout_ms);
return client->future_appendLog(*req);
}
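// Return true when no pending request is queued for this host. The caller must hold lock_.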
bool Host::noRequest() const {
CHECK(!lock_.try_lock());
static auto emptyTup = std::make_tuple(0, 0, 0);
return pendingReq_ == emptyTup;
}
} // namespace raftex
} // namespace nebula
| 1 | 23,382 | why NOT self->lastLogIdSent_ = self->logIdToSend_ | vesoft-inc-nebula | cpp |
@@ -599,6 +599,18 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi
return reconcile.Result{Requeue: true}, nil
}
}
+ // Set the Provisioned status condition for adopted clusters (and in case we upgraded to/past where that condition was introduced)
+ if err := r.updateCondition(
+ cd,
+ hivev1.ProvisionedCondition,
+ corev1.ConditionTrue,
+ hivev1.ProvisionedProvisionedReason,
+ "Cluster is provisioned",
+ cdLog,
+ ); err != nil {
+ cdLog.WithError(err).Error("Error updating Provisioned status condition")
+ return reconcile.Result{}, err
+ }
// update SyncSetFailedCondition status condition
cdLog.Debug("Check if any syncsets are failing") | 1 | package clusterdeployment
import (
"context"
"fmt"
"os"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
routev1 "github.com/openshift/api/route/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
librarygocontroller "github.com/openshift/library-go/pkg/controller"
"github.com/openshift/library-go/pkg/manifest"
"github.com/openshift/library-go/pkg/verify"
"github.com/openshift/library-go/pkg/verify/store/sigstore"
apihelpers "github.com/openshift/hive/apis/helpers"
hivev1 "github.com/openshift/hive/apis/hive/v1"
hiveintv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
"github.com/openshift/hive/pkg/constants"
hivemetrics "github.com/openshift/hive/pkg/controller/metrics"
"github.com/openshift/hive/pkg/controller/utils"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
"github.com/openshift/hive/pkg/imageset"
"github.com/openshift/hive/pkg/remoteclient"
k8slabels "github.com/openshift/hive/pkg/util/labels"
)
var (
// controllerKind contains the schema.GroupVersionKind for this controller type.
controllerKind = hivev1.SchemeGroupVersion.WithKind("ClusterDeployment")
// clusterDeploymentConditions are the cluster deployment conditions controlled or
// initialized by cluster deployment controller
clusterDeploymentConditions = []hivev1.ClusterDeploymentConditionType{
hivev1.DNSNotReadyCondition,
hivev1.InstallImagesNotResolvedCondition,
hivev1.ProvisionFailedCondition,
hivev1.SyncSetFailedCondition,
hivev1.InstallLaunchErrorCondition,
hivev1.DeprovisionLaunchErrorCondition,
hivev1.ProvisionStoppedCondition,
hivev1.AuthenticationFailureClusterDeploymentCondition,
hivev1.RequirementsMetCondition,
hivev1.ProvisionedCondition,
// ClusterInstall conditions copied over to cluster deployment
hivev1.ClusterInstallFailedClusterDeploymentCondition,
hivev1.ClusterInstallCompletedClusterDeploymentCondition,
hivev1.ClusterInstallStoppedClusterDeploymentCondition,
hivev1.ClusterInstallRequirementsMetClusterDeploymentCondition,
// ImageSet condition initialized by cluster deployment
hivev1.InstallerImageResolutionFailedCondition,
}
)
const (
ControllerName = hivev1.ClusterDeploymentControllerName
defaultRequeueTime = 10 * time.Second
maxProvisions = 3
platformAuthFailureReason = "PlatformAuthError"
platformAuthSuccessReason = "PlatformAuthSuccess"
clusterImageSetNotFoundReason = "ClusterImageSetNotFound"
clusterImageSetFoundReason = "ClusterImageSetFound"
defaultDNSNotReadyTimeout = 10 * time.Minute
dnsNotReadyReason = "DNSNotReady"
dnsNotReadyTimedoutReason = "DNSNotReadyTimedOut"
dnsUnsupportedPlatformReason = "DNSUnsupportedPlatform"
dnsZoneResourceConflictReason = "DNSZoneResourceConflict"
dnsReadyReason = "DNSReady"
dnsReadyAnnotation = "hive.openshift.io/dnsready"
installAttemptsLimitReachedReason = "InstallAttemptsLimitReached"
installOnlyOnceSetReason = "InstallOnlyOnceSet"
provisionNotStoppedReason = "ProvisionNotStopped"
deleteAfterAnnotation = "hive.openshift.io/delete-after" // contains a duration after which the cluster should be cleaned up.
tryInstallOnceAnnotation = "hive.openshift.io/try-install-once"
regionUnknown = "unknown"
)
// Add creates a new ClusterDeployment controller and adds it to the manager with default RBAC.
func Add(mgr manager.Manager) error {
logger := log.WithField("controller", ControllerName)
concurrentReconciles, clientRateLimiter, queueRateLimiter, err := controllerutils.GetControllerConfig(mgr.GetClient(), ControllerName)
if err != nil {
logger.WithError(err).Error("could not get controller configurations")
return err
}
return AddToManager(mgr, NewReconciler(mgr, logger, clientRateLimiter), concurrentReconciles, queueRateLimiter)
}
// NewReconciler returns a new reconcile.Reconciler
func NewReconciler(mgr manager.Manager, logger log.FieldLogger, rateLimiter flowcontrol.RateLimiter) reconcile.Reconciler {
r := &ReconcileClusterDeployment{
Client: controllerutils.NewClientWithMetricsOrDie(mgr, ControllerName, &rateLimiter),
scheme: mgr.GetScheme(),
logger: logger,
expectations: controllerutils.NewExpectations(logger),
watchingClusterInstall: map[string]struct{}{},
validateCredentialsForClusterDeployment: controllerutils.ValidateCredentialsForClusterDeployment,
}
r.remoteClusterAPIClientBuilder = func(cd *hivev1.ClusterDeployment) remoteclient.Builder {
return remoteclient.NewBuilder(r.Client, cd, ControllerName)
}
protectedDeleteEnvVar := os.Getenv(constants.ProtectedDeleteEnvVar)
if protectedDelete, err := strconv.ParseBool(protectedDeleteEnvVar); protectedDelete && err == nil {
logger.Info("Protected Delete enabled")
r.protectedDelete = true
}
verifier, err := LoadReleaseImageVerifier(mgr.GetConfig())
if err == nil {
logger.Info("Release Image verification enabled")
r.releaseImageVerifier = verifier
} else {
logger.WithError(err).Error("Release Image verification failed to be configured")
}
return r
}
// AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
func AddToManager(mgr manager.Manager, r reconcile.Reconciler, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error {
cdReconciler, ok := r.(*ReconcileClusterDeployment)
if !ok {
return errors.New("reconciler supplied is not a ReconcileClusterDeployment")
}
logger := log.WithField("controller", ControllerName)
c, err := controller.New("clusterdeployment-controller", mgr, controller.Options{
Reconciler: r,
MaxConcurrentReconciles: concurrentReconciles,
RateLimiter: rateLimiter,
})
if err != nil {
logger.WithError(err).Error("could not create controller")
return err
}
// Inject watcher to the clusterdeployment reconciler.
controllerutils.InjectWatcher(cdReconciler, c)
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &hivev1.ClusterDeployment{},
clusterInstallIndexFieldName, indexClusterInstall); err != nil {
logger.WithError(err).Error("Error indexing cluster deployment for cluster install")
return err
}
// Watch for changes to ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}},
controllerutils.NewRateLimitedUpdateEventHandler(&handler.EnqueueRequestForObject{}, controllerutils.IsClusterDeploymentErrorUpdateEvent))
if err != nil {
logger.WithError(err).Error("Error watching cluster deployment")
return err
}
// Watch for provisions
if err := cdReconciler.watchClusterProvisions(c); err != nil {
return err
}
// Watch for jobs created by a ClusterDeployment:
err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
logger.WithError(err).Error("Error watching cluster deployment job")
return err
}
// Watch for pods created by an install job
err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, handler.EnqueueRequestsFromMapFunc(selectorPodWatchHandler))
if err != nil {
logger.WithError(err).Error("Error watching cluster deployment pods")
return err
}
// Watch for deprovision requests created by a ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeprovision{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
logger.WithError(err).Error("Error watching deprovision request created by cluster deployment")
return err
}
// Watch for dnszones created by a ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.DNSZone{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
logger.WithError(err).Error("Error watching cluster deployment dnszones")
return err
}
// Watch for changes to ClusterSyncs
if err := c.Watch(
&source.Kind{Type: &hiveintv1alpha1.ClusterSync{}},
&handler.EnqueueRequestForOwner{OwnerType: &hivev1.ClusterDeployment{}},
); err != nil {
return errors.Wrap(err, "cannot start watch on ClusterSyncs")
}
return nil
}
var _ reconcile.Reconciler = &ReconcileClusterDeployment{}
// ReconcileClusterDeployment reconciles a ClusterDeployment object
type ReconcileClusterDeployment struct {
client.Client
scheme *runtime.Scheme
logger log.FieldLogger
// watcher allows the reconciler to add new watches
// at runtime.
watcher controllerutils.Watcher
// this is a set of cluster install contracts that are currently
// being watched. this allows the reconciler to only add Watch for
// these once.
watchingClusterInstall map[string]struct{}
// A TTLCache of clusterprovision creates each clusterdeployment expects to see
expectations controllerutils.ExpectationsInterface
// remoteClusterAPIClientBuilder is a function pointer to the function that gets a builder for building a client
// for the remote cluster's API server
remoteClusterAPIClientBuilder func(cd *hivev1.ClusterDeployment) remoteclient.Builder
// validateCredentialsForClusterDeployment is what this controller will call to validate
// that the platform creds are good (used for testing)
validateCredentialsForClusterDeployment func(client.Client, *hivev1.ClusterDeployment, log.FieldLogger) (bool, error)
// releaseImageVerifier, if provided, will be used to check an release image before it is executed.
// Any error will prevent a release image from being accessed.
releaseImageVerifier verify.Interface
protectedDelete bool
}
// Reconcile reads that state of the cluster for a ClusterDeployment object and makes changes based on the state read
// and what is in the ClusterDeployment.Spec
func (r *ReconcileClusterDeployment) Reconcile(ctx context.Context, request reconcile.Request) (result reconcile.Result, returnErr error) {
cdLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName)
cdLog.Info("reconciling cluster deployment")
recobsrv := hivemetrics.NewReconcileObserver(ControllerName, cdLog)
defer recobsrv.ObserveControllerReconcileTime()
// Fetch the ClusterDeployment instance
cd := &hivev1.ClusterDeployment{}
err := r.Get(context.TODO(), request.NamespacedName, cd)
if err != nil {
if apierrors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
cdLog.Info("cluster deployment Not Found")
r.expectations.DeleteExpectations(request.NamespacedName.String())
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
cdLog.WithError(err).Error("Error getting cluster deployment")
return reconcile.Result{}, err
}
// Ensure owner references are correctly set
err = controllerutils.ReconcileOwnerReferences(cd, generateOwnershipUniqueKeys(cd), r, r.scheme, r.logger)
if err != nil {
cdLog.WithError(err).Error("Error reconciling object ownership")
return reconcile.Result{}, err
}
// Initialize cluster deployment conditions if not set
newConditions := controllerutils.InitializeClusterDeploymentConditions(cd.Status.Conditions, clusterDeploymentConditions)
if len(newConditions) > len(cd.Status.Conditions) {
cd.Status.Conditions = newConditions
cdLog.Info("initializing cluster deployment controller conditions")
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to update cluster deployment status")
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// Remove legacy conditions if present:
legacyConditions := []string{
"ClusterImageSetNotFound",
}
newConditions = []hivev1.ClusterDeploymentCondition{}
var removedLegacyConditions bool
for _, c := range cd.Status.Conditions {
isLegacy := false
for _, lc := range legacyConditions {
if string(c.Type) == lc {
isLegacy = true
break
}
}
if !isLegacy {
newConditions = append(newConditions, c)
} else {
cdLog.Infof("removing legacy condition: %v", c)
removedLegacyConditions = true
}
}
if removedLegacyConditions {
cd.Status.Conditions = newConditions
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to update cluster deployment status")
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
return r.reconcile(request, cd, cdLog)
}
func (r *ReconcileClusterDeployment) SetWatcher(w controllerutils.Watcher) {
r.watcher = w
}
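// generateOwnershipUniqueKeys lists the dependent object types (cluster provisions,
// install-log PVCs, imageset jobs, deprovision requests, child DNS zones and
// generated secrets) whose owner references are reconciled for the given owner.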
func generateOwnershipUniqueKeys(owner hivev1.MetaRuntimeObject) []*controllerutils.OwnershipUniqueKey {
return []*controllerutils.OwnershipUniqueKey{
{
TypeToList: &hivev1.ClusterProvisionList{},
LabelSelector: map[string]string{constants.ClusterDeploymentNameLabel: owner.GetName()},
Controlled: true,
},
{
TypeToList: &corev1.PersistentVolumeClaimList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
constants.PVCTypeLabel: constants.PVCTypeInstallLogs,
},
Controlled: true,
},
{
TypeToList: &batchv1.JobList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
constants.JobTypeLabel: constants.JobTypeImageSet,
},
Controlled: true,
},
{
TypeToList: &hivev1.ClusterDeprovisionList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
},
Controlled: true,
},
{
TypeToList: &hivev1.DNSZoneList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
constants.DNSZoneTypeLabel: constants.DNSZoneTypeChild,
},
Controlled: true,
},
{
TypeToList: &corev1.SecretList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
constants.SecretTypeLabel: constants.SecretTypeMergedPullSecret,
},
Controlled: true,
},
{
TypeToList: &corev1.SecretList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
constants.SecretTypeLabel: constants.SecretTypeKubeConfig,
},
Controlled: false,
},
{
TypeToList: &corev1.SecretList{},
LabelSelector: map[string]string{
constants.ClusterDeploymentNameLabel: owner.GetName(),
constants.SecretTypeLabel: constants.SecretTypeKubeAdminCreds,
},
Controlled: false,
},
}
}
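// addAdditionalKubeconfigCAs preserves a raw copy of the admin kubeconfig in its
// secret and rewrites the primary kubeconfig key with additional CAs appended,
// updating the secret only when its data actually changes.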
func (r *ReconcileClusterDeployment) addAdditionalKubeconfigCAs(cd *hivev1.ClusterDeployment,
cdLog log.FieldLogger) error {
adminKubeconfigSecret := &corev1.Secret{}
if err := r.Get(context.Background(), types.NamespacedName{Namespace: cd.Namespace, Name: cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name}, adminKubeconfigSecret); err != nil {
cdLog.WithError(err).Error("failed to get admin kubeconfig secret")
return err
}
originalSecret := adminKubeconfigSecret.DeepCopy()
rawData, hasRawData := adminKubeconfigSecret.Data[constants.RawKubeconfigSecretKey]
if !hasRawData {
adminKubeconfigSecret.Data[constants.RawKubeconfigSecretKey] = adminKubeconfigSecret.Data[constants.KubeconfigSecretKey]
rawData = adminKubeconfigSecret.Data[constants.KubeconfigSecretKey]
}
var err error
adminKubeconfigSecret.Data[constants.KubeconfigSecretKey], err = controllerutils.AddAdditionalKubeconfigCAs(rawData)
if err != nil {
cdLog.WithError(err).Errorf("error adding additional CAs to admin kubeconfig")
return err
}
if reflect.DeepEqual(originalSecret.Data, adminKubeconfigSecret.Data) {
cdLog.Debug("secret data has not changed, no need to update")
return nil
}
cdLog.Info("admin kubeconfig has been modified, updating")
err = r.Update(context.TODO(), adminKubeconfigSecret)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error updating admin kubeconfig secret")
return err
}
return nil
}
func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (result reconcile.Result, returnErr error) {
// Set platform label on the ClusterDeployment
if platform := getClusterPlatform(cd); cd.Labels[hivev1.HiveClusterPlatformLabel] != platform {
if cd.Labels == nil {
cd.Labels = make(map[string]string)
}
if cd.Labels[hivev1.HiveClusterPlatformLabel] != "" {
cdLog.Warnf("changing the value of %s from %s to %s", hivev1.HiveClusterPlatformLabel,
cd.Labels[hivev1.HiveClusterPlatformLabel], platform)
}
cd.Labels[hivev1.HiveClusterPlatformLabel] = platform
err := r.Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to set cluster platform label")
}
return reconcile.Result{}, err
}
// Set region label on the ClusterDeployment
if region := getClusterRegion(cd); cd.Spec.Platform.BareMetal == nil && cd.Spec.Platform.AgentBareMetal == nil &&
cd.Labels[hivev1.HiveClusterRegionLabel] != region {
if cd.Labels == nil {
cd.Labels = make(map[string]string)
}
if cd.Labels[hivev1.HiveClusterRegionLabel] != "" {
cdLog.Warnf("changing the value of %s from %s to %s", hivev1.HiveClusterRegionLabel,
cd.Labels[hivev1.HiveClusterRegionLabel], region)
}
cd.Labels[hivev1.HiveClusterRegionLabel] = region
err := r.Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to set cluster region label")
}
return reconcile.Result{}, err
}
if cd.Spec.ManageDNS {
changed, err := r.ensureDNSZonePreserveOnDelete(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
if changed {
return reconcile.Result{}, nil
}
}
if cd.DeletionTimestamp != nil {
if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
// Make sure we have no deprovision underway metric even though this was probably cleared when we
// removed the finalizer.
clearDeprovisionUnderwaySecondsMetric(cd, cdLog)
return reconcile.Result{}, nil
}
// Deprovision still underway, report metric for this cluster.
hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(
time.Since(cd.DeletionTimestamp.Time).Seconds())
return r.syncDeletedClusterDeployment(cd, cdLog)
}
// Check for the delete-after annotation, and if the cluster has expired, delete it
deleteAfter, ok := cd.Annotations[deleteAfterAnnotation]
if ok {
cdLog.Debugf("found delete after annotation: %s", deleteAfter)
dur, err := time.ParseDuration(deleteAfter)
if err != nil {
cdLog.WithError(err).WithField("deleteAfter", deleteAfter).Infof("error parsing %s as a duration", deleteAfterAnnotation)
return reconcile.Result{}, fmt.Errorf("error parsing %s as a duration: %v", deleteAfterAnnotation, err)
}
if !cd.CreationTimestamp.IsZero() {
expiry := cd.CreationTimestamp.Add(dur)
cdLog.Debugf("cluster expires at: %s", expiry)
if time.Now().After(expiry) {
cdLog.WithField("expiry", expiry).Info("cluster has expired, issuing delete")
err := utils.SafeDelete(r, context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error deleting expired cluster")
}
return reconcile.Result{}, err
}
defer func() {
// We have an expiry time but we're not expired yet. Set requeueAfter to the expiry time
// so that we requeue cluster for deletion once reconcile has completed
result, returnErr = controllerutils.EnsureRequeueAtLeastWithin(
time.Until(cd.CreationTimestamp.Add(dur)),
result,
returnErr,
)
}()
}
}
if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
cdLog.Debugf("adding clusterdeployment finalizer")
if err := r.addClusterDeploymentFinalizer(cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding finalizer")
return reconcile.Result{}, err
}
metricClustersCreated.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
return reconcile.Result{}, nil
}
if cd.Spec.ManageDNS {
dnsZone, err := r.ensureManagedDNSZone(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
if dnsZone == nil {
// dnsNotReady condition was set.
if isSet, _ := isDNSNotReadyConditionSet(cd); isSet {
// dnsNotReadyReason is why the dnsNotReady condition was set, therefore requeue so that we check to see if it times out.
// add defaultRequeueTime to avoid the race condition where the controller is reconciled at the exact time of the timeout (unlikely, but possible).
return reconcile.Result{RequeueAfter: defaultDNSNotReadyTimeout + defaultRequeueTime}, nil
}
return reconcile.Result{}, nil
}
updated, err := r.setDNSDelayMetric(cd, dnsZone, cdLog)
if updated || err != nil {
return reconcile.Result{}, err
}
}
if cd.Spec.Installed {
// set installedTimestamp for adopted clusters
if cd.Status.InstalledTimestamp == nil {
cd.Status.InstalledTimestamp = &cd.ObjectMeta.CreationTimestamp
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not set cluster installed timestamp")
return reconcile.Result{Requeue: true}, nil
}
}
// update SyncSetFailedCondition status condition
cdLog.Debug("Check if any syncsets are failing")
if err := r.setSyncSetFailedCondition(cd, cdLog); err != nil {
cdLog.WithError(err).Error("Error updating SyncSetFailedCondition status condition")
return reconcile.Result{}, err
}
switch {
case cd.Spec.Provisioning != nil:
if r, err := r.reconcileInstalledClusterProvision(cd, cdLog); err != nil {
return r, err
}
}
if cd.Spec.ClusterMetadata != nil &&
cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name != "" {
if err := r.addAdditionalKubeconfigCAs(cd, cdLog); err != nil {
return reconcile.Result{}, err
}
// Add cluster deployment as additional owner reference to admin secrets
if err := r.addOwnershipToSecret(cd, cdLog, cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name); err != nil {
return reconcile.Result{}, err
}
if cd.Spec.ClusterMetadata.AdminPasswordSecretRef != nil && cd.Spec.ClusterMetadata.AdminPasswordSecretRef.Name != "" {
if err := r.addOwnershipToSecret(cd, cdLog, cd.Spec.ClusterMetadata.AdminPasswordSecretRef.Name); err != nil {
return reconcile.Result{}, err
}
}
if cd.Status.WebConsoleURL == "" || cd.Status.APIURL == "" {
return r.setClusterStatusURLs(cd, cdLog)
}
}
return reconcile.Result{}, nil
}
// If the ClusterDeployment is being relocated to another Hive instance, stop any current provisioning and do not
// do any more reconciling.
switch _, relocateStatus, err := controllerutils.IsRelocating(cd); {
case err != nil:
return reconcile.Result{}, errors.Wrap(err, "could not determine relocate status")
case relocateStatus == hivev1.RelocateOutgoing:
result, err := r.stopProvisioning(cd, cdLog)
if result == nil {
result = &reconcile.Result{}
}
return *result, err
}
// Sanity check the platform/cloud credentials.
validCreds, err := r.validatePlatformCreds(cd, cdLog)
if err != nil {
cdLog.WithError(err).Error("unable to validate platform credentials")
return reconcile.Result{}, err
}
// Make sure the condition is set properly.
_, err = r.setAuthenticationFailure(cd, validCreds, cdLog)
if err != nil {
cdLog.WithError(err).Error("unable to update clusterdeployment")
return reconcile.Result{}, err
}
// If the platform credentials are no good, return error and go into backoff
authCondition := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.AuthenticationFailureClusterDeploymentCondition)
if authCondition.Status == corev1.ConditionTrue {
authError := errors.New(authCondition.Message)
cdLog.WithError(authError).Error("cannot proceed with provision while platform credentials authentication is failing.")
return reconcile.Result{}, authError
}
imageSet, err := r.getClusterImageSet(cd, cdLog)
if err != nil {
cdLog.WithError(err).Error("failed to get cluster image set for the clusterdeployment")
return reconcile.Result{}, err
}
releaseImage := r.getReleaseImage(cd, imageSet, cdLog)
cdLog.Debug("loading pull secrets")
pullSecret, err := r.mergePullSecrets(cd, cdLog)
if err != nil {
cdLog.WithError(err).Error("Error merging pull secrets")
return reconcile.Result{}, err
}
// Update the pull secret object if required
switch updated, err := r.updatePullSecretInfo(pullSecret, cd, cdLog); {
case err != nil:
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "Error updating the merged pull secret")
return reconcile.Result{}, err
case updated:
// The controller will not automatically requeue the cluster deployment
// since the controller is not watching for secrets. So, requeue manually.
return reconcile.Result{Requeue: true}, nil
}
// let's verify the release image before using it here.
if r.releaseImageVerifier != nil {
var releaseDigest string
if index := strings.LastIndex(releaseImage, "@"); index != -1 {
releaseDigest = releaseImage[index+1:]
}
cdLog.WithField("releaseImage", releaseImage).
WithField("releaseDigest", releaseDigest).Debugf("verifying the release image using %s", r.releaseImageVerifier)
err := r.releaseImageVerifier.Verify(context.TODO(), releaseDigest)
if err != nil {
cdLog.WithField("releaseImage", releaseImage).
WithField("releaseDigest", releaseDigest).
WithError(err).Error("Verification of release image failed")
return reconcile.Result{}, r.updateCondition(cd, hivev1.InstallImagesNotResolvedCondition, corev1.ConditionTrue, "ReleaseImageVerificationFailed", err.Error(), cdLog)
}
}
switch result, err := r.resolveInstallerImage(cd, releaseImage, cdLog); {
case err != nil:
return reconcile.Result{}, err
case result != nil:
return *result, nil
}
if !r.expectations.SatisfiedExpectations(request.String()) {
cdLog.Debug("waiting for expectations to be satisfied")
return reconcile.Result{}, nil
}
switch {
case cd.Spec.Provisioning != nil:
// Ensure the install config matches the ClusterDeployment:
// TODO: Move with the openshift-installer ClusterInstall code when we implement https://issues.redhat.com/browse/HIVE-1522
if cd.Spec.Provisioning.InstallConfigSecretRef == nil {
cdLog.Info("install config not specified")
conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.RequirementsMetCondition,
corev1.ConditionFalse,
"InstallConfigRefNotSet",
"Install config reference is not set",
controllerutils.UpdateConditionIfReasonOrMessageChange)
if changed {
cd.Status.Conditions = conditions
return reconcile.Result{}, r.Status().Update(context.TODO(), cd)
}
return reconcile.Result{}, nil
}
icSecret := &corev1.Secret{}
err = r.Get(context.Background(),
types.NamespacedName{
Namespace: cd.Namespace,
Name: cd.Spec.Provisioning.InstallConfigSecretRef.Name,
},
icSecret)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "Error loading install config secret")
return reconcile.Result{}, err
}
err = ValidateInstallConfig(cd, icSecret.Data["install-config.yaml"])
if err != nil {
cdLog.WithError(err).Info("install config validation failed")
conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.RequirementsMetCondition,
corev1.ConditionFalse,
"InstallConfigValidationFailed",
err.Error(),
controllerutils.UpdateConditionIfReasonOrMessageChange)
if changed {
cd.Status.Conditions = conditions
return reconcile.Result{}, r.Status().Update(context.TODO(), cd)
}
return reconcile.Result{}, nil
}
// If we made it this far, RequirementsMet condition should be True:
//
// TODO: when https://github.com/openshift/hive/pull/1413 is implemented
// we'll want to remove this assumption as ClusterInstall implementations may
// later indicate their requirements are not met. Instead, we should explicitly clear
// the condition back to Unknown if we see our Reason set, but the problem is no longer
// present.
conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.RequirementsMetCondition,
corev1.ConditionTrue,
"AllRequirementsMet",
"All pre-provision requirements met",
controllerutils.UpdateConditionIfReasonOrMessageChange)
if changed {
cd.Status.Conditions = conditions
err = r.Status().Update(context.TODO(), cd)
if err != nil {
return reconcile.Result{}, err
}
}
return r.reconcileInstallingClusterProvision(cd, releaseImage, cdLog)
case cd.Spec.ClusterInstallRef != nil:
return r.reconcileInstallingClusterInstall(cd, cdLog)
default:
return reconcile.Result{}, errors.New("invalid provisioning configuration")
}
}
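// reconcileInstallingClusterProvision starts a new ClusterProvision if none is
// referenced yet, otherwise continues reconciling the existing provision.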
func (r *ReconcileClusterDeployment) reconcileInstallingClusterProvision(cd *hivev1.ClusterDeployment, releaseImage string, logger log.FieldLogger) (reconcile.Result, error) {
if cd.Status.ProvisionRef == nil {
return r.startNewProvision(cd, releaseImage, logger)
}
return r.reconcileExistingProvision(cd, logger)
}
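// reconcileInstalledClusterProvision cleans up after a successful install by
// deleting old failed provisions and releasing the install-log PVC.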
func (r *ReconcileClusterDeployment) reconcileInstalledClusterProvision(cd *hivev1.ClusterDeployment, logger log.FieldLogger) (reconcile.Result, error) {
// delete failed provisions which are more than 7 days old
existingProvisions, err := r.existingProvisions(cd, logger)
if err != nil {
return reconcile.Result{}, err
}
r.deleteOldFailedProvisions(existingProvisions, logger)
logger.Debug("cluster is already installed, no processing of provision needed")
r.cleanupInstallLogPVC(cd, logger)
return reconcile.Result{}, nil
}
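// reconcileInstallingClusterInstall ensures a watch is established for the
// referenced ClusterInstall kind and then reconciles against it.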
func (r *ReconcileClusterDeployment) reconcileInstallingClusterInstall(cd *hivev1.ClusterDeployment, logger log.FieldLogger) (reconcile.Result, error) {
ref := cd.Spec.ClusterInstallRef
gvk := schema.GroupVersionKind{
Group: ref.Group,
Version: ref.Version,
Kind: ref.Kind,
}
if err := r.watchClusterInstall(gvk, logger); err != nil {
logger.WithField("gvk", gvk.String()).WithError(err).Error("failed to watch for cluster install contract")
return reconcile.Result{}, err
}
return r.reconcileExistingInstallingClusterInstall(cd, logger)
}
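// isDNSNotReadyConditionSet reports whether the DNSNotReady condition is True for
// a reason set by this controller, and returns the condition for inspection.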
func isDNSNotReadyConditionSet(cd *hivev1.ClusterDeployment) (bool, *hivev1.ClusterDeploymentCondition) {
dnsNotReadyCondition := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.DNSNotReadyCondition)
return dnsNotReadyCondition.Status == corev1.ConditionTrue &&
(dnsNotReadyCondition.Reason == dnsNotReadyReason || dnsNotReadyCondition.Reason == dnsNotReadyTimedoutReason),
dnsNotReadyCondition
}
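// addEnvVarIfFound appends the named environment variable to envVars when it is
// set in the controller's environment; otherwise envVars is returned unchanged.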
func addEnvVarIfFound(name string, envVars []corev1.EnvVar) []corev1.EnvVar {
value, found := os.LookupEnv(name)
if !found {
return envVars
}
tmp := corev1.EnvVar{
Name: name,
Value: value,
}
return append(envVars, tmp)
}
// getReleaseImage looks for a release image in the clusterdeployment or its corresponding imageset in the following order:
// 1 - specified in the cluster deployment spec.images.releaseImage
// 2 - referenced in the cluster deployment spec.imageSet
func (r *ReconcileClusterDeployment) getReleaseImage(cd *hivev1.ClusterDeployment, imageSet *hivev1.ClusterImageSet, cdLog log.FieldLogger) string {
if cd.Spec.Provisioning != nil && cd.Spec.Provisioning.ReleaseImage != "" {
return cd.Spec.Provisioning.ReleaseImage
}
if imageSet != nil {
return imageSet.Spec.ReleaseImage
}
return ""
}
func (r *ReconcileClusterDeployment) getClusterImageSet(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (*hivev1.ClusterImageSet, error) {
imageSetKey := types.NamespacedName{}
switch {
case cd.Spec.Provisioning != nil:
imageSetKey.Name = getClusterImageSetFromProvisioning(cd)
if imageSetKey.Name == "" {
return nil, nil
}
case cd.Spec.ClusterInstallRef != nil:
isName, err := getClusterImageSetFromClusterInstall(r.Client, cd)
if err != nil {
return nil, err
}
imageSetKey.Name = isName
default:
cdLog.Warning("clusterdeployment references no clusterimageset")
if err := r.setReqsMetConditionImageSetNotFound(cd, "unknown", true, cdLog); err != nil {
return nil, err
}
}
imageSet := &hivev1.ClusterImageSet{}
err := r.Get(context.TODO(), imageSetKey, imageSet)
if apierrors.IsNotFound(err) {
cdLog.WithField("clusterimageset", imageSetKey.Name).
Warning("clusterdeployment references non-existent clusterimageset")
if err := r.setReqsMetConditionImageSetNotFound(cd, imageSetKey.Name, true, cdLog); err != nil {
return nil, err
}
return nil, err
}
if err != nil {
cdLog.WithError(err).WithField("clusterimageset", imageSetKey.Name).
Error("unexpected error retrieving clusterimageset")
return nil, err
}
if err := r.setReqsMetConditionImageSetNotFound(cd, imageSetKey.Name, false, cdLog); err != nil {
return nil, err
}
return imageSet, nil
}
func (r *ReconcileClusterDeployment) statusUpdate(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot update clusterdeployment status")
}
return err
}
const (
imagesResolvedReason = "ImagesResolved"
imagesResolvedMsg = "Images required for cluster deployment installations are resolved"
)
func (r *ReconcileClusterDeployment) resolveInstallerImage(cd *hivev1.ClusterDeployment, releaseImage string, cdLog log.FieldLogger) (*reconcile.Result, error) {
areImagesResolved := cd.Status.InstallerImage != nil && cd.Status.CLIImage != nil
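// Images count as resolved once both the installer image and the CLI image have been recorded on the ClusterDeployment status.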
jobKey := client.ObjectKey{Namespace: cd.Namespace, Name: imageset.GetImageSetJobName(cd.Name)}
jobLog := cdLog.WithField("job", jobKey.Name)
existingJob := &batchv1.Job{}
switch err := r.Get(context.Background(), jobKey, existingJob); {
// The job does not exist. If the images have been resolved, continue reconciling. Otherwise, create the job.
case apierrors.IsNotFound(err):
if areImagesResolved {
return nil, r.updateCondition(cd, hivev1.InstallImagesNotResolvedCondition, corev1.ConditionFalse, imagesResolvedReason, imagesResolvedMsg, cdLog)
}
job := imageset.GenerateImageSetJob(cd, releaseImage, controllerutils.InstallServiceAccountName,
os.Getenv("HTTP_PROXY"),
os.Getenv("HTTPS_PROXY"),
os.Getenv("NO_PROXY"))
cdLog.WithField("derivedObject", job.Name).Debug("Setting labels on derived object")
job.Labels = k8slabels.AddLabel(job.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
job.Labels = k8slabels.AddLabel(job.Labels, constants.JobTypeLabel, constants.JobTypeImageSet)
if err := controllerutil.SetControllerReference(cd, job, r.scheme); err != nil {
cdLog.WithError(err).Error("error setting controller reference on job")
return nil, err
}
jobLog.WithField("releaseImage", releaseImage).Info("creating imageset job")
err = controllerutils.SetupClusterInstallServiceAccount(r, cd.Namespace, cdLog)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error setting up service account and role")
return nil, err
}
if err := r.Create(context.TODO(), job); err != nil {
jobLog.WithError(err).Log(controllerutils.LogLevel(err), "error creating job")
return nil, err
}
// kickstartDuration calculates the delay between creation of cd and start of imageset job
kickstartDuration := time.Since(cd.CreationTimestamp.Time)
cdLog.WithField("elapsed", kickstartDuration.Seconds()).Info("calculated time to imageset job seconds")
metricImageSetDelaySeconds.Observe(float64(kickstartDuration.Seconds()))
return &reconcile.Result{}, nil
// There was an error getting the job. Return the error.
case err != nil:
jobLog.WithError(err).Error("cannot get job")
return nil, err
// The job exists and is in the process of getting deleted. If the images were resolved, then continue reconciling.
// If the images were not resolved, requeue and wait for the delete to complete.
case !existingJob.DeletionTimestamp.IsZero():
if areImagesResolved {
return nil, r.updateCondition(cd, hivev1.InstallImagesNotResolvedCondition, corev1.ConditionFalse, imagesResolvedReason, imagesResolvedMsg, cdLog)
}
jobLog.Debug("imageset job is being deleted. Will recreate once deleted")
return &reconcile.Result{RequeueAfter: defaultRequeueTime}, err
// If job exists and is finished, delete it. If the images were not resolved, then the job will be re-created.
case controllerutils.IsFinished(existingJob):
jobLog.WithField("successful", controllerutils.IsSuccessful(existingJob)).
Warning("Finished job found. Deleting.")
if err := r.Delete(
context.Background(),
existingJob,
client.PropagationPolicy(metav1.DeletePropagationForeground),
); err != nil {
jobLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot delete imageset job")
return nil, err
}
if areImagesResolved {
return nil, r.updateCondition(cd, hivev1.InstallImagesNotResolvedCondition, corev1.ConditionFalse, imagesResolvedReason, imagesResolvedMsg, cdLog)
}
// the job has failed to resolve the images and therefore
// we need to update the InstallImagesNotResolvedCondition to reflect why it failed.
for _, jcond := range existingJob.Status.Conditions {
if jcond.Type != batchv1.JobFailed {
continue
}
msg := fmt.Sprintf("The job %s/%s to resolve the image failed because of (%s) %s",
existingJob.Namespace, existingJob.Name,
jcond.Reason, jcond.Message,
)
return &reconcile.Result{}, r.updateCondition(cd, hivev1.InstallImagesNotResolvedCondition, corev1.ConditionTrue, "JobToResolveImagesFailed", msg, cdLog)
}
return &reconcile.Result{}, nil
// The job exists and is in progress. Wait for the job to finish before doing any more reconciliation.
default:
jobLog.Debug("job exists and is in progress")
return &reconcile.Result{}, nil
}
}
func (r *ReconcileClusterDeployment) updateCondition(
cd *hivev1.ClusterDeployment,
ctype hivev1.ClusterDeploymentConditionType,
status corev1.ConditionStatus,
reason string,
message string,
cdLog log.FieldLogger) error {
conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
ctype,
status,
reason,
message,
controllerutils.UpdateConditionIfReasonOrMessageChange)
if !changed {
return nil
}
cd.Status.Conditions = conditions
cdLog.Debugf("setting %s Condition to %v", ctype, status)
return r.Status().Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) setAuthenticationFailure(cd *hivev1.ClusterDeployment, authSuccessful bool, cdLog log.FieldLogger) (bool, error) {
var status corev1.ConditionStatus
var reason, message string
if authSuccessful {
status = corev1.ConditionFalse
reason = platformAuthSuccessReason
message = "Platform credentials passed authentication check"
} else {
status = corev1.ConditionTrue
reason = platformAuthFailureReason
message = "Platform credentials failed authentication check"
}
conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.AuthenticationFailureClusterDeploymentCondition,
status,
reason,
message,
controllerutils.UpdateConditionIfReasonOrMessageChange)
if !changed {
return false, nil
}
cd.Status.Conditions = conditions
return changed, r.Status().Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) setReqsMetConditionImageSetNotFound(cd *hivev1.ClusterDeployment, name string, isNotFound bool, cdLog log.FieldLogger) error {
var changed bool
var conds []hivev1.ClusterDeploymentCondition
if isNotFound {
conds, changed = controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.RequirementsMetCondition,
corev1.ConditionFalse,
clusterImageSetNotFoundReason,
fmt.Sprintf("ClusterImageSet %s is not available", name),
controllerutils.UpdateConditionIfReasonOrMessageChange)
} else {
// Set the RequirementsMet condition status back to Unknown if it is currently False and its reason matches
reqsCond := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions,
hivev1.RequirementsMetCondition)
if reqsCond.Status == corev1.ConditionFalse && reqsCond.Reason == clusterImageSetNotFoundReason {
conds, changed = controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.RequirementsMetCondition,
corev1.ConditionUnknown,
clusterImageSetFoundReason,
fmt.Sprintf("ClusterImageSet %s is available", name),
controllerutils.UpdateConditionIfReasonOrMessageChange)
}
}
if !changed {
return nil
}
cd.Status.Conditions = conds
reqsCond := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions,
hivev1.RequirementsMetCondition)
cdLog.Infof("updating RequirementsMetCondition: status=%s reason=%s", reqsCond.Status, reqsCond.Reason)
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot update status conditions")
}
return err
}
// setClusterStatusURLs fetches the openshift console route from the remote cluster and uses it to determine
// the correct APIURL and WebConsoleURL, and then set them in the Status. Typically only called if these Status fields
// are unset.
func (r *ReconcileClusterDeployment) setClusterStatusURLs(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (reconcile.Result, error) {
server, err := remoteclient.InitialURL(r.Client, cd)
if err != nil {
cdLog.WithError(err).Error("could not get API URL from kubeconfig")
return reconcile.Result{}, err
}
cdLog.Debugf("found cluster API URL in kubeconfig: %s", server)
cd.Status.APIURL = server
remoteClient, unreachable, requeue := remoteclient.ConnectToRemoteCluster(
cd,
r.remoteClusterAPIClientBuilder(cd),
r.Client,
cdLog,
)
if unreachable {
return reconcile.Result{Requeue: requeue}, nil
}
routeObject := &routev1.Route{}
if err := remoteClient.Get(
context.Background(),
client.ObjectKey{Namespace: "openshift-console", Name: "console"},
routeObject,
); err != nil {
cdLog.WithError(err).Info("error fetching remote route object")
return reconcile.Result{Requeue: true}, nil
}
cdLog.Debugf("read remote route object: %s", routeObject)
cd.Status.WebConsoleURL = "https://" + routeObject.Spec.Host
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not set cluster status URLs")
return reconcile.Result{Requeue: true}, nil
}
return reconcile.Result{}, nil
}
// ensureManagedDNSZoneDeleted is a safety check to ensure that the child managed DNSZone
// linked to the parent cluster deployment gets a deletionTimestamp when the parent is deleted.
// Normally we expect Kube garbage collection to do this for us, but in rare cases we've seen it
// not working as intended.
func (r *ReconcileClusterDeployment) ensureManagedDNSZoneDeleted(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (gone bool, returnErr error) {
if !cd.Spec.ManageDNS {
return true, nil
}
dnsZone := &hivev1.DNSZone{}
dnsZoneNamespacedName := types.NamespacedName{Namespace: cd.Namespace, Name: controllerutils.DNSZoneName(cd.Name)}
switch err := r.Get(context.TODO(), dnsZoneNamespacedName, dnsZone); {
case apierrors.IsNotFound(err):
cdLog.Debug("dnszone has been removed from storage")
return true, nil
case err != nil:
cdLog.WithError(err).Error("error looking up managed dnszone")
return false, err
case !dnsZone.DeletionTimestamp.IsZero():
cdLog.Debug("dnszone has been deleted but is still in storage")
return false, nil
}
if err := r.Delete(context.TODO(), dnsZone); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error deleting managed dnszone")
return false, err
}
return false, nil
}
func (r *ReconcileClusterDeployment) ensureClusterDeprovisioned(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (deprovisioned bool, returnErr error) {
// Skips/terminates deprovision if PreserveOnDelete is true. If there is an ongoing deprovision we abandon it.
if cd.Spec.PreserveOnDelete {
cdLog.Warn("skipping/terminating deprovision for cluster due to PreserveOnDelete=true, removing finalizer")
return true, nil
}
if cd.Spec.ClusterMetadata == nil {
cdLog.Warn("skipping uninstall for cluster that never had clusterID set")
return true, nil
}
// We do not yet support deprovision for BareMetal, for now skip deprovision and remove finalizer.
if cd.Spec.Platform.BareMetal != nil {
cdLog.Info("skipping deprovision for BareMetal cluster, removing finalizer")
return true, nil
}
if cd.Spec.ClusterInstallRef != nil {
cdLog.Info("skipping deprovision as it should be done by deleting the obj in cluster install reference")
return true, nil
}
// Generate a deprovision request
request, err := generateDeprovision(cd)
if err != nil {
cdLog.WithError(err).Error("error generating deprovision request")
return false, err
}
cdLog.WithField("derivedObject", request.Name).Debug("Setting label on derived object")
request.Labels = k8slabels.AddLabel(request.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
err = controllerutil.SetControllerReference(cd, request, r.scheme)
if err != nil {
cdLog.Errorf("error setting controller reference on deprovision request: %v", err)
return false, err
}
// Check if deprovision request already exists:
existingRequest := &hivev1.ClusterDeprovision{}
switch err = r.Get(context.TODO(), types.NamespacedName{Name: cd.Name, Namespace: cd.Namespace}, existingRequest); {
case apierrors.IsNotFound(err):
cdLog.Info("creating deprovision request for cluster deployment")
switch err = r.Create(context.TODO(), request); {
case apierrors.IsAlreadyExists(err):
cdLog.Info("deprovision request already exists")
return false, nil
case err != nil:
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error creating deprovision request")
// Check if the namespace is terminating; if so, we can give up, remove the finalizer, and let
// the cluster go away.
ns := &corev1.Namespace{}
err = r.Get(context.TODO(), types.NamespacedName{Name: cd.Namespace}, ns)
if err != nil {
cdLog.WithError(err).Error("error checking for deletionTimestamp on namespace")
return false, err
}
if ns.DeletionTimestamp != nil {
cdLog.Warn("detected a namespace deleted before deprovision request could be created, giving up on deprovision and removing finalizer")
return true, err
}
return false, err
default:
// Successfully created the ClusterDeprovision. Update the Provisioned CD status condition accordingly.
return false, r.updateCondition(cd,
hivev1.ProvisionedCondition,
corev1.ConditionFalse,
hivev1.DeprovisioningProvisionedReason,
"Cluster is being deprovisioned",
cdLog)
}
case err != nil:
cdLog.WithError(err).Error("error getting deprovision request")
return false, err
}
authenticationFailureCondition := controllerutils.FindClusterDeprovisionCondition(existingRequest.Status.Conditions, hivev1.AuthenticationFailureClusterDeprovisionCondition)
if authenticationFailureCondition != nil {
var conds []hivev1.ClusterDeploymentCondition
var changed1, changed2, authFailure bool
conds, changed1 = controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.DeprovisionLaunchErrorCondition,
authenticationFailureCondition.Status,
authenticationFailureCondition.Reason,
authenticationFailureCondition.Message,
controllerutils.UpdateConditionIfReasonOrMessageChange)
if authenticationFailureCondition.Status == corev1.ConditionTrue {
authFailure = true
conds, changed2 = controllerutils.SetClusterDeploymentConditionWithChangeCheck(
conds,
hivev1.ProvisionedCondition,
corev1.ConditionFalse,
hivev1.DeprovisionFailedProvisionedReason,
"Cluster deprovision failed",
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
}
if changed1 || changed2 {
cd.Status.Conditions = conds
return false, r.Status().Update(context.TODO(), cd)
}
if authFailure {
// We get here if we had already set Provisioned to DeprovisionFailed
return false, nil
}
}
if !existingRequest.Status.Completed {
cdLog.Debug("deprovision request not yet completed")
return false, r.updateCondition(
cd,
hivev1.ProvisionedCondition,
corev1.ConditionFalse,
hivev1.DeprovisioningProvisionedReason,
"Cluster is deprovisioning",
cdLog,
)
}
// Deprovision succeeded
return true, r.updateCondition(
cd,
hivev1.ProvisionedCondition,
corev1.ConditionFalse,
hivev1.DeprovisionedProvisionedReason,
"Cluster is deprovisioned",
cdLog,
)
}
func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (reconcile.Result, error) {
switch _, relocateStatus, err := controllerutils.IsRelocating(cd); {
case err != nil:
cdLog.WithError(err).Error("could not determine relocate status")
return reconcile.Result{}, errors.Wrap(err, "could not determine relocate status")
case relocateStatus == hivev1.RelocateComplete:
cdLog.Info("clusterdeployment relocated, removing finalizer")
err := r.removeClusterDeploymentFinalizer(cd, cdLog)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
}
return reconcile.Result{}, err
case relocateStatus != "":
cdLog.Debug("ClusterDeployment is in the middle of a relocate. Wait until relocate has been completed or aborted before doing finalization.")
return reconcile.Result{}, nil
}
if controllerutils.IsDeleteProtected(cd) {
cdLog.Error("deprovision blocked for ClusterDeployment with protected delete on")
return reconcile.Result{}, nil
}
dnsZoneGone, err := r.ensureManagedDNSZoneDeleted(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
// Wait for outstanding provision to be removed before creating deprovision request
switch result, err := r.stopProvisioning(cd, cdLog); {
case result != nil:
return *result, err
case err != nil:
return reconcile.Result{}, err
}
deprovisioned, err := r.ensureClusterDeprovisioned(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
switch {
case !deprovisioned:
return reconcile.Result{}, nil
case !dnsZoneGone:
return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil
default:
cdLog.Infof("DNSZone gone and deprovision request completed, removing finalizer")
if err := r.removeClusterDeploymentFinalizer(cd, cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
}
func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment) error {
cd = cd.DeepCopy()
controllerutils.AddFinalizer(cd, hivev1.FinalizerDeprovision)
return r.Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
cd = cd.DeepCopy()
controllerutils.DeleteFinalizer(cd, hivev1.FinalizerDeprovision)
if err := r.Update(context.TODO(), cd); err != nil {
return err
}
clearDeprovisionUnderwaySecondsMetric(cd, cdLog)
// Increment the clusters deleted counter:
metricClustersDeleted.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
return nil
}
// setDNSDelayMetric will calculate the amount of time elapsed from clusterdeployment creation
// to when the dnszone became ready, and set a metric to report the delay.
// Will return a bool indicating whether the clusterdeployment has been modified, and whether any error was encountered.
func (r *ReconcileClusterDeployment) setDNSDelayMetric(cd *hivev1.ClusterDeployment, dnsZone *hivev1.DNSZone, cdLog log.FieldLogger) (bool, error) {
modified := false
initializeAnnotations(cd)
if _, ok := cd.Annotations[dnsReadyAnnotation]; ok {
// already have recorded the dnsdelay metric
return modified, nil
}
readyTimestamp := dnsReadyTransitionTime(dnsZone)
if readyTimestamp == nil {
msg := "did not find timestamp for when dnszone became ready"
cdLog.WithField("dnszone", dnsZone.Name).Error(msg)
return modified, errors.New(msg)
}
dnsDelayDuration := readyTimestamp.Sub(cd.CreationTimestamp.Time)
cdLog.WithField("duration", dnsDelayDuration.Seconds()).Info("DNS ready")
cd.Annotations[dnsReadyAnnotation] = dnsDelayDuration.String()
if err := r.Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to save annotation marking DNS becoming ready")
return modified, err
}
modified = true
metricDNSDelaySeconds.Observe(float64(dnsDelayDuration.Seconds()))
return modified, nil
}
// ensureDNSZonePreserveOnDelete makes sure the DNSZone, if one exists, has a
// matching PreserveOnDelete setting with its ClusterDeployment. Returns true
// if a change was made.
func (r *ReconcileClusterDeployment) ensureDNSZonePreserveOnDelete(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (bool, error) {
dnsZone := &hivev1.DNSZone{}
dnsZoneNamespacedName := types.NamespacedName{Namespace: cd.Namespace, Name: controllerutils.DNSZoneName(cd.Name)}
logger := cdLog.WithField("zone", dnsZoneNamespacedName.String())
err := r.Get(context.TODO(), dnsZoneNamespacedName, dnsZone)
if err != nil && !apierrors.IsNotFound(err) {
logger.WithError(err).Error("failed to fetch DNS zone")
return false, err
} else if err != nil {
// DNSZone doesn't exist yet
return false, nil
}
if dnsZone.Spec.PreserveOnDelete != cd.Spec.PreserveOnDelete {
logger.WithField("preserveOnDelete", cd.Spec.PreserveOnDelete).Info("setting DNSZone PreserveOnDelete to match ClusterDeployment PreserveOnDelete")
dnsZone.Spec.PreserveOnDelete = cd.Spec.PreserveOnDelete
err := r.Update(context.TODO(), dnsZone)
if err != nil {
logger.WithError(err).Log(controllerutils.LogLevel(err), "error updating DNSZone")
return false, err
}
return true, nil
}
return false, nil
}
func (r *ReconcileClusterDeployment) ensureManagedDNSZone(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (*hivev1.DNSZone, error) {
switch p := cd.Spec.Platform; {
case p.AWS != nil:
case p.GCP != nil:
case p.Azure != nil:
default:
cdLog.Error("cluster deployment platform does not support managed DNS")
if err := r.updateCondition(cd, hivev1.DNSNotReadyCondition, corev1.ConditionTrue, dnsUnsupportedPlatformReason, "Managed DNS is not supported on specified platform", cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition for DNSUnsupportedPlatform reason")
return nil, err
}
return nil, errors.New("managed DNS not supported on platform")
}
dnsZone := &hivev1.DNSZone{}
dnsZoneNamespacedName := types.NamespacedName{Namespace: cd.Namespace, Name: controllerutils.DNSZoneName(cd.Name)}
logger := cdLog.WithField("zone", dnsZoneNamespacedName.String())
switch err := r.Get(context.TODO(), dnsZoneNamespacedName, dnsZone); {
case apierrors.IsNotFound(err):
logger.Info("creating new DNSZone for cluster deployment")
return nil, r.createManagedDNSZone(cd, logger)
case err != nil:
logger.WithError(err).Error("failed to fetch DNS zone")
return nil, err
}
if !metav1.IsControlledBy(dnsZone, cd) {
cdLog.Error("DNS zone already exists but is not owned by cluster deployment")
if err := r.updateCondition(cd, hivev1.DNSNotReadyCondition, corev1.ConditionTrue, dnsZoneResourceConflictReason, "Existing DNS zone not owned by cluster deployment", cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition")
return nil, err
}
return nil, errors.New("Existing unowned DNS zone")
}
availableCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.ZoneAvailableDNSZoneCondition)
insufficientCredentialsCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.InsufficientCredentialsCondition)
authenticationFailureCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.AuthenticationFailureCondition)
apiOptInRequiredCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.APIOptInRequiredCondition)
dnsErrorCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.GenericDNSErrorsCondition)
var (
status corev1.ConditionStatus
reason, message string
)
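// Translate the DNSZone's own conditions into the ClusterDeployment's DNSNotReady condition,
// checking the specific failure causes before falling back to the generic not-ready state.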
switch {
case availableCondition != nil && availableCondition.Status == corev1.ConditionTrue:
status = corev1.ConditionFalse
reason = dnsReadyReason
message = "DNS Zone available"
case insufficientCredentialsCondition != nil && insufficientCredentialsCondition.Status == corev1.ConditionTrue:
status = corev1.ConditionTrue
reason = "InsufficientCredentials"
message = insufficientCredentialsCondition.Message
case authenticationFailureCondition != nil && authenticationFailureCondition.Status == corev1.ConditionTrue:
status = corev1.ConditionTrue
reason = "AuthenticationFailure"
message = authenticationFailureCondition.Message
case apiOptInRequiredCondition != nil && apiOptInRequiredCondition.Status == corev1.ConditionTrue:
status = corev1.ConditionTrue
reason = "APIOptInRequiredForDNS"
message = apiOptInRequiredCondition.Message
case dnsErrorCondition != nil && dnsErrorCondition.Status == corev1.ConditionTrue:
status = corev1.ConditionTrue
reason = dnsErrorCondition.Reason
message = dnsErrorCondition.Message
default:
status = corev1.ConditionTrue
reason = dnsNotReadyReason
message = "DNS Zone not yet available"
isDNSNotReadyConditionSet, dnsNotReadyCondition := isDNSNotReadyConditionSet(cd)
if isDNSNotReadyConditionSet {
// Timeout if it has been in this state for longer than allowed.
timeSinceLastTransition := time.Since(dnsNotReadyCondition.LastTransitionTime.Time)
if timeSinceLastTransition >= defaultDNSNotReadyTimeout {
// We've timed out, set the dnsNotReadyTimedoutReason for the DNSNotReady condition
cdLog.WithField("timeout", defaultDNSNotReadyTimeout).Warn("Timed out waiting on managed dns creation")
reason = dnsNotReadyTimedoutReason
message = "DNS Zone timed out in DNSNotReady state"
}
}
}
if err := r.updateCondition(cd, hivev1.DNSNotReadyCondition, status, reason, message, cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition")
return nil, err
}
if reason != dnsReadyReason {
return nil, nil
}
return dnsZone, nil
}
func (r *ReconcileClusterDeployment) createManagedDNSZone(cd *hivev1.ClusterDeployment, logger log.FieldLogger) error {
dnsZone := &hivev1.DNSZone{
ObjectMeta: metav1.ObjectMeta{
Name: controllerutils.DNSZoneName(cd.Name),
Namespace: cd.Namespace,
},
Spec: hivev1.DNSZoneSpec{
Zone: cd.Spec.BaseDomain,
PreserveOnDelete: cd.Spec.PreserveOnDelete,
LinkToParentDomain: true,
},
}
switch {
case cd.Spec.Platform.AWS != nil:
additionalTags := make([]hivev1.AWSResourceTag, 0, len(cd.Spec.Platform.AWS.UserTags))
for k, v := range cd.Spec.Platform.AWS.UserTags {
additionalTags = append(additionalTags, hivev1.AWSResourceTag{Key: k, Value: v})
}
region := ""
if strings.HasPrefix(cd.Spec.Platform.AWS.Region, constants.AWSChinaRegionPrefix) {
region = constants.AWSChinaRoute53Region
}
dnsZone.Spec.AWS = &hivev1.AWSDNSZoneSpec{
CredentialsSecretRef: cd.Spec.Platform.AWS.CredentialsSecretRef,
CredentialsAssumeRole: cd.Spec.Platform.AWS.CredentialsAssumeRole,
AdditionalTags: additionalTags,
Region: region,
}
case cd.Spec.Platform.GCP != nil:
dnsZone.Spec.GCP = &hivev1.GCPDNSZoneSpec{
CredentialsSecretRef: cd.Spec.Platform.GCP.CredentialsSecretRef,
}
case cd.Spec.Platform.Azure != nil:
dnsZone.Spec.Azure = &hivev1.AzureDNSZoneSpec{
CredentialsSecretRef: cd.Spec.Platform.Azure.CredentialsSecretRef,
ResourceGroupName: cd.Spec.Platform.Azure.BaseDomainResourceGroupName,
}
}
logger.WithField("derivedObject", dnsZone.Name).Debug("Setting labels on derived object")
dnsZone.Labels = k8slabels.AddLabel(dnsZone.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
dnsZone.Labels = k8slabels.AddLabel(dnsZone.Labels, constants.DNSZoneTypeLabel, constants.DNSZoneTypeChild)
if err := controllerutil.SetControllerReference(cd, dnsZone, r.scheme); err != nil {
logger.WithError(err).Error("error setting controller reference on dnszone")
return err
}
err := r.Create(context.TODO(), dnsZone)
if err != nil {
logger.WithError(err).Log(controllerutils.LogLevel(err), "cannot create DNS zone")
return err
}
logger.Info("dns zone created")
return nil
}
func selectorPodWatchHandler(a client.Object) []reconcile.Request {
retval := []reconcile.Request{}
pod, ok := a.(*corev1.Pod)
if !ok {
// Wasn't a Pod, bail out. This should not happen.
log.Errorf("Error converting MapObject.Object to Pod. Value: %+v", a)
return retval
}
if pod.Labels == nil {
return retval
}
cdName, ok := pod.Labels[constants.ClusterDeploymentNameLabel]
if !ok {
return retval
}
retval = append(retval, reconcile.Request{NamespacedName: types.NamespacedName{
Name: cdName,
Namespace: pod.Namespace,
}})
return retval
}
// GetInstallLogsPVCName returns the expected name of the persistent volume claim for cluster install failure logs.
// TODO: Remove this function and all calls to it. It's being left here for compatibility until the install log PVs are removed from all the installs.
func GetInstallLogsPVCName(cd *hivev1.ClusterDeployment) string {
return apihelpers.GetResourceName(cd.Name, "install-logs")
}
// cleanupInstallLogPVC will immediately delete the PVC (should it exist) if the cluster was installed successfully, without retries.
// If there were retries, it will delete the PVC if it has been more than 7 days since the job was completed.
// TODO: Remove this function and all calls to it. It's being left here for compatibility until the install log PVs are removed from all the installs.
func (r *ReconcileClusterDeployment) cleanupInstallLogPVC(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
if !cd.Spec.Installed {
return nil
}
pvc := &corev1.PersistentVolumeClaim{}
err := r.Get(context.TODO(), types.NamespacedName{Name: GetInstallLogsPVCName(cd), Namespace: cd.Namespace}, pvc)
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
cdLog.WithError(err).Error("error looking up install logs PVC")
return err
}
// Also check if we've already deleted it; the PVC won't be deleted until the install pod is,
// and that is retained for one day.
if pvc.DeletionTimestamp != nil {
return nil
}
pvcLog := cdLog.WithField("pvc", pvc.Name)
switch {
case cd.Status.InstallRestarts == 0:
pvcLog.Info("deleting logs PersistentVolumeClaim for installed cluster with no restarts")
case cd.Status.InstalledTimestamp == nil:
pvcLog.Warn("deleting logs PersistentVolumeClaim for cluster with errors but no installed timestamp")
// Otherwise, delete if more than 7 days have passed.
case time.Since(cd.Status.InstalledTimestamp.Time) > (7 * 24 * time.Hour):
pvcLog.Info("deleting logs PersistentVolumeClaim for cluster that was installed after restarts more than 7 days ago")
default:
cdLog.WithField("pvc", pvc.Name).Debug("preserving logs PersistentVolumeClaim for cluster with install restarts for 7 days")
return nil
}
if err := r.Delete(context.TODO(), pvc); err != nil {
pvcLog.WithError(err).Log(controllerutils.LogLevel(err), "error deleting install logs PVC")
return err
}
return nil
}
func generateDeprovision(cd *hivev1.ClusterDeployment) (*hivev1.ClusterDeprovision, error) {
req := &hivev1.ClusterDeprovision{
ObjectMeta: metav1.ObjectMeta{
Name: cd.Name,
Namespace: cd.Namespace,
},
Spec: hivev1.ClusterDeprovisionSpec{
InfraID: cd.Spec.ClusterMetadata.InfraID,
ClusterID: cd.Spec.ClusterMetadata.ClusterID,
},
}
switch {
case cd.Spec.Platform.AWS != nil:
req.Spec.Platform.AWS = &hivev1.AWSClusterDeprovision{
Region: cd.Spec.Platform.AWS.Region,
CredentialsSecretRef: &cd.Spec.Platform.AWS.CredentialsSecretRef,
CredentialsAssumeRole: cd.Spec.Platform.AWS.CredentialsAssumeRole,
}
case cd.Spec.Platform.Azure != nil:
req.Spec.Platform.Azure = &hivev1.AzureClusterDeprovision{
CredentialsSecretRef: &cd.Spec.Platform.Azure.CredentialsSecretRef,
CloudName: &cd.Spec.Platform.Azure.CloudName,
}
case cd.Spec.Platform.GCP != nil:
req.Spec.Platform.GCP = &hivev1.GCPClusterDeprovision{
Region: cd.Spec.Platform.GCP.Region,
CredentialsSecretRef: &cd.Spec.Platform.GCP.CredentialsSecretRef,
}
case cd.Spec.Platform.OpenStack != nil:
req.Spec.Platform.OpenStack = &hivev1.OpenStackClusterDeprovision{
Cloud: cd.Spec.Platform.OpenStack.Cloud,
CredentialsSecretRef: &cd.Spec.Platform.OpenStack.CredentialsSecretRef,
CertificatesSecretRef: cd.Spec.Platform.OpenStack.CertificatesSecretRef,
}
case cd.Spec.Platform.VSphere != nil:
req.Spec.Platform.VSphere = &hivev1.VSphereClusterDeprovision{
CredentialsSecretRef: cd.Spec.Platform.VSphere.CredentialsSecretRef,
CertificatesSecretRef: cd.Spec.Platform.VSphere.CertificatesSecretRef,
VCenter: cd.Spec.Platform.VSphere.VCenter,
}
case cd.Spec.Platform.Ovirt != nil:
req.Spec.Platform.Ovirt = &hivev1.OvirtClusterDeprovision{
CredentialsSecretRef: cd.Spec.Platform.Ovirt.CredentialsSecretRef,
CertificatesSecretRef: cd.Spec.Platform.Ovirt.CertificatesSecretRef,
ClusterID: cd.Spec.Platform.Ovirt.ClusterID,
}
default:
return nil, errors.New("unsupported cloud provider for deprovision")
}
return req, nil
}
func generatePullSecretObj(pullSecret string, pullSecretName string, cd *hivev1.ClusterDeployment) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: pullSecretName,
Namespace: cd.Namespace,
},
Type: corev1.SecretTypeDockerConfigJson,
StringData: map[string]string{
corev1.DockerConfigJsonKey: pullSecret,
},
}
}
func dnsReadyTransitionTime(dnsZone *hivev1.DNSZone) *time.Time {
readyCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.ZoneAvailableDNSZoneCondition)
if readyCondition != nil && readyCondition.Status == corev1.ConditionTrue {
return &readyCondition.LastTransitionTime.Time
}
return nil
}
func clearDeprovisionUnderwaySecondsMetric(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) {
cleared := hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds.Delete(map[string]string{
"cluster_deployment": cd.Name,
"namespace": cd.Namespace,
"cluster_type": hivemetrics.GetClusterDeploymentType(cd),
})
if cleared {
cdLog.Debug("cleared metric: %v", hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds)
}
}
// initializeAnnotations initializes the annotations map if it is not already set
func initializeAnnotations(cd *hivev1.ClusterDeployment) {
if cd.Annotations == nil {
cd.Annotations = map[string]string{}
}
}
// mergePullSecrets merges the global pull secret JSON (if defined) with the cluster's pull secret JSON (if defined)
// An error will be returned if neither is defined
func (r *ReconcileClusterDeployment) mergePullSecrets(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (string, error) {
var localPullSecret string
var err error
// For code readability let's call the pull secret in cluster deployment config as local pull secret
if cd.Spec.PullSecretRef != nil {
localPullSecret, err = controllerutils.LoadSecretData(r.Client, cd.Spec.PullSecretRef.Name, cd.Namespace, corev1.DockerConfigJsonKey)
if err != nil {
return "", errors.Wrap(err, "local pull secret could not be retrieved")
}
}
// Check for a global pull secret name in the environment; it is set from the hive config.
globalPullSecretName := os.Getenv(constants.GlobalPullSecret)
var globalPullSecret string
if len(globalPullSecretName) != 0 {
globalPullSecret, err = controllerutils.LoadSecretData(r.Client, globalPullSecretName, controllerutils.GetHiveNamespace(), corev1.DockerConfigJsonKey)
if err != nil {
return "", errors.Wrap(err, "global pull secret could not be retrieved")
}
}
switch {
case globalPullSecret != "" && localPullSecret != "":
// Merge local pullSecret and globalPullSecret. If both pull secrets have same registry name
// then the merged pull secret will have registry secret from local pull secret
pullSecret, err := controllerutils.MergeJsons(globalPullSecret, localPullSecret, cdLog)
if err != nil {
errMsg := "unable to merge global pull secret with local pull secret"
cdLog.WithError(err).Error(errMsg)
return "", errors.Wrap(err, errMsg)
}
return pullSecret, nil
case globalPullSecret != "":
return globalPullSecret, nil
case localPullSecret != "":
return localPullSecret, nil
default:
errMsg := "clusterdeployment must specify pull secret since hiveconfig does not specify a global pull secret"
cdLog.Error(errMsg)
return "", errors.New(errMsg)
}
}
// updatePullSecretInfo creates or updates the merged pull secret for the clusterdeployment.
// It returns true when the merged pull secret has been created or updated.
func (r *ReconcileClusterDeployment) updatePullSecretInfo(pullSecret string, cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (bool, error) {
var err error
pullSecretObjExists := true
existingPullSecretObj := &corev1.Secret{}
mergedSecretName := constants.GetMergedPullSecretName(cd)
err = r.Get(context.TODO(), types.NamespacedName{Name: mergedSecretName, Namespace: cd.Namespace}, existingPullSecretObj)
if err != nil {
if apierrors.IsNotFound(err) {
cdLog.Info("Existing pull secret object not found")
pullSecretObjExists = false
} else {
return false, errors.Wrap(err, "Error getting pull secret from cluster deployment")
}
}
if pullSecretObjExists {
existingPullSecret, ok := existingPullSecretObj.Data[corev1.DockerConfigJsonKey]
if !ok {
return false, fmt.Errorf("Pull secret %s did not contain key %s", mergedSecretName, corev1.DockerConfigJsonKey)
}
if string(existingPullSecret) == pullSecret {
cdLog.Debug("Existing and the new merged pull secret are same")
return false, nil
}
cdLog.Info("Existing merged pull secret hash did not match with latest merged pull secret")
existingPullSecretObj.Data[corev1.DockerConfigJsonKey] = []byte(pullSecret)
err = r.Update(context.TODO(), existingPullSecretObj)
if err != nil {
return false, errors.Wrap(err, "error updating merged pull secret object")
}
cdLog.WithField("secretName", mergedSecretName).Info("Updated the merged pull secret object successfully")
} else {
// create a new pull secret object
newPullSecretObj := generatePullSecretObj(
pullSecret,
mergedSecretName,
cd,
)
cdLog.WithField("derivedObject", newPullSecretObj.Name).Debug("Setting labels on derived object")
newPullSecretObj.Labels = k8slabels.AddLabel(newPullSecretObj.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
newPullSecretObj.Labels = k8slabels.AddLabel(newPullSecretObj.Labels, constants.SecretTypeLabel, constants.SecretTypeMergedPullSecret)
err = controllerutil.SetControllerReference(cd, newPullSecretObj, r.scheme)
if err != nil {
cdLog.Errorf("error setting controller reference on new merged pull secret: %v", err)
return false, err
}
err = r.Create(context.TODO(), newPullSecretObj)
if err != nil {
return false, errors.Wrap(err, "error creating new pull secret object")
}
cdLog.WithField("secretName", mergedSecretName).Info("Created the merged pull secret object successfully")
}
return true, nil
}
func calculateNextProvisionTime(failureTime time.Time, retries int, cdLog log.FieldLogger) time.Time {
// (2^currentRetries) * 60 seconds up to a max of 24 hours.
const sleepCap = 24 * time.Hour
const retryCap = 11 // log_2_(24*60)
if retries >= retryCap {
return failureTime.Add(sleepCap)
}
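// Otherwise back off exponentially: 2^retries minutes after the failure time.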
return failureTime.Add((1 << uint(retries)) * time.Minute)
}
func (r *ReconcileClusterDeployment) existingProvisions(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) ([]*hivev1.ClusterProvision, error) {
provisionList := &hivev1.ClusterProvisionList{}
if err := r.List(
context.TODO(),
provisionList,
client.InNamespace(cd.Namespace),
client.MatchingLabels(map[string]string{constants.ClusterDeploymentNameLabel: cd.Name}),
); err != nil {
cdLog.WithError(err).Warn("could not list provisions for clusterdeployment")
return nil, err
}
provisions := make([]*hivev1.ClusterProvision, len(provisionList.Items))
for i := range provisionList.Items {
provisions[i] = &provisionList.Items[i]
}
return provisions, nil
}
func (r *ReconcileClusterDeployment) getFirstProvision(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) *hivev1.ClusterProvision {
provisions, err := r.existingProvisions(cd, cdLog)
if err != nil {
return nil
}
for _, provision := range provisions {
if provision.Spec.Attempt == 0 {
return provision
}
}
cdLog.Warn("could not find the first provision for clusterdeployment")
return nil
}
func (r *ReconcileClusterDeployment) adoptProvision(cd *hivev1.ClusterDeployment, provision *hivev1.ClusterProvision, cdLog log.FieldLogger) error {
pLog := cdLog.WithField("provision", provision.Name)
cd.Status.ProvisionRef = &corev1.LocalObjectReference{Name: provision.Name}
if cd.Status.InstallStartedTimestamp == nil {
n := metav1.Now()
cd.Status.InstallStartedTimestamp = &n
}
if err := r.Status().Update(context.TODO(), cd); err != nil {
pLog.WithError(err).Log(controllerutils.LogLevel(err), "could not adopt provision")
return err
}
pLog.Info("adopted provision")
return nil
}
func (r *ReconcileClusterDeployment) deleteStaleProvisions(provs []*hivev1.ClusterProvision, cdLog log.FieldLogger) {
// Cap the number of existing provisions. Always keep the earliest provision as
// it is used to determine the total time that it took to install. Take off
// one extra to make room for the new provision being started.
amountToDelete := len(provs) - maxProvisions
if amountToDelete <= 0 {
return
}
cdLog.Infof("Deleting %d old provisions", amountToDelete)
sort.Slice(provs, func(i, j int) bool { return provs[i].Spec.Attempt < provs[j].Spec.Attempt })
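// Keep the provision at index 0 (the earliest) and delete the next amountToDelete provisions.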
for _, provision := range provs[1 : amountToDelete+1] {
pLog := cdLog.WithField("provision", provision.Name)
pLog.Info("Deleting old provision")
if err := r.Delete(context.TODO(), provision); err != nil {
pLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to delete old provision")
}
}
}
// deleteOldFailedProvisions deletes the failed provisions which are more than 7 days old
func (r *ReconcileClusterDeployment) deleteOldFailedProvisions(provs []*hivev1.ClusterProvision, cdLog log.FieldLogger) {
cdLog.Debugf("Deleting failed provisions which are more than 7 days old")
for _, provision := range provs {
if provision.Spec.Stage == hivev1.ClusterProvisionStageFailed && time.Since(provision.CreationTimestamp.Time) > (7*24*time.Hour) {
pLog := cdLog.WithField("provision", provision.Name)
pLog.Info("Deleting failed provision")
if err := r.Delete(context.TODO(), provision); err != nil {
pLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to delete failed provision")
}
}
}
}
// validatePlatformCreds ensure the platform/cloud credentials are at least good enough to authenticate with
func (r *ReconcileClusterDeployment) validatePlatformCreds(cd *hivev1.ClusterDeployment, logger log.FieldLogger) (bool, error) {
return r.validateCredentialsForClusterDeployment(r.Client, cd, logger)
}
// checkForFailedSync returns true if it finds that the ClusterSync has the Failed condition set
func checkForFailedSync(clusterSync *hiveintv1alpha1.ClusterSync) bool {
for _, cond := range clusterSync.Status.Conditions {
if cond.Type == hiveintv1alpha1.ClusterSyncFailed {
return cond.Status == corev1.ConditionTrue
}
}
return false
}
// setSyncSetFailedCondition updates the hivev1.SyncSetFailedCondition
func (r *ReconcileClusterDeployment) setSyncSetFailedCondition(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
var (
status corev1.ConditionStatus
reason, message string
)
clusterSync := &hiveintv1alpha1.ClusterSync{}
switch err := r.Get(context.Background(), types.NamespacedName{Namespace: cd.Namespace, Name: cd.Name}, clusterSync); {
case apierrors.IsNotFound(err):
if paused, err := strconv.ParseBool(cd.Annotations[constants.SyncsetPauseAnnotation]); err == nil && paused {
cdLog.Info("SyncSet is paused. ClusterSync will not be created")
status = corev1.ConditionTrue
reason = "SyncSetPaused"
message = "SyncSet is paused. ClusterSync will not be created"
} else {
cdLog.Info("ClusterSync has not yet been created")
status = corev1.ConditionTrue
reason = "MissingClusterSync"
message = "ClusterSync has not yet been created"
}
case err != nil:
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not get ClusterSync")
return err
case checkForFailedSync(clusterSync):
status = corev1.ConditionTrue
reason = "SyncSetApplyFailure"
message = "One of the SyncSet applies has failed"
default:
status = corev1.ConditionFalse
reason = "SyncSetApplySuccess"
message = "SyncSet apply is successful"
}
conds, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.SyncSetFailedCondition,
status,
reason,
message,
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
if !changed {
return nil
}
cd.Status.Conditions = conds
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error updating syncset failed condition")
return err
}
return nil
}
// addOwnershipToSecret adds cluster deployment as an additional non-controlling owner to secret
func (r *ReconcileClusterDeployment) addOwnershipToSecret(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger, name string) error {
cdLog = cdLog.WithField("secret", name)
secret := &corev1.Secret{}
if err := r.Get(context.Background(), types.NamespacedName{Namespace: cd.Namespace, Name: name}, secret); err != nil {
cdLog.WithError(err).Error("failed to get secret")
return err
}
labelAdded := false
// Add the label for cluster deployment for reconciling later, and add the owner reference
if secret.Labels[constants.ClusterDeploymentNameLabel] != cd.Name {
cdLog.Debug("Setting label on derived object")
secret.Labels = k8slabels.AddLabel(secret.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
labelAdded = true
}
cdRef := metav1.OwnerReference{
APIVersion: cd.APIVersion,
Kind: cd.Kind,
Name: cd.Name,
UID: cd.UID,
BlockOwnerDeletion: pointer.BoolPtr(true),
}
cdRefChanged := librarygocontroller.EnsureOwnerRef(secret, cdRef)
if cdRefChanged {
cdLog.Debug("ownership added for cluster deployment")
}
if cdRefChanged || labelAdded {
cdLog.Info("secret has been modified, updating")
if err := r.Update(context.TODO(), secret); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error updating secret")
return err
}
}
return nil
}
// getClusterPlatform returns the platform of a given ClusterDeployment
func getClusterPlatform(cd *hivev1.ClusterDeployment) string {
switch {
case cd.Spec.Platform.AWS != nil:
return constants.PlatformAWS
case cd.Spec.Platform.Azure != nil:
return constants.PlatformAzure
case cd.Spec.Platform.GCP != nil:
return constants.PlatformGCP
case cd.Spec.Platform.OpenStack != nil:
return constants.PlatformOpenStack
case cd.Spec.Platform.VSphere != nil:
return constants.PlatformVSphere
case cd.Spec.Platform.BareMetal != nil:
return constants.PlatformBaremetal
case cd.Spec.Platform.AgentBareMetal != nil:
return constants.PlatformAgentBaremetal
}
return constants.PlatformUnknown
}
// getClusterRegion returns the region of a given ClusterDeployment
func getClusterRegion(cd *hivev1.ClusterDeployment) string {
switch {
case cd.Spec.Platform.AWS != nil:
return cd.Spec.Platform.AWS.Region
case cd.Spec.Platform.Azure != nil:
return cd.Spec.Platform.Azure.Region
case cd.Spec.Platform.GCP != nil:
return cd.Spec.Platform.GCP.Region
}
return regionUnknown
}
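// LoadReleaseImageVerifier builds a release image signature verifier from the ConfigMap named by the
// Hive release image verification environment variables. It returns a nil verifier when no ConfigMap
// name is configured.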
func LoadReleaseImageVerifier(config *rest.Config) (verify.Interface, error) {
ns := os.Getenv(constants.HiveReleaseImageVerificationConfigMapNamespaceEnvVar)
name := os.Getenv(constants.HiveReleaseImageVerificationConfigMapNameEnvVar)
if name == "" {
return nil, nil
}
if ns == "" {
return nil, errors.New("namespace must be set for Release Image verifier ConfigMap")
}
client, err := dynamic.NewForConfig(config) // the verify lib expects unstructured style object for configuration and therefore dynamic makes more sense.
if err != nil {
return nil, errors.Wrap(err, "failed to create kube client")
}
cm, err := client.Resource(corev1.SchemeGroupVersion.WithResource("configmaps")).
Namespace(ns).
Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return nil, errors.Wrap(err, "failed to read ConfigMap for release image verification")
}
// The verifier configuration expects the ConfigMap to carry certain annotations;
// setting them before passing it on ensures the ConfigMap is actually used.
annos := cm.GetAnnotations()
if annos == nil {
annos = map[string]string{}
}
annos[verify.ReleaseAnnotationConfigMapVerifier] = "true"
cm.SetAnnotations(annos)
cmData, err := cm.MarshalJSON()
if err != nil {
return nil, err
}
m := manifest.Manifest{
OriginalFilename: "release-image-verifier-configmap",
GVK: cm.GroupVersionKind(),
Obj: cm,
Raw: cmData,
}
return verify.NewFromManifests([]manifest.Manifest{m}, sigstore.NewCachedHTTPClientConstructor(sigstore.DefaultClient, nil).HTTPClient)
}
| 1 | 19,929 | Shouldn't this be in the above `if` block where we are setting the installedtimestamp to ensure this only happens for already installed (and/or adopted) clusters? | openshift-hive | go |
@@ -422,7 +422,7 @@ class SessionManager(QObject):
window=window.win_id)
tab_to_focus = None
for i, tab in enumerate(win['tabs']):
- new_tab = tabbed_browser.tabopen()
+ new_tab = tabbed_browser.tabopen(background=False)
self._load_tab(new_tab, tab)
if tab.get('active', False):
tab_to_focus = i | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Management of sessions - saved tabs/windows."""
import os
import os.path
import itertools
import urllib
import sip
from PyQt5.QtCore import QUrl, QObject, QPoint, QTimer
from PyQt5.QtWidgets import QApplication
import yaml
from qutebrowser.utils import (standarddir, objreg, qtutils, log, message,
utils)
from qutebrowser.commands import cmdexc, cmdutils
from qutebrowser.config import config, configfiles
from qutebrowser.completion.models import miscmodels
from qutebrowser.mainwindow import mainwindow
default = object() # Sentinel value
def init(parent=None):
"""Initialize sessions.
Args:
parent: The parent to use for the SessionManager.
"""
base_path = os.path.join(standarddir.data(), 'sessions')
try:
os.mkdir(base_path)
except FileExistsError:
pass
session_manager = SessionManager(base_path, parent)
objreg.register('session-manager', session_manager)
class SessionError(Exception):
"""Exception raised when a session failed to load/save."""
class SessionNotFoundError(SessionError):
"""Exception raised when a session to be loaded was not found."""
class TabHistoryItem:
"""A single item in the tab history.
Attributes:
url: The QUrl of this item.
original_url: The QUrl of this item which was originally requested.
title: The title as string of this item.
active: Whether this item is the item currently navigated to.
user_data: The user data for this item.
"""
def __init__(self, url, title, *, original_url=None, active=False,
user_data=None):
self.url = url
if original_url is None:
self.original_url = url
else:
self.original_url = original_url
self.title = title
self.active = active
self.user_data = user_data
def __repr__(self):
return utils.get_repr(self, constructor=True, url=self.url,
original_url=self.original_url, title=self.title,
active=self.active, user_data=self.user_data)
class SessionManager(QObject):
"""Manager for sessions.
Attributes:
_base_path: The path to store sessions under.
_last_window_session: The session data of the last window which was
closed.
_current: The name of the currently loaded session, or None.
did_load: Set when a session was loaded.
"""
def __init__(self, base_path, parent=None):
super().__init__(parent)
self._current = None
self._base_path = base_path
self._last_window_session = None
self.did_load = False
def _get_session_path(self, name, check_exists=False):
"""Get the session path based on a session name or absolute path.
Args:
name: The name of the session.
check_exists: Whether it should also be checked if the session
exists.
"""
path = os.path.expanduser(name)
if os.path.isabs(path) and ((not check_exists) or
os.path.exists(path)):
return path
else:
path = os.path.join(self._base_path, name + '.yml')
if check_exists and not os.path.exists(path):
raise SessionNotFoundError(path)
else:
return path
def exists(self, name):
"""Check if a named session exists."""
try:
self._get_session_path(name, check_exists=True)
except SessionNotFoundError:
return False
else:
return True
def _save_tab_item(self, tab, idx, item):
"""Save a single history item in a tab.
Args:
tab: The tab to save.
idx: The index of the current history item.
item: The history item.
Return:
A dict with the saved data for this item.
"""
data = {
'url': bytes(item.url().toEncoded()).decode('ascii'),
}
if item.title():
data['title'] = item.title()
else:
# https://github.com/qutebrowser/qutebrowser/issues/879
if tab.history.current_idx() == idx:
data['title'] = tab.title()
else:
data['title'] = data['url']
if item.originalUrl() != item.url():
encoded = item.originalUrl().toEncoded()
data['original-url'] = bytes(encoded).decode('ascii')
if tab.history.current_idx() == idx:
data['active'] = True
try:
user_data = item.userData()
except AttributeError:
# QtWebEngine
user_data = None
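# For the item currently shown in the tab, capture the live zoom factor and scroll
# position; for other items, fall back to whatever user data was stored for them.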
if tab.history.current_idx() == idx:
pos = tab.scroller.pos_px()
data['zoom'] = tab.zoom.factor()
data['scroll-pos'] = {'x': pos.x(), 'y': pos.y()}
elif user_data is not None:
if 'zoom' in user_data:
data['zoom'] = user_data['zoom']
if 'scroll-pos' in user_data:
pos = user_data['scroll-pos']
data['scroll-pos'] = {'x': pos.x(), 'y': pos.y()}
data['pinned'] = tab.data.pinned
return data
def _save_tab(self, tab, active):
"""Get a dict with data for a single tab.
Args:
tab: The WebView to save.
active: Whether the tab is currently active.
"""
data = {'history': []}
if active:
data['active'] = True
for idx, item in enumerate(tab.history):
qtutils.ensure_valid(item)
item_data = self._save_tab_item(tab, idx, item)
if item.url().scheme() == 'qute' and item.url().host() == 'back':
# don't add qute://back to the session file
if item_data.get('active', False) and data['history']:
# mark entry before qute://back as active
data['history'][-1]['active'] = True
else:
data['history'].append(item_data)
return data
def _save_all(self, *, only_window=None, with_private=False):
"""Get a dict with data for all windows/tabs."""
data = {'windows': []}
if only_window is not None:
winlist = [only_window]
else:
winlist = objreg.window_registry
for win_id in sorted(winlist):
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
main_window = objreg.get('main-window', scope='window',
window=win_id)
# We could be in the middle of destroying a window here
if sip.isdeleted(main_window):
continue
if tabbed_browser.private and not with_private:
continue
win_data = {}
active_window = QApplication.instance().activeWindow()
if getattr(active_window, 'win_id', None) == win_id:
win_data['active'] = True
win_data['geometry'] = bytes(main_window.saveGeometry())
win_data['tabs'] = []
if tabbed_browser.private:
win_data['private'] = True
for i, tab in enumerate(tabbed_browser.widgets()):
active = i == tabbed_browser.currentIndex()
win_data['tabs'].append(self._save_tab(tab, active))
data['windows'].append(win_data)
return data
def _get_session_name(self, name):
"""Helper for save to get the name to save the session to.
Args:
name: The name of the session to save, or the 'default' sentinel
object.
"""
if name is default:
name = config.val.session.default_name
if name is None:
if self._current is not None:
name = self._current
else:
name = 'default'
return name
def save(self, name, last_window=False, load_next_time=False,
only_window=None, with_private=False):
"""Save a named session.
Args:
name: The name of the session to save, or the 'default' sentinel
object.
last_window: If set, saves the saved self._last_window_session
instead of the currently open state.
load_next_time: If set, prepares this session to be loaded next time.
only_window: If set, only tabs in the specified window are saved.
with_private: Include private windows.
Return:
The name of the saved session.
"""
name = self._get_session_name(name)
path = self._get_session_path(name)
log.sessions.debug("Saving session {} to {}...".format(name, path))
if last_window:
data = self._last_window_session
if data is None:
log.sessions.error("last_window_session is None while saving!")
return
else:
data = self._save_all(only_window=only_window,
with_private=with_private)
log.sessions.vdebug("Saving data: {}".format(data))
try:
with qtutils.savefile_open(path) as f:
utils.yaml_dump(data, f)
except (OSError, UnicodeEncodeError, yaml.YAMLError) as e:
raise SessionError(e)
if load_next_time:
configfiles.state['general']['session'] = name
return name
def save_autosave(self):
"""Save the autosave session."""
try:
self.save('_autosave')
except SessionError as e:
log.sessions.error("Failed to save autosave session: {}".format(e))
def delete_autosave(self):
"""Delete the autosave session."""
try:
self.delete('_autosave')
except SessionNotFoundError:
# Exiting before the first load finished
pass
except SessionError as e:
log.sessions.error("Failed to delete autosave session: {}"
.format(e))
def save_last_window_session(self):
"""Temporarily save the session for the last closed window."""
self._last_window_session = self._save_all()
def _load_tab(self, new_tab, data):
"""Load yaml data into a newly opened tab."""
entries = []
lazy_load = []
# use len(data['history'])
# -> dropwhile empty if not session.lazy_session
lazy_index = len(data['history'])
gen = itertools.chain(
itertools.takewhile(lambda _: not lazy_load,
enumerate(data['history'])),
enumerate(lazy_load),
itertools.dropwhile(lambda i: i[0] < lazy_index,
enumerate(data['history'])))
for i, histentry in gen:
user_data = {}
if 'zoom' in data:
# The zoom was accidentally stored in 'data' instead of per-tab
# earlier.
# See https://github.com/qutebrowser/qutebrowser/issues/728
user_data['zoom'] = data['zoom']
elif 'zoom' in histentry:
user_data['zoom'] = histentry['zoom']
if 'scroll-pos' in data:
# The scroll position was accidentally stored in 'data' instead
# of per-tab earlier.
# See https://github.com/qutebrowser/qutebrowser/issues/728
pos = data['scroll-pos']
user_data['scroll-pos'] = QPoint(pos['x'], pos['y'])
elif 'scroll-pos' in histentry:
pos = histentry['scroll-pos']
user_data['scroll-pos'] = QPoint(pos['x'], pos['y'])
if 'pinned' in histentry:
new_tab.data.pinned = histentry['pinned']
if (config.val.session.lazy_restore and
histentry.get('active', False) and
not histentry['url'].startswith('qute://back')):
# remove "active" mark and insert back page marked as active
lazy_index = i + 1
lazy_load.append({
'title': histentry['title'],
'url':
'qute://back#' +
urllib.parse.quote(histentry['title']),
'active': True
})
histentry['active'] = False
active = histentry.get('active', False)
url = QUrl.fromEncoded(histentry['url'].encode('ascii'))
if 'original-url' in histentry:
orig_url = QUrl.fromEncoded(
histentry['original-url'].encode('ascii'))
else:
orig_url = url
entry = TabHistoryItem(url=url, original_url=orig_url,
title=histentry['title'], active=active,
user_data=user_data)
entries.append(entry)
if active:
new_tab.title_changed.emit(histentry['title'])
try:
new_tab.history.load_items(entries)
except ValueError as e:
raise SessionError(e)
def load(self, name, temp=False):
"""Load a named session.
Args:
name: The name of the session to load.
temp: If given, don't set the current session.
"""
path = self._get_session_path(name, check_exists=True)
try:
with open(path, encoding='utf-8') as f:
data = utils.yaml_load(f)
except (OSError, UnicodeDecodeError, yaml.YAMLError) as e:
raise SessionError(e)
log.sessions.debug("Loading session {} from {}...".format(name, path))
for win in data['windows']:
window = mainwindow.MainWindow(geometry=win['geometry'],
private=win.get('private', None))
window.show()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=window.win_id)
tab_to_focus = None
for i, tab in enumerate(win['tabs']):
new_tab = tabbed_browser.tabopen()
self._load_tab(new_tab, tab)
if tab.get('active', False):
tab_to_focus = i
if new_tab.data.pinned:
tabbed_browser.set_tab_pinned(new_tab, new_tab.data.pinned)
if tab_to_focus is not None:
tabbed_browser.setCurrentIndex(tab_to_focus)
if win.get('active', False):
QTimer.singleShot(0, tabbed_browser.activateWindow)
if data['windows']:
self.did_load = True
if not name.startswith('_') and not temp:
self._current = name
def delete(self, name):
"""Delete a session."""
path = self._get_session_path(name, check_exists=True)
try:
os.remove(path)
except OSError as e:
raise SessionError(e)
def list_sessions(self):
"""Get a list of all session names."""
sessions = []
for filename in os.listdir(self._base_path):
base, ext = os.path.splitext(filename)
if ext == '.yml':
sessions.append(base)
return sorted(sessions)
@cmdutils.register(instance='session-manager')
@cmdutils.argument('name', completion=miscmodels.session)
def session_load(self, name, clear=False, temp=False, force=False):
"""Load a session.
Args:
name: The name of the session.
clear: Close all existing windows.
temp: Don't set the current session for :session-save.
force: Force loading internal sessions (starting with an
underline).
"""
if name.startswith('_') and not force:
raise cmdexc.CommandError("{} is an internal session, use --force "
"to load anyways.".format(name))
old_windows = list(objreg.window_registry.values())
try:
self.load(name, temp=temp)
except SessionNotFoundError:
raise cmdexc.CommandError("Session {} not found!".format(name))
except SessionError as e:
raise cmdexc.CommandError("Error while loading session: {}"
.format(e))
else:
if clear:
for win in old_windows:
win.close()
@cmdutils.register(instance='session-manager')
@cmdutils.argument('name', completion=miscmodels.session)
@cmdutils.argument('win_id', win_id=True)
@cmdutils.argument('with_private', flag='p')
def session_save(self, name: str = default, current=False, quiet=False,
force=False, only_active_window=False, with_private=False,
win_id=None):
"""Save a session.
Args:
name: The name of the session. If not given, the session configured
in session.default_name is saved.
current: Save the current session instead of the default.
quiet: Don't show confirmation message.
force: Force saving internal sessions (starting with an underline).
only_active_window: Saves only tabs of the currently active window.
with_private: Include private windows.
"""
if name is not default and name.startswith('_') and not force:
raise cmdexc.CommandError("{} is an internal session, use --force "
"to save anyways.".format(name))
if current:
if self._current is None:
raise cmdexc.CommandError("No session loaded currently!")
name = self._current
assert not name.startswith('_')
try:
if only_active_window:
name = self.save(name, only_window=win_id,
with_private=True)
else:
name = self.save(name, with_private=with_private)
except SessionError as e:
raise cmdexc.CommandError("Error while saving session: {}"
.format(e))
else:
if not quiet:
message.info("Saved session {}.".format(name))
@cmdutils.register(instance='session-manager')
@cmdutils.argument('name', completion=miscmodels.session)
def session_delete(self, name, force=False):
"""Delete a session.
Args:
name: The name of the session.
force: Force deleting internal sessions (starting with an
underline).
"""
if name.startswith('_') and not force:
raise cmdexc.CommandError("{} is an internal session, use --force "
"to delete anyways.".format(name))
try:
self.delete(name)
except SessionNotFoundError:
raise cmdexc.CommandError("Session {} not found!".format(name))
except SessionError as e:
log.sessions.exception("Error while deleting session!")
raise cmdexc.CommandError("Error while deleting session: {}"
.format(e))
else:
log.sessions.debug("Deleted session {}.".format(name))
| 1 | 20,046 | This seems like another unrelated change I've done in `master`. | qutebrowser-qutebrowser | py |
@@ -873,6 +873,15 @@ module RSpec::Core
expect(config.formatters.first.output.path).to eq(path)
end
end
+
+ context "when a duplicate formatter exists for the same output target" do
+ it "does not add the formatter" do
+ config.add_formatter :documentation
+ expect {
+ config.add_formatter :documentation
+ }.not_to change { config.formatters.length }
+ end
+ end
end
describe "#filter_run_including" do | 1 | require 'spec_helper'
require 'tmpdir'
module RSpec::Core
describe Configuration do
let(:config) { Configuration.new }
describe "RSpec.configuration with a block" do
before { allow(RSpec).to receive(:warn_deprecation) }
it "is deprecated" do
expect(RSpec).to receive(:warn_deprecation)
RSpec.configuration {}
end
end
describe '#deprecation_stream' do
it 'defaults to standard error' do
expect($rspec_core_without_stderr_monkey_patch.deprecation_stream).to eq STDERR
end
it 'is configurable' do
io = double 'deprecation io'
config.deprecation_stream = io
expect(config.deprecation_stream).to eq io
end
end
describe "#output_stream" do
it 'defaults to standard output' do
expect(config.output_stream).to eq $stdout
end
it 'is configurable' do
io = double 'output io'
config.output_stream = io
expect(config.output_stream).to eq io
end
context 'when the reporter has already been initialized' do
before do
config.reporter
allow(config).to receive(:warn)
end
it 'prints a notice indicating the reconfigured output_stream will be ignored' do
config.output_stream = StringIO.new
expect(config).to have_received(:warn).with(/output_stream.*#{__FILE__}:#{__LINE__ - 1}/)
end
it 'does not change the value of `output_stream`' do
config.output_stream = StringIO.new
expect(config.output_stream).to eq($stdout)
end
it 'does not print a warning if set to the value it already has' do
config.output_stream = config.output_stream
expect(config).not_to have_received(:warn)
end
end
end
describe "#setup_load_path_and_require" do
include_context "isolate load path mutation"
def absolute_path_to(dir)
File.expand_path("../../../../#{dir}", __FILE__)
end
it 'adds `lib` to the load path' do
lib_dir = absolute_path_to("lib")
$LOAD_PATH.delete(lib_dir)
expect($LOAD_PATH).not_to include(lib_dir)
config.setup_load_path_and_require []
expect($LOAD_PATH).to include(lib_dir)
end
it 'adds the configured `default_path` to the load path' do
config.default_path = 'features'
foo_dir = absolute_path_to("features")
expect($LOAD_PATH).not_to include(foo_dir)
config.setup_load_path_and_require []
expect($LOAD_PATH).to include(foo_dir)
end
it 'stores the required files' do
expect(config).to receive(:require).with('a/path')
config.setup_load_path_and_require ['a/path']
expect(config.requires).to eq ['a/path']
end
context "when `default_path` refers to a file rather than a directory" do
it 'does not add it to the load path' do
config.default_path = 'Rakefile'
config.setup_load_path_and_require []
expect($LOAD_PATH).not_to include(match(/Rakefile/))
end
end
end
describe "#load_spec_files" do
it "loads files using load" do
config.files_to_run = ["foo.bar", "blah_spec.rb"]
expect(config).to receive(:load).twice
config.load_spec_files
end
it "loads each file once, even if duplicated in list" do
config.files_to_run = ["a_spec.rb", "a_spec.rb"]
expect(config).to receive(:load).once
config.load_spec_files
end
end
describe "#mock_framework" do
it "defaults to :rspec" do
expect(config).to receive(:require).with('rspec/core/mocking_adapters/rspec')
config.mock_framework
end
end
describe "#mock_framework="do
it "delegates to mock_with" do
expect(config).to receive(:mock_with).with(:rspec)
config.mock_framework = :rspec
end
end
shared_examples "a configurable framework adapter" do |m|
it "yields a config object if the framework_module supports it" do
custom_config = Struct.new(:custom_setting).new
mod = Module.new
allow(mod).to receive_messages(:configuration => custom_config)
config.send m, mod do |mod_config|
mod_config.custom_setting = true
end
expect(custom_config.custom_setting).to be_truthy
end
it "raises if framework module doesn't support configuration" do
mod = Module.new
expect {
config.send m, mod do |mod_config|
end
}.to raise_error(/must respond to `configuration`/)
end
end
describe "#mock_with" do
before { allow(config).to receive(:require) }
it_behaves_like "a configurable framework adapter", :mock_with
it "allows rspec-mocks to be configured with a provided block" do
mod = Module.new
expect(RSpec::Mocks.configuration).to receive(:add_stub_and_should_receive_to).with(mod)
config.mock_with :rspec do |c|
c.add_stub_and_should_receive_to mod
end
end
context "with a module" do
it "sets the mock_framework_adapter to that module" do
mod = Module.new
config.mock_with mod
expect(config.mock_framework).to eq(mod)
end
end
it 'uses the named adapter' do
expect(config).to receive(:require).with("rspec/core/mocking_adapters/mocha")
stub_const("RSpec::Core::MockingAdapters::Mocha", Module.new)
config.mock_with :mocha
end
it "uses the null adapter when given :nothing" do
expect(config).to receive(:require).with('rspec/core/mocking_adapters/null').and_call_original
config.mock_with :nothing
end
it "raises an error when given an unknown key" do
expect {
config.mock_with :crazy_new_mocking_framework_ive_not_yet_heard_of
}.to raise_error(ArgumentError, /unknown mocking framework/i)
end
it "raises an error when given another type of object" do
expect {
config.mock_with Object.new
}.to raise_error(ArgumentError, /unknown mocking framework/i)
end
context 'when there are already some example groups defined' do
it 'raises an error since this setting must be applied before any groups are defined' do
allow(RSpec.world).to receive(:example_groups).and_return([double.as_null_object])
stub_const("RSpec::Core::MockingAdapters::Mocha", double(:framework_name => :mocha))
expect {
config.mock_with :mocha
}.to raise_error(/must be configured before any example groups are defined/)
end
it 'does not raise an error if the default `mock_with :rspec` is re-configured' do
config.mock_framework # called by RSpec when configuring the first example group
allow(RSpec.world).to receive(:example_groups).and_return([double.as_null_object])
config.mock_with :rspec
end
it 'does not raise an error if re-setting the same config' do
stub_const("RSpec::Core::MockingAdapters::Mocha", double(:framework_name => :mocha))
groups = []
allow(RSpec.world).to receive_messages(:example_groups => groups)
config.mock_with :mocha
groups << double.as_null_object
config.mock_with :mocha
end
end
end
describe "#expectation_framework" do
it "defaults to :rspec" do
expect(config).to receive(:require).with('rspec/expectations')
config.expectation_frameworks
end
end
describe "#expectation_framework=" do
it "delegates to expect_with=" do
expect(config).to receive(:expect_with).with(:rspec)
config.expectation_framework = :rspec
end
end
describe "#expect_with" do
before do
stub_const("Test::Unit::Assertions", Module.new)
allow(config).to receive(:require)
end
it_behaves_like "a configurable framework adapter", :expect_with
[
[:rspec, 'rspec/expectations'],
[:stdlib, 'test/unit/assertions']
].each do |framework, required_file|
context "with #{framework}" do
it "requires #{required_file}" do
expect(config).to receive(:require).with(required_file)
config.expect_with framework
end
end
end
it "supports multiple calls" do
config.expect_with :rspec
config.expect_with :stdlib
expect(config.expectation_frameworks).to eq [RSpec::Matchers, Test::Unit::Assertions]
end
it "raises if block given with multiple args" do
expect {
config.expect_with :rspec, :stdlib do |mod_config|
end
}.to raise_error(/expect_with only accepts/)
end
it "raises ArgumentError if framework is not supported" do
expect do
config.expect_with :not_supported
end.to raise_error(ArgumentError)
end
context 'when there are already some example groups defined' do
it 'raises an error since this setting must be applied before any groups are defined' do
allow(RSpec.world).to receive(:example_groups).and_return([double.as_null_object])
expect {
config.expect_with :rspec
}.to raise_error(/must be configured before any example groups are defined/)
end
it 'does not raise an error if the default `expect_with :rspec` is re-configured' do
config.expectation_frameworks # called by RSpec when configuring the first example group
allow(RSpec.world).to receive(:example_groups).and_return([double.as_null_object])
config.expect_with :rspec
end
it 'does not raise an error if re-setting the same config' do
groups = []
allow(RSpec.world).to receive_messages(:example_groups => groups)
config.expect_with :stdlib
groups << double.as_null_object
config.expect_with :stdlib
end
end
end
describe "#expecting_with_rspec?" do
before do
stub_const("Test::Unit::Assertions", Module.new)
allow(config).to receive(:require)
end
it "returns false by default" do
expect(config).not_to be_expecting_with_rspec
end
it "returns true when `expect_with :rspec` has been configured" do
config.expect_with :rspec
expect(config).to be_expecting_with_rspec
end
it "returns true when `expect_with :rspec, :stdlib` has been configured" do
config.expect_with :rspec, :stdlib
expect(config).to be_expecting_with_rspec
end
it "returns true when `expect_with :stdlib, :rspec` has been configured" do
config.expect_with :stdlib, :rspec
expect(config).to be_expecting_with_rspec
end
it "returns false when `expect_with :stdlib` has been configured" do
config.expect_with :stdlib
expect(config).not_to be_expecting_with_rspec
end
end
describe "#files_to_run" do
it "loads files not following pattern if named explicitly" do
config.files_or_directories_to_run = "spec/rspec/core/resources/a_bar.rb"
expect(config.files_to_run).to eq([ "spec/rspec/core/resources/a_bar.rb"])
end
it "prevents repetition of dir when start of the pattern" do
config.pattern = "spec/**/a_spec.rb"
config.files_or_directories_to_run = "spec"
expect(config.files_to_run).to eq(["spec/rspec/core/resources/a_spec.rb"])
end
it "does not prevent repetition of dir when later of the pattern" do
config.pattern = "rspec/**/a_spec.rb"
config.files_or_directories_to_run = "spec"
expect(config.files_to_run).to eq(["spec/rspec/core/resources/a_spec.rb"])
end
context "with <path>:<line_number>" do
it "overrides inclusion filters set on config" do
config.filter_run_including :foo => :bar
config.files_or_directories_to_run = "path/to/file.rb:37"
expect(config.inclusion_filter.size).to eq(1)
expect(config.inclusion_filter[:locations].keys.first).to match(/path\/to\/file\.rb$/)
expect(config.inclusion_filter[:locations].values.first).to eq([37])
end
it "overrides inclusion filters set before config" do
config.force(:inclusion_filter => {:foo => :bar})
config.files_or_directories_to_run = "path/to/file.rb:37"
expect(config.inclusion_filter.size).to eq(1)
expect(config.inclusion_filter[:locations].keys.first).to match(/path\/to\/file\.rb$/)
expect(config.inclusion_filter[:locations].values.first).to eq([37])
end
it "clears exclusion filters set on config" do
config.exclusion_filter = { :foo => :bar }
config.files_or_directories_to_run = "path/to/file.rb:37"
expect(config.exclusion_filter).to be_empty,
"expected exclusion filter to be empty:\n#{config.exclusion_filter}"
end
it "clears exclusion filters set before config" do
config.force(:exclusion_filter => { :foo => :bar })
config.files_or_directories_to_run = "path/to/file.rb:37"
expect(config.exclusion_filter).to be_empty,
"expected exclusion filter to be empty:\n#{config.exclusion_filter}"
end
end
context "with default pattern" do
it "loads files named _spec.rb" do
config.files_or_directories_to_run = "spec/rspec/core/resources"
expect(config.files_to_run).to eq([ "spec/rspec/core/resources/a_spec.rb"])
end
it "loads files in Windows", :if => RSpec.windows_os? do
config.files_or_directories_to_run = "C:\\path\\to\\project\\spec\\sub\\foo_spec.rb"
expect(config.files_to_run).to eq([ "C:/path/to/project/spec/sub/foo_spec.rb"])
end
it "loads files in Windows when directory is specified", :if => RSpec.windows_os? do
config.files_or_directories_to_run = "spec\\rspec\\core\\resources"
expect(config.files_to_run).to eq([ "spec/rspec/core/resources/a_spec.rb"])
end
end
context "with default default_path" do
it "loads files in the default path when run by rspec" do
allow(config).to receive(:command) { 'rspec' }
config.files_or_directories_to_run = []
expect(config.files_to_run).not_to be_empty
end
it "loads files in the default path when run with DRB (e.g., spork)" do
allow(config).to receive(:command) { 'spork' }
allow(RSpec::Core::Runner).to receive(:running_in_drb?) { true }
config.files_or_directories_to_run = []
expect(config.files_to_run).not_to be_empty
end
it "does not load files in the default path when run by ruby" do
allow(config).to receive(:command) { 'ruby' }
config.files_or_directories_to_run = []
expect(config.files_to_run).to be_empty
end
end
def specify_consistent_ordering_of_files_to_run
allow(File).to receive(:directory?).with('a') { true }
orderings = [
%w[ a/1.rb a/2.rb a/3.rb ],
%w[ a/2.rb a/1.rb a/3.rb ],
%w[ a/3.rb a/2.rb a/1.rb ]
].map do |files|
expect(Dir).to receive(:[]).with(/^\{?a/) { files }
yield
config.files_to_run
end
expect(orderings.uniq.size).to eq(1)
end
context 'when the given directories match the pattern' do
it 'orders the files in a consistent ordering, regardless of the underlying OS ordering' do
specify_consistent_ordering_of_files_to_run do
config.pattern = 'a/*.rb'
config.files_or_directories_to_run = 'a'
end
end
end
context 'when the pattern is given relative to the given directories' do
it 'orders the files in a consistent ordering, regardless of the underlying OS ordering' do
specify_consistent_ordering_of_files_to_run do
config.pattern = '*.rb'
config.files_or_directories_to_run = 'a'
end
end
end
context 'when given multiple file paths' do
it 'orders the files in a consistent ordering, regardless of the given order' do
allow(File).to receive(:directory?) { false } # fake it into thinking these a full file paths
files = ['a/b/c_spec.rb', 'c/b/a_spec.rb']
config.files_or_directories_to_run = *files
ordering_1 = config.files_to_run
config.files_or_directories_to_run = *(files.reverse)
ordering_2 = config.files_to_run
expect(ordering_1).to eq(ordering_2)
end
end
end
%w[pattern= filename_pattern=].each do |setter|
describe "##{setter}" do
context "with single pattern" do
before { config.send(setter, "**/*_foo.rb") }
it "loads files following pattern" do
file = File.expand_path(File.dirname(__FILE__) + "/resources/a_foo.rb")
config.files_or_directories_to_run = file
expect(config.files_to_run).to include(file)
end
it "loads files in directories following pattern" do
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
config.files_or_directories_to_run = dir
expect(config.files_to_run).to include("#{dir}/a_foo.rb")
end
it "does not load files in directories not following pattern" do
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
config.files_or_directories_to_run = dir
expect(config.files_to_run).not_to include("#{dir}/a_bar.rb")
end
end
context "with multiple patterns" do
it "supports comma separated values" do
config.send(setter, "**/*_foo.rb,**/*_bar.rb")
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
config.files_or_directories_to_run = dir
expect(config.files_to_run).to include("#{dir}/a_foo.rb")
expect(config.files_to_run).to include("#{dir}/a_bar.rb")
end
it "supports comma separated values with spaces" do
config.send(setter, "**/*_foo.rb, **/*_bar.rb")
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
config.files_or_directories_to_run = dir
expect(config.files_to_run).to include("#{dir}/a_foo.rb")
expect(config.files_to_run).to include("#{dir}/a_bar.rb")
end
it "supports curly braces glob syntax" do
config.send(setter, "**/*_{foo,bar}.rb")
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
config.files_or_directories_to_run = dir
expect(config.files_to_run).to include("#{dir}/a_foo.rb")
expect(config.files_to_run).to include("#{dir}/a_bar.rb")
end
end
context "after files have already been loaded" do
it 'will warn that it will have no effect' do
expect_warning_with_call_site(__FILE__, __LINE__ + 2, /has no effect/)
config.load_spec_files
config.send(setter, "rspec/**/*.spec")
end
it 'will not warn if reset is called after load_spec_files' do
config.load_spec_files
config.reset
expect(RSpec).to_not receive(:warning)
config.send(setter, "rspec/**/*.spec")
end
end
end
end
describe "path with line number" do
it "assigns the line number as a location filter" do
config.files_or_directories_to_run = "path/to/a_spec.rb:37"
expect(config.filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [37]}})
end
end
context "with full_description set" do
it "overrides filters" do
config.filter_run :focused => true
config.full_description = "foo"
expect(config.filter).not_to have_key(:focused)
end
it 'is possible to access the full description regular expression' do
config.full_description = "foo"
expect(config.full_description).to eq(/foo/)
end
end
context "without full_description having been set" do
it 'returns nil from #full_description' do
expect(config.full_description).to eq nil
end
end
context "with line number" do
it "assigns the file and line number as a location filter" do
config.files_or_directories_to_run = "path/to/a_spec.rb:37"
expect(config.filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [37]}})
end
it "assigns multiple files with line numbers as location filters" do
config.files_or_directories_to_run = "path/to/a_spec.rb:37", "other_spec.rb:44"
expect(config.filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [37],
File.expand_path("other_spec.rb") => [44]}})
end
it "assigns files with multiple line numbers as location filters" do
config.files_or_directories_to_run = "path/to/a_spec.rb:37", "path/to/a_spec.rb:44"
expect(config.filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [37, 44]}})
end
end
context "with multiple line numbers" do
it "assigns the file and line numbers as a location filter" do
config.files_or_directories_to_run = "path/to/a_spec.rb:1:3:5:7"
expect(config.filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [1,3,5,7]}})
end
end
it "assigns the example name as the filter on description" do
config.full_description = "foo"
expect(config.filter).to eq({:full_description => /foo/})
end
it "assigns the example names as the filter on description if description is an array" do
config.full_description = [ "foo", "bar" ]
expect(config.filter).to eq({:full_description => Regexp.union(/foo/, /bar/)})
end
it 'is possible to access the full description regular expression' do
config.full_description = "foo","bar"
expect(config.full_description).to eq Regexp.union(/foo/,/bar/)
end
describe "#default_path" do
it 'defaults to "spec"' do
expect(config.default_path).to eq('spec')
end
end
describe "#include" do
module InstanceLevelMethods
def you_call_this_a_blt?
"egad man, where's the mayo?!?!?"
end
end
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.include(InstanceLevelMethods, *args)
config.include_or_extend_modules.last.last
end
end
context "with no filter" do
it "includes the given module into each example group" do
RSpec.configure do |c|
c.include(InstanceLevelMethods)
end
group = ExampleGroup.describe('does like, stuff and junk', :magic_key => :include) { }
expect(group).not_to respond_to(:you_call_this_a_blt?)
expect(group.new.you_call_this_a_blt?).to eq("egad man, where's the mayo?!?!?")
end
end
context "with a filter" do
it "includes the given module into each matching example group" do
RSpec.configure do |c|
c.include(InstanceLevelMethods, :magic_key => :include)
end
group = ExampleGroup.describe('does like, stuff and junk', :magic_key => :include) { }
expect(group).not_to respond_to(:you_call_this_a_blt?)
expect(group.new.you_call_this_a_blt?).to eq("egad man, where's the mayo?!?!?")
end
end
end
describe "#extend" do
module ThatThingISentYou
def that_thing
end
end
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.extend(ThatThingISentYou, *args)
config.include_or_extend_modules.last.last
end
end
it "extends the given module into each matching example group" do
RSpec.configure do |c|
c.extend(ThatThingISentYou, :magic_key => :extend)
end
group = ExampleGroup.describe(ThatThingISentYou, :magic_key => :extend) { }
expect(group).to respond_to(:that_thing)
end
end
describe "#run_all_when_everything_filtered?" do
it "defaults to false" do
expect(config.run_all_when_everything_filtered?).to be_falsey
end
it "can be queried with question method" do
config.run_all_when_everything_filtered = true
expect(config.run_all_when_everything_filtered?).to be_truthy
end
end
%w[color color_enabled].each do |color_option|
describe "##{color_option}=" do
context "given true" do
before { config.send "#{color_option}=", true }
context "with config.tty? and output.tty?" do
it "does not set color_enabled" do
output = StringIO.new
config.output_stream = output
config.tty = true
allow(config.output_stream).to receive_messages :tty? => true
expect(config.send(color_option)).to be_truthy
expect(config.send(color_option, output)).to be_truthy
end
end
context "with config.tty? and !output.tty?" do
it "sets color_enabled" do
output = StringIO.new
config.output_stream = output
config.tty = true
allow(config.output_stream).to receive_messages :tty? => false
expect(config.send(color_option)).to be_truthy
expect(config.send(color_option, output)).to be_truthy
end
end
context "with config.tty? and !output.tty?" do
it "does not set color_enabled" do
output = StringIO.new
config.output_stream = output
config.tty = false
allow(config.output_stream).to receive_messages :tty? => true
expect(config.send(color_option)).to be_truthy
expect(config.send(color_option, output)).to be_truthy
end
end
context "with !config.tty? and !output.tty?" do
it "does not set color_enabled" do
output = StringIO.new
config.output_stream = output
config.tty = false
allow(config.output_stream).to receive_messages :tty? => false
expect(config.send(color_option)).to be_falsey
expect(config.send(color_option, output)).to be_falsey
end
end
context "on windows" do
before do
@original_host = RbConfig::CONFIG['host_os']
RbConfig::CONFIG['host_os'] = 'mingw'
allow(config).to receive(:require)
end
after do
RbConfig::CONFIG['host_os'] = @original_host
end
context "with ANSICON available" do
around(:each) { |e| with_env_vars('ANSICON' => 'ANSICON', &e) }
it "enables colors" do
config.output_stream = StringIO.new
allow(config.output_stream).to receive_messages :tty? => true
config.send "#{color_option}=", true
expect(config.send(color_option)).to be_truthy
end
it "leaves output stream intact" do
config.output_stream = $stdout
allow(config).to receive(:require) do |what|
config.output_stream = 'foo' if what =~ /Win32/
end
config.send "#{color_option}=", true
expect(config.output_stream).to eq($stdout)
end
end
context "with ANSICON NOT available" do
before do
allow_warning
end
it "warns to install ANSICON" do
allow(config).to receive(:require) { raise LoadError }
expect_warning_with_call_site(__FILE__, __LINE__ + 1, /You must use ANSICON/)
config.send "#{color_option}=", true
end
it "sets color_enabled to false" do
allow(config).to receive(:require) { raise LoadError }
config.send "#{color_option}=", true
config.color_enabled = true
expect(config.send(color_option)).to be_falsey
end
end
end
end
end
it "prefers incoming cli_args" do
config.output_stream = StringIO.new
allow(config.output_stream).to receive_messages :tty? => true
config.force :color => true
config.color = false
expect(config.color).to be_truthy
end
end
describe '#formatter=' do
it "delegates to add_formatter (better API for user-facing configuration)" do
expect(config).to receive(:add_formatter).with('these','options')
config.add_formatter('these','options')
end
end
describe "#add_formatter" do
it "adds to the list of formatters" do
config.add_formatter :documentation
expect(config.formatters.first).to be_an_instance_of(Formatters::DocumentationFormatter)
end
it "finds a formatter by name (w/ Symbol)" do
config.add_formatter :documentation
expect(config.formatters.first).to be_an_instance_of(Formatters::DocumentationFormatter)
end
it "finds a formatter by name (w/ String)" do
config.add_formatter 'documentation'
expect(config.formatters.first).to be_an_instance_of(Formatters::DocumentationFormatter)
end
it "finds a formatter by class" do
formatter_class = Class.new(Formatters::BaseTextFormatter)
config.add_formatter formatter_class
expect(config.formatters.first).to be_an_instance_of(formatter_class)
end
it "finds a formatter by class name" do
stub_const("CustomFormatter", Class.new(Formatters::BaseFormatter))
config.add_formatter "CustomFormatter"
expect(config.formatters.first).to be_an_instance_of(CustomFormatter)
end
it "finds a formatter by class fully qualified name" do
stub_const("RSpec::CustomFormatter", Class.new(Formatters::BaseFormatter))
config.add_formatter "RSpec::CustomFormatter"
expect(config.formatters.first).to be_an_instance_of(RSpec::CustomFormatter)
end
it "requires a formatter file based on its fully qualified name" do
expect(config).to receive(:require).with('rspec/custom_formatter') do
stub_const("RSpec::CustomFormatter", Class.new(Formatters::BaseFormatter))
end
config.add_formatter "RSpec::CustomFormatter"
expect(config.formatters.first).to be_an_instance_of(RSpec::CustomFormatter)
end
it "raises NameError if class is unresolvable" do
expect(config).to receive(:require).with('rspec/custom_formatter3')
expect(lambda { config.add_formatter "RSpec::CustomFormatter3" }).to raise_error(NameError)
end
it "raises ArgumentError if formatter is unknown" do
expect(lambda { config.add_formatter :progresss }).to raise_error(ArgumentError)
end
context "with a 2nd arg defining the output" do
it "creates a file at that path and sets it as the output" do
path = File.join(Dir.tmpdir, 'output.txt')
config.add_formatter('doc', path)
expect(config.formatters.first.output).to be_a(File)
expect(config.formatters.first.output.path).to eq(path)
end
end
end
describe "#filter_run_including" do
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.filter_run_including(*args)
config.inclusion_filter
end
end
it "sets the filter with a hash" do
config.filter_run_including :foo => true
expect(config.inclusion_filter[:foo]).to be(true)
end
it "sets the filter with a symbol" do
config.filter_run_including :foo
expect(config.inclusion_filter[:foo]).to be(true)
end
it "merges with existing filters" do
config.filter_run_including :foo => true
config.filter_run_including :bar => false
expect(config.inclusion_filter[:foo]).to be(true)
expect(config.inclusion_filter[:bar]).to be(false)
end
end
describe "#filter_run_excluding" do
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.filter_run_excluding(*args)
config.exclusion_filter
end
end
it "sets the filter" do
config.filter_run_excluding :foo => true
expect(config.exclusion_filter[:foo]).to be(true)
end
it "sets the filter using a symbol" do
config.filter_run_excluding :foo
expect(config.exclusion_filter[:foo]).to be(true)
end
it "merges with existing filters" do
config.filter_run_excluding :foo => true
config.filter_run_excluding :bar => false
expect(config.exclusion_filter[:foo]).to be(true)
expect(config.exclusion_filter[:bar]).to be(false)
end
end
describe "#inclusion_filter" do
it "returns {} even if set to nil" do
config.inclusion_filter = nil
expect(config.inclusion_filter).to eq({})
end
end
describe "#inclusion_filter=" do
it "treats symbols as hash keys with true values when told to" do
config.inclusion_filter = :foo
expect(config.inclusion_filter).to eq({:foo => true})
end
it "overrides any inclusion filters set on the command line or in configuration files" do
config.force(:inclusion_filter => { :foo => :bar })
config.inclusion_filter = {:want => :this}
expect(config.inclusion_filter).to eq({:want => :this})
end
end
describe "#exclusion_filter" do
it "returns {} even if set to nil" do
config.exclusion_filter = nil
expect(config.exclusion_filter).to eq({})
end
describe "the default :if filter" do
it "does not exclude a spec with { :if => true } metadata" do
expect(config.exclusion_filter[:if].call(true)).to be_falsey
end
it "excludes a spec with { :if => false } metadata" do
expect(config.exclusion_filter[:if].call(false)).to be_truthy
end
it "excludes a spec with { :if => nil } metadata" do
expect(config.exclusion_filter[:if].call(nil)).to be_truthy
end
end
describe "the default :unless filter" do
it "excludes a spec with { :unless => true } metadata" do
expect(config.exclusion_filter[:unless].call(true)).to be_truthy
end
it "does not exclude a spec with { :unless => false } metadata" do
expect(config.exclusion_filter[:unless].call(false)).to be_falsey
end
it "does not exclude a spec with { :unless => nil } metadata" do
expect(config.exclusion_filter[:unless].call(nil)).to be_falsey
end
end
end
describe "#treat_symbols_as_metadata_keys_with_true_values=" do
it 'is deprecated' do
expect_deprecation_with_call_site(__FILE__, __LINE__ + 1)
config.treat_symbols_as_metadata_keys_with_true_values = true
end
end
describe "#exclusion_filter=" do
it "treats symbols as hash keys with true values when told to" do
config.exclusion_filter = :foo
expect(config.exclusion_filter).to eq({:foo => true})
end
it "overrides any exclusion filters set on the command line or in configuration files" do
config.force(:exclusion_filter => { :foo => :bar })
config.exclusion_filter = {:want => :this}
expect(config.exclusion_filter).to eq({:want => :this})
end
end
describe "line_numbers=" do
it "sets the line numbers" do
config.line_numbers = ['37']
expect(config.filter).to eq({:line_numbers => [37]})
end
it "overrides filters" do
config.filter_run :focused => true
config.line_numbers = ['37']
expect(config.filter).to eq({:line_numbers => [37]})
end
it "prevents subsequent filters" do
config.line_numbers = ['37']
config.filter_run :focused => true
expect(config.filter).to eq({:line_numbers => [37]})
end
end
describe "line_numbers" do
it "returns the line numbers from the filter" do
config.line_numbers = ['42']
expect(config.line_numbers).to eq [42]
end
it "defaults to empty" do
expect(config.line_numbers).to eq []
end
end
describe "#full_backtrace=" do
it "doesn't impact other instances of config" do
config_1 = Configuration.new
config_2 = Configuration.new
config_1.full_backtrace = true
expect(config_2.full_backtrace?).to be_falsey
end
end
describe "#backtrace_exclusion_patterns=" do
it "actually receives the new filter values" do
config = Configuration.new
config.backtrace_exclusion_patterns = [/.*/]
expect(config.backtrace_formatter.exclude? "this").to be_truthy
end
end
describe 'full_backtrace' do
it 'returns true when backtrace patterns is empty' do
config.backtrace_exclusion_patterns = []
expect(config.full_backtrace?).to eq true
end
it 'returns false when backtrace patterns isnt empty' do
config.backtrace_exclusion_patterns = [:lib]
expect(config.full_backtrace?).to eq false
end
end
describe "#backtrace_exclusion_patterns" do
it "can be appended to" do
config = Configuration.new
config.backtrace_exclusion_patterns << /.*/
expect(config.backtrace_formatter.exclude? "this").to be_truthy
end
end
describe "#libs=" do
include_context "isolate load path mutation"
it "adds directories to the LOAD_PATH" do
expect($LOAD_PATH).to receive(:unshift).with("a/dir")
config.libs = ["a/dir"]
end
end
describe "libs" do
include_context "isolate load path mutation"
it 'records paths added to the load path' do
config.libs = ["a/dir"]
expect(config.libs).to eq ["a/dir"]
end
end
describe "#requires=" do
before { expect(RSpec).to receive :deprecate }
it "requires the configured files" do
expect(config).to receive(:require).with('foo').ordered
expect(config).to receive(:require).with('bar').ordered
config.requires = ['foo', 'bar']
end
it "stores require paths" do
expect(config).to receive(:require).with("a/path")
config.requires = ["a/path"]
expect(config.requires).to eq ['a/path']
end
end
describe "#add_setting" do
describe "with no modifiers" do
context "with no additional options" do
before do
config.add_setting :custom_option
end
it "defaults to nil" do
expect(config.custom_option).to be_nil
end
it "adds a predicate" do
expect(config.custom_option?).to be_falsey
end
it "can be overridden" do
config.custom_option = "a value"
expect(config.custom_option).to eq("a value")
end
end
context "with :default => 'a value'" do
before do
config.add_setting :custom_option, :default => 'a value'
end
it "defaults to 'a value'" do
expect(config.custom_option).to eq("a value")
end
it "returns true for the predicate" do
expect(config.custom_option?).to be_truthy
end
it "can be overridden with a truthy value" do
config.custom_option = "a new value"
expect(config.custom_option).to eq("a new value")
end
it "can be overridden with nil" do
config.custom_option = nil
expect(config.custom_option).to eq(nil)
end
it "can be overridden with false" do
config.custom_option = false
expect(config.custom_option).to eq(false)
end
end
end
context "with :alias => " do
it "is deprecated" do
expect(RSpec)::to receive(:deprecate).with(/:alias option/, :replacement => ":alias_with")
config.add_setting :custom_option
config.add_setting :another_custom_option, :alias => :custom_option
end
end
context "with :alias_with => " do
before do
config.add_setting :custom_option, :alias_with => :another_custom_option
end
it "delegates the getter to the other option" do
config.another_custom_option = "this value"
expect(config.custom_option).to eq("this value")
end
it "delegates the setter to the other option" do
config.custom_option = "this value"
expect(config.another_custom_option).to eq("this value")
end
it "delegates the predicate to the other option" do
config.custom_option = true
expect(config.another_custom_option?).to be_truthy
end
end
end
describe "#configure_group" do
it "extends with 'extend'" do
mod = Module.new
group = ExampleGroup.describe("group", :foo => :bar)
config.extend(mod, :foo => :bar)
config.configure_group(group)
expect(group).to be_a(mod)
end
it "extends with 'module'" do
mod = Module.new
group = ExampleGroup.describe("group", :foo => :bar)
config.include(mod, :foo => :bar)
config.configure_group(group)
expect(group.included_modules).to include(mod)
end
it "requires only one matching filter" do
mod = Module.new
group = ExampleGroup.describe("group", :foo => :bar)
config.include(mod, :foo => :bar, :baz => :bam)
config.configure_group(group)
expect(group.included_modules).to include(mod)
end
it "includes each one before deciding whether to include the next" do
mod1 = Module.new do
def self.included(host)
host.metadata[:foo] = :bar
end
end
mod2 = Module.new
group = ExampleGroup.describe("group")
config.include(mod1)
config.include(mod2, :foo => :bar)
config.configure_group(group)
expect(group.included_modules).to include(mod1)
expect(group.included_modules).to include(mod2)
end
module IncludeOrExtendMeOnce
def self.included(host)
raise "included again" if host.instance_methods.include?(:foobar)
host.class_eval { def foobar; end }
end
def self.extended(host)
raise "extended again" if host.respond_to?(:foobar)
def host.foobar; end
end
end
it "doesn't include a module when already included in ancestor" do
config.include(IncludeOrExtendMeOnce, :foo => :bar)
group = ExampleGroup.describe("group", :foo => :bar)
child = group.describe("child")
config.configure_group(group)
config.configure_group(child)
end
it "doesn't extend when ancestor is already extended with same module" do
config.extend(IncludeOrExtendMeOnce, :foo => :bar)
group = ExampleGroup.describe("group", :foo => :bar)
child = group.describe("child")
config.configure_group(group)
config.configure_group(child)
end
end
describe "#alias_example_to" do
it_behaves_like "metadata hash builder" do
after do
RSpec::Core::ExampleGroup.module_eval do
class << self
undef :my_example_method if method_defined? :my_example_method
end
end
end
def metadata_hash(*args)
config.alias_example_to :my_example_method, *args
group = ExampleGroup.describe("group")
example = group.my_example_method("description")
example.metadata
end
end
end
describe "#reset" do
it "clears the reporter" do
expect(config.reporter).not_to be_nil
config.reset
expect(config.instance_variable_get("@reporter")).to be_nil
end
it "clears the formatters" do
config.add_formatter "doc"
config.reset
expect(config.formatters).to be_empty
end
end
describe "#force" do
context "for ordering options" do
let(:list) { [1, 2, 3, 4] }
let(:ordering_strategy) { config.ordering_registry.fetch(:global) }
let(:rng) { RSpec::Core::RandomNumberGenerator.new config.seed }
let(:shuffled) { Ordering::Random.new(config).shuffle list, rng }
specify "CLI `--order defined` takes precedence over `config.order = rand`" do
config.force :order => "defined"
config.order = "rand"
expect(ordering_strategy.order(list)).to eq([1, 2, 3, 4])
end
specify "CLI `--order rand:37` takes precedence over `config.order = defined`" do
config.force :order => "rand:37"
config.order = "defined"
expect(ordering_strategy.order(list)).to eq(shuffled)
end
specify "CLI `--seed 37` forces order and seed" do
config.force :seed => 37
config.order = "defined"
config.seed = 145
expect(ordering_strategy.order(list)).to eq(shuffled)
expect(config.seed).to eq(37)
end
specify "CLI `--order defined` takes precedence over `config.register_ordering(:global)`" do
config.force :order => "defined"
config.register_ordering(:global, &:reverse)
expect(ordering_strategy.order(list)).to eq([1, 2, 3, 4])
end
end
it "forces 'false' value" do
config.add_setting :custom_option
config.custom_option = true
expect(config.custom_option?).to be_truthy
config.force :custom_option => false
expect(config.custom_option?).to be_falsey
config.custom_option = true
expect(config.custom_option?).to be_falsey
end
end
describe '#seed' do
it 'returns the seed as an int' do
config.seed = '123'
expect(config.seed).to eq(123)
end
end
describe "#seed_used?" do
def use_seed_on(registry)
registry.fetch(:random).order([1, 2])
end
it 'returns false if neither ordering registry used the seed' do
expect(config.seed_used?).to be false
end
it 'returns true if the ordering registry used the seed' do
use_seed_on(config.ordering_registry)
expect(config.seed_used?).to be true
end
end
describe '#order=' do
context 'given "random"' do
before do
config.seed = 7654
config.order = 'random'
end
it 'does not change the seed' do
expect(config.seed).to eq(7654)
end
it 'sets up random ordering' do
allow(RSpec).to receive_messages(:configuration => config)
global_ordering = config.ordering_registry.fetch(:global)
expect(global_ordering).to be_an_instance_of(Ordering::Random)
end
end
context 'given "random:123"' do
before { config.order = 'random:123' }
it 'sets seed to 123' do
expect(config.seed).to eq(123)
end
it 'sets up random ordering' do
allow(RSpec).to receive_messages(:configuration => config)
global_ordering = config.ordering_registry.fetch(:global)
expect(global_ordering).to be_an_instance_of(Ordering::Random)
end
end
context 'given "defined"' do
before do
config.order = 'rand:123'
config.order = 'defined'
end
it "does not change the seed" do
expect(config.seed).to eq(123)
end
it 'clears the random ordering' do
allow(RSpec).to receive_messages(:configuration => config)
list = [1, 2, 3, 4]
ordering_strategy = config.ordering_registry.fetch(:global)
expect(ordering_strategy.order(list)).to eq([1, 2, 3, 4])
end
end
end
describe "#register_ordering" do
def register_reverse_ordering
config.register_ordering(:reverse, &:reverse)
end
it 'stores the ordering for later use' do
register_reverse_ordering
list = [1, 2, 3]
strategy = config.ordering_registry.fetch(:reverse)
expect(strategy).to be_a(Ordering::Custom)
expect(strategy.order(list)).to eq([3, 2, 1])
end
it 'can register an ordering object' do
strategy = Object.new
def strategy.order(list)
list.reverse
end
config.register_ordering(:reverse, strategy)
list = [1, 2, 3]
fetched = config.ordering_registry.fetch(:reverse)
expect(fetched).to be(strategy)
expect(fetched.order(list)).to eq([3, 2, 1])
end
end
describe '#warnings' do
around do |example|
@_original_setting = $VERBOSE
example.run
$VERBOSE = @_original_setting
end
it "sets verbose to true when true" do
config.warnings = true
expect($VERBOSE).to eq true
end
it "sets verbose to false when true" do
config.warnings = false
expect($VERBOSE).to eq false
end
it 'returns the verbosity setting' do
expect(config.warnings).to eq $VERBOSE
end
it 'is loaded from config by #force' do
config.force :warnings => true
expect($VERBOSE).to eq true
end
end
describe "#raise_errors_for_deprecations!" do
it 'causes deprecations to raise errors rather than printing to the deprecation stream' do
config.deprecation_stream = stream = StringIO.new
config.raise_errors_for_deprecations!
expect {
config.reporter.deprecation(:deprecated => "foo", :call_site => "foo.rb:1")
}.to raise_error(RSpec::Core::DeprecationError, /foo is deprecated/)
expect(stream.string).to eq("")
end
end
describe "#expose_current_running_example_as" do
before { stub_const(Configuration::ExposeCurrentExample.name, Module.new) }
it 'exposes the current example via the named method' do
RSpec.configuration.expose_current_running_example_as :the_example
RSpec.configuration.expose_current_running_example_as :another_example_helper
value_1 = value_2 = nil
ExampleGroup.describe "Group" do
it "works" do
value_1 = the_example
value_2 = another_example_helper
end
end.run
expect(value_1).to be_an(RSpec::Core::Example)
expect(value_1.description).to eq("works")
expect(value_2).to be(value_1)
end
end
end
end
| 1 | 11,084 | Would be good to have another context `"when a duplicate formatter exists for a different output target"` that shows that it keeps both. As this stands, the specs could pass w/o the `formatter.output == new_formatter.output` check. | rspec-rspec-core | rb |
@@ -217,14 +217,15 @@ void SYCLSharedUSMSpace::deallocate(const char* arg_label,
Kokkos::Tools::make_space_handle(name()));
}
-void SYCLDeviceUSMSpace::impl_access_error() {
+KOKKOS_DEPRECATED void SYCLDeviceUSMSpace::impl_access_error() {
const std::string msg(
"Kokkos::Experimental::SYCLDeviceUSMSpace::impl_access_error attempt to "
"execute device function from non-device space");
Kokkos::Impl::throw_runtime_exception(msg);
}
-void SYCLDeviceUSMSpace::impl_access_error(const void* const) {
+KOKKOS_DEPRECATED void SYCLDeviceUSMSpace::impl_access_error(
+ const void* const) {
impl_access_error();
}
| 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Macros.hpp>
#include <Kokkos_HostSpace.hpp>
#include <Kokkos_SYCL.hpp>
#include <Kokkos_SYCL_Space.hpp>
#include <SYCL/Kokkos_SYCL_DeepCopy.hpp>
#include <SYCL/Kokkos_SYCL_Instance.hpp>
#include <impl/Kokkos_MemorySpace.hpp>
#include <impl/Kokkos_Profiling.hpp>
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------*/
namespace Kokkos {
namespace Impl {
namespace {
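// Helper overloads wrapping sycl::queue::memcpy; used by the DeepCopy
// specializations below.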
auto USM_memcpy(sycl::queue& q, void* dst, const void* src, size_t n) {
return q.memcpy(dst, src, n);
}
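// Asynchronous copy on the queue of the given internal instance; the event
// returned by the memcpy is intentionally discarded.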
void USM_memcpy(Kokkos::Experimental::Impl::SYCLInternal& space, void* dst,
const void* src, size_t n) {
(void)USM_memcpy(*space.m_queue, dst, src, n);
}
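// Blocking copy on the singleton queue: fence the default execution space
// first, then wait on the event returned by the copy.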
void USM_memcpy(void* dst, const void* src, size_t n) {
Experimental::SYCL().fence();
auto event = USM_memcpy(
*Experimental::Impl::SYCLInternal::singleton().m_queue, dst, src, n);
Experimental::Impl::SYCLInternal::fence(event);
}
} // namespace
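// DeepCopy specializations dispatch to the USM_memcpy helpers above: the
// constructors taking an execution space instance enqueue asynchronously,
// the instance-less ones block until the copy has completed.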
DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace,
Kokkos::Experimental::SYCLDeviceUSMSpace, Kokkos::Experimental::SYCL>::
DeepCopy(const Kokkos::Experimental::SYCL& instance, void* dst,
const void* src, size_t n) {
USM_memcpy(*instance.impl_internal_space_instance(), dst, src, n);
}
DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace,
Kokkos::Experimental::SYCLDeviceUSMSpace,
Kokkos::Experimental::SYCL>::DeepCopy(void* dst, const void* src,
size_t n) {
USM_memcpy(dst, src, n);
}
DeepCopy<Kokkos::HostSpace, Kokkos::Experimental::SYCLDeviceUSMSpace,
Kokkos::Experimental::SYCL>::DeepCopy(const Kokkos::Experimental::SYCL&
instance,
void* dst, const void* src,
size_t n) {
USM_memcpy(*instance.impl_internal_space_instance(), dst, src, n);
}
DeepCopy<Kokkos::HostSpace, Kokkos::Experimental::SYCLDeviceUSMSpace,
Kokkos::Experimental::SYCL>::DeepCopy(void* dst, const void* src,
size_t n) {
USM_memcpy(dst, src, n);
}
DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace, Kokkos::HostSpace,
Kokkos::Experimental::SYCL>::DeepCopy(const Kokkos::Experimental::SYCL&
instance,
void* dst, const void* src,
size_t n) {
USM_memcpy(*instance.impl_internal_space_instance(), dst, src, n);
}
DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace, Kokkos::HostSpace,
Kokkos::Experimental::SYCL>::DeepCopy(void* dst, const void* src,
size_t n) {
USM_memcpy(dst, src, n);
}
} // namespace Impl
} // namespace Kokkos
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------*/
namespace Kokkos {
namespace Experimental {
SYCLDeviceUSMSpace::SYCLDeviceUSMSpace() : m_device(SYCL().sycl_device()) {}
SYCLSharedUSMSpace::SYCLSharedUSMSpace() : m_device(SYCL().sycl_device()) {}
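// Common allocation path for the device and shared USM spaces: allocates USM
// memory of the requested kind, throws RawMemoryAllocationFailure on failure,
// and reports the allocation to the profiling tools if they are loaded.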
void* allocate_sycl(
const char* arg_label, const size_t arg_alloc_size,
const size_t arg_logical_size, const Kokkos::Tools::SpaceHandle arg_handle,
const RawMemoryAllocationFailure::AllocationMechanism failure_tag,
const sycl::usm::alloc allocation_kind) {
const sycl::queue& queue = *SYCL().impl_internal_space_instance()->m_queue;
void* const hostPtr = sycl::malloc(arg_alloc_size, queue, allocation_kind);
if (hostPtr == nullptr)
throw RawMemoryAllocationFailure(
arg_alloc_size, 1, RawMemoryAllocationFailure::FailureMode::Unknown,
failure_tag);
if (Kokkos::Profiling::profileLibraryLoaded()) {
const size_t reported_size =
(arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
Kokkos::Profiling::allocateData(arg_handle, arg_label, hostPtr,
reported_size);
}
return hostPtr;
}
void* SYCLDeviceUSMSpace::allocate(const size_t arg_alloc_size) const {
return allocate("[unlabeled]", arg_alloc_size);
}
void* SYCLDeviceUSMSpace::allocate(const char* arg_label,
const size_t arg_alloc_size,
const size_t arg_logical_size) const {
return allocate_sycl(
arg_label, arg_alloc_size, arg_logical_size,
Kokkos::Tools::make_space_handle(name()),
RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocDevice,
sycl::usm::alloc::device);
}
void* SYCLSharedUSMSpace::allocate(const size_t arg_alloc_size) const {
return allocate("[unlabeled]", arg_alloc_size);
}
void* SYCLSharedUSMSpace::allocate(const char* arg_label,
const size_t arg_alloc_size,
const size_t arg_logical_size) const {
return allocate_sycl(
arg_label, arg_alloc_size, arg_logical_size,
Kokkos::Tools::make_space_handle(name()),
RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocShared,
sycl::usm::alloc::shared);
}
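// Common deallocation path: report to the profiling tools if they are loaded,
// then free the pointer through the default queue.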
void sycl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
const size_t arg_alloc_size, const size_t arg_logical_size,
const Kokkos::Tools::SpaceHandle arg_handle) {
if (Kokkos::Profiling::profileLibraryLoaded()) {
const size_t reported_size =
(arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
reported_size);
}
const sycl::queue& queue = *SYCL().impl_internal_space_instance()->m_queue;
sycl::free(arg_alloc_ptr, queue);
}
void SYCLDeviceUSMSpace::deallocate(void* const arg_alloc_ptr,
const size_t arg_alloc_size) const {
deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
}
void SYCLDeviceUSMSpace::deallocate(const char* arg_label,
void* const arg_alloc_ptr,
const size_t arg_alloc_size,
const size_t arg_logical_size) const {
sycl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size,
Kokkos::Tools::make_space_handle(name()));
}
void SYCLSharedUSMSpace::deallocate(void* const arg_alloc_ptr,
const size_t arg_alloc_size) const {
deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
}
void SYCLSharedUSMSpace::deallocate(const char* arg_label,
void* const arg_alloc_ptr,
const size_t arg_alloc_size,
const size_t arg_logical_size) const {
sycl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size,
Kokkos::Tools::make_space_handle(name()));
}
void SYCLDeviceUSMSpace::impl_access_error() {
const std::string msg(
"Kokkos::Experimental::SYCLDeviceUSMSpace::impl_access_error attempt to "
"execute device function from non-device space");
Kokkos::Impl::throw_runtime_exception(msg);
}
void SYCLDeviceUSMSpace::impl_access_error(const void* const) {
impl_access_error();
}
} // namespace Experimental
} // namespace Kokkos
namespace Kokkos {
namespace Impl {
#ifdef KOKKOS_ENABLE_DEBUG
SharedAllocationRecord<void, void> SharedAllocationRecord<
Kokkos::Experimental::SYCLDeviceUSMSpace, void>::s_root_record;
SharedAllocationRecord<void, void> SharedAllocationRecord<
Kokkos::Experimental::SYCLSharedUSMSpace, void>::s_root_record;
#endif
SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace, void>::
SharedAllocationRecord(
const Kokkos::Experimental::SYCLDeviceUSMSpace& space,
const std::string& label, const size_t size,
const SharedAllocationRecord<void, void>::function_type dealloc)
// Pass through allocated [ SharedAllocationHeader , user_memory ]
// Pass through deallocation function
: base_t(
#ifdef KOKKOS_ENABLE_DEBUG
&SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace,
void>::s_root_record,
#endif
Kokkos::Impl::checked_allocation_with_header(space, label, size),
sizeof(SharedAllocationHeader) + size, dealloc),
m_space(space) {
if (Kokkos::Profiling::profileLibraryLoaded()) {
Kokkos::Profiling::allocateData(
Kokkos::Profiling::make_space_handle(space.name()), label, data(),
size);
}
SharedAllocationHeader header;
this->base_t::_fill_host_accessible_header_info(header, label);
// Copy to device memory
Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace, HostSpace>(
RecordBase::m_alloc_ptr, &header, sizeof(SharedAllocationHeader));
}
SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace, void>::
SharedAllocationRecord(
const Kokkos::Experimental::SYCLSharedUSMSpace& arg_space,
const std::string& arg_label, const size_t arg_alloc_size,
const SharedAllocationRecord<void, void>::function_type arg_dealloc)
// Pass through allocated [ SharedAllocationHeader , user_memory ]
// Pass through deallocation function
: base_t(
#ifdef KOKKOS_ENABLE_DEBUG
&SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace,
void>::s_root_record,
#endif
Impl::checked_allocation_with_header(arg_space, arg_label,
arg_alloc_size),
sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc),
m_space(arg_space) {
this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
arg_label);
}
} // namespace Impl
} // namespace Kokkos
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
namespace Kokkos {
namespace Impl {
SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace,
void>::~SharedAllocationRecord() {
if (Kokkos::Profiling::profileLibraryLoaded()) {
SharedAllocationHeader header;
Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace,
Kokkos::HostSpace>(&header, RecordBase::m_alloc_ptr,
sizeof(SharedAllocationHeader));
Kokkos::Profiling::deallocateData(
Kokkos::Profiling::make_space_handle(
Kokkos::Experimental::SYCLDeviceUSMSpace::name()),
header.m_label, data(), size());
}
m_space.deallocate(SharedAllocationRecord<void, void>::m_alloc_ptr,
SharedAllocationRecord<void, void>::m_alloc_size);
}
SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace,
void>::~SharedAllocationRecord() {
const char* label = nullptr;
if (Kokkos::Profiling::profileLibraryLoaded()) {
label = RecordBase::m_alloc_ptr->m_label;
}
m_space.deallocate(label, SharedAllocationRecord<void, void>::m_alloc_ptr,
SharedAllocationRecord<void, void>::m_alloc_size,
(SharedAllocationRecord<void, void>::m_alloc_size -
sizeof(SharedAllocationHeader)));
}
//----------------------------------------------------------------------------
} // namespace Impl
} // namespace Kokkos
//==============================================================================
// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
#include <impl/Kokkos_SharedAlloc_timpl.hpp>
namespace Kokkos {
namespace Impl {
// To avoid additional compilation cost for something that's (mostly?) not
// performance sensitive, we explicitly instantiate these CRTP base classes here,
// where we have access to the associated *_timpl.hpp header files.
template class HostInaccessibleSharedAllocationRecordCommon<
Kokkos::Experimental::SYCLDeviceUSMSpace>;
template class SharedAllocationRecordCommon<
Kokkos::Experimental::SYCLDeviceUSMSpace>;
template class SharedAllocationRecordCommon<
Kokkos::Experimental::SYCLSharedUSMSpace>;
} // namespace Impl
} // namespace Kokkos
// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
//==============================================================================
| 1 | 27,821 | Why not just remove? The CUDA one has been around for while. This one has not been part of any release (?) | kokkos-kokkos | cpp |
@@ -83,8 +83,9 @@ public class Session implements ContentBlocking.Delegate, GeckoSession.Navigatio
private transient SharedPreferences mPrefs;
private transient GeckoRuntime mRuntime;
private transient byte[] mPrivatePage;
- private transient boolean mFirstContentfulPaint;
+ private transient boolean mFirstContentFulPaint;
private transient long mKeepAlive;
+ private transient boolean mIsFirstActivation;
public interface BitmapChangedListener {
void onBitmapChanged(Session aSession, Bitmap aBitmap); | 1 | /* -*- Mode: Java; c-basic-offset: 4; tab-width: 4; indent-tabs-mode: nil; -*-
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.vrbrowser.browser.engine;
import android.app.Activity;
import android.content.Context;
import android.content.SharedPreferences;
import android.graphics.Bitmap;
import android.preference.PreferenceManager;
import android.util.Log;
import android.view.Surface;
import android.view.inputmethod.CursorAnchorInfo;
import android.view.inputmethod.ExtractedText;
import android.view.inputmethod.ExtractedTextRequest;
import androidx.annotation.IntDef;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.UiThread;
import org.mozilla.geckoview.AllowOrDeny;
import org.mozilla.geckoview.ContentBlocking;
import org.mozilla.geckoview.GeckoDisplay;
import org.mozilla.geckoview.GeckoResult;
import org.mozilla.geckoview.GeckoRuntime;
import org.mozilla.geckoview.GeckoSession;
import org.mozilla.geckoview.GeckoSessionSettings;
import org.mozilla.geckoview.MediaElement;
import org.mozilla.geckoview.SlowScriptResponse;
import org.mozilla.geckoview.WebRequestError;
import org.mozilla.vrbrowser.R;
import org.mozilla.vrbrowser.browser.Media;
import org.mozilla.vrbrowser.browser.SessionChangeListener;
import org.mozilla.vrbrowser.browser.SettingsStore;
import org.mozilla.vrbrowser.browser.UserAgentOverride;
import org.mozilla.vrbrowser.browser.VideoAvailabilityListener;
import org.mozilla.vrbrowser.geolocation.GeolocationData;
import org.mozilla.vrbrowser.telemetry.GleanMetricsService;
import org.mozilla.vrbrowser.telemetry.TelemetryWrapper;
import org.mozilla.vrbrowser.utils.BitmapCache;
import org.mozilla.vrbrowser.utils.InternalPages;
import org.mozilla.vrbrowser.utils.SystemUtils;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import static java.util.Objects.requireNonNull;
import static org.mozilla.vrbrowser.utils.ServoUtils.createServoSession;
import static org.mozilla.vrbrowser.utils.ServoUtils.isInstanceOfServoSession;
import static org.mozilla.vrbrowser.utils.ServoUtils.isServoAvailable;
public class Session implements ContentBlocking.Delegate, GeckoSession.NavigationDelegate,
GeckoSession.ProgressDelegate, GeckoSession.ContentDelegate, GeckoSession.TextInputDelegate,
GeckoSession.PromptDelegate, GeckoSession.MediaDelegate, GeckoSession.HistoryDelegate, GeckoSession.PermissionDelegate,
GeckoSession.SelectionActionDelegate, SharedPreferences.OnSharedPreferenceChangeListener, SessionChangeListener {
private static final String LOGTAG = SystemUtils.createLogtag(Session.class);
private static UserAgentOverride sUserAgentOverride;
private static final long KEEP_ALIVE_DURATION_MS = 1000; // 1 second.
private transient CopyOnWriteArrayList<GeckoSession.NavigationDelegate> mNavigationListeners;
private transient CopyOnWriteArrayList<GeckoSession.ProgressDelegate> mProgressListeners;
private transient CopyOnWriteArrayList<GeckoSession.ContentDelegate> mContentListeners;
private transient CopyOnWriteArrayList<SessionChangeListener> mSessionChangeListeners;
private transient CopyOnWriteArrayList<GeckoSession.TextInputDelegate> mTextInputListeners;
private transient CopyOnWriteArrayList<VideoAvailabilityListener> mVideoAvailabilityListeners;
private transient CopyOnWriteArrayList<BitmapChangedListener> mBitmapChangedListeners;
private transient CopyOnWriteArrayList<GeckoSession.SelectionActionDelegate> mSelectionActionListeners;
private SessionState mState;
private CopyOnWriteArrayList<Runnable> mQueuedCalls = new CopyOnWriteArrayList<>();
private transient GeckoSession.PermissionDelegate mPermissionDelegate;
private transient GeckoSession.PromptDelegate mPromptDelegate;
private transient GeckoSession.HistoryDelegate mHistoryDelegate;
private transient Context mContext;
private transient SharedPreferences mPrefs;
private transient GeckoRuntime mRuntime;
private transient byte[] mPrivatePage;
private transient boolean mFirstContentfulPaint;
private transient long mKeepAlive;
public interface BitmapChangedListener {
void onBitmapChanged(Session aSession, Bitmap aBitmap);
}
@IntDef(value = { SESSION_OPEN, SESSION_DO_NOT_OPEN})
public @interface SessionOpenModeFlags {}
public static final int SESSION_OPEN = 0;
public static final int SESSION_DO_NOT_OPEN = 1;
protected Session(Context aContext, GeckoRuntime aRuntime,
@NonNull SessionSettings aSettings, @SessionOpenModeFlags int aOpenMode) {
mContext = aContext;
mRuntime = aRuntime;
initialize();
mState = createSession(aSettings, aOpenMode);
mState.setActive(true);
}
protected Session(Context aContext, GeckoRuntime aRuntime, @NonNull SessionState aRestoreState) {
mContext = aContext;
mRuntime = aRuntime;
initialize();
mState = aRestoreState;
}
private void initialize() {
mNavigationListeners = new CopyOnWriteArrayList<>();
mProgressListeners = new CopyOnWriteArrayList<>();
mContentListeners = new CopyOnWriteArrayList<>();
mSessionChangeListeners = new CopyOnWriteArrayList<>();
mTextInputListeners = new CopyOnWriteArrayList<>();
mVideoAvailabilityListeners = new CopyOnWriteArrayList<>();
mSelectionActionListeners = new CopyOnWriteArrayList<>();
mBitmapChangedListeners = new CopyOnWriteArrayList<>();
if (mPrefs != null) {
mPrefs.registerOnSharedPreferenceChangeListener(this);
}
mPrefs = PreferenceManager.getDefaultSharedPreferences(mContext);
InternalPages.PageResources pageResources = InternalPages.PageResources.create(R.raw.private_mode, R.raw.private_style);
mPrivatePage = InternalPages.createAboutPage(mContext, pageResources);
if (sUserAgentOverride == null) {
sUserAgentOverride = new UserAgentOverride();
sUserAgentOverride.loadOverridesFromAssets((Activity)mContext, mContext.getString(R.string.user_agent_override_file));
}
}
protected void shutdown() {
if (mState.mSession != null) {
if (mState.mSession.isOpen()) {
mState.mSession.close();
}
mState.mDisplay = null;
mState.mSession = null;
}
for (SessionChangeListener listener : mSessionChangeListeners) {
listener.onRemoveSession(this);
}
if (mState.mParentId != null) {
Session parent = SessionStore.get().getSession(mState.mParentId);
if (parent != null) {
parent.mSessionChangeListeners.remove(this);
}
}
mQueuedCalls.clear();
mNavigationListeners.clear();
mProgressListeners.clear();
mContentListeners.clear();
mSessionChangeListeners.clear();
mTextInputListeners.clear();
mVideoAvailabilityListeners.clear();
mSelectionActionListeners.clear();
mBitmapChangedListeners.clear();
if (mPrefs != null) {
mPrefs.unregisterOnSharedPreferenceChangeListener(this);
}
}
private void dumpAllState() {
for (GeckoSession.NavigationDelegate listener: mNavigationListeners) {
dumpState(listener);
}
for (GeckoSession.ProgressDelegate listener: mProgressListeners) {
dumpState(listener);
}
for (GeckoSession.ContentDelegate listener: mContentListeners) {
dumpState(listener);
}
for (VideoAvailabilityListener listener: mVideoAvailabilityListeners) {
dumpState(listener);
}
}
private void dumpState(GeckoSession.NavigationDelegate aListener) {
if (mState.mSession != null) {
aListener.onCanGoBack(mState.mSession, canGoBack());
aListener.onCanGoForward(mState.mSession, mState.mCanGoForward);
aListener.onLocationChange(mState.mSession, mState.mUri);
}
}
private void dumpState(GeckoSession.ProgressDelegate aListener) {
if (mState.mIsLoading) {
aListener.onPageStart(mState.mSession, mState.mUri);
} else {
aListener.onPageStop(mState.mSession, true);
}
if (mState.mSecurityInformation != null) {
aListener.onSecurityChange(mState.mSession, mState.mSecurityInformation);
}
}
private void dumpState(GeckoSession.ContentDelegate aListener) {
aListener.onTitleChange(mState.mSession, mState.mTitle);
}
private void dumpState(VideoAvailabilityListener aListener) {
aListener.onVideoAvailabilityChanged(mState.mMediaElements != null && mState.mMediaElements.size() > 0);
}
private void flushQueuedEvents() {
for (Runnable call: mQueuedCalls) {
call.run();
}
mQueuedCalls.clear();
}
public void setPermissionDelegate(GeckoSession.PermissionDelegate aDelegate) {
mPermissionDelegate = aDelegate;
}
public void setPromptDelegate(GeckoSession.PromptDelegate aDelegate) {
mPromptDelegate = aDelegate;
}
public void setHistoryDelegate(GeckoSession.HistoryDelegate aDelegate) {
mHistoryDelegate = aDelegate;
}
public void addNavigationListener(GeckoSession.NavigationDelegate aListener) {
mNavigationListeners.add(aListener);
dumpState(aListener);
}
public void removeNavigationListener(GeckoSession.NavigationDelegate aListener) {
mNavigationListeners.remove(aListener);
}
public void addProgressListener(GeckoSession.ProgressDelegate aListener) {
mProgressListeners.add(aListener);
dumpState(aListener);
}
public void removeProgressListener(GeckoSession.ProgressDelegate aListener) {
mProgressListeners.remove(aListener);
}
public void addContentListener(GeckoSession.ContentDelegate aListener) {
mContentListeners.add(aListener);
dumpState(aListener);
}
public void removeContentListener(GeckoSession.ContentDelegate aListener) {
mContentListeners.remove(aListener);
}
public void addSessionChangeListener(SessionChangeListener aListener) {
mSessionChangeListeners.add(aListener);
}
public void removeSessionChangeListener(SessionChangeListener aListener) {
mSessionChangeListeners.remove(aListener);
}
public void addTextInputListener(GeckoSession.TextInputDelegate aListener) {
mTextInputListeners.add(aListener);
}
public void removeTextInputListener(GeckoSession.TextInputDelegate aListener) {
mTextInputListeners.remove(aListener);
}
public void addVideoAvailabilityListener(VideoAvailabilityListener aListener) {
mVideoAvailabilityListeners.add(aListener);
dumpState(aListener);
}
public void removeVideoAvailabilityListener(VideoAvailabilityListener aListener) {
mVideoAvailabilityListeners.remove(aListener);
}
public void addSelectionActionListener(GeckoSession.SelectionActionDelegate aListener) {
mSelectionActionListeners.add(aListener);
}
public void removeSelectionActionListener(GeckoSession.ContentDelegate aListener) {
mSelectionActionListeners.remove(aListener);
}
public void addBitmapChangedListener(BitmapChangedListener aListener) {
mBitmapChangedListeners.add(aListener);
}
public void removeBitmapChangedListener(BitmapChangedListener aListener) {
mBitmapChangedListeners.remove(aListener);
}
private void setupSessionListeners(GeckoSession aSession) {
aSession.setNavigationDelegate(this);
aSession.setProgressDelegate(this);
aSession.setContentDelegate(this);
aSession.getTextInput().setDelegate(this);
aSession.setPermissionDelegate(this);
aSession.setPromptDelegate(this);
aSession.setContentBlockingDelegate(this);
aSession.setMediaDelegate(this);
aSession.setHistoryDelegate(this);
aSession.setSelectionActionDelegate(this);
}
private void cleanSessionListeners(GeckoSession aSession) {
aSession.setContentDelegate(null);
aSession.setNavigationDelegate(null);
aSession.setProgressDelegate(null);
aSession.getTextInput().setDelegate(null);
aSession.setPromptDelegate(null);
aSession.setPermissionDelegate(null);
aSession.setContentBlockingDelegate(null);
aSession.setMediaDelegate(null);
aSession.setHistoryDelegate(null);
aSession.setSelectionActionDelegate(null);
}
public void suspend() {
if (mState.isActive()) {
Log.e(LOGTAG, "Active Sessions can not be suspended");
return;
}
if (mState.mSession == null) {
return;
}
if (mKeepAlive > System.currentTimeMillis()) {
Log.e(LOGTAG, "Unable to suspend activity with active keep alive time.");
return;
}
Log.d(LOGTAG, "Suspending Session: " + mState.mId);
closeSession(mState);
mState.mSession = null;
}
private boolean shouldLoadDefaultPage(@NonNull SessionState aState) {
if (aState.mUri != null && aState.mUri.length() != 0 && !aState.mUri.equals(mContext.getString(R.string.about_blank))) {
return false;
}
if (aState.mSessionState != null && aState.mSessionState.size() != 0) {
return false;
}
return true;
}
private void loadDefaultPage() {
if (mState.mSettings.isPrivateBrowsingEnabled()) {
loadPrivateBrowsingPage();
} else {
loadHomePage();
}
}
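    // Rebuild the underlying GeckoSession from the saved state: create a session
    // with the stored (or default) settings and load the saved session state, the
    // last URI, or the default page.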
private void restore() {
SessionSettings settings = mState.mSettings;
if (settings == null) {
settings = new SessionSettings.Builder()
.withDefaultSettings(mContext)
.build();
}
mState.mSession = createGeckoSession(settings);
if (!mState.mSession.isOpen()) {
mState.mSession.open(mRuntime);
}
// data:text URLs can not be restored.
if (mState.mSessionState != null && ((mState.mUri == null) || mState.mUri.startsWith("data:text"))) {
mState.mSessionState = null;
mState.mUri = null;
}
if (shouldLoadDefaultPage(mState)) {
loadDefaultPage();
} else if (mState.mSessionState != null) {
mState.mSession.restoreState(mState.mSessionState);
} else if (mState.mUri != null) {
mState.mSession.loadUri(mState.mUri);
} else {
loadDefaultPage();
}
if (mState.mUri != null && mState.mUri.contains(".youtube.com")) {
mState.mSession.loadUri(mState.mUri, GeckoSession.LOAD_FLAGS_REPLACE_HISTORY);
}
dumpAllState();
mState.setActive(true);
}
private SessionState createSession(@NonNull SessionSettings aSettings, @SessionOpenModeFlags int aOpenMode) {
SessionState state = new SessionState();
state.mSettings = aSettings;
state.mSession = createGeckoSession(aSettings);
if (aOpenMode == SESSION_OPEN && !state.mSession.isOpen()) {
state.mSession.open(mRuntime);
}
return state;
}
private GeckoSession createGeckoSession(@NonNull SessionSettings aSettings) {
GeckoSessionSettings geckoSettings = new GeckoSessionSettings.Builder()
.useMultiprocess(SettingsStore.getInstance(mContext).isMultiprocessEnabled())
.usePrivateMode(aSettings.isPrivateBrowsingEnabled())
.useTrackingProtection(aSettings.isTrackingProtectionEnabled())
.userAgentMode(aSettings.getUserAgentMode())
.viewportMode(aSettings.getViewportMode())
.suspendMediaWhenInactive(aSettings.isSuspendMediaWhenInactiveEnabled())
.build();
GeckoSession session;
if (aSettings.isServoEnabled() && isServoAvailable()) {
session = createServoSession(mContext);
} else {
session = new GeckoSession(geckoSettings);
}
session.getSettings().setUserAgentOverride(aSettings.getUserAgentOverride());
setupSessionListeners(session);
return session;
}
public void recreateSession() {
SessionState previous = mState;
mState = mState.recreate();
restore();
GeckoSession previousGeckoSession = null;
if (previous.mSession != null) {
previousGeckoSession = previous.mSession;
closeSession(previous);
}
for (SessionChangeListener listener : mSessionChangeListeners) {
listener.onCurrentSessionChange(previousGeckoSession, mState.mSession);
}
}
private void closeSession(@NonNull SessionState aState) {
if (aState.mSession == null) {
return;
}
cleanSessionListeners(aState.mSession);
aState.mSession.setActive(false);
aState.mSession.stop();
if (aState.mDisplay != null) {
aState.mDisplay.surfaceDestroyed();
aState.mSession.releaseDisplay(aState.mDisplay);
aState.mDisplay = null;
}
aState.mSession.close();
aState.setActive(false);
mFirstContentfulPaint = false;
}
public void captureBitmap() {
if (mState.mDisplay == null || !mFirstContentfulPaint) {
return;
}
try {
mState.mDisplay.screenshot().aspectPreservingSize(500).capture().then(bitmap -> {
if (bitmap != null) {
BitmapCache.getInstance(mContext).addBitmap(getId(), bitmap);
for (BitmapChangedListener listener: mBitmapChangedListeners) {
listener.onBitmapChanged(Session.this, bitmap);
}
}
return null;
}).exceptionally(throwable -> {
Log.e(LOGTAG, "Error capturing session bitmap");
throwable.printStackTrace();
return null;
});
} catch (Exception ex) {
Log.e(LOGTAG, "Error capturing session bitmap");
ex.printStackTrace();
}
}
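    // Capture a background thumbnail for this session: acquire a temporary
    // GeckoDisplay backed by the shared capture surface, take a screenshot into
    // the BitmapCache, then release the display and the capture surface.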
public CompletableFuture<Void> captureBackgroundBitmap(int displayWidth, int displayHeight) {
if (mState.mSession == null || !mFirstContentfulPaint) {
return CompletableFuture.completedFuture(null);
}
Surface captureSurface = BitmapCache.getInstance(mContext).acquireCaptureSurface(displayWidth, displayHeight);
if (captureSurface == null) {
return CompletableFuture.completedFuture(null);
}
CompletableFuture<Void> result = new CompletableFuture<>();
GeckoDisplay display = mState.mSession.acquireDisplay();
display.surfaceChanged(captureSurface, displayWidth, displayHeight);
Runnable cleanResources = () -> {
display.surfaceDestroyed();
mState.mSession.releaseDisplay(display);
BitmapCache.getInstance(mContext).releaseCaptureSurface();
};
try {
display.screenshot().aspectPreservingSize(500).capture().then(bitmap -> {
if (bitmap != null) {
BitmapCache.getInstance(mContext).addBitmap(getId(), bitmap);
for (BitmapChangedListener listener : mBitmapChangedListeners) {
listener.onBitmapChanged(Session.this, bitmap);
}
}
cleanResources.run();
result.complete(null);
return null;
}).exceptionally(throwable -> {
Log.e(LOGTAG, "Error capturing session background bitmap");
throwable.printStackTrace();
cleanResources.run();
result.complete(null);
return null;
});
}
catch (Exception ex) {
Log.e(LOGTAG, "Error capturing session background bitmap");
ex.printStackTrace();
cleanResources.run();
result.complete(null);
}
return result;
}
public boolean hasCapturedBitmap() {
return BitmapCache.getInstance(mContext).hasBitmap(mState.mId);
}
public void purgeHistory() {
if (mState.mSession != null) {
mState.mSession.purgeHistory();
}
}
public void setRegion(String aRegion) {
Log.d(LOGTAG, "Session setRegion: " + aRegion);
mState.mRegion = aRegion != null ? aRegion.toLowerCase() : "worldwide";
        // A region has been initialized and the home page is already loaded
if (mState.mSession != null && isHomeUri(getCurrentUri())) {
mState.mSession.loadUri("javascript:window.location.replace('" + getHomeUri() + "');");
}
}
public String getHomeUri() {
String homepage = SettingsStore.getInstance(mContext).getHomepage();
if (homepage.equals(mContext.getString(R.string.homepage_url)) && mState.mRegion != null) {
homepage = homepage + "?region=" + mState.mRegion;
}
return homepage;
}
public Boolean isHomeUri(String aUri) {
return aUri != null && aUri.toLowerCase().startsWith(
SettingsStore.getInstance(mContext).getHomepage()
);
}
public String getCurrentUri() {
if (mState.mUri == null) {
return "";
}
return mState.mUri;
}
public String getCurrentTitle() {
if (mState.mTitle == null) {
return "";
}
return mState.mTitle;
}
public boolean isSecure() {
return mState.mSecurityInformation != null && mState.mSecurityInformation.isSecure;
}
public boolean isVideoAvailable() {
return mState.mMediaElements != null && mState.mMediaElements.size() > 0;
}
public boolean isFirstContentfulPaint() {
return mFirstContentfulPaint;
}
public Media getFullScreenVideo() {
for (Media media: mState.mMediaElements) {
if (media.isFullscreen()) {
return media;
}
}
if (mState.mMediaElements.size() > 0) {
return mState.mMediaElements.get(mState.mMediaElements.size() - 1);
}
return null;
}
public boolean isInputActive() {
return mState.mIsInputActive;
}
public boolean canGoBack() {
if (mState.mCanGoBack || isInFullScreen()) {
return true;
}
if (mState.mParentId != null) {
Session parent = SessionStore.get().getSession(mState.mParentId);
return parent != null && !parent.isActive();
}
return false;
}
public void goBack() {
if (isInFullScreen()) {
exitFullScreen();
} else if (mState.mCanGoBack && mState.mSession != null) {
mState.mSession.goBack();
} else if (mState.mParentId != null) {
Session parent = SessionStore.get().getSession(mState.mParentId);
if (parent != null && !parent.isActive()) {
for (SessionChangeListener listener: mSessionChangeListeners) {
listener.onUnstackSession(this, parent);
}
}
}
}
public void goForward() {
if (mState.mCanGoForward && mState.mSession != null) {
mState.mSession.goForward();
}
}
public void setActive(boolean aActive) {
// Flush the events queued while the session was inactive
if (mState.mSession != null && !mState.isActive() && aActive) {
flushQueuedEvents();
}
if (mState.mSession != null) {
if (mState.isActive() != aActive) {
mState.mSession.setActive(aActive);
}
mState.setActive(aActive);
} else if (aActive) {
restore();
} else {
Log.e(LOGTAG, "ERROR: Setting null GeckoView to inactive!");
}
for (SessionChangeListener listener: mSessionChangeListeners) {
listener.onActiveStateChange(this, aActive);
}
}
public void reload() {
if (mState.mSession != null) {
mState.mSession.reload();
}
}
public void stop() {
if (mState.mSession != null) {
mState.mSession.stop();
}
}
public void loadUri(String aUri) {
if (aUri == null) {
aUri = getHomeUri();
}
if (mState.mSession != null) {
Log.d(LOGTAG, "Loading URI: " + aUri);
mState.mSession.loadUri(aUri);
}
}
public void loadHomePage() {
loadUri(getHomeUri());
}
public void loadPrivateBrowsingPage() {
if (mState.mSession != null) {
mState.mSession.loadData(mPrivatePage, "text/html");
}
}
public void toggleServo() {
if (mState.mSession == null) {
return;
}
Log.v("servo", "toggleServo");
SessionState previous = mState;
String uri = getCurrentUri();
SessionSettings settings = new SessionSettings.Builder()
.withDefaultSettings(mContext)
.withServo(!isInstanceOfServoSession(mState.mSession))
.build();
mState = createSession(settings, SESSION_OPEN);
closeSession(previous);
mState.setActive(true);
loadUri(uri);
}
public boolean isInFullScreen() {
return mState.mFullScreen;
}
public void exitFullScreen() {
if (mState.mSession != null) {
mState.mSession.exitFullScreen();
}
}
public GeckoSession getGeckoSession() {
return mState.mSession;
}
public String getId() {
return mState.mId;
}
public boolean isPrivateMode() {
if (mState.mSession != null) {
return mState.mSession.getSettings().getUsePrivateMode();
} else if (mState.mSettings != null) {
return mState.mSettings.isPrivateBrowsingEnabled();
}
return false;
}
// Session Settings
protected void setServo(final boolean enabled) {
mState.mSettings.setServoEnabled(enabled);
if (mState.mSession != null && isInstanceOfServoSession(mState.mSession) != enabled) {
toggleServo();
}
}
public int getUaMode() {
return mState.mSession.getSettings().getUserAgentMode();
}
public boolean isActive() {
return mState.isActive();
}
private static final String M_PREFIX = "m.";
private static final String MOBILE_PREFIX = "mobile.";
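    // If the URI authority starts with a mobile prefix ("m." or "mobile."), return
    // the same URI with the prefix removed so the desktop site can be requested;
    // return null when no prefix is found or the URI cannot be parsed.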
private String checkForMobileSite(String aUri) {
if (aUri == null) {
return null;
}
String result = null;
URI uri;
try {
uri = new URI(aUri);
} catch (URISyntaxException | NullPointerException e) {
Log.d(LOGTAG, "Error parsing URL: " + aUri + " " + e.getMessage());
return null;
}
String authority = uri.getAuthority();
if (authority == null) {
return null;
}
authority = authority.toLowerCase();
String foundPrefix = null;
if (authority.startsWith(M_PREFIX)) {
            foundPrefix = M_PREFIX;
} else if (authority.startsWith(MOBILE_PREFIX)) {
foundPrefix = MOBILE_PREFIX;
}
if (foundPrefix != null) {
try {
uri = new URI(uri.getScheme(), authority.substring(foundPrefix.length()), uri.getPath(), uri.getQuery(), uri.getFragment());
result = uri.toString();
} catch (URISyntaxException | NullPointerException e) {
Log.d(LOGTAG, "Error dropping mobile prefix from: " + aUri + " " + e.getMessage());
}
}
return result;
}
public void setUaMode(int mode) {
if (mState.mSession == null || mState.mSettings.getUserAgentMode() == mode) {
return;
}
mState.mSettings.setUserAgentMode(mode);
mState.mSession.getSettings().setUserAgentMode(mode);
String overrideUri = null;
if (mode == GeckoSessionSettings.USER_AGENT_MODE_DESKTOP) {
mState.mSettings.setViewportMode(GeckoSessionSettings.VIEWPORT_MODE_DESKTOP);
overrideUri = checkForMobileSite(mState.mUri);
} else {
mState.mSettings.setViewportMode(GeckoSessionSettings.VIEWPORT_MODE_MOBILE);
}
mState.mSession.getSettings().setViewportMode(mState.mSettings.getViewportMode());
mState.mSession.loadUri(overrideUri != null ? overrideUri : mState.mUri, GeckoSession.LOAD_FLAGS_BYPASS_CACHE | GeckoSession.LOAD_FLAGS_REPLACE_HISTORY);
}
protected void resetMultiprocess() {
recreateSession();
}
protected void setTrackingProtection(final boolean aEnabled) {
if (mState.mSettings.isTrackingProtectionEnabled() != aEnabled) {
mState.mSettings.setTrackingProtectionEnabled(aEnabled);
if (mState.mSession != null) {
mState.mSession.getSettings().setUseTrackingProtection(aEnabled);
}
}
}
public void updateLastUse() {
mState.mLastUse = System.currentTimeMillis();
}
public long getLastUse() {
return mState.mLastUse;
}
public @NonNull SessionState getSessionState() {
return mState;
}
// NavigationDelegate
@Override
public void onLocationChange(@NonNull GeckoSession aSession, String aUri) {
if (mState.mSession != aSession) {
return;
}
mState.mPreviousUri = mState.mUri;
mState.mUri = aUri;
for (GeckoSession.NavigationDelegate listener : mNavigationListeners) {
listener.onLocationChange(aSession, aUri);
}
// The homepage finishes loading after the region has been updated
if (mState.mRegion != null && aUri.equalsIgnoreCase(SettingsStore.getInstance(mContext).getHomepage())) {
aSession.loadUri("javascript:window.location.replace('" + getHomeUri() + "');");
}
}
@Override
public void onCanGoBack(@NonNull GeckoSession aSession, boolean aGeckoSessionCanGoBack) {
if (mState.mSession != aSession) {
return;
}
Log.d(LOGTAG, "Session onCanGoBack: " + (aGeckoSessionCanGoBack ? "true" : "false"));
mState.mCanGoBack = aGeckoSessionCanGoBack;
for (GeckoSession.NavigationDelegate listener : mNavigationListeners) {
listener.onCanGoBack(aSession, canGoBack());
}
}
@Override
public void onCanGoForward(@NonNull GeckoSession aSession, boolean aCanGoForward) {
if (mState.mSession != aSession) {
return;
}
Log.d(LOGTAG, "Session onCanGoForward: " + (aCanGoForward ? "true" : "false"));
mState.mCanGoForward = aCanGoForward;
for (GeckoSession.NavigationDelegate listener : mNavigationListeners) {
listener.onCanGoForward(aSession, aCanGoForward);
}
}
@Override
public @Nullable GeckoResult<AllowOrDeny> onLoadRequest(@NonNull GeckoSession aSession, @NonNull LoadRequest aRequest) {
String uri = aRequest.uri;
Log.d(LOGTAG, "onLoadRequest: " + uri);
if (aSession == mState.mSession) {
Log.d(LOGTAG, "Testing for UA override");
final String userAgentOverride = sUserAgentOverride.lookupOverride(uri);
aSession.getSettings().setUserAgentOverride(userAgentOverride);
if (mState.mSettings != null) {
mState.mSettings.setUserAgentOverride(userAgentOverride);
}
}
if (mContext.getString(R.string.about_private_browsing).equalsIgnoreCase(uri)) {
return GeckoResult.DENY;
}
if (mNavigationListeners.size() == 0) {
return GeckoResult.ALLOW;
}
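        // Combine the asynchronous answers from every navigation listener: the
        // load is allowed if any listener allows it (a missing result counts as
        // allow), and the combined result completes once all listeners have
        // responded.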
final GeckoResult<AllowOrDeny> result = new GeckoResult<>();
AtomicInteger count = new AtomicInteger(0);
AtomicBoolean allowed = new AtomicBoolean(false);
for (GeckoSession.NavigationDelegate listener: mNavigationListeners) {
GeckoResult<AllowOrDeny> listenerResult = listener.onLoadRequest(aSession, aRequest);
if (listenerResult != null) {
listenerResult.then(value -> {
if (AllowOrDeny.ALLOW.equals(value)) {
allowed.set(true);
}
if (count.getAndIncrement() == mNavigationListeners.size() - 1) {
result.complete(allowed.get() ? AllowOrDeny.ALLOW : AllowOrDeny.DENY);
}
return null;
});
} else {
allowed.set(true);
if (count.getAndIncrement() == mNavigationListeners.size() - 1) {
result.complete(allowed.get() ? AllowOrDeny.ALLOW : AllowOrDeny.DENY);
}
}
}
return result;
}
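    // Content requested a new window: create a stacked child session tied to this
    // one and extend the keep-alive window so neither session is suspended before
    // the new one is attached.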
@Override
public GeckoResult<GeckoSession> onNewSession(@NonNull GeckoSession aSession, @NonNull String aUri) {
mKeepAlive = System.currentTimeMillis() + KEEP_ALIVE_DURATION_MS;
Log.d(LOGTAG, "onNewSession: " + aUri);
Session session = SessionStore.get().createSession(mState.mSettings, SESSION_DO_NOT_OPEN);
session.mState.mParentId = mState.mId;
session.mKeepAlive = mKeepAlive;
for (SessionChangeListener listener: mSessionChangeListeners) {
listener.onStackSession(session);
}
mSessionChangeListeners.add(session);
return GeckoResult.fromValue(session.getGeckoSession());
}
@Override
public GeckoResult<String> onLoadError(@NonNull GeckoSession session, String uri, @NonNull WebRequestError error) {
Log.d(LOGTAG, "Session onLoadError: " + uri);
return GeckoResult.fromValue(InternalPages.createErrorPageDataURI(mContext, uri, error.category, error.code));
}
// Progress Listener
@Override
public void onPageStart(@NonNull GeckoSession aSession, @NonNull String aUri) {
if (mState.mSession != aSession) {
return;
}
Log.d(LOGTAG, "Session onPageStart");
mState.mIsLoading = true;
TelemetryWrapper.startPageLoadTime();
GleanMetricsService.startPageLoadTime();
for (GeckoSession.ProgressDelegate listener : mProgressListeners) {
listener.onPageStart(aSession, aUri);
}
}
@Override
public void onPageStop(@NonNull GeckoSession aSession, boolean b) {
if (mState.mSession != aSession) {
return;
}
Log.d(LOGTAG, "Session onPageStop");
mState.mIsLoading = false;
if (!SessionUtils.isLocalizedContent(mState.mUri)) {
TelemetryWrapper.uploadPageLoadToHistogram(mState.mUri);
GleanMetricsService.stopPageLoadTimeWithURI(mState.mUri);
}
for (GeckoSession.ProgressDelegate listener : mProgressListeners) {
listener.onPageStop(aSession, b);
}
}
@Override
public void onSecurityChange(@NonNull GeckoSession aSession, @NonNull SecurityInformation aInformation) {
if (mState.mSession != aSession) {
return;
}
Log.d(LOGTAG, "Session onPageStop");
mState.mSecurityInformation = aInformation;
for (GeckoSession.ProgressDelegate listener : mProgressListeners) {
listener.onSecurityChange(aSession, aInformation);
}
}
@Override
public void onSessionStateChange(@NonNull GeckoSession aSession,
@NonNull GeckoSession.SessionState aSessionState) {
if (mState.mSession == aSession) {
mState.mSessionState = aSessionState;
}
}
// Content Delegate
@Override
public void onTitleChange(@NonNull GeckoSession aSession, String aTitle) {
if (mState.mSession != aSession) {
return;
}
mState.mTitle = aTitle;
for (GeckoSession.ContentDelegate listener : mContentListeners) {
listener.onTitleChange(aSession, aTitle);
}
}
@Override
public void onCloseRequest(@NonNull GeckoSession aSession) {
}
@Override
public void onFullScreen(@NonNull GeckoSession aSession, boolean aFullScreen) {
if (mState.mSession != aSession) {
return;
}
Log.d(LOGTAG, "Session onFullScreen");
mState.mFullScreen = aFullScreen;
for (GeckoSession.ContentDelegate listener : mContentListeners) {
listener.onFullScreen(aSession, aFullScreen);
}
}
@Override
public void onContextMenu(@NonNull GeckoSession session, int screenX, int screenY, @NonNull ContextElement element) {
if (mState.mSession == session) {
for (GeckoSession.ContentDelegate listener : mContentListeners) {
listener.onContextMenu(session, screenX, screenY, element);
}
}
}
@Override
public void onCrash(@NonNull GeckoSession session) {
Log.e(LOGTAG,"Child crashed. Creating new session");
recreateSession();
loadUri(getHomeUri());
}
@Override
public void onFirstComposite(@NonNull GeckoSession aSession) {
if (mState.mSession == aSession) {
for (GeckoSession.ContentDelegate listener : mContentListeners) {
listener.onFirstComposite(aSession);
}
if (mFirstContentfulPaint) {
                // onFirstContentfulPaint is only called once after a session is opened.
                // Notify onFirstContentfulPaint after a session is reattached and before
                // it is closed (e.g. when a tab is selected).
for (GeckoSession.ContentDelegate listener : mContentListeners) {
listener.onFirstContentfulPaint(aSession);
}
}
}
}
@Override
public void onFirstContentfulPaint(@NonNull GeckoSession aSession) {
mFirstContentfulPaint = true;
if (mState.mSession == aSession) {
for (GeckoSession.ContentDelegate listener : mContentListeners) {
listener.onFirstContentfulPaint(aSession);
}
}
}
@Nullable
@Override
public GeckoResult<SlowScriptResponse> onSlowScript(@NonNull GeckoSession aSession, @NonNull String aScriptFileName) {
if (mState.mSession == aSession) {
for (GeckoSession.ContentDelegate listener : mContentListeners) {
GeckoResult<SlowScriptResponse> result = listener.onSlowScript(aSession, aScriptFileName);
if (result != null) {
return result;
}
}
}
return null;
}
// TextInput Delegate
@Override
public void restartInput(@NonNull GeckoSession aSession, int reason) {
if (mState.mSession == aSession) {
for (GeckoSession.TextInputDelegate listener : mTextInputListeners) {
listener.restartInput(aSession, reason);
}
}
}
@Override
public void showSoftInput(@NonNull GeckoSession aSession) {
if (mState.mSession == aSession) {
mState.mIsInputActive = true;
for (GeckoSession.TextInputDelegate listener : mTextInputListeners) {
listener.showSoftInput(aSession);
}
}
}
@Override
public void hideSoftInput(@NonNull GeckoSession aSession) {
if (mState.mSession == aSession) {
mState.mIsInputActive = false;
for (GeckoSession.TextInputDelegate listener : mTextInputListeners) {
listener.hideSoftInput(aSession);
}
}
}
@Override
public void updateSelection(@NonNull GeckoSession aSession, int selStart, int selEnd, int compositionStart, int compositionEnd) {
if (mState.mSession == aSession) {
for (GeckoSession.TextInputDelegate listener : mTextInputListeners) {
listener.updateSelection(aSession, selStart, selEnd, compositionStart, compositionEnd);
}
}
}
@Override
public void updateExtractedText(@NonNull GeckoSession aSession, @NonNull ExtractedTextRequest request, @NonNull ExtractedText text) {
if (mState.mSession == aSession) {
for (GeckoSession.TextInputDelegate listener : mTextInputListeners) {
listener.updateExtractedText(aSession, request, text);
}
}
}
@Override
public void updateCursorAnchorInfo(@NonNull GeckoSession aSession, @NonNull CursorAnchorInfo info) {
if (mState.mSession == aSession) {
for (GeckoSession.TextInputDelegate listener : mTextInputListeners) {
listener.updateCursorAnchorInfo(aSession, info);
}
}
}
@Override
public void onContentBlocked(@NonNull final GeckoSession session, @NonNull final ContentBlocking.BlockEvent event) {
if ((event.getAntiTrackingCategory() & ContentBlocking.AntiTracking.AD) != 0) {
Log.i(LOGTAG, "Blocking Ad: " + event.uri);
}
if ((event.getAntiTrackingCategory() & ContentBlocking.AntiTracking.ANALYTIC) != 0) {
Log.i(LOGTAG, "Blocking Analytic: " + event.uri);
}
if ((event.getAntiTrackingCategory() & ContentBlocking.AntiTracking.CONTENT) != 0) {
Log.i(LOGTAG, "Blocking Content: " + event.uri);
}
if ((event.getAntiTrackingCategory() & ContentBlocking.AntiTracking.SOCIAL) != 0) {
Log.i(LOGTAG, "Blocking Social: " + event.uri);
}
}
// PromptDelegate
@Nullable
@Override
public GeckoResult<PromptResponse> onPopupPrompt(@NonNull GeckoSession geckoSession, @NonNull PopupPrompt popupPrompt) {
if (mPromptDelegate != null) {
return mPromptDelegate.onPopupPrompt(geckoSession, popupPrompt);
}
return GeckoResult.fromValue(popupPrompt.dismiss());
}
@Nullable
@Override
public GeckoResult<PromptResponse> onAlertPrompt(@NonNull GeckoSession aSession, @NonNull AlertPrompt alertPrompt) {
if (mState.mSession == aSession && mPromptDelegate != null) {
return mPromptDelegate.onAlertPrompt(aSession, alertPrompt);
}
return GeckoResult.fromValue(alertPrompt.dismiss());
}
@Nullable
@Override
public GeckoResult<PromptResponse> onButtonPrompt(@NonNull GeckoSession aSession, @NonNull ButtonPrompt buttonPrompt) {
if (mState.mSession == aSession && mPromptDelegate != null) {
return mPromptDelegate.onButtonPrompt(aSession, buttonPrompt);
}
return GeckoResult.fromValue(buttonPrompt.dismiss());
}
@Nullable
@Override
public GeckoResult<PromptResponse> onTextPrompt(@NonNull GeckoSession aSession, @NonNull TextPrompt textPrompt) {
if (mState.mSession == aSession && mPromptDelegate != null) {
return mPromptDelegate.onTextPrompt(aSession, textPrompt);
}
return GeckoResult.fromValue(textPrompt.dismiss());
}
@Nullable
@Override
public GeckoResult<PromptResponse> onAuthPrompt(@NonNull GeckoSession aSession, @NonNull AuthPrompt authPrompt) {
if (mState.mSession == aSession && mPromptDelegate != null) {
return mPromptDelegate.onAuthPrompt(aSession, authPrompt);
}
return GeckoResult.fromValue(authPrompt.dismiss());
}
@Nullable
@Override
public GeckoResult<PromptResponse> onChoicePrompt(@NonNull GeckoSession aSession, @NonNull ChoicePrompt choicePrompt) {
if (mState.mSession == aSession && mPromptDelegate != null) {
return mPromptDelegate.onChoicePrompt(aSession, choicePrompt);
}
return GeckoResult.fromValue(choicePrompt.dismiss());
}
@Nullable
@Override
public GeckoResult<PromptResponse> onColorPrompt(@NonNull GeckoSession aSession, @NonNull ColorPrompt colorPrompt) {
if (mState.mSession == aSession && mPromptDelegate != null) {
return mPromptDelegate.onColorPrompt(aSession, colorPrompt);
}
return GeckoResult.fromValue(colorPrompt.dismiss());
}
@Nullable
@Override
public GeckoResult<PromptResponse> onDateTimePrompt(@NonNull GeckoSession aSession, @NonNull DateTimePrompt dateTimePrompt) {
if (mState.mSession == aSession && mPromptDelegate != null) {
return mPromptDelegate.onDateTimePrompt(aSession, dateTimePrompt);
}
return GeckoResult.fromValue(dateTimePrompt.dismiss());
}
@Nullable
@Override
public GeckoResult<PromptResponse> onFilePrompt(@NonNull GeckoSession aSession, @NonNull FilePrompt filePrompt) {
if (mPromptDelegate != null) {
return mPromptDelegate.onFilePrompt(aSession, filePrompt);
}
return GeckoResult.fromValue(filePrompt.dismiss());
}
// MediaDelegate
@Override
public void onMediaAdd(@NonNull GeckoSession aSession, @NonNull MediaElement element) {
if (mState.mSession != aSession) {
return;
}
Media media = new Media(element);
mState.mMediaElements.add(media);
for (VideoAvailabilityListener listener: mVideoAvailabilityListeners) {
listener.onVideoAvailabilityChanged(true);
}
}
@Override
public void onMediaRemove(@NonNull GeckoSession aSession, @NonNull MediaElement element) {
if (mState.mSession != aSession) {
return;
}
for (int i = 0; i < mState.mMediaElements.size(); ++i) {
Media media = mState.mMediaElements.get(i);
if (media.getMediaElement() == element) {
media.unload();
mState.mMediaElements.remove(i);
if (mState.mMediaElements.size() == 0) {
for (VideoAvailabilityListener listener: mVideoAvailabilityListeners) {
listener.onVideoAvailabilityChanged(false);
}
}
return;
}
}
}
// HistoryDelegate
@Override
public void onHistoryStateChange(@NonNull GeckoSession aSession, @NonNull GeckoSession.HistoryDelegate.HistoryList historyList) {
if (mState.mSession == aSession) {
if (mHistoryDelegate != null) {
mHistoryDelegate.onHistoryStateChange(aSession, historyList);
} else {
mQueuedCalls.add(() -> {
if (mHistoryDelegate != null) {
mHistoryDelegate.onHistoryStateChange(aSession, historyList);
}
});
}
}
}
@Nullable
@Override
public GeckoResult<Boolean> onVisited(@NonNull GeckoSession aSession, @NonNull String url, @Nullable String lastVisitedURL, int flags) {
if (mState.mSession == aSession) {
if (mHistoryDelegate != null) {
return mHistoryDelegate.onVisited(aSession, url, lastVisitedURL, flags);
} else {
final GeckoResult<Boolean> response = new GeckoResult<>();
mQueuedCalls.add(() -> {
if (mHistoryDelegate != null) {
try {
requireNonNull(mHistoryDelegate.onVisited(aSession, url, lastVisitedURL, flags)).then(aBoolean -> {
response.complete(aBoolean);
return null;
}).exceptionally(throwable -> {
Log.d(LOGTAG, "Null GeckoResult from onVisited");
return null;
});
} catch (NullPointerException e) {
e.printStackTrace();
}
}
});
return response;
}
}
return GeckoResult.fromValue(false);
}
@UiThread
@Nullable
public GeckoResult<boolean[]> getVisited(@NonNull GeckoSession aSession, @NonNull String[] urls) {
if (mState.mSession == aSession) {
if (mHistoryDelegate != null) {
return mHistoryDelegate.getVisited(aSession, urls);
} else {
final GeckoResult<boolean[]> response = new GeckoResult<>();
mQueuedCalls.add(() -> {
if (mHistoryDelegate != null) {
try {
requireNonNull(mHistoryDelegate.getVisited(aSession, urls)).then(aBoolean -> {
response.complete(aBoolean);
return null;
}).exceptionally(throwable -> {
Log.d(LOGTAG, "Null GeckoResult from getVisited");
return null;
});
} catch (NullPointerException e) {
e.printStackTrace();
}
}
});
return response;
}
}
return GeckoResult.fromValue(new boolean[]{});
}
// PermissionDelegate
@Override
public void onAndroidPermissionsRequest(@NonNull GeckoSession aSession, @Nullable String[] strings, @NonNull Callback callback) {
if (mState.mSession == aSession && mPermissionDelegate != null) {
mPermissionDelegate.onAndroidPermissionsRequest(aSession, strings, callback);
}
}
@Override
public void onContentPermissionRequest(@NonNull GeckoSession aSession, @Nullable String s, int i, @NonNull Callback callback) {
if (mState.mSession == aSession && mPermissionDelegate != null) {
mPermissionDelegate.onContentPermissionRequest(aSession, s, i, callback);
}
}
@Override
public void onMediaPermissionRequest(@NonNull GeckoSession aSession, @NonNull String s, @Nullable MediaSource[] mediaSources, @Nullable MediaSource[] mediaSources1, @NonNull MediaCallback mediaCallback) {
if (mState.mSession == aSession && mPermissionDelegate != null) {
mPermissionDelegate.onMediaPermissionRequest(aSession, s, mediaSources, mediaSources1, mediaCallback);
}
}
// SharedPreferences.OnSharedPreferenceChangeListener
@Override
public void onSharedPreferenceChanged(SharedPreferences sharedPreferences, String key) {
if (mContext != null) {
if (key.equals(mContext.getString(R.string.settings_key_geolocation_data))) {
GeolocationData data = GeolocationData.parse(sharedPreferences.getString(key, null));
setRegion(data.getCountryCode());
}
}
}
// GeckoSession.SelectionActionDelegate
@Override
public void onShowActionRequest(@NonNull GeckoSession aSession, @NonNull Selection selection) {
if (mState.mSession == aSession) {
for (GeckoSession.SelectionActionDelegate listener : mSelectionActionListeners) {
listener.onShowActionRequest(aSession, selection);
}
}
}
@Override
public void onHideAction(@NonNull GeckoSession aSession, int aHideReason) {
if (mState.mSession == aSession) {
for (GeckoSession.SelectionActionDelegate listener : mSelectionActionListeners) {
listener.onHideAction(aSession, aHideReason);
}
}
}
// SessionChangeListener
@Override
public void onRemoveSession(Session aParent) {
if (mState.mParentId != null) {
mState.mParentId = null;
// Parent stack session closed. Notify canGoBack state changed
for (GeckoSession.NavigationDelegate listener : mNavigationListeners) {
listener.onCanGoBack(this.getGeckoSession(), canGoBack());
}
}
}
@Override
public void onActiveStateChange(Session aSession, boolean aActive) {
if (mState.mParentId != null) {
// Parent stack session has been attached/detached. Notify canGoBack state changed
for (GeckoSession.NavigationDelegate listener : mNavigationListeners) {
listener.onCanGoBack(this.getGeckoSession(), canGoBack());
}
}
}
@Override
public void onStackSession(Session aSession) {
if (aSession.equals(this)) {
return;
}
for (SessionChangeListener listener : mSessionChangeListeners) {
listener.onStackSession(aSession);
}
}
// Display functions
public void releaseDisplay() {
surfaceDestroyed();
if (mState.mDisplay != null) {
if (mState.mSession != null) {
mState.mSession.releaseDisplay(mState.mDisplay);
}
mState.mDisplay = null;
}
}
public void surfaceDestroyed() {
if (mState.mDisplay != null) {
mState.mDisplay.surfaceDestroyed();
}
}
public void surfaceChanged(@NonNull final Surface surface, final int left, final int top,
final int width, final int height) {
if (mState.mSession == null) {
return;
}
if (mState.mDisplay == null) {
mState.mDisplay = mState.mSession.acquireDisplay();
}
mState.mDisplay.surfaceChanged(surface, left, top, width, height);
}
}
| 1 | 8,737 | nit: `Contentful` is the word used in the GV API, without the uppercase in F | MozillaReality-FirefoxReality | java |
@@ -83,6 +83,12 @@ public class SchemaMeta extends CommonService<OperationMeta> {
}
private void initOperations() {
+ if (swagger.getPaths() == null) {
+ LOGGER.error(swagger.getInfo().getTitle() + " with path " + swagger.getBasePath()
+ + " is an empty interface, please delete it or fill with one method!");
+ return;
+ }
+
for (Entry<String, Path> entry : swagger.getPaths().entrySet()) {
String strPath = entry.getKey();
Path path = entry.getValue(); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.core.definition;
import java.lang.reflect.Method;
import java.util.List;
import java.util.Map.Entry;
import org.apache.servicecomb.core.Handler;
import org.apache.servicecomb.core.exception.ExceptionUtils;
import org.apache.servicecomb.foundation.common.utils.ReflectUtils;
import org.apache.servicecomb.swagger.converter.SwaggerToClassGenerator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.swagger.models.HttpMethod;
import io.swagger.models.Operation;
import io.swagger.models.Path;
import io.swagger.models.Swagger;
public class SchemaMeta extends CommonService<OperationMeta> {
private static final Logger LOGGER = LoggerFactory.getLogger(SchemaMeta.class);
  // If a class needs to be generated, use this package
private String packageName;
private Swagger swagger;
private MicroserviceMeta microserviceMeta;
// microserviceName:schemaId
private String microserviceQualifiedName;
  // The interface corresponding to the schema (contract)
private Class<?> swaggerIntf;
  // The handlerChain is at the microservice level
private List<Handler> consumerHandlerChain;
private List<Handler> providerHandlerChain;
private SwaggerToClassGenerator swaggerToClassGenerator;
public SchemaMeta(Swagger swagger, MicroserviceMeta microserviceMeta, String schemaId) {
this.packageName = SchemaUtils.generatePackageName(microserviceMeta, schemaId);
this.swagger = swagger;
this.name = schemaId;
this.microserviceMeta = microserviceMeta;
this.microserviceQualifiedName = microserviceMeta.getName() + "." + schemaId;
swaggerToClassGenerator = new SwaggerToClassGenerator(microserviceMeta.getClassLoader(), swagger, packageName);
swaggerIntf = swaggerToClassGenerator.convert();
createOperationMgr("schemaMeta " + schemaId + " operation mgr");
operationMgr.setRegisterErrorFmt("Operation name repeat, schema=%s, operation=%s");
initOperations();
}
public SwaggerToClassGenerator getSwaggerToClassGenerator() {
return swaggerToClassGenerator;
}
public String getPackageName() {
return packageName;
}
private void initOperations() {
for (Entry<String, Path> entry : swagger.getPaths().entrySet()) {
String strPath = entry.getKey();
Path path = entry.getValue();
for (Entry<HttpMethod, Operation> operationEntry : path.getOperationMap().entrySet()) {
Operation operation = operationEntry.getValue();
if (operation.getOperationId() == null) {
throw ExceptionUtils.operationIdInvalid(getSchemaId(), strPath);
}
        // org.apache.servicecomb.swagger.engine.SwaggerEnvironment.createConsumer(Class<?>, Class<?>)
        // org.apache.servicecomb.swagger.engine.SwaggerEnvironment.createProducer(Object, Swagger)
        // have already made sure that consumer/swagger or producer/swagger can work together
        //
        // so do not throw an exception here when the method does not exist
        // e.g.:
        //   the swagger interface is a.b.c, and the consumer interface is a.b.c too.
        //   in version 1, they are the same
        //   in version 2, the producer adds a new operation, so the swagger has more operations than consumer interface a.b.c
        //   while interface a.b.c in the consumer process is still the old interface
        //   so no validity check can be done against the swagger here;
        //   it is enough to save the methods that are found.
Method method = ReflectUtils.findMethod(swaggerIntf, operation.getOperationId());
if (method == null) {
LOGGER.warn("method {} not found in swagger interface {}, schemaId={}",
operation.getOperationId(),
swaggerIntf.getName(),
getSchemaId());
continue;
}
String httpMethod = operationEntry.getKey().name();
OperationMeta operationMeta = new OperationMeta();
operationMeta.init(this, method, strPath, httpMethod, operation);
operationMgr.register(method.getName(), operationMeta);
}
}
}
public Swagger getSwagger() {
return swagger;
}
public String getSchemaId() {
return name;
}
public String getMicroserviceQualifiedName() {
return microserviceQualifiedName;
}
public String getMicroserviceName() {
return microserviceMeta.getName();
}
public MicroserviceMeta getMicroserviceMeta() {
return microserviceMeta;
}
public Class<?> getSwaggerIntf() {
return swaggerIntf;
}
public List<Handler> getConsumerHandlerChain() {
return consumerHandlerChain;
}
public void setConsumerHandlerChain(List<Handler> consumerHandlerChain) {
this.consumerHandlerChain = consumerHandlerChain;
}
public List<Handler> getProviderHandlerChain() {
return providerHandlerChain;
}
public void setProviderHandlerChain(List<Handler> providerHandlerChain) {
this.providerHandlerChain = providerHandlerChain;
}
}
| 1 | 9,666 | It just does the check, maybe we can name it as checkOperations. | apache-servicecomb-java-chassis | java |
@@ -344,7 +344,7 @@ describe('runRules', function() {
});
fixture.innerHTML =
- '<div id="t1"><span></span></div><div id="t2"><em></em></div>';
+ '<div id="t1"><em></em></div><div id="t2"><em></em></div>';
var $test = {
0: fixture.querySelector('#t1'), | 1 | /*global runRules */
describe('runRules', function() {
'use strict';
  // These tests can sometimes be flaky in IE, so allow for up to 3 retries
if (axe.testUtils.isIE11) {
this.retries(3);
}
function iframeReady(src, context, id, cb) {
var i = document.createElement('iframe');
i.addEventListener('load', function() {
cb();
});
i.src = src;
i.id = id;
context.appendChild(i);
}
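  // Add two iframes to the fixture (the given url, or a default frame page, plus
  // nocode.html) and invoke the callback once both frames have loaded.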
function createFrames(url, callback) {
var frame,
num = 2;
var loaded = 0;
if (typeof url === 'function') {
callback = url;
url = '../mock/frames/frame-frame.html';
}
function onLoad() {
loaded++;
if (loaded >= num) {
callback();
}
}
frame = document.createElement('iframe');
frame.src = url;
frame.addEventListener('load', onLoad);
fixture.appendChild(frame);
frame = document.createElement('iframe');
frame.src = '../mock/frames/nocode.html';
frame.addEventListener('load', onLoad);
fixture.appendChild(frame);
return frame;
}
var fixture = document.getElementById('fixture');
var isNotCalled;
beforeEach(function() {
isNotCalled = function(err) {
throw err || new Error('Reject should not be called');
};
});
afterEach(function() {
fixture.innerHTML = '';
axe._audit = null;
axe._tree = undefined;
});
it('should work', function(done) {
axe._load({
rules: [
{
id: 'html',
selector: 'html',
any: ['html']
}
],
checks: [
{
id: 'html',
evaluate: function() {
return true;
}
}
],
messages: {}
});
var frame = document.createElement('iframe');
frame.src = '../mock/frames/frame-frame.html';
frame.addEventListener('load', function() {
setTimeout(function() {
runRules(
document,
{},
function(r) {
assert.lengthOf(r[0].passes, 3);
done();
},
isNotCalled
);
}, 500);
});
fixture.appendChild(frame);
});
it('should properly order iframes', function(done) {
axe._load({
rules: [
{
id: 'iframe',
selector: 'iframe',
any: ['iframe']
}
],
checks: [
{
id: 'iframe',
evaluate: function() {
return true;
}
}
],
messages: {}
});
var frame = document.createElement('iframe');
frame.addEventListener('load', function() {
setTimeout(function() {
runRules(
document,
{},
function(r) {
var nodes = r[0].passes.map(function(detail) {
return detail.node.selector;
});
assert.deepEqual(nodes, [
['#level0'],
['#level0', '#level1'],
['#level0', '#level1', '#level2a'],
['#level0', '#level1', '#level2b']
]);
done();
},
isNotCalled
);
}, 500);
});
frame.id = 'level0';
frame.src = '../mock/frames/nested0.html';
fixture.appendChild(frame);
});
it('should properly calculate context and return results from matching frames', function(done) {
axe._load({
rules: [
{
id: 'div#target',
selector: '#target',
any: ['has-target']
},
{
id: 'first-div',
selector: 'div:not(#fixture)',
any: ['first-div']
}
],
checks: [
{
id: 'has-target',
evaluate: function() {
return true;
}
},
{
id: 'first-div',
evaluate: function(node) {
this.relatedNodes([node]);
return false;
},
after: function(results) {
if (results.length) {
results[0].result = true;
}
return [results[0]];
}
}
],
messages: {}
});
iframeReady(
'../mock/frames/context.html',
fixture,
'context-test',
function() {
var div = document.createElement('div');
fixture.appendChild(div);
runRules(
'#fixture',
{},
function(results) {
assert.deepEqual(JSON.parse(JSON.stringify(results)), [
{
id: 'div#target',
helpUrl:
'https://dequeuniversity.com/rules/axe/x.y/div#target?application=axeAPI',
pageLevel: false,
impact: null,
inapplicable: [],
incomplete: [],
violations: [],
passes: [
{
result: 'passed',
impact: null,
node: {
selector: ['#context-test', '#target'],
xpath: [
"/iframe[@id='context-test']",
"/div[@id='target']"
],
source: '<div id="target"></div>'
},
any: [
{
id: 'has-target',
data: null,
relatedNodes: []
}
],
all: [],
none: []
}
],
result: 'passed',
tags: []
},
{
id: 'first-div',
helpUrl:
'https://dequeuniversity.com/rules/axe/x.y/first-div?application=axeAPI',
pageLevel: false,
impact: null,
inapplicable: [],
incomplete: [],
violations: [],
passes: [
{
result: 'passed',
impact: null,
node: {
selector: ['#context-test', '#foo'],
xpath: ["/iframe[@id='context-test']", "/div[@id='foo']"],
source: '<div id="foo">\n <div id="bar"></div>\n </div>'
},
any: [
{
id: 'first-div',
data: null,
relatedNodes: [
{
selector: ['#context-test', '#foo'],
xpath: [
"/iframe[@id='context-test']",
"/div[@id='foo']"
],
source:
'<div id="foo">\n <div id="bar"></div>\n </div>'
}
]
}
],
all: [],
none: []
}
],
result: 'passed',
tags: []
}
]);
done();
},
isNotCalled
);
}
);
});
it('should reject if the context is invalid', function(done) {
axe._load({
rules: [
{
id: 'div#target',
selector: '#target',
any: ['has-target']
}
],
messages: {}
});
iframeReady(
'../mock/frames/context.html',
fixture,
'context-test',
function() {
runRules(
'#not-happening',
{},
function() {
assert.fail('This selector should not exist.');
},
function(error) {
assert.isOk(error);
assert.equal(
error.message,
'No elements found for include in page Context'
);
done();
}
);
}
);
});
it('should accept a jQuery-like object', function(done) {
axe._load({
rules: [
{
id: 'test',
selector: '*',
none: ['bob']
}
],
checks: [
{
id: 'bob',
evaluate: function() {
return true;
}
}
]
});
fixture.innerHTML =
'<div id="t1"><span></span></div><div id="t2"><em></em></div>';
var $test = {
0: fixture.querySelector('#t1'),
1: fixture.querySelector('#t2'),
length: 2
};
axe.run($test, function(err, results) {
assert.isNull(err);
assert.lengthOf(results.violations, 1);
assert.lengthOf(results.violations[0].nodes, 4);
assert.deepEqual(results.violations[0].nodes[0].target, ['#t1']);
assert.deepEqual(results.violations[0].nodes[1].target, ['#t1 > span']);
assert.deepEqual(results.violations[0].nodes[2].target, ['#t2']);
assert.deepEqual(results.violations[0].nodes[3].target, ['#t2 > em']);
done();
});
});
it('should accept a NodeList', function(done) {
axe._load({
rules: [
{
id: 'test',
selector: '*',
none: ['fred']
}
],
checks: [
{
id: 'fred',
evaluate: function() {
return true;
}
}
]
});
fixture.innerHTML =
'<div class="foo" id="t1"><span></span></div><div class="foo" id="t2"><em></em></div>';
var test = fixture.querySelectorAll('.foo');
axe.run(test, function(err, results) {
assert.isNull(err);
assert.lengthOf(results.violations, 1);
assert.lengthOf(results.violations[0].nodes, 4);
assert.deepEqual(results.violations[0].nodes[0].target, ['#t1']);
assert.deepEqual(results.violations[0].nodes[1].target, ['#t1 > span']);
assert.deepEqual(results.violations[0].nodes[2].target, ['#t2']);
assert.deepEqual(results.violations[0].nodes[3].target, ['#t2 > em']);
done();
});
});
it('should pull metadata from configuration', function(done) {
axe._load({
rules: [
{
id: 'div#target',
selector: '#target',
any: ['has-target']
},
{
id: 'first-div',
selector: 'div#fixture div',
any: ['first-div']
}
],
checks: [
{
id: 'has-target',
evaluate: function() {
return false;
}
},
{
id: 'first-div',
evaluate: function(node) {
this.relatedNodes([node]);
return false;
},
after: function(results) {
if (results.length) {
results[0].result = true;
}
return [results[0]];
}
}
],
data: {
rules: {
'div#target': {
foo: 'bar',
stuff: 'blah'
},
'first-div': {
bar: 'foo',
stuff: 'no'
}
},
checks: {
'first-div': {
thingy: true,
impact: 'serious',
messages: {
fail: function(checkResult) {
return checkResult.id === 'first-div'
? 'failing is not good'
: 'y u wrong rule?';
},
pass: function(checkResult) {
return checkResult.id === 'first-div'
? 'passing is good'
: 'y u wrong rule?';
}
}
},
'has-target': {
otherThingy: true,
impact: 'moderate',
messages: {
fail: function(checkResult) {
return checkResult.id === 'has-target'
? 'failing is not good'
: 'y u wrong rule?';
},
pass: function(checkResult) {
return checkResult.id === 'has-target'
? 'passing is good'
: 'y u wrong rule?';
}
}
}
}
}
});
fixture.innerHTML = '<div id="target">Target!</div><div>ok</div>';
runRules(
'#fixture',
{},
function(results) {
assert.deepEqual(JSON.parse(JSON.stringify(results)), [
{
id: 'div#target',
helpUrl:
'https://dequeuniversity.com/rules/axe/x.y/div#target?application=axeAPI',
pageLevel: false,
foo: 'bar',
stuff: 'blah',
impact: 'moderate',
passes: [],
inapplicable: [],
incomplete: [],
violations: [
{
result: 'failed',
node: {
selector: ['#target'],
xpath: ["/div[@id='target']"],
source: '<div id="target">Target!</div>'
},
impact: 'moderate',
any: [
{
impact: 'moderate',
otherThingy: true,
message: 'failing is not good',
id: 'has-target',
data: null,
relatedNodes: []
}
],
all: [],
none: []
}
],
result: 'failed',
tags: []
},
{
id: 'first-div',
helpUrl:
'https://dequeuniversity.com/rules/axe/x.y/first-div?application=axeAPI',
pageLevel: false,
bar: 'foo',
stuff: 'no',
impact: null,
inapplicable: [],
incomplete: [],
violations: [],
passes: [
{
result: 'passed',
impact: null,
node: {
selector: ['#target'],
xpath: ["/div[@id='target']"],
source: '<div id="target">Target!</div>'
},
any: [
{
impact: 'serious',
id: 'first-div',
thingy: true,
message: 'passing is good',
data: null,
relatedNodes: [
{
selector: ['#target'],
xpath: ["/div[@id='target']"],
source: '<div id="target">Target!</div>'
}
]
}
],
all: [],
none: []
}
],
result: 'passed',
tags: []
}
]);
done();
},
isNotCalled
);
});
it('should call the reject argument if an error occurs', function(done) {
axe._load({
rules: [
{
id: 'invalidRule'
}
],
checks: [],
messages: {}
});
createFrames(function() {
setTimeout(function() {
runRules(
document,
{},
function() {
assert.ok(false, 'You shall not pass!');
done();
},
function(err) {
assert.instanceOf(err, Error);
done();
}
);
}, 100);
});
});
it('should resolve to cantTell when a rule fails', function(done) {
axe._load({
rules: [
{
id: 'incomplete-1',
selector: '*',
none: ['undeffed']
},
{
id: 'incomplete-2',
selector: '*',
none: ['thrower']
}
],
checks: [
{
id: 'undeffed',
evaluate: function() {
return undefined;
}
},
{
id: 'thrower',
evaluate: function() {
throw new Error('Check failed to complete');
}
}
]
});
fixture.innerHTML = '<div></div>';
axe.run('#fixture', function(err, results) {
assert.isNull(err);
assert.lengthOf(results.incomplete, 2);
assert.equal(results.incomplete[0].id, 'incomplete-1');
assert.equal(results.incomplete[1].id, 'incomplete-2');
assert.include(
results.incomplete[1].description,
'An error occured while running this rule'
);
done();
});
});
it('should resolve to cantTell if an error occurs inside frame rules', function(done) {
axe._load({
rules: [
{
id: 'incomplete-1',
selector: '.nogo',
none: ['undeffed']
},
{
id: 'incomplete-2',
selector: '.nogo',
none: ['thrower']
}
],
checks: [
{
id: 'undeffed',
evaluate: function() {
return false;
}
},
{
id: 'thrower',
evaluate: function() {
return false;
}
}
]
});
iframeReady(
'../mock/frames/rule-error.html',
fixture,
'context-test',
function() {
axe.run('#fixture', function(err, results) {
assert.isNull(err);
assert.lengthOf(results.incomplete, 2);
assert.equal(results.incomplete[0].id, 'incomplete-1');
assert.equal(results.incomplete[1].id, 'incomplete-2');
assert.include(
results.incomplete[1].description,
'An error occured while running this rule'
);
done();
});
}
);
});
it('should cascade `no elements found` errors in frames to reject run_rules', function(done) {
axe._load({
rules: [
{
id: 'invalidRule'
}
],
checks: [],
messages: {}
});
fixture.innerHTML = '<div id="outer"></div>';
var outer = document.getElementById('outer');
iframeReady('../mock/frames/context.html', outer, 'target', function() {
runRules(
[['#target', '#elementNotFound']],
{},
function resolve() {
assert.ok(false, 'frame should have thrown an error');
},
function reject(err) {
assert.instanceOf(err, Error);
assert.include(
err.message,
'No elements found for include in frame Context'
);
done();
}
);
});
});
it('should not call reject when the resolve throws', function(done) {
var rejectCalled = false;
axe._load({
rules: [
{
id: 'html',
selector: 'html',
any: ['html']
}
],
checks: [
{
id: 'html',
evaluate: function() {
return true;
}
}
],
messages: {}
});
function resolve() {
setTimeout(function() {
assert.isFalse(rejectCalled);
axe.log = log;
done();
}, 20);
throw new Error('err');
}
function reject() {
rejectCalled = true;
}
var log = axe.log;
axe.log = function(e) {
assert.equal(e.message, 'err');
axe.log = log;
};
runRules(document, {}, resolve, reject);
});
it('should ignore iframes if `iframes` === false', function(done) {
axe._load({
rules: [
{
id: 'html',
selector: 'html',
any: ['html']
}
],
checks: [
{
id: 'html',
evaluate: function() {
return true;
}
}
],
messages: {}
});
var frame = document.createElement('iframe');
frame.src = '../mock/frames/frame-frame.html';
frame.addEventListener('load', function() {
setTimeout(function() {
runRules(
document,
{ iframes: false },
function(r) {
assert.lengthOf(r[0].passes, 1);
assert.equal(
r[0].passes[0].node.element.ownerDocument,
document,
'Result should not be in an iframe'
);
done();
},
isNotCalled
);
}, 500);
});
fixture.appendChild(frame);
});
it('should not fail if `include` / `exclude` is overwritten', function(done) {
function invalid() {
throw new Error('nope!');
}
Array.prototype.include = invalid;
Array.prototype.exclude = invalid;
axe._load({
rules: [
{
id: 'html',
selector: 'html',
any: ['html']
}
],
checks: [
{
id: 'html',
evaluate: function() {
return true;
}
}
],
messages: {}
});
runRules(
[document],
{},
function(r) {
assert.lengthOf(r[0].passes, 1);
delete Array.prototype.include;
delete Array.prototype.exclude;
done();
},
isNotCalled
);
});
it('should return a cleanup method', function(done) {
axe._load({
rules: [
{
id: 'html',
selector: 'html',
any: ['html']
}
],
checks: [
{
id: 'html',
evaluate: function() {
return true;
}
}
],
messages: {}
});
runRules(
document,
{},
function resolve(out, cleanup) {
assert.isDefined(axe._tree);
assert.isDefined(axe._selectorData);
cleanup();
assert.isUndefined(axe._tree);
assert.isUndefined(axe._selectorData);
done();
},
isNotCalled
);
});
it('should clear up axe._tree / axe._selectorData after an error', function(done) {
axe._load({
rules: [
{
id: 'invalidRule'
}
],
checks: [],
messages: {}
});
createFrames(function() {
setTimeout(function() {
runRules(document, {}, isNotCalled, function() {
assert.isUndefined(axe._tree);
assert.isUndefined(axe._selectorData);
done();
});
}, 100);
});
});
});
| 1 | 14,952 | We no longer have karma output in the test file so these selectors were now unique and the target didn't need a child selector. Updated to force non-unique nodes | dequelabs-axe-core | js |
@@ -1,8 +1,14 @@
+<% content_for :additional_header_links do %>
+ <% if current_user.has_subscription_with_mentor? %>
+ <li class="mentor">
+ <%= mentor_image(current_user.mentor) %>
+ <%= mentor_contact_link(current_user.mentor) %>
+ </li>
+ <% end %>
+<% end %>
+
<section class="workshops vertical-slider revealed">
- <figure class="meta product-card">
- <%= render 'mentor' %>
- <%= render 'trails' %>
- </figure>
+ <%= render 'trails' %>
<%= render partial: 'products/workshop', collection: online_workshops %>
<%= render partial: 'products/workshop', collection: in_person_workshops %>
</section> | 1 | <section class="workshops vertical-slider revealed">
<figure class="meta product-card">
<%= render 'mentor' %>
<%= render 'trails' %>
</figure>
<%= render partial: 'products/workshop', collection: online_workshops %>
<%= render partial: 'products/workshop', collection: in_person_workshops %>
</section>
<section class="screencasts vertical-slider revealed">
<%= render partial: 'products/video', collection: videos %>
</section>
<section class="reading vertical-slider revealed">
<%= render partial: 'products/book', collection: books %>
</section>
<footer>
<%= link_to forum_url, target: '_blank' do %>
<span>Get answers from us, collaborate with other members — View the <%= t('shared.subscription.name') %> forum →</span>
<% end %>
</footer>
| 1 | 8,640 | This moved from a partial to not being in a partial. How about cleaning this view up further by moving it back into a partial? | thoughtbot-upcase | rb |
@@ -42,6 +42,7 @@ func NewLightweightInformer(
objType runtime.Object,
resync time.Duration,
h cache.ResourceEventHandler,
+ recieveUpdates bool,
) cache.Controller {
cacheStore := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{})
fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{ | 1 | package k8s
import (
"fmt"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
)
type lightweightCacheObject struct {
metav1.Object
Name string
Namespace string
}
func (lw *lightweightCacheObject) GetName() string { return lw.Name }
func (lw *lightweightCacheObject) GetNamespace() string { return lw.Namespace }
// The LightweightInformer is an informer that is optimized for memory usage, with some drawbacks.
//
// The reduction in memory consumption does come at a cost: to achieve it we store small objects
// in the informer's cache store, by storing a `lightweightCacheObject` instead
// of the full Kubernetes object.
// `lightweightCacheObject` has just enough metadata for the cache store and DeltaFIFO components to operate normally.
//
// There are drawbacks to using a LightweightInformer and it does not fit all use cases.
// For the Topology Caching this type of solution helped to reduce the memory footprint significantly
// for large scale Kubernetes deployments.
//
// Also note that the memory footprint of the cache store is only part of the story.
// While the informer's controller is receiving Kubernetes objects it stores each full object in the DeltaFIFO queue.
// This queue, while processed quickly, holds a vast number of objects at any given time and contributes greatly to memory usage.
//
// Drawbacks
// - The update resource event handler does not function as expected: old objects will always be nil.
//   This is because we don't cache the full k8s object needed to compute deltas, as we are using lightweightCacheObjects instead.
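//
// A minimal usage sketch (illustrative only; clientset, corev1, fields, stopCh and the
// handler bodies below are assumptions and are not imported by this package):
//
//	lw := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
//	informer := NewLightweightInformer(lw, &corev1.Pod{}, time.Minute, cache.ResourceEventHandlerFuncs{
//		AddFunc:    func(obj interface{}) { /* handle the added object */ },
//		DeleteFunc: func(obj interface{}) { /* handle the deleted object */ },
//	})
//	go informer.Run(stopCh)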
func NewLightweightInformer(
lw cache.ListerWatcher,
objType runtime.Object,
resync time.Duration,
h cache.ResourceEventHandler,
) cache.Controller {
cacheStore := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{})
fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
KnownObjects: cacheStore,
EmitDeltaTypeReplaced: true,
})
return cache.New(&cache.Config{
Queue: fifo,
ListerWatcher: lw,
ObjectType: objType,
FullResyncPeriod: resync,
RetryOnError: false,
Process: func(obj interface{}) error {
for _, d := range obj.(cache.Deltas) {
incomingObjectMeta, err := meta.Accessor(d.Object)
if err != nil {
return err
}
lightweightObj := &lightweightCacheObject{
Name: incomingObjectMeta.GetName(),
Namespace: incomingObjectMeta.GetNamespace(),
}
switch d.Type {
case cache.Sync, cache.Replaced, cache.Added, cache.Updated:
if _, exists, err := cacheStore.Get(lightweightObj); err == nil && exists {
if err := cacheStore.Update(lightweightObj); err != nil {
return err
}
h.OnUpdate(nil, d.Object)
} else {
if err := cacheStore.Add(lightweightObj); err != nil {
return err
}
h.OnAdd(d.Object)
}
case cache.Deleted:
if err := cacheStore.Delete(lightweightObj); err != nil {
return err
}
h.OnDelete(d.Object)
default:
return fmt.Errorf("Cache type not supported: %s", d.Type)
}
}
return nil
},
})
}
| 1 | 9,571 | add some form of docs in docstring or on :74 for what the effect is | lyft-clutch | go |
@@ -13,7 +13,8 @@ from kinto.core.storage import (
StorageBase,
exceptions,
)
-from kinto.core.utils import COMPARISON, find_nested_value, json
+from kinto.core.utils import COMPARISON, find_nested_value
+import json
def tree(): | 1 | import numbers
import operator
import re
from collections import abc, defaultdict
from kinto.core import utils
from kinto.core.decorators import deprecate_kwargs, synchronized
from kinto.core.storage import (
DEFAULT_DELETED_FIELD,
DEFAULT_ID_FIELD,
DEFAULT_MODIFIED_FIELD,
MISSING,
StorageBase,
exceptions,
)
from kinto.core.utils import COMPARISON, find_nested_value, json
def tree():
return defaultdict(tree)
class MemoryBasedStorage(StorageBase):
"""Abstract storage class, providing basic operations and
methods for in-memory implementations of sorting and filtering.
"""
json = json
def initialize_schema(self, dry_run=False):
# Nothing to do.
pass
def strip_deleted_object(
self,
resource_name,
parent_id,
obj,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
):
"""Strip the object of all its fields except id and timestamp,
and set the deletion field value (e.g. deleted=True)
"""
deleted = {}
deleted[id_field] = obj[id_field]
deleted[modified_field] = obj[modified_field]
deleted[deleted_field] = True
return deleted
def set_object_timestamp(
self,
resource_name,
parent_id,
obj,
modified_field=DEFAULT_MODIFIED_FIELD,
last_modified=None,
):
timestamp = self.bump_and_store_timestamp(
resource_name, parent_id, obj, modified_field, last_modified=last_modified
)
obj[modified_field] = timestamp
return obj
def extract_object_set(
self, objects, filters, sorting, id_field, deleted_field, pagination_rules=None, limit=None
):
"""Take the list of objects and handle filtering, sorting and
pagination.
"""
return extract_object_set(
objects,
filters=filters,
sorting=sorting,
id_field=id_field,
deleted_field=deleted_field,
pagination_rules=pagination_rules,
limit=limit,
)
def bump_timestamp(self, resource_timestamp, obj, modified_field, last_modified):
"""Timestamps are based on the current millisecond.
.. note ::
Here it is assumed that if requests from the same user burst in,
the time will slide into the future. It is not problematic since
the timestamp notion is opaque, and behaves like a revision number.
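Illustrative sketch (not an exact trace; it assumes ``utils.msec_time()``
keeps returning 1234 during a burst and that no timestamp is specified)::

    self.bump_timestamp(1234, None, "last_modified", None)  # -> (1235, 1235)
    self.bump_timestamp(1235, None, "last_modified", None)  # -> (1236, 1236)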
"""
is_specified = obj is not None and modified_field in obj or last_modified is not None
if is_specified:
# If there is a timestamp in the new object, try to use it.
if last_modified is not None:
current = last_modified
else:
current = obj[modified_field]
# If it is equal to current resource timestamp, bump it.
if current == resource_timestamp:
resource_timestamp += 1
current = resource_timestamp
# If it is superior (future), use it as new resource timestamp.
elif current > resource_timestamp:
resource_timestamp = current
# Else (past), do nothing.
else:
# Not specified, use a new one.
current = utils.msec_time()
# If two ops in the same msec, bump it.
if current <= resource_timestamp:
current = resource_timestamp + 1
resource_timestamp = current
return current, resource_timestamp
def bump_and_store_timestamp(
self, resource_name, parent_id, obj=None, modified_field=None, last_modified=None
):
"""Use the bump_timestamp to get its next value and store the resource_timestamp."""
raise NotImplementedError
class Storage(MemoryBasedStorage):
"""Storage backend implementation in memory.
Useful for development or testing purposes, but stored data is lost after
each server restart.
Enable in configuration::
kinto.storage_backend = kinto.core.storage.memory
"""
def __init__(self, *args, readonly=False, **kwargs):
super().__init__(*args, **kwargs)
self.readonly = readonly
self.flush()
def flush(self):
self._store = tree()
self._cemetery = tree()
self._timestamps = defaultdict(dict)
@synchronized
def resource_timestamp(self, resource_name, parent_id):
ts = self._timestamps[parent_id].get(resource_name)
if ts is not None:
return ts
if self.readonly:
error_msg = "Cannot initialize empty resource timestamp when running in readonly."
raise exceptions.ReadonlyError(message=error_msg)
return self.bump_and_store_timestamp(resource_name, parent_id)
def bump_and_store_timestamp(
self, resource_name, parent_id, obj=None, modified_field=None, last_modified=None
):
"""Use the bump_timestamp to get its next value and store the resource_timestamp."""
current_resource_timestamp = self._timestamps[parent_id].get(resource_name, 0)
current, resource_timestamp = self.bump_timestamp(
current_resource_timestamp, obj, modified_field, last_modified
)
self._timestamps[parent_id][resource_name] = resource_timestamp
return current
@deprecate_kwargs({"collection_id": "resource_name", "record": "obj"})
@synchronized
def create(
self,
resource_name,
parent_id,
obj,
id_generator=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
):
id_generator = id_generator or self.id_generator
# This is very inefficient, but memory storage is not used in production.
# The serialization provides the necessary consistency with the other
# backends' implementations, and the deserialization creates a deep
# copy of the passed object.
obj = json.loads(json.dumps(obj))
if id_field in obj:
# Raise unicity error if object with same id already exists.
try:
existing = self.get(resource_name, parent_id, obj[id_field])
raise exceptions.UnicityError(id_field, existing)
except exceptions.ObjectNotFoundError:
pass
else:
obj[id_field] = id_generator()
self.set_object_timestamp(resource_name, parent_id, obj, modified_field=modified_field)
_id = obj[id_field]
self._store[parent_id][resource_name][_id] = obj
self._cemetery[parent_id][resource_name].pop(_id, None)
return obj
@deprecate_kwargs({"collection_id": "resource_name"})
@synchronized
def get(
self,
resource_name,
parent_id,
object_id,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
):
objects = self._store[parent_id][resource_name]
if object_id not in objects:
raise exceptions.ObjectNotFoundError(object_id)
return {**objects[object_id]}
@deprecate_kwargs({"collection_id": "resource_name", "record": "obj"})
@synchronized
def update(
self,
resource_name,
parent_id,
object_id,
obj,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
):
# This is very inefficient, but memory storage is not used in production.
# The serialization provides the necessary consistency with the other
# backends' implementations, and the deserialization creates a deep
# copy of the passed object.
obj = json.loads(json.dumps(obj))
obj[id_field] = object_id
self.set_object_timestamp(resource_name, parent_id, obj, modified_field=modified_field)
self._store[parent_id][resource_name][object_id] = obj
self._cemetery[parent_id][resource_name].pop(object_id, None)
return obj
@deprecate_kwargs({"collection_id": "resource_name"})
@synchronized
def delete(
self,
resource_name,
parent_id,
object_id,
id_field=DEFAULT_ID_FIELD,
with_deleted=True,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
last_modified=None,
):
existing = self.get(resource_name, parent_id, object_id)
# Need to delete the last_modified field of the object.
del existing[modified_field]
self.set_object_timestamp(
resource_name,
parent_id,
existing,
modified_field=modified_field,
last_modified=last_modified,
)
existing = self.strip_deleted_object(resource_name, parent_id, existing)
# Add to deleted items, remove from store.
if with_deleted:
deleted = {**existing}
self._cemetery[parent_id][resource_name][object_id] = deleted
self._store[parent_id][resource_name].pop(object_id)
return existing
@deprecate_kwargs({"collection_id": "resource_name"})
@synchronized
def purge_deleted(
self,
resource_name,
parent_id,
before=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
):
parent_id_match = re.compile(parent_id.replace("*", ".*"))
by_parent_id = {
pid: resources
for pid, resources in self._cemetery.items()
if parent_id_match.match(pid)
}
num_deleted = 0
for pid, resources in by_parent_id.items():
if resource_name is not None:
resources = {resource_name: resources[resource_name]}
for resource, resource_objects in resources.items():
if before is None:
kept = {}
else:
kept = {
key: value
for key, value in resource_objects.items()
if value[modified_field] >= before
}
self._cemetery[pid][resource] = kept
num_deleted += len(resource_objects) - len(kept)
return num_deleted
@synchronized
def list_all(
self,
resource_name,
parent_id,
filters=None,
sorting=None,
pagination_rules=None,
limit=None,
include_deleted=False,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
):
objects = _get_objects_by_parent_id(self._store, parent_id, resource_name)
objects, _ = self.extract_object_set(
objects=objects,
filters=filters,
sorting=None,
id_field=id_field,
deleted_field=deleted_field,
)
deleted = []
if include_deleted:
deleted = _get_objects_by_parent_id(self._cemetery, parent_id, resource_name)
objects, _ = self.extract_object_set(
objects=objects + deleted,
filters=filters,
sorting=sorting,
id_field=id_field,
deleted_field=deleted_field,
pagination_rules=pagination_rules,
limit=limit,
)
return objects
@synchronized
def count_all(
self,
resource_name,
parent_id,
filters=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
):
objects = _get_objects_by_parent_id(self._store, parent_id, resource_name)
_, count = self.extract_object_set(
objects=objects,
filters=filters,
sorting=None,
id_field=id_field,
deleted_field=deleted_field,
)
return count
@deprecate_kwargs({"collection_id": "resource_name"})
@synchronized
def delete_all(
self,
resource_name,
parent_id,
filters=None,
sorting=None,
pagination_rules=None,
limit=None,
id_field=DEFAULT_ID_FIELD,
with_deleted=True,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
):
objects = _get_objects_by_parent_id(self._store, parent_id, resource_name, with_meta=True)
objects, count = self.extract_object_set(
objects=objects,
filters=filters,
sorting=sorting,
pagination_rules=pagination_rules,
limit=limit,
id_field=id_field,
deleted_field=deleted_field,
)
deleted = [
self.delete(
r.pop("__resource_name__"),
r.pop("__parent_id__"),
r[id_field],
id_field=id_field,
with_deleted=with_deleted,
modified_field=modified_field,
deleted_field=deleted_field,
)
for r in objects
]
return deleted
def extract_object_set(
objects,
filters,
sorting,
pagination_rules=None,
limit=None,
id_field=DEFAULT_ID_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
):
"""Apply filters, sorting, limit, and pagination rules to the list of
`objects`.
"""
filtered = list(apply_filters(objects, filters or []))
total_objects = len(filtered)
if pagination_rules:
paginated = []
for rule in pagination_rules:
values = apply_filters(filtered, rule)
paginated.extend(values)
else:
paginated = filtered
sorted_ = apply_sorting(paginated, sorting or [])
filtered_deleted = len([r for r in sorted_ if r.get(deleted_field) is True])
if limit:
sorted_ = list(sorted_)[:limit]
return sorted_, total_objects - filtered_deleted
def canonical_json(obj):
# We just want a predictable serialization so that we can simply compare strings.
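# Illustrative example: key order does not matter, so the following holds:
#   canonical_json({"a": 1, "b": 2}) == canonical_json({"b": 2, "a": 1})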
return json.dumps(obj, sort_keys=True)
def apply_filters(objects, filters):
"""Filter the specified objects, using basic iteration."""
def contains_filtering(object_value, search_term):
if object_value == MISSING:
return False
try:
search_set = set([canonical_json(v) for v in search_term])
object_value_set = set([canonical_json(v) for v in object_value])
except TypeError:
return False
return object_value_set.intersection(search_set) == search_set
def contains_any_filtering(object_value, search_term):
if object_value == MISSING:
return False
try:
search_set = set([canonical_json(v) for v in search_term])
object_value_set = set([canonical_json(v) for v in object_value])
except TypeError:
return False
return object_value_set.intersection(search_set)
operators = {
COMPARISON.LT: operator.lt,
COMPARISON.MAX: operator.le,
COMPARISON.EQ: operator.eq,
COMPARISON.NOT: operator.ne,
COMPARISON.MIN: operator.ge,
COMPARISON.GT: operator.gt,
COMPARISON.IN: operator.contains,
COMPARISON.EXCLUDE: lambda x, y: not operator.contains(x, y),
COMPARISON.LIKE: lambda x, y: re.search(y, x, re.IGNORECASE),
COMPARISON.CONTAINS: contains_filtering,
COMPARISON.CONTAINS_ANY: contains_any_filtering,
}
for obj in objects:
matches = True
for f in filters:
right = f.value
if f.field == DEFAULT_ID_FIELD:
if isinstance(right, int):
right = str(right)
left = find_nested_value(obj, f.field, MISSING)
if f.operator in (COMPARISON.IN, COMPARISON.EXCLUDE):
right, left = left, right
elif f.operator == COMPARISON.LIKE:
# Add implicit start/end wildchars if none is specified.
if "*" not in right:
right = f"*{right}*"
right = f"^{right.replace('*', '.*')}$"
elif f.operator in (
COMPARISON.LT,
COMPARISON.MAX,
COMPARISON.EQ,
COMPARISON.NOT,
COMPARISON.MIN,
COMPARISON.GT,
):
left = schwartzian_transform(left)
right = schwartzian_transform(right)
if f.operator == COMPARISON.HAS:
matches = left != MISSING if f.value else left == MISSING
else:
matches = matches and operators[f.operator](left, right)
if matches:
yield obj
def schwartzian_transform(value):
"""Decorate a value with a tag that enforces the Postgres sort order.
The sort order, per https://www.postgresql.org/docs/9.6/static/datatype-json.html, is:
Object > Array > Boolean > Number > String > Null
Note that there are more interesting rules for comparing objects
and arrays but we probably don't need to be that compatible.
MISSING represents what would be a SQL NULL, which is "bigger"
than everything else.
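A small illustrative example (arbitrary values, shown only for the
relative ordering)::

    sorted(["b", 3, None, True], key=schwartzian_transform)
    # -> [None, 'b', 3, True]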
"""
if value is None:
return (0, value)
if isinstance(value, str):
return (1, value)
if isinstance(value, bool):
# This has to be before Number, because bools are a subclass
# of int :(
return (3, value)
if isinstance(value, numbers.Number):
return (2, value)
if isinstance(value, abc.Sequence):
return (4, value)
if isinstance(value, abc.Mapping):
return (5, value)
if value is MISSING:
return (6, value)
raise ValueError(f"Unknown value: {value}") # pragma: no cover
def apply_sorting(objects, sorting):
"""Sort the specified objects, using cumulative python sorting."""
result = list(objects)
if not result:
return result
def column(obj, name):
return schwartzian_transform(find_nested_value(obj, name, default=MISSING))
for sort in reversed(sorting):
result = sorted(result, key=lambda r: column(r, sort.field), reverse=(sort.direction < 0))
return result
def _get_objects_by_parent_id(store, parent_id, resource_name, with_meta=False):
parent_id_match = re.compile(f"^{parent_id.replace('*', '.*')}$")
by_parent_id = {
pid: resources for pid, resources in store.items() if parent_id_match.match(pid)
}
objects = []
for pid, resources in by_parent_id.items():
if resource_name is not None:
resources = {resource_name: resources[resource_name]}
for resource, colobjects in resources.items():
for r in colobjects.values():
if with_meta:
objects.append(dict(__resource_name__=resource, __parent_id__=pid, **r))
else:
objects.append(r)
return objects
def load_from_config(config):
return Storage()
| 1 | 12,801 | Couldn't you import `json` from `kinto.core.utils` here too? | Kinto-kinto | py |
@@ -8,6 +8,9 @@ require 'rspec/rails'
require 'steps/user_steps'
require 'steps/approval_steps'
+# mimic production env for view+mail
+ENV["DISABLE_SANDBOX_WARNING"] = 'true'
+
# Requires supporting ruby files with custom matchers and macros, etc, in
# spec/support/ and its subdirectories. Files matching `spec/**/*_spec.rb` are
# run as spec files by default. This means that files in spec/support that end | 1 | # This file is copied to spec/ when you run 'rails generate rspec:install'
ENV["RAILS_ENV"] ||= 'test'
require 'spec_helper'
require File.expand_path("../../config/environment", __FILE__)
require 'rspec/rails'
# Add additional requires below this line. Rails is not loaded until this point!
require 'steps/user_steps'
require 'steps/approval_steps'
# Requires supporting ruby files with custom matchers and macros, etc, in
# spec/support/ and its subdirectories. Files matching `spec/**/*_spec.rb` are
# run as spec files by default. This means that files in spec/support that end
# in _spec.rb will both be required and run as specs, causing the specs to be
# run twice. It is recommended that you do not name files matching this glob to
# end with _spec.rb. You can configure this pattern with the --pattern
# option on the command line or in ~/.rspec, .rspec or `.rspec-local`.
#
# The following line is provided for convenience purposes. It has the downside
# of increasing the boot-up time by auto-requiring all files in the support
# directory. Alternatively, in the individual `*_spec.rb` files, manually
# require only the support files necessary.
#
Dir[Rails.root.join('spec/support/**/*.rb')].each { |f| require f }
Dir.glob('./spec/steps/**/*_steps.rb') { |f| load f, true }
# Checks for pending migrations before tests are run.
# If you are not using ActiveRecord, you can remove this line.
ActiveRecord::Migration.maintain_test_schema!
RSpec.configure do |config|
# Remove this line if you're not using ActiveRecord or ActiveRecord fixtures
config.fixture_path = "#{::Rails.root}/spec/support/fixtures"
# If you're not using ActiveRecord, or you'd prefer not to run each of your
# examples within a transaction, remove the following line or assign false
# instead of true.
config.use_transactional_fixtures = false
# Turnip config
config.raise_error_for_unimplemented_steps = true
# RSpec Rails can automatically mix in different behaviours to your tests
# based on their file location, for example enabling you to call `get` and
# `post` in specs under `spec/controllers`.
#
# You can disable this behaviour by removing the line below, and instead
# explicitly tag your specs with their type, e.g.:
#
# RSpec.describe UsersController, :type => :controller do
# # ...
# end
#
# The different available types are documented in the features, such as in
# https://relishapp.com/rspec/rspec-rails/docs
config.infer_spec_type_from_file_location!
# Add modules for Turnip acceptance tests
config.include ApprovalSteps, type: :feature
config.include UserSteps, type: :feature
# Add modules for helpers
config.include ControllerSpecHelper, type: :controller
config.include RequestSpecHelper, type: :request
[:feature, :request].each do |type|
config.include IntegrationSpecHelper, type: type
end
config.include FeatureSpecHelper, type: :feature
config.include EnvironmentSpecHelper
# Much of the config here pieced together from
# http://stackoverflow.com/questions/8178120/capybara-with-js-true-causes-test-to-fail/28083267
config.before(:suite) do
DatabaseCleaner.strategy = :transaction
DatabaseCleaner.clean_with(:truncation)
end
config.before(:each) do
if Capybara.current_driver == :rack_test
DatabaseCleaner.strategy = :transaction
else
DatabaseCleaner.strategy = :truncation
end
DatabaseCleaner.start
end
config.after(:each) do
DatabaseCleaner.clean
ActionMailer::Base.deliveries.clear
OmniAuth.config.mock_auth[:myusa] = nil
end
Capybara.default_host = 'http://localhost:3000'
OmniAuth.config.test_mode = true
end
| 1 | 14,079 | Minor: Hmmm...is it worth setting this explicitly for the tests that it affects? | 18F-C2 | rb |
@@ -42,7 +42,6 @@ using namespace eprosima::fastrtps::rtps;
StatefulReader::~StatefulReader()
{
- std::lock_guard<std::recursive_timed_mutex> guard(mp_mutex);
logInfo(RTPS_READER,"StatefulReader destructor.");
is_alive_ = false;
| 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file StatefulReader.cpp
*
*/
#include <fastrtps/rtps/reader/StatefulReader.h>
#include <fastrtps/rtps/reader/ReaderListener.h>
#include <fastrtps/rtps/history/ReaderHistory.h>
#include <fastrtps/rtps/reader/timedevent/HeartbeatResponseDelay.h>
#include <fastrtps/rtps/reader/timedevent/InitialAckNack.h>
#include <fastrtps/log/Log.h>
#include <fastrtps/rtps/messages/RTPSMessageCreator.h>
#include "../participant/RTPSParticipantImpl.h"
#include "FragmentedChangePitStop.h"
#include "WriterProxy.h"
#include <fastrtps/utils/TimeConversion.h>
#include "../history/HistoryAttributesExtension.hpp"
#include <mutex>
#include <thread>
#include <cassert>
#define IDSTRING "(ID:"<< std::this_thread::get_id() <<") "<<
using namespace eprosima::fastrtps::rtps;
StatefulReader::~StatefulReader()
{
std::lock_guard<std::recursive_timed_mutex> guard(mp_mutex);
logInfo(RTPS_READER,"StatefulReader destructor.");
is_alive_ = false;
for(WriterProxy* writer : matched_writers_)
{
delete(writer);
}
for (WriterProxy* writer : matched_writers_pool_)
{
delete(writer);
}
}
StatefulReader::StatefulReader(
RTPSParticipantImpl* pimpl,
const GUID_t& guid,
const ReaderAttributes& att,
ReaderHistory* hist,
ReaderListener* listen)
: RTPSReader(pimpl,guid,att,hist, listen)
, acknack_count_(0)
, nackfrag_count_(0)
, times_(att.times)
, matched_writers_(att.matched_writers_allocation)
, matched_writers_pool_(att.matched_writers_allocation)
, proxy_changes_config_(resource_limits_from_history(hist->m_att, 0))
, disable_positive_acks_(att.disable_positive_acks)
, is_alive_(true)
{
// Update resource limits on proxy changes set adding 256 possibly missing changes
proxy_changes_config_.initial += 256u;
if (proxy_changes_config_.increment == 0)
{
proxy_changes_config_.maximum += 256u;
}
else
{
proxy_changes_config_.maximum = std::max(proxy_changes_config_.maximum, proxy_changes_config_.initial);
}
const RTPSParticipantAttributes& part_att = pimpl->getRTPSParticipantAttributes();
for (size_t n = 0; n < att.matched_writers_allocation.initial; ++n)
{
matched_writers_pool_.push_back(new WriterProxy(this, part_att.allocation.locators, proxy_changes_config_));
}
}
bool StatefulReader::matched_writer_add(const WriterProxyData& wdata)
{
assert(wdata.guid() != c_Guid_Unknown);
std::lock_guard<std::recursive_timed_mutex> guard(mp_mutex);
if (!is_alive_)
{
return false;
}
for (WriterProxy* it : matched_writers_)
{
if (it->guid() == wdata.guid())
{
logInfo(RTPS_READER, "Attempting to add existing writer, updating information");
it->update(wdata);
for (const Locator_t& locator : it->remote_locators_shrinked())
{
getRTPSParticipant()->createSenderResources(locator);
}
return false;
}
}
// Get a writer proxy from the inactive pool (or create a new one if necessary and allowed)
WriterProxy* wp = nullptr;
if (matched_writers_pool_.empty())
{
size_t max_writer_proxies = matched_writers_pool_.max_size();
if (matched_writers_.size() + matched_writers_pool_.size() < max_writer_proxies)
{
const RTPSParticipantAttributes& part_att = mp_RTPSParticipant->getRTPSParticipantAttributes();
wp = new WriterProxy(this, part_att.allocation.locators, proxy_changes_config_);
}
else
{
logWarning(RTPS_READER, "Maximum number of writer proxies (" << max_writer_proxies << \
") reached for reader " << m_guid << endl);
return false;
}
}
else
{
wp = matched_writers_pool_.back();
matched_writers_pool_.pop_back();
}
wp->start(wdata);
for (const Locator_t& locator : wp->remote_locators_shrinked())
{
getRTPSParticipant()->createSenderResources(locator);
}
add_persistence_guid(wdata.guid(), wdata.persistence_guid());
wp->loaded_from_storage_nts(get_last_notified(wdata.guid()));
matched_writers_.push_back(wp);
logInfo(RTPS_READER, "Writer Proxy " << wp->guid() << " added to " << m_guid.entityId);
return true;
}
bool StatefulReader::matched_writer_remove(const GUID_t& writer_guid)
{
std::unique_lock<std::recursive_timed_mutex> lock(mp_mutex);
if (is_alive_)
{
WriterProxy *wproxy = nullptr;
//Remove cachechanges belonging to the unmatched writer
mp_history->remove_changes_with_guid(writer_guid);
for (ResourceLimitedVector<WriterProxy*>::iterator it = matched_writers_.begin(); it != matched_writers_.end(); ++it)
{
if ((*it)->guid() == writer_guid)
{
logInfo(RTPS_READER, "Writer Proxy removed: " << (*it)->guid());
wproxy = *it;
matched_writers_.erase(it);
remove_persistence_guid(wproxy->guid(), wproxy->attributes().persistence_guid());
break;
}
}
if (wproxy != nullptr)
{
wproxy->stop();
matched_writers_pool_.push_back(wproxy);
return true;
}
logInfo(RTPS_READER, "Writer Proxy " << writer_guid << " doesn't exist in reader " << this->getGuid().entityId);
}
return false;
}
bool StatefulReader::liveliness_expired(const GUID_t& writer_guid)
{
std::unique_lock<std::recursive_timed_mutex> lock(mp_mutex);
if (is_alive_)
{
WriterProxy *wproxy = nullptr;
//Remove cachechanges belonging to the unmatched writer
mp_history->remove_changes_with_guid(writer_guid);
for (ResourceLimitedVector<WriterProxy*>::iterator it = matched_writers_.begin(); it != matched_writers_.end(); ++it)
{
if ((*it)->guid() == writer_guid)
{
logInfo(RTPS_READER, "Writer Proxy removed: " << (*it)->guid());
wproxy = *it;
matched_writers_.erase(it);
remove_persistence_guid(wproxy->guid(), wproxy->attributes().persistence_guid());
if (mp_listener != nullptr)
{
MatchingInfo info(REMOVED_MATCHING, writer_guid);
mp_listener->onReaderMatched(this, info);
}
wproxy->stop();
matched_writers_pool_.push_back(wproxy);
return true;
}
}
logInfo(RTPS_READER, "Writer Proxy " << writer_guid << " doesn't exist in reader " << this->getGuid().entityId);
}
return false;
}
bool StatefulReader::assert_liveliness(const GUID_t& writer_guid)
{
WriterProxy* WP;
if (matched_writer_lookup(writer_guid, &WP))
{
WP->assert_liveliness();
return true;
}
return false;
}
bool StatefulReader::matched_writer_is_matched(const GUID_t& writer_guid)
{
std::lock_guard<std::recursive_timed_mutex> guard(mp_mutex);
if (is_alive_)
{
for(WriterProxy* it : matched_writers_)
{
if(it->guid() == writer_guid && it->is_alive())
{
return true;
}
}
}
return false;
}
bool StatefulReader::matched_writer_lookup(
const GUID_t& writerGUID,
WriterProxy** WP)
{
assert(WP);
std::lock_guard<std::recursive_timed_mutex> guard(mp_mutex);
if (!is_alive_)
{
return false;
}
bool returnedValue = findWriterProxy(writerGUID, WP);
if(returnedValue)
{
logInfo(RTPS_READER, this->getGuid().entityId << " FINDS writerProxy " << writerGUID << " from "
<< matched_writers_.size());
}
else
{
logInfo(RTPS_READER, this->getGuid().entityId << " NOT FINDS writerProxy " << writerGUID << " from "
<< matched_writers_.size());
}
return returnedValue;
}
bool StatefulReader::findWriterProxy(
const GUID_t& writerGUID,
WriterProxy** WP) const
{
assert(WP);
for(WriterProxy* it : matched_writers_)
{
if(it->guid() == writerGUID && it->is_alive())
{
*WP = it;
return true;
}
}
return false;
}
bool StatefulReader::processDataMsg(CacheChange_t *change)
{
WriterProxy *pWP = nullptr;
assert(change);
std::unique_lock<std::recursive_timed_mutex> lock(mp_mutex);
if (!is_alive_)
{
return false;
}
if(acceptMsgFrom(change->writerGUID, &pWP))
{
// Check if CacheChange was received.
if(!pWP->change_was_received(change->sequenceNumber))
{
logInfo(RTPS_MSG_IN,IDSTRING"Trying to add change " << change->sequenceNumber <<" TO reader: "<< getGuid().entityId);
CacheChange_t* change_to_add;
if(reserveCache(&change_to_add, change->serializedPayload.length)) //Reserve a new cache from the corresponding cache pool
{
#if HAVE_SECURITY
if(getAttributes().security_attributes().is_payload_protected)
{
change_to_add->copy_not_memcpy(change);
if(!getRTPSParticipant()->security_manager().decode_serialized_payload(change->serializedPayload,
change_to_add->serializedPayload, m_guid, change->writerGUID))
{
releaseCache(change_to_add);
logWarning(RTPS_MSG_IN, "Cannot decode serialized payload");
return false;
}
}
else
{
#endif
if (!change_to_add->copy(change))
{
logWarning(RTPS_MSG_IN,IDSTRING"Problem copying CacheChange, received data is: " << change->serializedPayload.length
<< " bytes and max size in reader " << getGuid().entityId << " is " << change_to_add->serializedPayload.max_size);
releaseCache(change_to_add);
return false;
}
#if HAVE_SECURITY
}
#endif
}
else
{
logError(RTPS_MSG_IN,IDSTRING"Problem reserving CacheChange in reader: " << getGuid().entityId);
return false;
}
// Assertion has to be done before calling change_received,
// because this function can unlock the StatefulReader timed_mutex.
if(pWP != nullptr)
{
pWP->assert_liveliness(); //Assert liveliness since we have received a DATA MESSAGE.
}
if(!change_received(change_to_add, pWP))
{
logInfo(RTPS_MSG_IN,IDSTRING"MessageReceiver did not add change "<<change_to_add->sequenceNumber);
releaseCache(change_to_add);
if(pWP == nullptr && getGuid().entityId == c_EntityId_SPDPReader)
{
mp_RTPSParticipant->assertRemoteRTPSParticipantLiveliness(change->writerGUID.guidPrefix);
}
}
}
}
return true;
}
bool StatefulReader::processDataFragMsg(
CacheChange_t* incomingChange,
uint32_t sampleSize,
uint32_t fragmentStartingNum)
{
WriterProxy *pWP = nullptr;
assert(incomingChange);
std::unique_lock<std::recursive_timed_mutex> lock(mp_mutex);
if (!is_alive_)
{
return false;
}
if(acceptMsgFrom(incomingChange->writerGUID, &pWP))
{
// Check if CacheChange was received.
if(!pWP->change_was_received(incomingChange->sequenceNumber))
{
logInfo(RTPS_MSG_IN, IDSTRING"Trying to add fragment " << incomingChange->sequenceNumber.to64long() << " TO reader: " << getGuid().entityId);
CacheChange_t* change_to_add = incomingChange;
#if HAVE_SECURITY
if(getAttributes().security_attributes().is_payload_protected)
{
if(reserveCache(&change_to_add, incomingChange->serializedPayload.length)) //Reserve a new cache from the corresponding cache pool
{
change_to_add->copy_not_memcpy(incomingChange);
if(!getRTPSParticipant()->security_manager().decode_serialized_payload(incomingChange->serializedPayload,
change_to_add->serializedPayload, m_guid, incomingChange->writerGUID))
{
releaseCache(change_to_add);
logWarning(RTPS_MSG_IN, "Cannot decode serialized payload");
return false;
}
}
}
#endif
// Fragments manager has to process incoming fragments.
// If CacheChange_t is completed, it will be returned;
CacheChange_t* change_completed = fragmentedChangePitStop_->process(change_to_add, sampleSize, fragmentStartingNum);
#if HAVE_SECURITY
if(getAttributes().security_attributes().is_payload_protected)
releaseCache(change_to_add);
#endif
// Assertion has to be done before calling change_received,
// because this function can unlock the StatefulReader mutex.
if(pWP != nullptr)
{
pWP->assert_liveliness(); //Assert liveliness since we have received a DATA MESSAGE.
}
if(change_completed != nullptr)
{
if(!change_received(change_completed, pWP))
{
logInfo(RTPS_MSG_IN, IDSTRING"MessageReceiver did not add change " << change_completed->sequenceNumber.to64long());
// Assert liveliness because it is a participant discovery info.
if(pWP == nullptr && getGuid().entityId == c_EntityId_SPDPReader)
{
mp_RTPSParticipant->assertRemoteRTPSParticipantLiveliness(incomingChange->writerGUID.guidPrefix);
}
releaseCache(change_completed);
}
}
}
}
return true;
}
bool StatefulReader::processHeartbeatMsg(
const GUID_t& writerGUID,
uint32_t hbCount,
const SequenceNumber_t& firstSN,
const SequenceNumber_t& lastSN,
bool finalFlag,
bool livelinessFlag)
{
WriterProxy *writer = nullptr;
std::unique_lock<std::recursive_timed_mutex> lock(mp_mutex);
if (!is_alive_)
{
return false;
}
if(acceptMsgFrom(writerGUID, &writer))
{
if (writer->process_heartbeat(hbCount, firstSN, lastSN, finalFlag, livelinessFlag, disable_positive_acks_))
{
fragmentedChangePitStop_->try_to_remove_until(firstSN, writerGUID);
// Maybe now we have to notify user from new CacheChanges.
NotifyChanges(writer);
}
}
return true;
}
bool StatefulReader::processGapMsg(
const GUID_t& writerGUID,
const SequenceNumber_t& gapStart,
const SequenceNumberSet_t& gapList)
{
WriterProxy *pWP = nullptr;
std::unique_lock<std::recursive_timed_mutex> lock(mp_mutex);
if (!is_alive_)
{
return false;
}
if(acceptMsgFrom(writerGUID, &pWP))
{
// TODO (Miguel C): Refactor this inside WriterProxy
std::unique_lock<std::recursive_mutex> wpLock(*pWP->get_mutex());
SequenceNumber_t auxSN;
SequenceNumber_t finalSN = gapList.base() - 1;
for(auxSN = gapStart; auxSN<=finalSN;auxSN++)
{
if(pWP->irrelevant_change_set(auxSN))
{
fragmentedChangePitStop_->try_to_remove(auxSN, pWP->guid());
}
}
gapList.for_each([&](SequenceNumber_t it)
{
if(pWP->irrelevant_change_set(it))
{
fragmentedChangePitStop_->try_to_remove(it, pWP->guid());
}
});
wpLock.unlock();
// Maybe now we have to notify user from new CacheChanges.
NotifyChanges(pWP);
}
return true;
}
bool StatefulReader::acceptMsgFrom(
const GUID_t& writerId,
WriterProxy **wp) const
{
assert(wp != nullptr);
for(WriterProxy* it : matched_writers_)
{
if(it->guid() == writerId && it->is_alive())
{
*wp = it;
return true;
}
}
return false;
}
bool StatefulReader::change_removed_by_history(
CacheChange_t* a_change,
WriterProxy* wp)
{
std::lock_guard<std::recursive_timed_mutex> guard(mp_mutex);
if (is_alive_)
{
if(wp != nullptr || matched_writer_lookup(a_change->writerGUID,&wp))
{
wp->change_removed_from_history(a_change->sequenceNumber);
return true;
}
else
{
logError(RTPS_READER," You should always find the WP associated with a change, something is very wrong");
}
}
return false;
}
bool StatefulReader::change_received(
CacheChange_t* a_change,
WriterProxy* prox)
{
//First look for the WriterProxy in case it is not provided
if(prox == nullptr)
{
if(!findWriterProxy(a_change->writerGUID, &prox))
{
logInfo(RTPS_READER, "Writer Proxy " << a_change->writerGUID <<" not matched to this Reader "<< m_guid.entityId);
return false;
}
}
// TODO (Miguel C): Refactor this inside WriterProxy
std::unique_lock<std::recursive_mutex> writerProxyLock(*prox->get_mutex());
size_t unknown_missing_changes_up_to = prox->unknown_missing_changes_up_to(a_change->sequenceNumber);
if(this->mp_history->received_change(a_change, unknown_missing_changes_up_to))
{
bool ret = prox->received_change_set(a_change->sequenceNumber);
GUID_t proxGUID = prox->guid();
// If KEEP_LAST and history full, mark older changes as lost.
CacheChange_t* aux_change = nullptr;
if(this->mp_history->isFull() && mp_history->get_min_change_from(&aux_change, proxGUID))
{
prox->lost_changes_update(aux_change->sequenceNumber);
fragmentedChangePitStop_->try_to_remove_until(aux_change->sequenceNumber, proxGUID);
}
writerProxyLock.unlock();
NotifyChanges(prox);
return ret;
}
return false;
}
void StatefulReader::NotifyChanges(WriterProxy* prox)
{
GUID_t proxGUID = prox->guid();
update_last_notified(proxGUID, prox->available_changes_max());
SequenceNumber_t nextChangeToNotify = prox->next_cache_change_to_be_notified();
while (nextChangeToNotify != SequenceNumber_t::unknown())
{
mp_history->postSemaphore();
if (getListener() != nullptr)
{
CacheChange_t* ch_to_give = nullptr;
if (mp_history->get_change(nextChangeToNotify, proxGUID, &ch_to_give))
{
if (!ch_to_give->isRead)
{
getListener()->onNewCacheChangeAdded((RTPSReader*)this, ch_to_give);
}
}
// Search again the WriterProxy because could be removed after the unlock.
if (!findWriterProxy(proxGUID, &prox))
break;
}
nextChangeToNotify = prox->next_cache_change_to_be_notified();
}
}
bool StatefulReader::nextUntakenCache(
CacheChange_t** change,
WriterProxy** wpout)
{
std::lock_guard<std::recursive_timed_mutex> guard(mp_mutex);
if (!is_alive_)
{
return false;
}
std::vector<CacheChange_t*> toremove;
bool takeok = false;
for(std::vector<CacheChange_t*>::iterator it = mp_history->changesBegin();
it!=mp_history->changesEnd();++it)
{
WriterProxy* wp;
if(this->matched_writer_lookup((*it)->writerGUID, &wp))
{
// TODO Review this check
SequenceNumber_t seq = wp->available_changes_max();
if(seq >= (*it)->sequenceNumber)
{
*change = *it;
if(wpout !=nullptr)
*wpout = wp;
takeok = true;
break;
// if((*it)->kind == ALIVE)
// {
// this->mp_type->deserialize(&(*it)->serializedPayload,data);
// }
// (*it)->isRead = true;
// if(info!=NULL)
// {
// info->sampleKind = (*it)->kind;
// info->writerGUID = (*it)->writerGUID;
// info->sourceTimestamp = (*it)->sourceTimestamp;
// info->iHandle = (*it)->instanceHandle;
// if(this->m_qos.m_ownership.kind == EXCLUSIVE_OWNERSHIP_QOS)
// info->ownershipStrength = wp->m_data->m_qos.m_ownershipStrength.value;
// }
// m_reader_cache.decreaseUnreadCount();
// logInfo(RTPS_READER,this->getGuid().entityId<<": reading change "<< (*it)->sequenceNumber.to64long());
// readok = true;
// break;
}
}
else
{
toremove.push_back((*it));
}
}
for(std::vector<CacheChange_t*>::iterator it = toremove.begin();
it!=toremove.end();++it)
{
logWarning(RTPS_READER,"Removing change "<<(*it)->sequenceNumber << " from " << (*it)->writerGUID << " because it is no longer paired");
mp_history->remove_change(*it);
}
return takeok;
}
// TODO Why is the change removed here and not when unpairing happens?
bool StatefulReader::nextUnreadCache(
CacheChange_t** change,
WriterProxy** wpout)
{
std::lock_guard<std::recursive_timed_mutex> guard(mp_mutex);
if (!is_alive_)
{
return false;
}
std::vector<CacheChange_t*> toremove;
bool readok = false;
for(std::vector<CacheChange_t*>::iterator it = mp_history->changesBegin();
it!=mp_history->changesEnd();++it)
{
if((*it)->isRead)
continue;
WriterProxy* wp;
if(this->matched_writer_lookup((*it)->writerGUID,&wp))
{
SequenceNumber_t seq;
seq = wp->available_changes_max();
if(seq >= (*it)->sequenceNumber)
{
*change = *it;
if(wpout !=nullptr)
*wpout = wp;
readok = true;
break;
// if((*it)->kind == ALIVE)
// {
// this->mp_type->deserialize(&(*it)->serializedPayload,data);
// }
// (*it)->isRead = true;
// if(info!=NULL)
// {
// info->sampleKind = (*it)->kind;
// info->writerGUID = (*it)->writerGUID;
// info->sourceTimestamp = (*it)->sourceTimestamp;
// info->iHandle = (*it)->instanceHandle;
// if(this->m_qos.m_ownership.kind == EXCLUSIVE_OWNERSHIP_QOS)
// info->ownershipStrength = wp->m_data->m_qos.m_ownershipStrength.value;
// }
// m_reader_cache.decreaseUnreadCount();
// logInfo(RTPS_READER,this->getGuid().entityId<<": reading change "<< (*it)->sequenceNumber.to64long());
// readok = true;
// break;
}
}
else
{
toremove.push_back((*it));
}
}
for(std::vector<CacheChange_t*>::iterator it = toremove.begin();
it!=toremove.end();++it)
{
logWarning(RTPS_READER,"Removing change "<<(*it)->sequenceNumber << " from " << (*it)->writerGUID << " because it is no longer paired");
mp_history->remove_change(*it);
}
return readok;
}
bool StatefulReader::updateTimes(const ReaderTimes& ti)
{
std::lock_guard<std::recursive_timed_mutex> guard(mp_mutex);
if (is_alive_)
{
if(times_.heartbeatResponseDelay != ti.heartbeatResponseDelay)
{
times_ = ti;
for(WriterProxy* writer : matched_writers_)
{
writer->update_heartbeat_response_interval(times_.heartbeatResponseDelay);
}
}
}
return true;
}
bool StatefulReader::isInCleanState()
{
bool cleanState = true;
std::unique_lock<std::recursive_timed_mutex> lock(mp_mutex);
if (is_alive_)
{
for (WriterProxy* wp : matched_writers_)
{
if (wp->number_of_changes_from_writer() != 0)
{
cleanState = false;
break;
}
}
}
return cleanState;
}
void StatefulReader::send_acknack(
const SequenceNumberSet_t& sns,
RTPSMessageGroup_t& buffer,
const RTPSMessageSenderInterface& sender,
bool is_final)
{
Count_t acknackCount = 0;
{//BEGIN PROTECTION
std::lock_guard<std::recursive_timed_mutex> guard_reader(mp_mutex);
acknack_count_++;
acknackCount = acknack_count_;
}
logInfo(RTPS_READER, "Sending ACKNACK: " << sns);
RTPSMessageGroup group(getRTPSParticipant(), this, buffer, sender);
group.add_acknack(sns, acknackCount, is_final);
}
void StatefulReader::send_acknack(
const WriterProxy* writer,
RTPSMessageGroup_t& buffer,
const RTPSMessageSenderInterface& sender,
bool heartbeat_was_final)
{
// Protect reader
std::lock_guard<std::recursive_timed_mutex> guard(mp_mutex);
if (!is_alive_)
{
return;
}
SequenceNumberSet_t missing_changes = writer->missing_changes();
// Stores missing changes for which some fragments have already been received.
std::vector<CacheChange_t*> uncompleted_changes;
try
{
RTPSMessageGroup group(getRTPSParticipant(), this, buffer, sender);
if (!missing_changes.empty() || !heartbeat_was_final)
{
GUID_t guid = sender.remote_guids().at(0);
SequenceNumberSet_t sns(writer->available_changes_max() + 1);
missing_changes.for_each(
[&](const SequenceNumber_t& seq)
{
// Check if the CacheChange_t is uncompleted.
CacheChange_t* uncomplete_change = findCacheInFragmentedCachePitStop(seq, guid);
if (uncomplete_change == nullptr)
{
if (!sns.add(seq))
{
logInfo(RTPS_READER, "Sequence number " << seq
<< " exceeded bitmap limit of AckNack. SeqNumSet Base: " << sns.base());
}
}
else
{
uncompleted_changes.push_back(uncomplete_change);
}
});
acknack_count_++;
logInfo(RTPS_READER, "Sending ACKNACK: " << sns;);
bool final = sns.empty();
group.add_acknack(sns, acknack_count_, final);
}
// Now generate NACK_FRAGS
if (!uncompleted_changes.empty())
{
for (auto cit : uncompleted_changes)
{
FragmentNumberSet_t frag_sns;
// Search first fragment not present.
uint32_t frag_num = 0;
auto fit = cit->getDataFragments()->begin();
for (; fit != cit->getDataFragments()->end(); ++fit)
{
++frag_num;
if (*fit == ChangeFragmentStatus_t::NOT_PRESENT)
break;
}
// This should never happen.
assert(frag_num != 0);
assert(fit != cit->getDataFragments()->end());
// Store FragmentNumberSet_t base.
frag_sns.base(frag_num);
// Fill the FragmentNumberSet_t bitmap.
for (; fit != cit->getDataFragments()->end(); ++fit)
{
if (*fit == ChangeFragmentStatus_t::NOT_PRESENT)
frag_sns.add(frag_num);
++frag_num;
}
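// Illustrative example (not part of the original source): for a change with six
// fragments where only fragments 3 and 5 are NOT_PRESENT, the two loops above
// produce a FragmentNumberSet_t with base 3 and bits set for {3, 5}.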
++nackfrag_count_;
logInfo(RTPS_READER, "Sending NACKFRAG for sample" << cit->sequenceNumber << ": " << frag_sns;);
group.add_nackfrag(cit->sequenceNumber, frag_sns, nackfrag_count_);
}
}
}
catch(const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
}
bool StatefulReader::send_sync_nts(
CDRMessage_t* message,
const Locator_t& locator,
std::chrono::steady_clock::time_point& max_blocking_time_point)
{
return mp_RTPSParticipant->sendSync(message, locator, max_blocking_time_point);
}
| 1 | 14,492 | This was added to fix a race condition and you are reverting it here, right? | eProsima-Fast-DDS | cpp |
@@ -2,11 +2,16 @@ package commands
import (
"context"
+ "fmt"
"github.com/ledgerwatch/turbo-geth/common"
)
-// Etherbase is the address that mining rewards will be send to
+// Coinbase is the address that mining rewards will be sent to
func (api *APIImpl) Coinbase(_ context.Context) (common.Address, error) {
+ if api.ethBackend == nil {
+ // We're running in --chaindata mode or otherwise cannot get the backend
+ return common.Address{}, fmt.Errorf("eth_coinbase function is not available")
+ }
return api.ethBackend.Etherbase()
} | 1 | package commands
import (
"context"
"github.com/ledgerwatch/turbo-geth/common"
)
// Etherbase is the address that mining rewards will be send to
func (api *APIImpl) Coinbase(_ context.Context) (common.Address, error) {
return api.ethBackend.Etherbase()
}
| 1 | 21,783 | Could you add some extra text here, so that it reads "eth_coinbase function is not available, please use --private.api.addr option instead of --chaindata option", so that it is clear that the function can work, but different options | ledgerwatch-erigon | go |
@@ -159,6 +159,19 @@ feature 'Creating an NCR work order' do
expect(page).to have_content('$2,000 for construction')
end
+ scenario "selecting Expense Type toggles Building required flag", :js do
+ login_as(requester)
+ visit "/ncr/work_orders/new"
+ find("#ncr_work_order_expense_type_ba60").click
+ expect(find('.ncr_work_order_building_number input')['class']).to_not match('required')
+ find("#ncr_work_order_expense_type_ba61").click
+ expect(find('.ncr_work_order_building_number input')['class']).to match('required')
+ find("#ncr_work_order_expense_type_ba80").click
+ expect(find('.ncr_work_order_building_number input')['class']).to match('required')
+ find("#ncr_work_order_expense_type_ba60").click
+ expect(find('.ncr_work_order_building_number input')['class']).to_not match('required')
+ end
+
scenario 'defaults to no approver if there was no previous request' do
login_as(requester)
visit '/ncr/work_orders/new' | 1 | feature 'Creating an NCR work order' do
scenario 'requires sign-in' do
visit '/ncr/work_orders/new'
expect(current_path).to eq('/')
expect(page).to have_content("You need to sign in")
end
with_feature 'RESTRICT_ACCESS' do
scenario 'requires a GSA email address' do
user = create(:user, email_address: '[email protected]', client_slug: "ncr")
login_as(user)
visit '/ncr/work_orders/new'
expect(page.status_code).to eq(403)
expect(page).to have_content("You must be logged in with a GSA email address")
end
end
context 'when signed in as the requester' do
let(:requester) { create(:user, client_slug: "ncr") }
let(:ncr_helper_class) { Class.new { extend Ncr::WorkOrdersHelper } }
scenario 'saves a Proposal with the attributes' do
approver = create(:user, client_slug: "ncr")
project_title = "buying stuff"
login_as(requester)
expect(Dispatcher).to receive(:deliver_new_proposal_emails)
visit '/ncr/work_orders/new'
fill_in 'Project title', with: project_title
fill_in 'Description', with: "desc content"
choose 'BA80'
fill_in 'RWA Number', with: 'F1234567'
fill_in 'Vendor', with: 'ACME'
fill_in 'Amount', with: 123.45
check "I am going to be using direct pay for this transaction"
select approver.email_address, from: 'ncr_work_order[approving_official_email]'
fill_in 'Building number', with: Ncr::BUILDING_NUMBERS[0]
select Ncr::Organization.all[0], from: 'ncr_work_order_org_code'
expect { click_on 'Submit for approval' }.to change { Proposal.count }.from(0).to(1)
proposal = Proposal.last
expect(proposal.public_id).to have_content("FY")
expect(page).to have_content("Proposal submitted")
expect(current_path).to eq("/proposals/#{proposal.id}")
expect(proposal.name).to eq(project_title)
expect(proposal.flow).to eq('linear')
work_order = proposal.client_data
expect(work_order.client_slug).to eq("ncr")
expect(work_order.expense_type).to eq('BA80')
expect(work_order.vendor).to eq('ACME')
expect(work_order.amount).to eq(123.45)
expect(work_order.direct_pay).to eq(true)
expect(work_order.building_number).to eq(Ncr::BUILDING_NUMBERS[0])
expect(work_order.org_code).to eq(Ncr::Organization.all[0].to_s)
expect(work_order.description).to eq('desc content')
expect(proposal.requester).to eq(requester)
expect(proposal.approvers.map(&:email_address)).to eq(
[approver.email_address, Ncr::Mailboxes.ba80_budget])
end
scenario 'saves a BA60 Proposal with the attributes' do
approver = create(:user, client_slug: "ncr")
login_as(requester)
expect(Dispatcher).to receive(:deliver_new_proposal_emails)
visit '/ncr/work_orders/new'
fill_in 'Project title', with: "blue shells"
fill_in 'Description', with: "desc content"
choose 'BA60'
fill_in 'Vendor', with: 'Yoshi'
fill_in 'Amount', with: 123.45
select approver.email_address, from: "Approving official's email address"
fill_in 'Building number', with: Ncr::BUILDING_NUMBERS[0]
select Ncr::Organization.all[0], from: 'ncr_work_order_org_code'
expect { click_on 'Submit for approval' }.to change { Proposal.count }.from(0).to(1)
proposal = Proposal.last
work_order = proposal.client_data
expect(work_order.expense_type).to eq('BA60')
expect(proposal.approvers.map(&:email_address)).to eq [
approver.email_address,
Ncr::Mailboxes.ba61_tier1_budget,
Ncr::Mailboxes.ba61_tier2_budget
]
end
scenario "flash message on error does not persist" do
approver = create(:user, client_slug: "ncr")
login_as(requester)
visit '/ncr/work_orders/new'
fill_in 'Project title', with: "blue shells"
fill_in 'Description', with: "desc content"
choose 'BA60'
fill_in 'Vendor', with: 'Yoshi'
fill_in 'Amount', with: 123.45
fill_in 'Building number', with: Ncr::BUILDING_NUMBERS[0]
select Ncr::Organization.all[0], from: 'ncr_work_order_org_code'
click_on 'Submit for approval'
expect(page).to have_content("Approving official email can't be blank")
visit "/proposals"
expect(page).to_not have_content("Approving official email can't be blank")
end
scenario "inactive users do not appear as potential approvers", :js do
approver = create(:user, client_slug: "ncr")
inactive_user = create(:user, client_slug: "ncr", active: false)
login_as(requester)
visit '/ncr/work_orders/new'
within(".ncr_work_order_approving_official_email") do
find(".selectize-control").click
expect(page).not_to have_content(inactive_user.email_address)
end
end
scenario 'shows the radio button' do
login_as(requester)
visit '/ncr/work_orders/new'
expect(page).to have_content('BA60')
expect(page).to have_content('BA61')
expect(page).to have_content('BA80')
end
scenario "does not show system approver emails as approver options", :js do
expect(Ncr::WorkOrder.all_system_approver_emails.size).to eq 4
login_as(requester)
approving_official = create(:user, client_slug: "ncr")
visit "/ncr/work_orders/new"
within(".ncr_work_order_approving_official_email") do
find(".selectize-control").click
expect(page).to have_content(approving_official.email_address)
expect(page).not_to have_content(Ncr::WorkOrder.ba61_tier1_budget_mailbox)
expect(page).not_to have_content(Ncr::WorkOrder.ba61_tier2_budget_mailbox)
expect(page).not_to have_content(Ncr::WorkOrder.ba80_budget_mailbox)
expect(page).not_to have_content(Ncr::WorkOrder.ool_ba80_budget_mailbox)
end
end
scenario "does not show requester as approver option", :js do
login_as(requester)
visit "/ncr/work_orders/new"
within(".ncr_work_order_approving_official_email") do
find(".selectize-control").click
expect(page).not_to have_content(requester.email_address)
end
end
scenario 'shows hint text for amount field', :js do
login_as(requester)
visit '/ncr/work_orders/new'
focus_field 'ncr_work_order_amount'
expect(page).to have_content('$3,500 for supplies')
expect(page).to have_content('$2,500 for services')
expect(page).to have_content('$2,000 for construction')
end
scenario 'defaults to no approver if there was no previous request' do
login_as(requester)
visit '/ncr/work_orders/new'
expect(find_field("Approving official's email address").value).to eq('')
end
scenario 'defaults to the approver from the last request' do
login_as(requester)
proposal = create(:proposal, :with_serial_approvers, requester: requester, client_slug: "ncr")
visit '/ncr/work_orders/new'
expect(find_field("Approving official's email address").value).to eq(proposal.approvers.first.email_address)
end
scenario 'requires a project_title' do
login_as(requester)
visit '/ncr/work_orders/new'
expect { click_on 'Submit for approval' }.to_not change { Proposal.count }
expect(page).to have_content("Project title can't be blank")
end
scenario "doesn't show the budget fields" do
login_as(requester)
visit '/ncr/work_orders/new'
expect(page).to_not have_field('CL number')
expect(page).to_not have_field('Function code')
expect(page).to_not have_field('SOC code')
end
scenario "doesn't save when the amount is too high" do
login_as(requester)
visit '/ncr/work_orders/new'
fill_in 'Project title', with: "buying stuff"
choose 'BA80'
fill_in 'Vendor', with: 'ACME'
fill_in 'Amount', with: 10_000
expect { click_on 'Submit for approval' }.to_not change { Proposal.count }
expect(current_path).to eq('/ncr/work_orders')
expect(page).to have_content("Amount must be less than or equal to $")
expect(find_field('Amount').value).to eq('10000')
end
scenario "preserve form values on submission error" do
login_as(requester)
work_order = create(:ncr_work_order, :with_approvers)
expect(Proposal.count).to eq(1)
expect(ncr_helper_class.vendor_options).to eq([work_order.vendor])
visit '/ncr/work_orders/new'
fill_in 'Project title', with: "buying stuff"
choose 'BA80'
fill_in 'Vendor', with: 'ACME'
fill_in 'Amount', with: 10_000
expect { click_on 'Submit for approval' }.to_not change { Proposal.count }
expect(ncr_helper_class.vendor_options('zzbar')).to eq([work_order.vendor, 'zzbar'])
expect(find_field('Amount').value).to eq('10000')
expect(find_field('Vendor').value).to eq('ACME')
expect(JSON.parse(find_field('Vendor')['data-initial'])).to eq(['ACME', work_order.vendor])
end
scenario "includes has overwritten field names" do
approver = create(:user, client_slug: "ncr")
login_as(requester)
visit '/ncr/work_orders/new'
fill_in 'Project title', with: "buying stuff"
choose 'BA80'
fill_in 'RWA Number', with: 'B9876543'
fill_in 'Vendor', with: 'ACME'
fill_in 'Amount', with: 123.45
select approver.email_address, from: 'ncr_work_order[approving_official_email]'
fill_in 'Building number', with: Ncr::BUILDING_NUMBERS[0]
select Ncr::Organization.all[0], from: 'ncr_work_order_org_code'
click_on 'Submit for approval'
expect(current_path).to eq("/proposals/#{Proposal.last.id}")
expect(page).to have_content("RWA Number")
end
scenario 'hides fields based on expense', :js do
login_as(requester)
visit '/ncr/work_orders/new'
expect(page).to have_no_field("RWA Number")
expect(page).to have_no_field("Work Order")
expect(page).to have_no_field("emergency")
choose 'BA61'
expect(page).to have_no_field("RWA Number")
expect(page).to have_no_field("Work Order")
expect(page).to have_field("emergency")
expect(find_field("emergency", visible: false)).to be_visible
choose 'BA80'
expect(page).to have_field("RWA Number")
expect(page).to have_field("Work Order")
expect(page).to have_no_field("emergency")
expect(find_field("RWA Number")).to be_visible
end
scenario 'allows attachments to be added during intake without JS' do
login_as(requester)
visit '/ncr/work_orders/new'
expect(page).to have_content("Attachments")
expect(page).not_to have_selector(".js-am-minus")
expect(page).not_to have_selector(".js-am-plus")
expect(page).to have_selector("input[type=file]", count: 10)
end
scenario 'allows attachments to be added during intake with JS', :js do
login_as(requester)
visit '/ncr/work_orders/new'
expect(page).to have_content("Attachments")
first_minus = find(".js-am-minus")
first_plus = find(".js-am-plus")
expect(first_minus).to be_visible
expect(first_plus).to be_visible
expect(first_minus).to be_disabled
expect(find("input[type=file]")[:name]).to eq("attachments[]")
first_plus.click # Adds one row
expect(page).to have_selector(".js-am-minus", count: 2)
expect(page).to have_selector(".js-am-plus", count: 2)
expect(page).to have_selector("input[type=file]", count: 2)
end
scenario 'includes an initial list of buildings', :js do
login_as(requester)
visit '/ncr/work_orders/new'
option = Ncr::BUILDING_NUMBERS.sample
expect(page).not_to have_selector(".option[data-value='#{option}']")
find("input[aria-label='Building number']").native.send_keys(option)
expect(page).to have_selector("div.option[data-value='#{option}']")
end
scenario 'does not include custom buildings initially', :js do
login_as(requester)
visit '/ncr/work_orders/new'
find("input[aria-label='Building number']").native.send_keys("BillDing")
expect(page).not_to have_selector("div.option[data-value='BillDing']")
end
scenario 'includes previously entered buildings, too', :js do
login_as(requester)
create(:ncr_work_order, building_number: "BillDing")
visit '/ncr/work_orders/new'
find("input[aria-label='Building number']").native.send_keys("BillDing")
expect(page).to have_selector("div.option[data-value='BillDing']")
end
context "selected common values on proposal page" do
before do
approver = create(:user, client_slug: "ncr")
login_as(requester)
visit '/ncr/work_orders/new'
fill_in 'Project title', with: "buying stuff"
fill_in 'Vendor', with: 'ACME'
fill_in 'Amount', with: 123.45
select approver.email_address, from: 'ncr_work_order[approving_official_email]'
fill_in 'Building number', with: Ncr::BUILDING_NUMBERS[0]
select Ncr::Organization.all[0], from: 'ncr_work_order_org_code'
end
scenario 'approves emergencies' do
choose 'BA61'
check "This request was an emergency and I received a verbal Notice to Proceed (NTP)"
expect { click_on 'Submit for approval' }.to change { Proposal.count }.from(0).to(1)
proposal = Proposal.last
expect(page).to have_content("Proposal submitted")
expect(current_path).to eq("/proposals/#{proposal.id}")
expect(page).to have_content("0 of 0 approved")
expect(proposal.client_data.emergency).to eq(true)
expect(proposal.approved?).to eq(true)
expect(proposal.approvers).to be_empty
expect(proposal.client_data.decorate.current_approver_email_address).to eq(Ncr::WorkOrderDecorator::EMERGENCY_APPROVER_EMAIL)
expect(proposal.client_data.decorate.final_approver_email_address).to eq(Ncr::WorkOrderDecorator::EMERGENCY_APPROVER_EMAIL)
end
scenario 'does not set emergencies if form type changes' do
choose 'BA61'
check 'This request was an emergency and I received a verbal Notice to Proceed (NTP)'
choose 'BA80'
fill_in 'RWA Number', with: 'R9876543'
expect { click_on 'Submit for approval' }.to change { Proposal.count }.from(0).to(1)
proposal = Proposal.last
expect(page).to have_content('Proposal submitted')
expect(current_path).to eq("/proposals/#{proposal.id}")
expect(proposal.client_data.emergency).to eq(false)
expect(proposal.approved?).to eq(false)
end
end
scenario 'does not disable the emergency field' do
login_as(requester)
visit '/ncr/work_orders/new'
expect(find_field('emergency')).not_to be_disabled
end
end
def focus_field(field_id)
execute_script "document.getElementById('#{field_id}').scrollIntoView()"
execute_script "$('##{field_id}').focus()"
end
end
| 1 | 15,755 | `new_ncr_work_order_path` ? (I've been slowly moving specs over to that way of calling paths) | 18F-C2 | rb |
@@ -15,7 +15,9 @@
application/javascript
text/css
image/*
- application/x-shockwave-flash
+ application/x-shockwave-flash ?
+ font/*
+ application/font-*
~h rex Header line in either request or response
~hq rex Header in request
~hs rex Header in response | 1 | """
The following operators are understood:
~q Request
~s Response
Headers:
Patterns are matched against "name: value" strings. Field names are
all-lowercase.
~a Asset content-type in response. Asset content types are:
text/javascript
application/x-javascript
application/javascript
text/css
image/*
application/x-shockwave-flash
~h rex Header line in either request or response
~hq rex Header in request
~hs rex Header in response
~b rex Expression in the body of either request or response
~bq rex Expression in the body of request
~bs rex Expression in the body of response
~t rex Shortcut for content-type header.
~d rex Request domain
~m rex Method
~u rex URL
~c CODE Response code.
rex Equivalent to ~u rex
"""
import functools
import re
import sys
from typing import ClassVar, Sequence, Type, Protocol, Union
import pyparsing as pp
from mitmproxy import flow, http, tcp
def only(*types):
def decorator(fn):
@functools.wraps(fn)
def filter_types(self, flow):
if isinstance(flow, types):
return fn(self, flow)
return False
return filter_types
return decorator
class _Token:
def dump(self, indent=0, fp=sys.stdout):
print("{spacing}{name}{expr}".format(
spacing="\t" * indent,
name=self.__class__.__name__,
expr=getattr(self, "expr", "")
), file=fp)
class _Action(_Token):
code: ClassVar[str]
help: ClassVar[str]
@classmethod
def make(klass, s, loc, toks):
return klass(*toks[1:])
class FErr(_Action):
code = "e"
help = "Match error"
def __call__(self, f):
return True if f.error else False
class FMarked(_Action):
code = "marked"
help = "Match marked flows"
def __call__(self, f):
return bool(f.marked)
class FHTTP(_Action):
code = "http"
help = "Match HTTP flows"
@only(http.HTTPFlow)
def __call__(self, f):
return True
class FWebSocket(_Action):
code = "websocket"
help = "Match WebSocket flows"
@only(http.HTTPFlow)
def __call__(self, f: http.HTTPFlow):
return f.websocket is not None
class FTCP(_Action):
code = "tcp"
help = "Match TCP flows"
@only(tcp.TCPFlow)
def __call__(self, f):
return True
class FReq(_Action):
code = "q"
help = "Match request with no response"
@only(http.HTTPFlow)
def __call__(self, f):
if not f.response:
return True
class FResp(_Action):
code = "s"
help = "Match response"
@only(http.HTTPFlow)
def __call__(self, f):
return bool(f.response)
class FAll(_Action):
code = "all"
help = "Match all flows"
def __call__(self, f: flow.Flow):
return True
class _Rex(_Action):
flags = 0
is_binary = True
def __init__(self, expr):
self.expr = expr
if self.is_binary:
expr = expr.encode()
try:
self.re = re.compile(expr, self.flags)
except Exception:
raise ValueError("Cannot compile expression.")
def _check_content_type(rex, message):
return any(
name.lower() == b"content-type" and
rex.search(value)
for name, value in message.headers.fields
)
class FAsset(_Action):
code = "a"
help = "Match asset in response: CSS, JavaScript, images."
ASSET_TYPES = [re.compile(x) for x in [
b"text/javascript",
b"application/x-javascript",
b"application/javascript",
b"text/css",
b"image/.*"
]]
@only(http.HTTPFlow)
def __call__(self, f):
if f.response:
for i in self.ASSET_TYPES:
if _check_content_type(i, f.response):
return True
return False
class FContentType(_Rex):
code = "t"
help = "Content-type header"
@only(http.HTTPFlow)
def __call__(self, f):
if _check_content_type(self.re, f.request):
return True
elif f.response and _check_content_type(self.re, f.response):
return True
return False
class FContentTypeRequest(_Rex):
code = "tq"
help = "Request Content-Type header"
@only(http.HTTPFlow)
def __call__(self, f):
return _check_content_type(self.re, f.request)
class FContentTypeResponse(_Rex):
code = "ts"
help = "Response Content-Type header"
@only(http.HTTPFlow)
def __call__(self, f):
if f.response:
return _check_content_type(self.re, f.response)
return False
class FHead(_Rex):
code = "h"
help = "Header"
flags = re.MULTILINE
@only(http.HTTPFlow)
def __call__(self, f):
if f.request and self.re.search(bytes(f.request.headers)):
return True
if f.response and self.re.search(bytes(f.response.headers)):
return True
return False
class FHeadRequest(_Rex):
code = "hq"
help = "Request header"
flags = re.MULTILINE
@only(http.HTTPFlow)
def __call__(self, f):
if f.request and self.re.search(bytes(f.request.headers)):
return True
class FHeadResponse(_Rex):
code = "hs"
help = "Response header"
flags = re.MULTILINE
@only(http.HTTPFlow)
def __call__(self, f):
if f.response and self.re.search(bytes(f.response.headers)):
return True
class FBod(_Rex):
code = "b"
help = "Body"
flags = re.DOTALL
@only(http.HTTPFlow, tcp.TCPFlow)
def __call__(self, f):
if isinstance(f, http.HTTPFlow):
if f.request and f.request.raw_content:
if self.re.search(f.request.get_content(strict=False)):
return True
if f.response and f.response.raw_content:
if self.re.search(f.response.get_content(strict=False)):
return True
if f.websocket:
for msg in f.websocket.messages:
if self.re.search(msg.content):
return True
elif isinstance(f, tcp.TCPFlow):
for msg in f.messages:
if self.re.search(msg.content):
return True
return False
class FBodRequest(_Rex):
code = "bq"
help = "Request body"
flags = re.DOTALL
@only(http.HTTPFlow, tcp.TCPFlow)
def __call__(self, f):
if isinstance(f, http.HTTPFlow):
if f.request and f.request.raw_content:
if self.re.search(f.request.get_content(strict=False)):
return True
if f.websocket:
for msg in f.websocket.messages:
if msg.from_client and self.re.search(msg.content):
return True
elif isinstance(f, tcp.TCPFlow):
for msg in f.messages:
if msg.from_client and self.re.search(msg.content):
return True
class FBodResponse(_Rex):
code = "bs"
help = "Response body"
flags = re.DOTALL
@only(http.HTTPFlow, tcp.TCPFlow)
def __call__(self, f):
if isinstance(f, http.HTTPFlow):
if f.response and f.response.raw_content:
if self.re.search(f.response.get_content(strict=False)):
return True
if f.websocket:
for msg in f.websocket.messages:
if not msg.from_client and self.re.search(msg.content):
return True
elif isinstance(f, tcp.TCPFlow):
for msg in f.messages:
if not msg.from_client and self.re.search(msg.content):
return True
class FMethod(_Rex):
code = "m"
help = "Method"
flags = re.IGNORECASE
@only(http.HTTPFlow)
def __call__(self, f):
return bool(self.re.search(f.request.data.method))
class FDomain(_Rex):
code = "d"
help = "Domain"
flags = re.IGNORECASE
is_binary = False
@only(http.HTTPFlow)
def __call__(self, f):
return bool(
self.re.search(f.request.host) or
self.re.search(f.request.pretty_host)
)
class FUrl(_Rex):
code = "u"
help = "URL"
is_binary = False
# FUrl is special, because it can be "naked".
@classmethod
def make(klass, s, loc, toks):
if len(toks) > 1:
toks = toks[1:]
return klass(*toks)
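# Illustrative note (not part of the original source): because FUrl can be "naked",
# the bare filter expression `example\.com` parses to FUrl(r"example\.com"), i.e. it
# is equivalent to `~u example\.com`.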
@only(http.HTTPFlow)
def __call__(self, f):
if not f or not f.request:
return False
return self.re.search(f.request.pretty_url)
class FSrc(_Rex):
code = "src"
help = "Match source address"
is_binary = False
def __call__(self, f):
if not f.client_conn or not f.client_conn.peername:
return False
r = "{}:{}".format(f.client_conn.peername[0], f.client_conn.peername[1])
return f.client_conn.peername and self.re.search(r)
class FDst(_Rex):
code = "dst"
help = "Match destination address"
is_binary = False
def __call__(self, f):
if not f.server_conn or not f.server_conn.address:
return False
r = "{}:{}".format(f.server_conn.address[0], f.server_conn.address[1])
return f.server_conn.address and self.re.search(r)
class FReplay(_Action):
code = "replay"
help = "Match replayed flows"
def __call__(self, f):
return f.is_replay is not None
class FReplayClient(_Action):
code = "replayq"
help = "Match replayed client request"
def __call__(self, f):
return f.is_replay == 'request'
class FReplayServer(_Action):
code = "replays"
help = "Match replayed server response"
def __call__(self, f):
return f.is_replay == 'response'
class FMeta(_Rex):
code = "meta"
help = "Flow metadata"
flags = re.MULTILINE
is_binary = False
def __call__(self, f):
m = "\n".join([f"{key}: {value}" for key, value in f.metadata.items()])
return self.re.search(m)
class FMarker(_Rex):
code = "marker"
help = "Match marked flows with specified marker"
is_binary = False
def __call__(self, f):
return self.re.search(f.marked)
class FComment(_Rex):
code = "comment"
help = "Flow comment"
flags = re.MULTILINE
is_binary = False
def __call__(self, f):
return self.re.search(f.comment)
class _Int(_Action):
def __init__(self, num):
self.num = int(num)
class FCode(_Int):
code = "c"
help = "HTTP response code"
@only(http.HTTPFlow)
def __call__(self, f):
if f.response and f.response.status_code == self.num:
return True
class FAnd(_Token):
def __init__(self, lst):
self.lst = lst
def dump(self, indent=0, fp=sys.stdout):
super().dump(indent, fp)
for i in self.lst:
i.dump(indent + 1, fp)
def __call__(self, f):
return all(i(f) for i in self.lst)
class FOr(_Token):
def __init__(self, lst):
self.lst = lst
def dump(self, indent=0, fp=sys.stdout):
super().dump(indent, fp)
for i in self.lst:
i.dump(indent + 1, fp)
def __call__(self, f):
return any(i(f) for i in self.lst)
class FNot(_Token):
def __init__(self, itm):
self.itm = itm[0]
def dump(self, indent=0, fp=sys.stdout):
super().dump(indent, fp)
self.itm.dump(indent + 1, fp)
def __call__(self, f):
return not self.itm(f)
filter_unary: Sequence[Type[_Action]] = [
FAsset,
FErr,
FHTTP,
FMarked,
FReplay,
FReplayClient,
FReplayServer,
FReq,
FResp,
FTCP,
FWebSocket,
FAll,
]
filter_rex: Sequence[Type[_Rex]] = [
FBod,
FBodRequest,
FBodResponse,
FContentType,
FContentTypeRequest,
FContentTypeResponse,
FDomain,
FDst,
FHead,
FHeadRequest,
FHeadResponse,
FMethod,
FSrc,
FUrl,
FMeta,
FMarker,
FComment,
]
filter_int = [
FCode
]
def _make():
# Order is important - multi-char expressions need to come before narrow
# ones.
parts = []
for cls in filter_unary:
f = pp.Literal(f"~{cls.code}") + pp.WordEnd()
f.setParseAction(cls.make)
parts.append(f)
# This is a bit of a hack to simulate Word(pyparsing_unicode.printables),
# which has a horrible performance with len(pyparsing.pyparsing_unicode.printables) == 1114060
unicode_words = pp.CharsNotIn("()~'\"" + pp.ParserElement.DEFAULT_WHITE_CHARS)
unicode_words.skipWhitespace = True
regex = (
unicode_words
| pp.QuotedString('"', escChar='\\')
| pp.QuotedString("'", escChar='\\')
)
for cls in filter_rex:
f = pp.Literal(f"~{cls.code}") + pp.WordEnd() + regex.copy()
f.setParseAction(cls.make)
parts.append(f)
for cls in filter_int:
f = pp.Literal(f"~{cls.code}") + pp.WordEnd() + pp.Word(pp.nums)
f.setParseAction(cls.make)
parts.append(f)
# A naked rex is a URL rex:
f = regex.copy()
f.setParseAction(FUrl.make)
parts.append(f)
atom = pp.MatchFirst(parts)
expr = pp.infixNotation(
atom,
[(pp.Literal("!").suppress(),
1,
pp.opAssoc.RIGHT,
lambda x: FNot(*x)),
(pp.Literal("&").suppress(),
2,
pp.opAssoc.LEFT,
lambda x: FAnd(*x)),
(pp.Literal("|").suppress(),
2,
pp.opAssoc.LEFT,
lambda x: FOr(*x)),
])
expr = pp.OneOrMore(expr)
return expr.setParseAction(lambda x: FAnd(x) if len(x) != 1 else x)
bnf = _make()
class TFilter(Protocol):
pattern: str
def __call__(self, f: flow.Flow) -> bool:
... # pragma: no cover
def parse(s: str) -> TFilter:
"""
Parse a filter expression and return the compiled filter function.
If the filter syntax is invalid, `ValueError` is raised.
"""
if not s:
raise ValueError("Empty filter expression")
try:
flt = bnf.parseString(s, parseAll=True)[0]
flt.pattern = s
return flt
except (pp.ParseException, ValueError) as e:
raise ValueError(f"Invalid filter expression: {s!r}") from e
def match(flt: Union[str, TFilter], flow: flow.Flow) -> bool:
"""
Matches a flow against a compiled filter expression.
Returns True if matched, False if not.
If flt is a string, it will be compiled as a filter expression.
If the expression is invalid, ValueError is raised.
"""
if isinstance(flt, str):
flt = parse(flt)
if flt:
return flt(flow)
return True
match_all: TFilter = parse("~all")
"""A filter function that matches all flows"""
help = []
for a in filter_unary:
help.append(
(f"~{a.code}", a.help)
)
for b in filter_rex:
help.append(
(f"~{b.code} regex", b.help)
)
for c in filter_int:
help.append(
(f"~{c.code} int", c.help)
)
help.sort()
help.extend(
[
("!", "unary not"),
("&", "and"),
("|", "or"),
("(...)", "grouping"),
]
)
| 1 | 16,004 | Should flash be included in the patterns too? It currently isn't. | mitmproxy-mitmproxy | py |
@@ -18,7 +18,7 @@ namespace Microsoft.Cci.Differs.Rules
if (impl.IsEffectivelySealed() && !contract.IsEffectivelySealed())
{
differences.AddIncompatibleDifference(this,
- "Type '{0}' is sealed in the implementation but not sealed in the contract.", impl.FullName());
+ $"Type '{impl.FullName()}' is sealed in the {Right} but not sealed in the {Left}.");
return DifferenceType.Changed;
} | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using Microsoft.Cci.Extensions;
using Microsoft.Cci.Extensions.CSharp;
namespace Microsoft.Cci.Differs.Rules
{
[ExportDifferenceRule]
internal class CannotSealType : DifferenceRule
{
public override DifferenceType Diff(IDifferences differences, ITypeDefinition impl, ITypeDefinition contract)
{
if (impl == null || contract == null)
return DifferenceType.Unknown;
if (impl.IsEffectivelySealed() && !contract.IsEffectivelySealed())
{
differences.AddIncompatibleDifference(this,
"Type '{0}' is sealed in the implementation but not sealed in the contract.", impl.FullName());
return DifferenceType.Changed;
}
return DifferenceType.Unknown;
}
}
}
| 1 | 14,803 | I suspect I will have to keep looking whether Left/Right refers to contract/Implementation while working on the rules code. | dotnet-buildtools | .cs |
@@ -76,7 +76,7 @@ namespace AutoRest.CSharp.Azure
if (model.Extensions.ContainsKey(AzureExtensions.AzureResourceExtension) &&
(bool)model.Extensions[AzureExtensions.AzureResourceExtension])
{
- model.BaseModelType = new CompositeType { Name = "IResource", SerializedName = "IResource" };
+ model.BaseModelType = new CompositeType { Name = "Microsoft.Rest.Azure.IResource", SerializedName = "Microsoft.Rest.Azure.IResource" };
}
}
} | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using AutoRest.Core;
using AutoRest.Core.ClientModel;
using AutoRest.CSharp.Azure.TemplateModels;
using AutoRest.CSharp.Azure.Templates;
using AutoRest.CSharp.TemplateModels;
using AutoRest.CSharp.Templates;
using AutoRest.Extensions.Azure;
namespace AutoRest.CSharp.Azure
{
public class AzureCSharpCodeGenerator : CSharpCodeGenerator
{
private readonly AzureCSharpCodeNamer _namer;
private const string ClientRuntimePackage = "Microsoft.Rest.ClientRuntime.Azure.3.2.0";
// page extensions class dictionary.
private IDictionary<KeyValuePair<string, string>, string> pageClasses;
public AzureCSharpCodeGenerator(Settings settings) : base(settings)
{
_namer = new AzureCSharpCodeNamer(settings);
IsSingleFileGenerationSupported = true;
pageClasses = new Dictionary<KeyValuePair<string, string>, string>();
}
public override string Name
{
get { return "Azure.CSharp"; }
}
public override string Description
{
get { return "Azure specific C# code generator."; }
}
public override string UsageInstructions
{
get
{
return string.Format(CultureInfo.InvariantCulture,
Properties.Resources.UsageInformation, ClientRuntimePackage);
}
}
public override string ImplementationFileExtension
{
get { return ".cs"; }
}
/// <summary>
/// Normalizes client model by updating names and types to be language specific.
/// </summary>
/// <param name="serviceClient"></param>
public override void NormalizeClientModel(ServiceClient serviceClient)
{
AzureExtensions.NormalizeAzureClientModel(serviceClient, Settings, _namer);
_namer.NormalizeClientModel(serviceClient);
_namer.ResolveNameCollisions(serviceClient, Settings.Namespace,
Settings.Namespace + ".Models");
_namer.NormalizePaginatedMethods(serviceClient, pageClasses);
_namer.NormalizeODataMethods(serviceClient);
if (serviceClient != null)
{
foreach (var model in serviceClient.ModelTypes)
{
if (model.Extensions.ContainsKey(AzureExtensions.AzureResourceExtension) &&
(bool)model.Extensions[AzureExtensions.AzureResourceExtension])
{
model.BaseModelType = new CompositeType { Name = "IResource", SerializedName = "IResource" };
}
}
}
}
/// <summary>
/// Generates C# code for service client.
/// </summary>
/// <param name="serviceClient"></param>
/// <returns></returns>
public override async Task Generate(ServiceClient serviceClient)
{
// Service client
var serviceClientTemplate = new AzureServiceClientTemplate
{
Model = new AzureServiceClientTemplateModel(serviceClient, InternalConstructors),
};
await Write(serviceClientTemplate, serviceClient.Name + ".cs");
// Service client extensions
if (serviceClient.Methods.Any(m => m.Group == null))
{
var extensionsTemplate = new ExtensionsTemplate
{
Model = new AzureExtensionsTemplateModel(serviceClient, null, SyncMethods),
};
await Write(extensionsTemplate, serviceClient.Name + "Extensions.cs");
}
// Service client interface
var serviceClientInterfaceTemplate = new ServiceClientInterfaceTemplate
{
Model = new AzureServiceClientTemplateModel(serviceClient, InternalConstructors),
};
await Write(serviceClientInterfaceTemplate, "I" + serviceClient.Name + ".cs");
// Operations
foreach (var group in serviceClient.MethodGroups)
{
// Operation
var operationsTemplate = new AzureMethodGroupTemplate
{
Model = new AzureMethodGroupTemplateModel(serviceClient, group),
};
await Write(operationsTemplate, operationsTemplate.Model.MethodGroupType + ".cs");
// Service client extensions
var operationExtensionsTemplate = new ExtensionsTemplate
{
Model = new AzureExtensionsTemplateModel(serviceClient, group, SyncMethods),
};
await Write(operationExtensionsTemplate, operationExtensionsTemplate.Model.ExtensionName + "Extensions.cs");
// Operation interface
var operationsInterfaceTemplate = new MethodGroupInterfaceTemplate
{
Model = new AzureMethodGroupTemplateModel(serviceClient, group),
};
await Write(operationsInterfaceTemplate, "I" + operationsInterfaceTemplate.Model.MethodGroupType + ".cs");
}
// Models
foreach (var model in serviceClient.ModelTypes.Concat(serviceClient.HeaderTypes))
{
if (model.Extensions.ContainsKey(AzureExtensions.ExternalExtension) &&
(bool) model.Extensions[AzureExtensions.ExternalExtension])
{
continue;
}
var modelTemplate = new ModelTemplate
{
Model = new AzureModelTemplateModel(model),
};
await Write(modelTemplate, Path.Combine("Models", model.Name + ".cs"));
}
// Enums
foreach (var enumType in serviceClient.EnumTypes)
{
var enumTemplate = new EnumTemplate
{
Model = new EnumTemplateModel(enumType),
};
await Write(enumTemplate, Path.Combine("Models", enumTemplate.Model.TypeDefinitionName + ".cs"));
}
// Page class
foreach (var pageClass in pageClasses)
{
var pageTemplate = new PageTemplate
{
Model = new PageTemplateModel(pageClass.Value, pageClass.Key.Key, pageClass.Key.Value),
};
await Write(pageTemplate, Path.Combine("Models", pageTemplate.Model.TypeDefinitionName + ".cs"));
}
// Exceptions
foreach (var exceptionType in serviceClient.ErrorTypes)
{
if (exceptionType.Name == "CloudError")
{
continue;
}
var exceptionTemplate = new ExceptionTemplate
{
Model = new ModelTemplateModel(exceptionType),
};
await Write(exceptionTemplate, Path.Combine("Models", exceptionTemplate.Model.ExceptionTypeDefinitionName + ".cs"));
}
}
}
}
| 1 | 22,605 | (ignore this. testing a codeflow bug) | Azure-autorest | java |
@@ -137,9 +137,9 @@ func buildCLIOptions() *cli.App {
EnvVar: "CASSANDRA_TLS_CA",
},
cli.BoolFlag{
- Name: schema.CLIFlagTLSEnableHostVerification,
- Usage: "TLS host verification",
- EnvVar: "CASSANDRA_TLS_VERIFY_HOST",
+ Name: schema.CLIFlagTLSDisableHostVerification,
+ Usage: "Cassandra tls verify server hostname",
+ EnvVar: "CASSANDRA_TLS_DISABLE_HOST_VERIFICATION",
},
}
| 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cassandra
import (
"fmt"
"os"
"github.com/urfave/cli"
"go.temporal.io/server/environment"
"go.temporal.io/server/tools/common/schema"
)
// RunTool runs the temporal-cassandra-tool command line tool
func RunTool(args []string) error {
app := buildCLIOptions()
return app.Run(args)
}
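// Illustrative invocation (a sketch, not part of the original file; the literal flag
// names live in the schema package, so only environment variables and the command name
// defined in buildCLIOptions below are used here):
//
//	CASSANDRA_HOST=127.0.0.1 CASSANDRA_KEYSPACE=temporal temporal-cassandra-tool setup-schema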
// SetupSchema sets up the cassandra schema
func SetupSchema(config *SetupSchemaConfig) error {
if err := validateCQLClientConfig(&config.CQLClientConfig); err != nil {
return err
}
db, err := newCQLClient(&config.CQLClientConfig)
if err != nil {
return err
}
return schema.SetupFromConfig(&config.SetupConfig, db)
}
// root handler for all cli commands
func cliHandler(c *cli.Context, handler func(c *cli.Context) error) {
quiet := c.GlobalBool(schema.CLIOptQuiet)
err := handler(c)
if err != nil && !quiet {
os.Exit(1)
}
}
func buildCLIOptions() *cli.App {
app := cli.NewApp()
app.Name = "temporal-cassandra-tool"
app.Usage = "Command line tool for temporal cassandra operations"
app.Version = "0.0.1"
app.Flags = []cli.Flag{
cli.StringFlag{
Name: schema.CLIFlagEndpoint,
Value: "127.0.0.1",
Usage: "hostname or ip address of cassandra host to connect to",
EnvVar: "CASSANDRA_HOST",
},
cli.IntFlag{
Name: schema.CLIFlagPort,
Value: environment.GetCassandraPort(),
Usage: "Port of cassandra host to connect to",
EnvVar: "CASSANDRA_PORT",
},
cli.StringFlag{
Name: schema.CLIFlagUser,
Value: "",
Usage: "User name used for authentication for connecting to cassandra host",
EnvVar: "CASSANDRA_USER",
},
cli.StringFlag{
Name: schema.CLIFlagPassword,
Value: "",
Usage: "Password used for authentication for connecting to cassandra host",
EnvVar: "CASSANDRA_PASSWORD",
},
cli.IntFlag{
Name: schema.CLIFlagTimeout,
Value: defaultTimeout,
Usage: "request Timeout in seconds used for cql client",
EnvVar: "CASSANDRA_TIMEOUT",
},
cli.StringFlag{
Name: schema.CLIFlagKeyspace,
Value: "temporal",
Usage: "name of the cassandra Keyspace",
EnvVar: "CASSANDRA_KEYSPACE",
},
cli.StringFlag{
Name: schema.CLIFlagDatacenter,
Value: "",
Usage: "enable NetworkTopologyStrategy by providing datacenter name",
EnvVar: "CASSANDRA_DATACENTER",
},
cli.BoolFlag{
Name: schema.CLIFlagQuiet,
Usage: "Don't set exit status to 1 on error",
},
cli.BoolFlag{
Name: schema.CLIFlagEnableTLS,
Usage: "enable TLS",
EnvVar: "CASSANDRA_ENABLE_TLS",
},
cli.StringFlag{
Name: schema.CLIFlagTLSCertFile,
Usage: "TLS cert file",
EnvVar: "CASSANDRA_TLS_CERT",
},
cli.StringFlag{
Name: schema.CLIFlagTLSKeyFile,
Usage: "TLS key file",
EnvVar: "CASSANDRA_TLS_KEY",
},
cli.StringFlag{
Name: schema.CLIFlagTLSCaFile,
Usage: "TLS CA file",
EnvVar: "CASSANDRA_TLS_CA",
},
cli.BoolFlag{
Name: schema.CLIFlagTLSEnableHostVerification,
Usage: "TLS host verification",
EnvVar: "CASSANDRA_TLS_VERIFY_HOST",
},
}
app.Commands = []cli.Command{
{
Name: "setup-schema",
Aliases: []string{"setup"},
Usage: "setup initial version of cassandra schema",
Flags: []cli.Flag{
cli.StringFlag{
Name: schema.CLIFlagVersion,
Usage: "initial version of the schema, cannot be used with disable-versioning",
},
cli.StringFlag{
Name: schema.CLIFlagSchemaFile,
Usage: "path to the .cql schema file; if un-specified, will just setup versioning tables",
},
cli.BoolFlag{
Name: schema.CLIFlagDisableVersioning,
Usage: "disable setup of schema versioning",
},
cli.BoolFlag{
Name: schema.CLIFlagOverwrite,
Usage: "drop all existing tables before setting up new schema",
},
},
Action: func(c *cli.Context) {
cliHandler(c, setupSchema)
},
},
{
Name: "update-schema",
Aliases: []string{"update"},
Usage: "update cassandra schema to a specific version",
Flags: []cli.Flag{
cli.StringFlag{
Name: schema.CLIFlagTargetVersion,
Usage: "target version for the schema update, defaults to latest",
},
cli.StringFlag{
Name: schema.CLIFlagSchemaDir,
Usage: "path to directory containing versioned schema",
},
},
Action: func(c *cli.Context) {
cliHandler(c, updateSchema)
},
},
{
Name: "create-keyspace",
Aliases: []string{"create", "create-Keyspace"},
Usage: "creates a keyspace with simple strategy or network topology if datacenter name is provided",
Flags: []cli.Flag{
cli.StringFlag{
Name: schema.CLIFlagKeyspace,
Usage: "name of the keyspace",
},
cli.IntFlag{
Name: schema.CLIFlagReplicationFactor,
Value: 1,
Usage: "replication factor for the keyspace",
},
cli.StringFlag{
Name: schema.CLIFlagDatacenter,
Value: "",
Usage: "enable NetworkTopologyStrategy by providing datacenter name",
},
},
Action: func(c *cli.Context) {
cliHandler(c, createKeyspace)
},
},
{
Name: "drop-keyspace",
Aliases: []string{"drop"},
Usage: "drops a keyspace with simple strategy or network topology if datacenter name is provided",
Flags: []cli.Flag{
cli.StringFlag{
Name: schema.CLIFlagKeyspace,
Usage: "name of the keyspace",
},
cli.IntFlag{
Name: schema.CLIFlagReplicationFactor,
Value: 1,
Usage: "replication factor for the keyspace",
},
cli.StringFlag{
Name: schema.CLIFlagDatacenter,
Value: "",
Usage: "enable NetworkTopologyStrategy by providing datacenter name",
},
cli.BoolFlag{
Name: schema.CLIFlagForce,
Usage: "don't prompt for confirmation",
},
},
Action: func(c *cli.Context) {
drop := c.Bool(schema.CLIOptForce)
if !drop {
keyspace := c.String(schema.CLIOptKeyspace)
fmt.Printf("Are you sure you want to drop keyspace %q (y/N)? ", keyspace)
y := ""
_, _ = fmt.Scanln(&y)
if y == "y" || y == "Y" {
drop = true
}
}
if drop {
cliHandler(c, dropKeyspace)
}
},
},
{
Name: "validate-health",
Aliases: []string{"vh"},
Usage: "validates health of cassandra by attempting to establish CQL session to system keyspace",
Action: func(c *cli.Context) {
cliHandler(c, validateHealth)
},
},
}
return app
}
| 1 | 11,626 | [Nit] can you change the Usage wording to indicate that the flag is used to opt-out of server certificate verification? (e.g. "disables validation of the Cassandra cluster's server certificate.") | temporalio-temporal | go |
@@ -14,6 +14,8 @@
package zipkin.storage.elasticsearch;
import com.google.common.base.Throwables;
+import java.io.IOException;
+import java.util.List;
import zipkin.DependencyLink;
import zipkin.Span;
import zipkin.internal.MergeById; | 1 | /**
* Copyright 2015-2016 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.storage.elasticsearch;
import com.google.common.base.Throwables;
import zipkin.DependencyLink;
import zipkin.Span;
import zipkin.internal.MergeById;
import zipkin.internal.Util;
import zipkin.storage.DependenciesTest;
import zipkin.storage.InMemorySpanStore;
import zipkin.storage.InMemoryStorage;
import zipkin.storage.elasticsearch.http.HttpElasticsearchDependencyWriter;
import java.io.IOException;
import java.util.List;
import static zipkin.TestObjects.DAY;
import static zipkin.TestObjects.TODAY;
import static zipkin.internal.Util.midnightUTC;
public abstract class ElasticsearchDependenciesTest extends DependenciesTest {
protected abstract ElasticsearchStorage storage();
@Override public void clear() throws IOException {
storage().clear();
}
/**
* The current implementation does not include dependency aggregation. It includes retrieval of
* pre-aggregated links.
*
* <p>This uses {@link InMemorySpanStore} to prepare links and {@link #writeDependencyLinks(List,
* long)} to store them.
*/
@Override public void processDependencies(List<Span> spans) {
InMemoryStorage mem = new InMemoryStorage();
mem.spanConsumer().accept(spans);
List<DependencyLink> links = mem.spanStore().getDependencies(TODAY + DAY, null);
// This gets or derives a timestamp from the spans
long midnight = midnightUTC(MergeById.apply(spans).get(0).timestamp / 1000);
writeDependencyLinks(links, midnight);
}
protected void writeDependencyLinks(List<DependencyLink> links, long timestampMillis) {
long midnight = Util.midnightUTC(timestampMillis);
String index = storage().indexNameFormatter.indexNameForTimestamp(midnight);
try {
HttpElasticsearchDependencyWriter.writeDependencyLinks(storage().client(), links, index,
ElasticsearchConstants.DEPENDENCY_LINK);
} catch (Exception ex) {
throw Throwables.propagate(ex);
}
}
}
| 1 | 11,977 | Sorry :) Would be really helpful to integrate something like Eclipse Code Formatter, so it will fail if the code style is broken (the same as eslint fails on the frontend) I use IntelliJ IDEA and their vision of imports is a bit different :) | openzipkin-zipkin | java |
@@ -362,7 +362,17 @@ module Beaker
block_on host do |host|
if host['platform'] =~ /centos|el-|redhat|fedora/
logger.debug("Disabling iptables on #{host.name}")
- host.exec(Command.new("sudo su -c \"/etc/init.d/iptables stop\""), {:pty => true})
+ if host.exec(Command.new('whoami')).stdout.chomp =~ /root/ #don't need to sudo if we are root
+ if host.check_for_command('iptables') #is iptables even installed?
+ host.exec(Command.new("/etc/init.d/iptables stop"))
+ else
+ logger.warn("iptables not installed on #{host.name} (#{host['platform']})")
+ end
+ else
+ if host.exec(Command.new("sudo su -c \"/etc/init.d/iptables stop\""), {:pty => true, :acceptable_exit_codes => (0..255)}).exit_code != '0'
+ logger.warn("failed to disable iptables on #{host.name} (#{host['platform']})")
+ end
+ end
else
logger.warn("Attempting to disable iptables on non-supported platform: #{host.name}: #{host['platform']}")
end | 1 | [ 'command', "dsl/patterns" ].each do |lib|
require "beaker/#{lib}"
end
module Beaker
#Provides convenience methods for commonly run actions on hosts
module HostPrebuiltSteps
include Beaker::DSL::Patterns
NTPSERVER = 'pool.ntp.org'
SLEEPWAIT = 5
TRIES = 5
UNIX_PACKAGES = ['curl', 'ntpdate']
WINDOWS_PACKAGES = ['curl']
SLES_PACKAGES = ['curl', 'ntp']
DEBIAN_PACKAGES = ['curl', 'ntpdate', 'lsb-release']
ETC_HOSTS_PATH = "/etc/hosts"
ETC_HOSTS_PATH_SOLARIS = "/etc/inet/hosts"
ROOT_KEYS_SCRIPT = "https://raw.githubusercontent.com/puppetlabs/puppetlabs-sshkeys/master/templates/scripts/manage_root_authorized_keys"
ROOT_KEYS_SYNC_CMD = "curl -k -o - -L #{ROOT_KEYS_SCRIPT} | %s"
APT_CFG = %q{ Acquire::http::Proxy "http://proxy.puppetlabs.net:3128/"; }
IPS_PKG_REPO="http://solaris-11-internal-repo.delivery.puppetlabs.net"
#Run timesync on the provided hosts
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def timesync host, opts
logger = opts[:logger]
ntp_server = opts[:ntp_server] ? opts[:ntp_server] : NTPSERVER
block_on host do |host|
logger.notify "Update system time sync for '#{host.name}'"
if host['platform'].include? 'windows'
# The exit code of 5 is for Windows 2008 systems where the w32tm /register command
# is not actually necessary.
host.exec(Command.new("w32tm /register"), :acceptable_exit_codes => [0,5])
host.exec(Command.new("net start w32time"), :acceptable_exit_codes => [0,2])
host.exec(Command.new("w32tm /config /manualpeerlist:#{ntp_server} /syncfromflags:manual /update"))
host.exec(Command.new("w32tm /resync"))
logger.notify "NTP date succeeded on #{host}"
else
case
when host['platform'] =~ /sles-/
ntp_command = "sntp #{ntp_server}"
else
ntp_command = "ntpdate -t 20 #{ntp_server}"
end
success=false
try = 0
until try >= TRIES do
try += 1
if host.exec(Command.new(ntp_command), :acceptable_exit_codes => (0..255)).exit_code == 0
success=true
break
end
sleep SLEEPWAIT
end
if success
logger.notify "NTP date succeeded on #{host} after #{try} tries"
else
raise "NTP date was not successful after #{try} tries"
end
end
end
rescue => e
report_and_raise(logger, e, "timesync (--ntp)")
end
#Validate that hosts are prepared to be used as SUTs; if packages are missing attempt to
#install them. Verifies the presence of {HostPrebuiltSteps::UNIX_PACKAGES} on unix platform hosts,
#{HostPrebuiltSteps::SLES_PACKAGES} on SUSE platform hosts, {HostPrebuiltSteps::DEBIAN_PACKAGES} on debian platform
#hosts and {HostPrebuiltSteps::WINDOWS_PACKAGES} on windows
#platforms.
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def validate_host host, opts
logger = opts[:logger]
block_on host do |host|
case
when host['platform'] =~ /sles-/
SLES_PACKAGES.each do |pkg|
if not host.check_for_package pkg
host.install_package pkg
end
end
when host['platform'] =~ /debian/
DEBIAN_PACKAGES.each do |pkg|
if not host.check_for_package pkg
host.install_package pkg
end
end
when host['platform'] =~ /windows/
WINDOWS_PACKAGES.each do |pkg|
if not host.check_for_package pkg
host.install_package pkg
end
end
when host['platform'] !~ /debian|aix|solaris|windows|sles-|osx-/
UNIX_PACKAGES.each do |pkg|
if not host.check_for_package pkg
host.install_package pkg
end
end
end
end
rescue => e
report_and_raise(logger, e, "validate")
end
#Install a set of authorized keys using {HostPrebuiltSteps::ROOT_KEYS_SCRIPT}. This is a
#convenience method to allow for easy login to hosts after they have been provisioned with
#Beaker.
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def sync_root_keys host, opts
# JJM This step runs on every system under test right now. We're anticipating
# issues on Windows and maybe Solaris. We will likely need to filter this step
# but we're deliberately taking the approach of "assume it will work, fix it
# when reality dictates otherwise"
logger = opts[:logger]
block_on host do |host|
logger.notify "Sync root authorized_keys from github on #{host.name}"
# Allow all exit code, as this operation is unlikely to cause problems if it fails.
if host['platform'].include? 'solaris'
host.exec(Command.new(ROOT_KEYS_SYNC_CMD % "bash"), :acceptable_exit_codes => (0..255))
else
host.exec(Command.new(ROOT_KEYS_SYNC_CMD % "env PATH=/usr/gnu/bin:$PATH bash"), :acceptable_exit_codes => (0..255))
end
end
rescue => e
report_and_raise(logger, e, "sync_root_keys")
end
#Determine the Extra Packages for Enterprise Linux URL for the provided Enterprise Linux host.
# @param [Host, Array<Host>] host One host to act on. Will use host epel_url, epel_arch and epel_pkg
# before using defaults provided in opts.
# @return [String, String, String] The URL, arch and package name for EPEL for the provided host
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [String] :epel_url Link to download
# @option opts [String] :epel_arch Architecture to download (i386, x86_64, etc), defaults to i386
# @option opts [String] :epel_6_pkg Package to download from provided link for el-6
# @option opts [String] :epel_5_pkg Package to download from provided link for el-5
# @raise [Exception] Raises an error if the provided host's platform != /el-(5|6)/
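# @example (illustrative, not from the original source) an el-6 host with no per-host
#   overrides returns ["#{opts[:epel_url]}/6", opts[:epel_arch] || 'i386', opts[:epel_6_pkg]]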
def epel_info_for host, opts
version = host['platform'].version
if not version
raise "epel_info_for not available for #{host.name} on platform #{host['platform']}"
end
if version == '6'
url = "#{host[:epel_url] || opts[:epel_url]}/#{version}"
pkg = host[:epel_pkg] || opts[:epel_6_pkg]
elsif version == '5'
url = "#{host[:epel_url] || opts[:epel_url]}/#{version}"
pkg = host[:epel_pkg] || opts[:epel_5_pkg]
else
raise "epel_info_for does not support el version #{version}, on #{host.name}"
end
return url, host[:epel_arch] || opts[:epel_arch] || 'i386', pkg
end
#Run 'apt-get update' on the provided host or hosts. If the platform of the provided host is not
#ubuntu or debian do nothing.
# @param [Host, Array<Host>] hosts One or more hosts to act upon
def apt_get_update hosts
block_on hosts do |host|
if host[:platform] =~ /(ubuntu)|(debian)/
host.exec(Command.new("apt-get update"))
end
end
end
#Create a file on host or hosts at the provided file path with the provided file contents.
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [String] file_path The path at which the new file will be created on the host or hosts.
# @param [String] file_content The contents of the file to be created on the host or hosts.
def copy_file_to_remote(host, file_path, file_content)
block_on host do |host|
Tempfile.open 'beaker' do |tempfile|
File.open(tempfile.path, 'w') {|file| file.puts file_content }
host.do_scp_to(tempfile.path, file_path, @options)
end
end
end
#Alter apt configuration on ubuntu and debian host or hosts to internal Puppet Labs
# proxy {HostPrebuiltSteps::APT_CFG} proxy, alter pkg on solaris-11 host or hosts
    # to point to the internal Puppet Labs proxy {HostPrebuiltSteps::IPS_PKG_REPO}. Do nothing
# on non-ubuntu, debian or solaris-11 platform host or hosts.
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def proxy_config( host, opts )
# repo_proxy
# supports ubuntu, debian and solaris platforms
logger = opts[:logger]
block_on host do |host|
case
when host['platform'] =~ /ubuntu/
host.exec(Command.new("if test -f /etc/apt/apt.conf; then mv /etc/apt/apt.conf /etc/apt/apt.conf.bk; fi"))
copy_file_to_remote(host, '/etc/apt/apt.conf', APT_CFG)
apt_get_update(host)
when host['platform'] =~ /debian/
host.exec(Command.new("if test -f /etc/apt/apt.conf; then mv /etc/apt/apt.conf /etc/apt/apt.conf.bk; fi"))
copy_file_to_remote(host, '/etc/apt/apt.conf', APT_CFG)
apt_get_update(host)
when host['platform'] =~ /solaris-11/
host.exec(Command.new("/usr/bin/pkg unset-publisher solaris || :"))
host.exec(Command.new("/usr/bin/pkg set-publisher -g %s solaris" % IPS_PKG_REPO))
else
logger.debug "#{host}: repo proxy configuration not modified"
end
end
rescue => e
report_and_raise(logger, e, "proxy_config")
end
#Install EPEL on host or hosts with platform = /el-(5|6)/. Do nothing on host or hosts of other platforms.
# @param [Host, Array<Host>] host One or more hosts to act upon. Will use individual host epel_url, epel_arch
# and epel_pkg before using defaults provided in opts.
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Boolean] :debug If true, print verbose rpm information when installing EPEL
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# @option opts [String] :epel_url Link to download from
# @option opts [String] :epel_arch Architecture of epel to download (i386, x86_64, etc)
# @option opts [String] :epel_6_pkg Package to download from provided link for el-6
# @option opts [String] :epel_5_pkg Package to download from provided link for el-5
def add_el_extras( host, opts )
#add_el_extras
#only supports el-* platforms
logger = opts[:logger]
debug_opt = opts[:debug] ? 'vh' : ''
block_on host do |host|
case
when host['platform'] =~ /el-(5|6)/
result = host.exec(Command.new('rpm -qa | grep epel-release'), :acceptable_exit_codes => [0,1])
if result.exit_code == 1
url, arch, pkg = epel_info_for host, opts
host.exec(Command.new("rpm -i#{debug_opt} #{url}/#{arch}/#{pkg}"))
#update /etc/yum.repos.d/epel.repo for new baseurl
host.exec(Command.new("sed -i -e 's;#baseurl.*$;baseurl=#{Regexp.escape(url)}/\$basearch;' /etc/yum.repos.d/epel.repo"))
#remove mirrorlist
host.exec(Command.new("sed -i -e '/mirrorlist/d' /etc/yum.repos.d/epel.repo"))
host.exec(Command.new('yum clean all && yum makecache'))
end
else
logger.debug "#{host}: package repo configuration not modified"
end
end
rescue => e
report_and_raise(logger, e, "add_repos")
end
#Determine the domain name of the provided host from its /etc/resolv.conf
# @param [Host] host the host to act upon
def get_domain_name(host)
domain = nil
search = nil
resolv_conf = host.exec(Command.new("cat /etc/resolv.conf")).stdout
resolv_conf.each_line { |line|
if line =~ /^\s*domain\s+(\S+)/
domain = $1
elsif line =~ /^\s*search\s+(\S+)/
search = $1
end
}
return domain if domain
return search if search
end
#Determine the ip address of the provided host
# @param [Host] host the host to act upon
# @deprecated use {Host#get_ip}
def get_ip(host)
host.get_ip
end
    #Set the contents of the /etc/hosts file on the provided host to the provided string
    # @param [Host] host the host to act upon
    # @param [String] etc_hosts The full contents to write to the /etc/hosts file
def set_etc_hosts(host, etc_hosts)
host.exec(Command.new("echo '#{etc_hosts}' > /etc/hosts"))
end
    #Make it possible to log in as root by copying the current user's ssh keys to the root account
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def copy_ssh_to_root host, opts
logger = opts[:logger]
block_on host do |host|
logger.debug "Give root a copy of current user's keys, on #{host.name}"
if host['platform'] =~ /windows/
host.exec(Command.new('cp -r .ssh /cygdrive/c/Users/Administrator/.'))
host.exec(Command.new('chown -R Administrator /cygdrive/c/Users/Administrator/.ssh'))
else
host.exec(Command.new('sudo su -c "cp -r .ssh /root/."'), {:pty => true})
end
end
end
#Update /etc/hosts to make it possible for each provided host to reach each other host by name.
#Assumes that each provided host has host[:ip] set.
# @param [Host, Array<Host>] hosts An array of hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def hack_etc_hosts hosts, opts
etc_hosts = "127.0.0.1\tlocalhost localhost.localdomain\n"
hosts.each do |host|
etc_hosts += "#{host['ip'].to_s}\t#{host[:vmhostname] || host.name}\n"
end
hosts.each do |host|
set_etc_hosts(host, etc_hosts)
end
end
    #Update sshd_config on debian, ubuntu, centos, el, redhat and fedora boxes to allow for root login, does nothing on other platforms
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def enable_root_login host, opts
logger = opts[:logger]
block_on host do |host|
logger.debug "Update /etc/ssh/sshd_config to allow root login"
        host.exec(Command.new("sudo su -c \"sed -i 's/PermitRootLogin no/PermitRootLogin yes/' /etc/ssh/sshd_config\""), {:pty => true})
#restart sshd
if host['platform'] =~ /debian|ubuntu/
host.exec(Command.new("sudo su -c \"service ssh restart\""), {:pty => true})
elsif host['platform'] =~ /centos|el-|redhat|fedora/
host.exec(Command.new("sudo su -c \"service sshd restart\""), {:pty => true})
else
          logger.warn("Attempting to update ssh on non-supported platform: #{host.name}: #{host['platform']}")
end
end
end
#Disable SELinux on centos, does nothing on other platforms
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def disable_se_linux host, opts
logger = opts[:logger]
block_on host do |host|
if host['platform'] =~ /centos|el-|redhat|fedora/
          logger.debug("Disabling se_linux on #{host.name}")
host.exec(Command.new("sudo su -c \"setenforce 0\""), {:pty => true})
else
          logger.warn("Attempting to disable SELinux on non-supported platform: #{host.name}: #{host['platform']}")
end
end
end
#Disable iptables on centos, does nothing on other platforms
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def disable_iptables host, opts
logger = opts[:logger]
block_on host do |host|
if host['platform'] =~ /centos|el-|redhat|fedora/
logger.debug("Disabling iptables on #{host.name}")
host.exec(Command.new("sudo su -c \"/etc/init.d/iptables stop\""), {:pty => true})
else
logger.warn("Attempting to disable iptables on non-supported platform: #{host.name}: #{host['platform']}")
end
end
end
# Setup files for enabling requests to pass to a proxy server
# This works for the APT package manager on debian and ubuntu
# and YUM package manager on el, centos, fedora and redhat.
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def package_proxy host, opts
logger = opts[:logger]
block_on host do |host|
logger.debug("enabling proxy support on #{host.name}")
case host['platform']
when /ubuntu/, /debian/
host.exec(Command.new("echo 'Acquire::http::Proxy \"#{opts[:package_proxy]}/\";' >> /etc/apt/apt.conf.d/10proxy"))
when /^el-/, /centos/, /fedora/, /redhat/
host.exec(Command.new("echo 'proxy=#{opts[:package_proxy]}/' >> /etc/yum.conf"))
else
logger.debug("Attempting to enable package manager proxy support on non-supported platform: #{host.name}: #{host['platform']}")
end
end
end
end
end
| 1 | 7,499 | this will still fail on systemd | voxpupuli-beaker | rb |
@@ -3760,6 +3760,8 @@ class RDSConnection(AWSQueryConnection):
path='/', params=params)
body = response.read()
boto.log.debug(body)
+ if type(body) == bytes:
+ body = body.decode('utf-8')
if response.status == 200:
return json.loads(body)
else: | 1 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.rds2 import exceptions
from boto.compat import json
class RDSConnection(AWSQueryConnection):
"""
Amazon Relational Database Service
Amazon Relational Database Service (Amazon RDS) is a web service
that makes it easier to set up, operate, and scale a relational
database in the cloud. It provides cost-efficient, resizable
capacity for an industry-standard relational database and manages
common database administration tasks, freeing up developers to
focus on what makes their applications and businesses unique.
Amazon RDS gives you access to the capabilities of a familiar
MySQL or Oracle database server. This means the code,
applications, and tools you already use today with your existing
MySQL or Oracle databases work with Amazon RDS without
modification. Amazon RDS automatically backs up your database and
maintains the database software that powers your DB instance.
Amazon RDS is flexible: you can scale your database instance's
compute resources and storage capacity to meet your application's
demand. As with all Amazon Web Services, there are no up-front
investments, and you pay only for the resources you use.
This is the Amazon RDS API Reference . It contains a comprehensive
description of all Amazon RDS Query APIs and data types. Note that
this API is asynchronous and some actions may require polling to
determine when an action has been applied. See the parameter
description to determine if a change is applied immediately or on
the next instance reboot or during the maintenance window. For
more information on Amazon RDS concepts and usage scenarios, go to
the `Amazon RDS User Guide`_.
"""
APIVersion = "2013-09-09"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"InvalidSubnet": exceptions.InvalidSubnet,
"DBParameterGroupQuotaExceeded": exceptions.DBParameterGroupQuotaExceeded,
"DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists,
"DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded,
"InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded,
"InvalidRestore": exceptions.InvalidRestore,
"InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState,
"AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded,
"DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists,
"InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity,
"ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded,
"DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound,
"DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists,
"ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound,
"DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs,
"InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState,
"InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState,
"ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound,
"SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound,
"SNSNoAuthorization": exceptions.SNSNoAuthorization,
"SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded,
"OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded,
"DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound,
"SNSInvalidTopic": exceptions.SNSInvalidTopic,
"InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState,
"DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound,
"InvalidOptionGroupState": exceptions.InvalidOptionGroupState,
"SourceNotFound": exceptions.SourceNotFound,
"SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound,
"EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded,
"DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported,
"InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState,
"InvalidDBSubnetState": exceptions.InvalidDBSubnetState,
"InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState,
"SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist,
"DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded,
"ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ,
"AuthorizationNotFound": exceptions.AuthorizationNotFound,
"OptionGroupAlreadyExists": exceptions.OptionGroupAlreadyExists,
"SubscriptionNotFound": exceptions.SubscriptionNotFound,
"DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure,
"PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled,
"AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists,
"DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded,
"OptionGroupNotFound": exceptions.OptionGroupNotFound,
"DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists,
"DBInstanceNotFound": exceptions.DBInstanceNotFound,
"ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists,
"InvalidDBInstanceState": exceptions.InvalidDBInstanceState,
"DBSnapshotNotFound": exceptions.DBSnapshotNotFound,
"DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists,
"StorageQuotaExceeded": exceptions.StorageQuotaExceeded,
"SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(RDSConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_source_identifier_to_subscription(self, subscription_name,
source_identifier):
"""
Adds a source identifier to an existing RDS event notification
subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to add a source identifier to.
:type source_identifier: string
:param source_identifier:
The identifier of the event source to be added. An identifier must
begin with a letter and must contain only ASCII letters, digits,
and hyphens; it cannot end with a hyphen or contain two consecutive
hyphens.
Constraints:
+ If the source type is a DB instance, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is a DB security group, a `DBSecurityGroupName`
must be supplied.
+ If the source type is a DB parameter group, a `DBParameterGroupName`
must be supplied.
+ If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
supplied.
"""
params = {
'SubscriptionName': subscription_name,
'SourceIdentifier': source_identifier,
}
return self._make_request(
action='AddSourceIdentifierToSubscription',
verb='POST',
path='/', params=params)
def add_tags_to_resource(self, resource_name, tags):
"""
Adds metadata tags to an Amazon RDS resource. These tags can
also be used with cost allocation reporting to track cost
associated with Amazon RDS resources, or used in Condition
statement in IAM policy for Amazon RDS.
For an overview on tagging Amazon RDS resources, see `Tagging
Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource the tags will be added
to. This value is an Amazon Resource Name (ARN). For information
about creating an ARN, see ` Constructing an RDS Amazon Resource
Name (ARN)`_.
:type tags: list
:param tags: The tags to be assigned to the Amazon RDS resource.
"""
params = {'ResourceName': resource_name, }
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='AddTagsToResource',
verb='POST',
path='/', params=params)
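    # --- Illustrative usage sketch (not part of the original boto source). ---
    # The ARN and tag values below are hypothetical placeholders; the call
    # maps directly onto the AddTagsToResource action defined above.
    #
    #   conn = RDSConnection(aws_access_key_id='...',
    #                        aws_secret_access_key='...')
    #   conn.add_tags_to_resource(
    #       'arn:aws:rds:us-east-1:123456789012:db:mydbinstance',
    #       [('environment', 'staging'), ('team', 'data')])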
def authorize_db_security_group_ingress(self, db_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_id=None,
ec2_security_group_owner_id=None):
"""
Enables ingress to a DBSecurityGroup using one of two forms of
authorization. First, EC2 or VPC security groups can be added
to the DBSecurityGroup if the application using the database
is running on EC2 or VPC instances. Second, IP ranges are
available if the application accessing your database is
running on the Internet. Required parameters for this API are
one of CIDR range, EC2SecurityGroupId for VPC, or
(EC2SecurityGroupOwnerId and either EC2SecurityGroupName or
EC2SecurityGroupId for non-VPC).
You cannot authorize ingress from an EC2 security group in one
Region to an Amazon RDS DB instance in another. You cannot
authorize ingress from a VPC security group in one VPC to an
Amazon RDS DB instance in another.
For an overview of CIDR ranges, go to the `Wikipedia
Tutorial`_.
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to add
authorization to.
:type cidrip: string
:param cidrip: The IP range to authorize.
:type ec2_security_group_name: string
:param ec2_security_group_name: Name of the EC2 security group to
authorize. For VPC DB security groups, `EC2SecurityGroupId` must be
provided. Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
:type ec2_security_group_id: string
:param ec2_security_group_id: Id of the EC2 security group to
authorize. For VPC DB security groups, `EC2SecurityGroupId` must be
provided. Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: AWS Account Number of the owner of
the EC2 security group specified in the EC2SecurityGroupName
parameter. The AWS Access Key ID is not an acceptable value. For
VPC DB security groups, `EC2SecurityGroupId` must be provided.
Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
"""
params = {'DBSecurityGroupName': db_security_group_name, }
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_id is not None:
params['EC2SecurityGroupId'] = ec2_security_group_id
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='AuthorizeDBSecurityGroupIngress',
verb='POST',
path='/', params=params)
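    # --- Illustrative usage sketch (not part of the original boto source). ---
    # Authorizing a CIDR range is the simplest non-VPC form; the group name
    # and address range below are hypothetical. With `conn` built as in the
    # sketch above:
    #
    #   conn.authorize_db_security_group_ingress('mysecuritygroup',
    #                                            cidrip='203.0.113.0/24')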
def copy_db_snapshot(self, source_db_snapshot_identifier,
target_db_snapshot_identifier, tags=None):
"""
Copies the specified DBSnapshot. The source DBSnapshot must be
in the "available" state.
:type source_db_snapshot_identifier: string
:param source_db_snapshot_identifier: The identifier for the source DB
snapshot.
Constraints:
+ Must be the identifier for a valid system snapshot in the "available"
state.
Example: `rds:mydb-2012-04-02-00-01`
:type target_db_snapshot_identifier: string
:param target_db_snapshot_identifier: The identifier for the copied
snapshot.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-db-snapshot`
:type tags: list
:param tags: A list of tags.
"""
params = {
'SourceDBSnapshotIdentifier': source_db_snapshot_identifier,
'TargetDBSnapshotIdentifier': target_db_snapshot_identifier,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CopyDBSnapshot',
verb='POST',
path='/', params=params)
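    # --- Illustrative usage sketch (not part of the original boto source). ---
    # Copying an automated snapshot to a manual one; both identifiers are
    # hypothetical. With `conn` as above:
    #
    #   conn.copy_db_snapshot('rds:mydb-2012-04-02-00-01',
    #                         'my-db-snapshot',
    #                         tags=[('purpose', 'pre-upgrade')])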
def create_db_instance(self, db_instance_identifier, allocated_storage,
db_instance_class, engine, master_username,
master_user_password, db_name=None,
db_security_groups=None,
vpc_security_group_ids=None,
availability_zone=None, db_subnet_group_name=None,
preferred_maintenance_window=None,
db_parameter_group_name=None,
backup_retention_period=None,
preferred_backup_window=None, port=None,
multi_az=None, engine_version=None,
auto_minor_version_upgrade=None,
license_model=None, iops=None,
option_group_name=None, character_set_name=None,
publicly_accessible=None, tags=None):
"""
Creates a new DB instance.
:type db_name: string
:param db_name: The meaning of this parameter differs according to the
database engine you use.
**MySQL**
The name of the database to create when the DB instance is created. If
this parameter is not specified, no database is created in the DB
instance.
Constraints:
+ Must contain 1 to 64 alphanumeric characters
+ Cannot be a word reserved by the specified database engine
Type: String
**Oracle**
The Oracle System ID (SID) of the created DB instance.
Default: `ORCL`
Constraints:
+ Cannot be longer than 8 characters
**SQL Server**
Not applicable. Must be null.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier. This
parameter is stored as a lowercase string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15
for SQL Server).
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
Example: `mydbinstance`
:type allocated_storage: integer
:param allocated_storage: The amount of storage (in gigabytes) to be
initially allocated for the database instance.
**MySQL**
Constraints: Must be an integer from 5 to 1024.
Type: Integer
**Oracle**
Constraints: Must be an integer from 10 to 1024.
**SQL Server**
Constraints: Must be an integer from 200 to 1024 (Standard Edition and
Enterprise Edition) or from 30 to 1024 (Express Edition and Web
Edition)
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the DB
instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge`
:type engine: string
:param engine: The name of the database engine to be used for this
instance.
Valid Values: `MySQL` | `oracle-se1` | `oracle-se` | `oracle-ee` |
`sqlserver-ee` | `sqlserver-se` | `sqlserver-ex` | `sqlserver-web`
:type master_username: string
:param master_username:
        The name of the master user for the client DB instance.
**MySQL**
Constraints:
+ Must be 1 to 16 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
Type: String
**Oracle**
Constraints:
+ Must be 1 to 30 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
**SQL Server**
Constraints:
+ Must be 1 to 128 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
:type master_user_password: string
:param master_user_password: The password for the master database user.
Can be any printable ASCII character except "/", '"', or "@".
Type: String
**MySQL**
Constraints: Must contain from 8 to 41 characters.
**Oracle**
Constraints: Must contain from 8 to 30 characters.
**SQL Server**
Constraints: Must contain from 8 to 128 characters.
:type db_security_groups: list
:param db_security_groups: A list of DB security groups to associate
with this DB instance.
Default: The default DB security group for the database engine.
:type vpc_security_group_ids: list
:param vpc_security_group_ids: A list of EC2 VPC security groups to
associate with this DB instance.
Default: The default EC2 VPC security group for the DB subnet group's
VPC.
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone in the endpoint's
region.
Example: `us-east-1d`
Constraint: The AvailabilityZone parameter cannot be specified if the
MultiAZ parameter is set to `True`. The specified Availability Zone
must be in the same region as the current endpoint.
:type db_subnet_group_name: string
:param db_subnet_group_name: A DB subnet group to associate with this
DB instance.
If there is no DB subnet group, then it is a non-VPC DB instance.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur.
Format: `ddd:hh24:mi-ddd:hh24:mi`
Default: A 30-minute window selected at random from an 8-hour block of
time per region, occurring on a random day of the week. To see the
time blocks available, see ` Adjusting the Preferred Maintenance
Window`_ in the Amazon RDS User Guide.
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
Constraints: Minimum 30-minute window.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group to associate with this DB instance.
If this argument is omitted, the default DBParameterGroup for the
specified engine will be used.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type backup_retention_period: integer
:param backup_retention_period:
The number of days for which automated backups are retained. Setting
this parameter to a positive number enables backups. Setting this
parameter to 0 disables automated backups.
Default: 1
Constraints:
+ Must be a value from 0 to 8
+ Cannot be set to 0 if the DB instance is a master instance with read
replicas
:type preferred_backup_window: string
:param preferred_backup_window: The daily time range during which
automated backups are created if automated backups are enabled,
using the `BackupRetentionPeriod` parameter.
Default: A 30-minute window selected at random from an 8-hour block of
time per region. See the Amazon RDS User Guide for the time blocks
for each region from which the default backup windows are assigned.
Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be
Universal Time Coordinated (UTC). Must not conflict with the
preferred maintenance window. Must be at least 30 minutes.
:type port: integer
:param port: The port number on which the database accepts connections.
**MySQL**
Default: `3306`
Valid Values: `1150-65535`
Type: Integer
**Oracle**
Default: `1521`
Valid Values: `1150-65535`
**SQL Server**
Default: `1433`
Valid Values: `1150-65535` except for `1434` and `3389`.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
You cannot set the AvailabilityZone parameter if the MultiAZ
parameter is set to true.
:type engine_version: string
:param engine_version: The version number of the database engine to
use.
**MySQL**
Example: `5.1.42`
Type: String
**Oracle**
Example: `11.2.0.2.v2`
Type: String
**SQL Server**
Example: `10.50.2789.0.v1`
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor engine upgrades
will be applied automatically to the DB instance during the
maintenance window.
Default: `True`
:type license_model: string
:param license_model: License model information for this DB instance.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: Indicates that the DB instance should be
associated with the specified option group.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type character_set_name: string
:param character_set_name: For supported engines, indicates that the DB
instance should be associated with the specified CharacterSet.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:**true
+ **VPC:**false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'AllocatedStorage': allocated_storage,
'DBInstanceClass': db_instance_class,
'Engine': engine,
'MasterUsername': master_username,
'MasterUserPassword': master_user_password,
}
if db_name is not None:
params['DBName'] = db_name
if db_security_groups is not None:
self.build_list_params(params,
db_security_groups,
'DBSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
if port is not None:
params['Port'] = port
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if character_set_name is not None:
params['CharacterSetName'] = character_set_name
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBInstance',
verb='POST',
path='/', params=params)
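    # --- Illustrative usage sketch (not part of the original boto source). ---
    # Provisioning a small MySQL instance; the identifier, credentials and
    # security group name are hypothetical placeholders. The positional
    # arguments follow the signature above: identifier, storage (GB),
    # instance class, engine, master username, master password.
    #
    #   conn.create_db_instance('mydbinstance', 5, 'db.t1.micro', 'MySQL',
    #                           'master', 'secret-password',
    #                           db_security_groups=['mysecuritygroup'],
    #                           backup_retention_period=1,
    #                           multi_az=False)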
def create_db_instance_read_replica(self, db_instance_identifier,
source_db_instance_identifier,
db_instance_class=None,
availability_zone=None, port=None,
auto_minor_version_upgrade=None,
iops=None, option_group_name=None,
publicly_accessible=None, tags=None):
"""
Creates a DB instance that acts as a read replica of a source
DB instance.
All read replica DB instances are created as Single-AZ
deployments with backups disabled. All other DB instance
attributes (including DB security groups and DB parameter
groups) are inherited from the source DB instance, except as
specified below.
The source DB instance must have backup retention enabled.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier of the read
replica. This is the unique key that identifies a DB instance. This
parameter is stored as a lowercase string.
:type source_db_instance_identifier: string
:param source_db_instance_identifier: The identifier of the DB instance
that will act as the source for the read replica. Each DB instance
can have up to five read replicas.
Constraints: Must be the identifier of an existing DB instance that is
not already a read replica DB instance.
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the read
replica.
Valid Values: `db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge
| db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge`
Default: Inherits from the source DB instance.
:type availability_zone: string
:param availability_zone: The Amazon EC2 Availability Zone that the
read replica will be created in.
Default: A random, system-chosen Availability Zone in the endpoint's
region.
Example: `us-east-1d`
:type port: integer
:param port: The port number that the DB instance uses for connections.
Default: Inherits from the source DB instance
Valid Values: `1150-65535`
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor engine upgrades
will be applied automatically to the read replica during the
maintenance window.
Default: Inherits from the source DB instance
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
:type option_group_name: string
:param option_group_name: The option group the DB instance will be
associated with. If omitted, the default option group for the
engine specified will be used.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:**true
+ **VPC:**false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'SourceDBInstanceIdentifier': source_db_instance_identifier,
}
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if port is not None:
params['Port'] = port
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBInstanceReadReplica',
verb='POST',
path='/', params=params)
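    # --- Illustrative usage sketch (not part of the original boto source). ---
    # Creating a read replica of an existing instance (the source must have
    # backup retention enabled); identifiers are hypothetical. With `conn`
    # as above:
    #
    #   conn.create_db_instance_read_replica('mydbinstance-replica1',
    #                                        'mydbinstance',
    #                                        availability_zone='us-east-1d')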
def create_db_parameter_group(self, db_parameter_group_name,
db_parameter_group_family, description,
tags=None):
"""
Creates a new DB parameter group.
A DB parameter group is initially created with the default
parameters for the database engine used by the DB instance. To
provide custom values for any of the parameters, you must
modify the group after creating it using
ModifyDBParameterGroup . Once you've created a DB parameter
group, you need to associate it with your DB instance using
ModifyDBInstance . When you associate a new DB parameter group
with a running DB instance, you need to reboot the DB Instance
for the new DB parameter group and associated settings to take
effect.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
This value is stored as a lower-case string.
:type db_parameter_group_family: string
:param db_parameter_group_family: The DB parameter group family name. A
DB parameter group can be associated with one and only one DB
parameter group family, and can be applied only to a DB instance
running a database engine and engine version compatible with that
DB parameter group family.
:type description: string
:param description: The description for the DB parameter group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBParameterGroupName': db_parameter_group_name,
'DBParameterGroupFamily': db_parameter_group_family,
'Description': description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBParameterGroup',
verb='POST',
path='/', params=params)
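    # --- Illustrative usage sketch (not part of the original boto source). ---
    # Creating a custom parameter group; the group name is a hypothetical
    # placeholder and the family string is assumed to be one the service
    # accepts (for example mysql5.6). With `conn` as above:
    #
    #   conn.create_db_parameter_group('my-mysql56-params', 'mysql5.6',
    #                                  'Tuned parameters for my application')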
def create_db_security_group(self, db_security_group_name,
db_security_group_description, tags=None):
"""
Creates a new DB security group. DB security groups control
access to a DB instance.
:type db_security_group_name: string
:param db_security_group_name: The name for the DB security group. This
value is stored as a lowercase string.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ Must not be "Default"
+ May not contain spaces
Example: `mysecuritygroup`
:type db_security_group_description: string
:param db_security_group_description: The description for the DB
security group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBSecurityGroupName': db_security_group_name,
'DBSecurityGroupDescription': db_security_group_description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSecurityGroup',
verb='POST',
path='/', params=params)
def create_db_snapshot(self, db_snapshot_identifier,
db_instance_identifier, tags=None):
"""
Creates a DBSnapshot. The source DBInstance must be in
"available" state.
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The identifier for the DB snapshot.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This is the unique key that identifies a DB
instance. This parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBSnapshotIdentifier': db_snapshot_identifier,
'DBInstanceIdentifier': db_instance_identifier,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSnapshot',
verb='POST',
path='/', params=params)
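    # --- Illustrative usage sketch (not part of the original boto source). ---
    # Taking a manual snapshot of a running instance; both identifiers are
    # hypothetical. With `conn` as above:
    #
    #   conn.create_db_snapshot('my-snapshot-id', 'mydbinstance')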
def create_db_subnet_group(self, db_subnet_group_name,
db_subnet_group_description, subnet_ids,
tags=None):
"""
Creates a new DB subnet group. DB subnet groups must contain
at least one subnet in at least two AZs in the region.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name for the DB subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens. Must not be "Default".
Example: `mySubnetgroup`
:type db_subnet_group_description: string
:param db_subnet_group_description: The description for the DB subnet
group.
:type subnet_ids: list
:param subnet_ids: The EC2 Subnet IDs for the DB subnet group.
:type tags: list
        :param tags: A list of tags.
"""
params = {
'DBSubnetGroupName': db_subnet_group_name,
'DBSubnetGroupDescription': db_subnet_group_description,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSubnetGroup',
verb='POST',
path='/', params=params)
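    # --- Illustrative usage sketch (not part of the original boto source). ---
    # A DB subnet group needs subnets in at least two Availability Zones; the
    # subnet IDs below are hypothetical. With `conn` as above:
    #
    #   conn.create_db_subnet_group('mySubnetgroup',
    #                               'Subnets for my VPC database',
    #                               ['subnet-0a1b2c3d', 'subnet-4e5f6a7b'])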
def create_event_subscription(self, subscription_name, sns_topic_arn,
source_type=None, event_categories=None,
source_ids=None, enabled=None, tags=None):
"""
Creates an RDS event notification subscription. This action
requires a topic ARN (Amazon Resource Name) created by either
the RDS console, the SNS console, or the SNS API. To obtain an
ARN with SNS, you must create a topic in Amazon SNS and
subscribe to the topic. The ARN is displayed in the SNS
console.
You can specify the type of source (SourceType) you want to be
notified of, provide a list of RDS sources (SourceIds) that
triggers the events, and provide a list of event categories
(EventCategories) for events you want to be notified of. For
example, you can specify SourceType = db-instance, SourceIds =
mydbinstance1, mydbinstance2 and EventCategories =
Availability, Backup.
If you specify both the SourceType and SourceIds, such as
SourceType = db-instance and SourceIdentifier = myDBInstance1,
you will be notified of all the db-instance events for the
specified source. If you specify a SourceType but do not
specify a SourceIdentifier, you will receive notice of the
events for that source type for all your RDS sources. If you
do not specify either the SourceType nor the SourceIdentifier,
you will be notified of events generated from all RDS sources
belonging to your customer account.
:type subscription_name: string
:param subscription_name: The name of the subscription.
Constraints: The name must be less than 255 characters.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
created for event notification. The ARN is created by Amazon SNS
when you create a topic and subscribe to it.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
            by a DB instance, you would set this parameter to db-instance. If
this value is not specified, all events are returned.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
:type event_categories: list
:param event_categories: A list of event categories for a SourceType
that you want to subscribe to. You can see a list of the categories
for a given SourceType in the `Events`_ topic in the Amazon RDS
User Guide or by using the **DescribeEventCategories** action.
:type source_ids: list
:param source_ids:
The list of identifiers of the event sources for which events will be
returned. If not specified, then all sources are included in the
response. An identifier must begin with a letter and must contain
only ASCII letters, digits, and hyphens; it cannot end with a
hyphen or contain two consecutive hyphens.
Constraints:
+ If SourceIds are supplied, SourceType must also be provided.
+ If the source type is a DB instance, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is a DB security group, a `DBSecurityGroupName`
must be supplied.
+ If the source type is a DB parameter group, a `DBParameterGroupName`
must be supplied.
+ If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
supplied.
:type enabled: boolean
:param enabled: A Boolean value; set to **true** to activate the
subscription, set to **false** to create the subscription but not
            activate it.
:type tags: list
:param tags: A list of tags.
"""
params = {
'SubscriptionName': subscription_name,
'SnsTopicArn': sns_topic_arn,
}
if source_type is not None:
params['SourceType'] = source_type
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if source_ids is not None:
self.build_list_params(params,
source_ids,
'SourceIds.member')
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateEventSubscription',
verb='POST',
path='/', params=params)
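    # --- Illustrative usage sketch (not part of the original boto source). ---
    # Subscribing an SNS topic to backup/availability events of one instance;
    # the topic ARN and identifiers are hypothetical. With `conn` as above:
    #
    #   conn.create_event_subscription(
    #       'my-rds-events',
    #       'arn:aws:sns:us-east-1:123456789012:rds-events',
    #       source_type='db-instance',
    #       event_categories=['availability', 'backup'],
    #       source_ids=['mydbinstance'],
    #       enabled=True)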
def create_option_group(self, option_group_name, engine_name,
major_engine_version, option_group_description,
tags=None):
"""
Creates a new option group. You can create up to 20 option
groups.
:type option_group_name: string
:param option_group_name: Specifies the name of the option group to be
created.
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `myoptiongroup`
:type engine_name: string
:param engine_name: Specifies the name of the engine that this option
group should be associated with.
:type major_engine_version: string
:param major_engine_version: Specifies the major version of the engine
that this option group should be associated with.
:type option_group_description: string
:param option_group_description: The description of the option group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'OptionGroupName': option_group_name,
'EngineName': engine_name,
'MajorEngineVersion': major_engine_version,
'OptionGroupDescription': option_group_description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateOptionGroup',
verb='POST',
path='/', params=params)
def delete_db_instance(self, db_instance_identifier,
skip_final_snapshot=None,
final_db_snapshot_identifier=None):
"""
The DeleteDBInstance action deletes a previously provisioned
DB instance. A successful response from the web service
indicates the request was received correctly. When you delete
a DB instance, all automated backups for that instance are
deleted and cannot be recovered. Manual DB snapshots of the DB
instance to be deleted are not deleted.
If a final DB snapshot is requested the status of the RDS
instance will be "deleting" until the DB snapshot is created.
The API action `DescribeDBInstance` is used to monitor the
status of this operation. The action cannot be canceled or
reverted once submitted.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier for the DB instance to be deleted. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type skip_final_snapshot: boolean
:param skip_final_snapshot: Determines whether a final DB snapshot is
created before the DB instance is deleted. If `True` is specified,
no DBSnapshot is created. If false is specified, a DB snapshot is
created before the DB instance is deleted.
The FinalDBSnapshotIdentifier parameter must be specified if
SkipFinalSnapshot is `False`.
Default: `False`
:type final_db_snapshot_identifier: string
:param final_db_snapshot_identifier:
The DBSnapshotIdentifier of the new DBSnapshot created when
SkipFinalSnapshot is set to `False`.
            Specifying this parameter and also setting the SkipFinalSnapshot
parameter to true results in an error.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if skip_final_snapshot is not None:
params['SkipFinalSnapshot'] = str(
skip_final_snapshot).lower()
if final_db_snapshot_identifier is not None:
params['FinalDBSnapshotIdentifier'] = final_db_snapshot_identifier
return self._make_request(
action='DeleteDBInstance',
verb='POST',
path='/', params=params)
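    # --- Illustrative usage sketch (not part of the original boto source). ---
    # Deleting an instance while keeping a final snapshot; identifiers are
    # hypothetical. With `conn` as above:
    #
    #   conn.delete_db_instance('mydbinstance',
    #                           skip_final_snapshot=False,
    #                           final_db_snapshot_identifier='mydbinstance-final')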
def delete_db_parameter_group(self, db_parameter_group_name):
"""
        Deletes a specified DBParameterGroup. The DB parameter group
        to be deleted cannot be associated with any DB instances.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be the name of an existing DB parameter group
+ You cannot delete a default DB parameter group
+ Cannot be associated with any DB instances
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
return self._make_request(
action='DeleteDBParameterGroup',
verb='POST',
path='/', params=params)
def delete_db_security_group(self, db_security_group_name):
"""
Deletes a DB security group.
The specified DB security group must not be associated with
any DB instances.
:type db_security_group_name: string
:param db_security_group_name:
The name of the DB security group to delete.
You cannot delete the default DB security group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ Must not be "Default"
+ May not contain spaces
"""
params = {'DBSecurityGroupName': db_security_group_name, }
return self._make_request(
action='DeleteDBSecurityGroup',
verb='POST',
path='/', params=params)
def delete_db_snapshot(self, db_snapshot_identifier):
"""
Deletes a DBSnapshot.
The DBSnapshot must be in the `available` state to be deleted.
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The DBSnapshot identifier.
Constraints: Must be the name of an existing DB snapshot in the
`available` state.
"""
params = {'DBSnapshotIdentifier': db_snapshot_identifier, }
return self._make_request(
action='DeleteDBSnapshot',
verb='POST',
path='/', params=params)
def delete_db_subnet_group(self, db_subnet_group_name):
"""
Deletes a DB subnet group.
The specified database subnet group must not be associated
with any DB instances.
:type db_subnet_group_name: string
:param db_subnet_group_name:
The name of the database subnet group to delete.
You cannot delete the default subnet group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBSubnetGroupName': db_subnet_group_name, }
return self._make_request(
action='DeleteDBSubnetGroup',
verb='POST',
path='/', params=params)
def delete_event_subscription(self, subscription_name):
"""
Deletes an RDS event notification subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to delete.
"""
params = {'SubscriptionName': subscription_name, }
return self._make_request(
action='DeleteEventSubscription',
verb='POST',
path='/', params=params)
def delete_option_group(self, option_group_name):
"""
Deletes an existing option group.
:type option_group_name: string
:param option_group_name:
The name of the option group to be deleted.
You cannot delete default option groups.
"""
params = {'OptionGroupName': option_group_name, }
return self._make_request(
action='DeleteOptionGroup',
verb='POST',
path='/', params=params)
def describe_db_engine_versions(self, engine=None, engine_version=None,
db_parameter_group_family=None,
max_records=None, marker=None,
default_only=None,
list_supported_character_sets=None):
"""
Returns a list of the available DB engines.
:type engine: string
:param engine: The database engine to return.
:type engine_version: string
:param engine_version: The database engine version to return.
Example: `5.1.49`
:type db_parameter_group_family: string
:param db_parameter_group_family:
The name of a specific DB parameter group family to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
:type default_only: boolean
:param default_only: Indicates that only the default version of the
specified engine or engine and major version combination is
returned.
:type list_supported_character_sets: boolean
:param list_supported_character_sets: If this parameter is specified,
and if the requested engine supports the CharacterSetName parameter
for CreateDBInstance, the response includes a list of supported
character sets for each engine version.
"""
params = {}
if engine is not None:
params['Engine'] = engine
if engine_version is not None:
params['EngineVersion'] = engine_version
if db_parameter_group_family is not None:
params['DBParameterGroupFamily'] = db_parameter_group_family
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if default_only is not None:
params['DefaultOnly'] = str(
default_only).lower()
if list_supported_character_sets is not None:
params['ListSupportedCharacterSets'] = str(
list_supported_character_sets).lower()
return self._make_request(
action='DescribeDBEngineVersions',
verb='POST',
path='/', params=params)
def describe_db_instances(self, db_instance_identifier=None,
filters=None, max_records=None, marker=None):
"""
Returns information about provisioned RDS instances. This API
supports pagination.
:type db_instance_identifier: string
:param db_instance_identifier:
The user-supplied instance identifier. If this parameter is specified,
information from only the specific DB instance is returned. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBInstances request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords` .
"""
params = {}
if db_instance_identifier is not None:
params['DBInstanceIdentifier'] = db_instance_identifier
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBInstances',
verb='POST',
path='/', params=params)
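# Illustrative pagination sketch (comment only), following the MaxRecords/Marker
# contract described above. The key path used to read the next marker from the
# decoded response is an assumption and may differ between boto versions.
#
#     marker = None
#     while True:
#         page = conn.describe_db_instances(max_records=20, marker=marker)
#         result = page['DescribeDBInstancesResponse']['DescribeDBInstancesResult']
#         for instance in result['DBInstances']:
#             print(instance['DBInstanceIdentifier'])
#         marker = result.get('Marker')
#         if not marker:
#             break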
def describe_db_log_files(self, db_instance_identifier,
filename_contains=None, file_last_written=None,
file_size=None, max_records=None, marker=None):
"""
Returns a list of DB log files for the DB instance.
:type db_instance_identifier: string
:param db_instance_identifier:
The customer-assigned name of the DB instance that contains the log
files you want to list.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filename_contains: string
:param filename_contains: Filters the available log files for log file
names that contain the specified string.
:type file_last_written: long
:param file_last_written: Filters the available log files for files
written since the specified date, in POSIX timestamp format.
:type file_size: long
:param file_size: Filters the available log files for files larger than
the specified size.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
:type marker: string
:param marker: The pagination token provided in the previous request.
If this parameter is specified the response includes only records
beyond the marker, up to MaxRecords.
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if filename_contains is not None:
params['FilenameContains'] = filename_contains
if file_last_written is not None:
params['FileLastWritten'] = file_last_written
if file_size is not None:
params['FileSize'] = file_size
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBLogFiles',
verb='POST',
path='/', params=params)
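# Illustrative call (comment only): list error logs larger than 1 KB for an
# example instance named 'mydbinstance'.
#
#     logs = conn.describe_db_log_files('mydbinstance',
#                                       filename_contains='error',
#                                       file_size=1024)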
def describe_db_parameter_groups(self, db_parameter_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of `DBParameterGroup` descriptions. If a
`DBParameterGroupName` is specified, the list will contain
only the description of the specified DB parameter group.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of a specific DB parameter group to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBParameterGroups` request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {}
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBParameterGroups',
verb='POST',
path='/', params=params)
def describe_db_parameters(self, db_parameter_group_name, source=None,
max_records=None, marker=None):
"""
Returns the detailed parameter list for a particular DB
parameter group.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of a specific DB parameter group to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type source: string
:param source: The parameter types to return.
Default: All parameter types returned
Valid Values: `user | system | engine-default`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBParameters` request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
if source is not None:
params['Source'] = source
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBParameters',
verb='POST',
path='/', params=params)
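# Illustrative call (comment only): list only user-modified parameters of a
# hypothetical parameter group named 'mydbparamgroup'.
#
#     user_params = conn.describe_db_parameters('mydbparamgroup', source='user')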
def describe_db_security_groups(self, db_security_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of `DBSecurityGroup` descriptions. If a
`DBSecurityGroupName` is specified, the list will contain only
the descriptions of the specified DB security group.
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to
return details for.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBSecurityGroups request. If this parameter is specified,
the response includes only records beyond the marker, up to the
value specified by `MaxRecords`.
"""
params = {}
if db_security_group_name is not None:
params['DBSecurityGroupName'] = db_security_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSecurityGroups',
verb='POST',
path='/', params=params)
def describe_db_snapshots(self, db_instance_identifier=None,
db_snapshot_identifier=None,
snapshot_type=None, filters=None,
max_records=None, marker=None):
"""
Returns information about DB snapshots. This API supports
pagination.
:type db_instance_identifier: string
:param db_instance_identifier:
A DB instance identifier to retrieve the list of DB snapshots for.
Cannot be used in conjunction with `DBSnapshotIdentifier`. This
parameter is not case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type db_snapshot_identifier: string
:param db_snapshot_identifier:
A specific DB snapshot identifier to describe. Cannot be used in
conjunction with `DBInstanceIdentifier`. This value is stored as a
lowercase string.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ If this is the identifier of an automated snapshot, the
`SnapshotType` parameter must also be specified.
:type snapshot_type: string
:param snapshot_type: The type of snapshots that will be returned.
Values can be "automated" or "manual." If not specified, the
returned results will include all snapshot types.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBSnapshots` request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if db_instance_identifier is not None:
params['DBInstanceIdentifier'] = db_instance_identifier
if db_snapshot_identifier is not None:
params['DBSnapshotIdentifier'] = db_snapshot_identifier
if snapshot_type is not None:
params['SnapshotType'] = snapshot_type
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSnapshots',
verb='POST',
path='/', params=params)
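# Illustrative call (comment only): list the manual snapshots of one example
# instance.
#
#     snaps = conn.describe_db_snapshots(db_instance_identifier='mydbinstance',
#                                        snapshot_type='manual')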
def describe_db_subnet_groups(self, db_subnet_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of DBSubnetGroup descriptions. If a
DBSubnetGroupName is specified, the list will contain only the
descriptions of the specified DBSubnetGroup.
For an overview of CIDR ranges, go to the `Wikipedia
Tutorial`_.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name of the DB subnet group to return
details for.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBSubnetGroups request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSubnetGroups',
verb='POST',
path='/', params=params)
def describe_engine_default_parameters(self, db_parameter_group_family,
max_records=None, marker=None):
"""
Returns the default engine and system parameter information
for the specified database engine.
:type db_parameter_group_family: string
:param db_parameter_group_family: The name of the DB parameter group
family.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeEngineDefaultParameters` request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {
'DBParameterGroupFamily': db_parameter_group_family,
}
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEngineDefaultParameters',
verb='POST',
path='/', params=params)
def describe_event_categories(self, source_type=None):
"""
Displays a list of categories for all event source types, or,
if specified, for a specified source type. You can see a list
of the event categories and source types in the ` Events`_
topic in the Amazon RDS User Guide.
:type source_type: string
:param source_type: The type of source that will be generating the
events.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
"""
params = {}
if source_type is not None:
params['SourceType'] = source_type
return self._make_request(
action='DescribeEventCategories',
verb='POST',
path='/', params=params)
def describe_event_subscriptions(self, subscription_name=None,
filters=None, max_records=None,
marker=None):
"""
Lists all the subscription descriptions for a customer
account. The description for a subscription includes
SubscriptionName, SNSTopicARN, CustomerID, SourceType,
SourceID, CreationTime, and Status.
If you specify a SubscriptionName, lists the description for
that subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to describe.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOrderableDBInstanceOptions request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords` .
"""
params = {}
if subscription_name is not None:
params['SubscriptionName'] = subscription_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEventSubscriptions',
verb='POST',
path='/', params=params)
def describe_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None, duration=None,
event_categories=None, max_records=None, marker=None):
"""
Returns events related to DB instances, DB security groups, DB
snapshots, and DB parameter groups for the past 14 days.
Events specific to a particular DB instance, DB security
group, database snapshot, or DB parameter group can be
obtained by providing the name as a parameter. By default, the
past hour of events are returned.
:type source_identifier: string
:param source_identifier:
The identifier of the event source for which events will be returned.
If not specified, then all sources are included in the response.
Constraints:
+ If SourceIdentifier is supplied, SourceType must also be provided.
+ If the source type is `DBInstance`, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is `DBSecurityGroup`, a `DBSecurityGroupName` must
be supplied.
+ If the source type is `DBParameterGroup`, a `DBParameterGroupName`
must be supplied.
+ If the source type is `DBSnapshot`, a `DBSnapshotIdentifier` must be
supplied.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type source_type: string
:param source_type: The event source to retrieve events for. If no
value is specified, all events are returned.
:type start_time: timestamp
:param start_time: The beginning of the time interval to retrieve
events for, specified in ISO 8601 format. For more information
about ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: 2009-07-08T18:00Z
:type end_time: timestamp
:param end_time: The end of the time interval for which to retrieve
events, specified in ISO 8601 format. For more information about
ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: 2009-07-08T18:00Z
:type duration: integer
:param duration: The number of minutes to retrieve events for.
Default: 60
:type event_categories: list
:param event_categories: A list of event categories that trigger
notifications for an event notification subscription.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeEvents request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if source_identifier is not None:
params['SourceIdentifier'] = source_identifier
if source_type is not None:
params['SourceType'] = source_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if duration is not None:
params['Duration'] = duration
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEvents',
verb='POST',
path='/', params=params)
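# Illustrative call (comment only): backup-related events for one example
# instance over the last 24 hours (duration is expressed in minutes).
#
#     events = conn.describe_events(source_identifier='mydbinstance',
#                                   source_type='db-instance',
#                                   duration=1440,
#                                   event_categories=['backup'])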
def describe_option_group_options(self, engine_name,
major_engine_version=None,
max_records=None, marker=None):
"""
Describes all available options.
:type engine_name: string
:param engine_name: A required parameter. Options available for the
given Engine name will be described.
:type major_engine_version: string
:param major_engine_version: If specified, filters the results to
include only options for the specified major engine version.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {'EngineName': engine_name, }
if major_engine_version is not None:
params['MajorEngineVersion'] = major_engine_version
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOptionGroupOptions',
verb='POST',
path='/', params=params)
def describe_option_groups(self, option_group_name=None, filters=None,
marker=None, max_records=None,
engine_name=None, major_engine_version=None):
"""
Describes the available option groups.
:type option_group_name: string
:param option_group_name: The name of the option group to describe.
Cannot be supplied together with EngineName or MajorEngineVersion.
:type filters: list
:param filters:
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOptionGroups request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type engine_name: string
:param engine_name: Filters the list of option groups to only include
groups associated with a specific database engine.
:type major_engine_version: string
:param major_engine_version: Filters the list of option groups to only
include groups associated with a specific database engine version.
If specified, then EngineName must also be specified.
"""
params = {}
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if marker is not None:
params['Marker'] = marker
if max_records is not None:
params['MaxRecords'] = max_records
if engine_name is not None:
params['EngineName'] = engine_name
if major_engine_version is not None:
params['MajorEngineVersion'] = major_engine_version
return self._make_request(
action='DescribeOptionGroups',
verb='POST',
path='/', params=params)
def describe_orderable_db_instance_options(self, engine,
engine_version=None,
db_instance_class=None,
license_model=None, vpc=None,
max_records=None, marker=None):
"""
Returns a list of orderable DB instance options for the
specified engine.
:type engine: string
:param engine: The name of the engine to retrieve DB instance options
for.
:type engine_version: string
:param engine_version: The engine version filter value. Specify this
parameter to show only the available offerings matching the
specified engine version.
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only the available offerings matching the
specified DB instance class.
:type license_model: string
:param license_model: The license model filter value. Specify this
parameter to show only the available offerings matching the
specified license model.
:type vpc: boolean
:param vpc: The VPC filter value. Specify this parameter to show only
the available VPC or non-VPC offerings.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOrderableDBInstanceOptions request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords` .
"""
params = {'Engine': engine, }
if engine_version is not None:
params['EngineVersion'] = engine_version
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if license_model is not None:
params['LicenseModel'] = license_model
if vpc is not None:
params['Vpc'] = str(
vpc).lower()
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOrderableDBInstanceOptions',
verb='POST',
path='/', params=params)
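# Illustrative call (comment only): VPC offerings for an example engine and
# instance class.
#
#     offerings = conn.describe_orderable_db_instance_options(
#         'mysql', db_instance_class='db.m1.small', vpc=True)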
def describe_reserved_db_instances(self, reserved_db_instance_id=None,
reserved_db_instances_offering_id=None,
db_instance_class=None, duration=None,
product_description=None,
offering_type=None, multi_az=None,
filters=None, max_records=None,
marker=None):
"""
Returns information about reserved DB instances for this
account, or about a specified reserved DB instance.
:type reserved_db_instance_id: string
:param reserved_db_instance_id: The reserved DB instance identifier
filter value. Specify this parameter to show only the reservation
that matches the specified reservation ID.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The offering identifier
filter value. Specify this parameter to show only purchased
reservations matching the specified offering identifier.
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only those reservations matching the
specified DB instances class.
:type duration: string
:param duration: The duration filter value, specified in years or
seconds. Specify this parameter to show only reservations for this
duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: The product description filter value.
Specify this parameter to show only those reservations matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type multi_az: boolean
:param multi_az: The Multi-AZ filter value. Specify this parameter to
show only those reservations matching the specified Multi-AZ
parameter.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {}
if reserved_db_instance_id is not None:
params['ReservedDBInstanceId'] = reserved_db_instance_id
if reserved_db_instances_offering_id is not None:
params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedDBInstances',
verb='POST',
path='/', params=params)
def describe_reserved_db_instances_offerings(self,
reserved_db_instances_offering_id=None,
db_instance_class=None,
duration=None,
product_description=None,
offering_type=None,
multi_az=None,
max_records=None,
marker=None):
"""
Lists available reserved DB instance offerings.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The offering identifier
filter value. Specify this parameter to show only the available
offering that matches the specified reservation identifier.
Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706`
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only the available offerings matching the
specified DB instance class.
:type duration: string
:param duration: Duration filter value, specified in years or seconds.
Specify this parameter to show only reservations for this duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: Product description filter value. Specify
this parameter to show only the available offerings matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type multi_az: boolean
:param multi_az: The Multi-AZ filter value. Specify this parameter to
show only the available offerings matching the specified Multi-AZ
parameter.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {}
if reserved_db_instances_offering_id is not None:
params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedDBInstancesOfferings',
verb='POST',
path='/', params=params)
def download_db_log_file_portion(self, db_instance_identifier,
log_file_name, marker=None,
number_of_lines=None):
"""
Downloads all or a portion of the specified log file.
:type db_instance_identifier: string
:param db_instance_identifier:
The customer-assigned name of the DB instance that contains the log
files you want to list.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type log_file_name: string
:param log_file_name: The name of the log file to be downloaded.
:type marker: string
:param marker: The pagination token provided in the previous request.
If this parameter is specified the response includes only records
beyond the marker, up to MaxRecords.
:type number_of_lines: integer
:param number_of_lines: The number of lines to download.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'LogFileName': log_file_name,
}
if marker is not None:
params['Marker'] = marker
if number_of_lines is not None:
params['NumberOfLines'] = number_of_lines
return self._make_request(
action='DownloadDBLogFilePortion',
verb='POST',
path='/', params=params)
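# Illustrative call (comment only): fetch the tail of an error log. The instance
# identifier and log file name are example values.
#
#     portion = conn.download_db_log_file_portion('mydbinstance',
#                                                 'error/mysql-error.log',
#                                                 number_of_lines=100)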
def list_tags_for_resource(self, resource_name):
"""
Lists all tags on an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see
`Tagging Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource with tags to be listed.
This value is an Amazon Resource Name (ARN). For information about
creating an ARN, see ` Constructing an RDS Amazon Resource Name
(ARN)`_.
"""
params = {'ResourceName': resource_name, }
return self._make_request(
action='ListTagsForResource',
verb='POST',
path='/', params=params)
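# Illustrative call (comment only); the ARN below is a made-up example in the
# RDS ARN format referenced above.
#
#     tags = conn.list_tags_for_resource(
#         'arn:aws:rds:us-east-1:123456789012:db:mydbinstance')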
def modify_db_instance(self, db_instance_identifier,
allocated_storage=None, db_instance_class=None,
db_security_groups=None,
vpc_security_group_ids=None,
apply_immediately=None, master_user_password=None,
db_parameter_group_name=None,
backup_retention_period=None,
preferred_backup_window=None,
preferred_maintenance_window=None, multi_az=None,
engine_version=None,
allow_major_version_upgrade=None,
auto_minor_version_upgrade=None, iops=None,
option_group_name=None,
new_db_instance_identifier=None):
"""
Modify settings for a DB instance. You can change one or more
database configuration parameters by specifying these
parameters and the new values in the request.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This value is stored as a lowercase string.
Constraints:
+ Must be the identifier for an existing DB instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type allocated_storage: integer
:param allocated_storage: The new storage capacity of the RDS instance.
Changing this parameter does not result in an outage and the change
is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
**MySQL**
Default: Uses existing setting
Valid Values: 5-1024
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
Type: Integer
**Oracle**
Default: Uses existing setting
Valid Values: 10-1024
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
**SQL Server**
Cannot be modified.
If you choose to migrate your DB instance from using standard storage
to using Provisioned IOPS, or from using Provisioned IOPS to using
standard storage, the process can take time. The duration of the
migration depends on several factors such as database load, storage
size, storage type (standard or Provisioned IOPS), amount of IOPS
provisioned (if any), and the number of prior scale storage
operations. Typical migration times are under 24 hours, but the
process can take up to several days in some cases. During the
migration, the DB instance will be available for use, but may
experience performance degradation. While the migration takes
place, nightly backups for the instance will be suspended. No other
Amazon RDS operations can take place for the instance, including
modifying the instance, rebooting the instance, deleting the
instance, creating a read replica for the instance, and creating a
DB snapshot of the instance.
:type db_instance_class: string
:param db_instance_class: The new compute and memory capacity of the DB
instance. To determine the instance classes that are available for
a particular DB engine, use the DescribeOrderableDBInstanceOptions
action.
Passing a value for this parameter causes an outage during the change
and is applied during the next maintenance window, unless the
`ApplyImmediately` parameter is specified as `True` for this
request.
Default: Uses existing setting
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge`
:type db_security_groups: list
:param db_security_groups:
A list of DB security groups to authorize on this DB instance. Changing
this parameter does not result in an outage and the change is
asynchronously applied as soon as possible.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type vpc_security_group_ids: list
:param vpc_security_group_ids:
A list of EC2 VPC security groups to authorize on this DB instance.
This change is asynchronously applied as soon as possible.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type apply_immediately: boolean
:param apply_immediately: Specifies whether or not the modifications in
this request and any pending modifications are asynchronously
applied as soon as possible, regardless of the
`PreferredMaintenanceWindow` setting for the DB instance.
If this parameter is passed as `False`, changes to the DB instance are
applied on the next call to RebootDBInstance, the next maintenance
reboot, or the next failure reboot, whichever occurs first. See
each parameter to determine when a change is applied.
Default: `False`
:type master_user_password: string
:param master_user_password:
The new password for the DB instance master user. Can be any printable
ASCII character except "/", '"', or "@".
Changing this parameter does not result in an outage and the change is
asynchronously applied as soon as possible. Between the time of the
request and the completion of the request, the `MasterUserPassword`
element exists in the `PendingModifiedValues` element of the
operation response.
Default: Uses existing setting
Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30
alphanumeric characters (Oracle), or 8 to 128 alphanumeric
characters (SQL Server).
Amazon RDS API actions never return the password, so this action
provides a way to regain access to a master instance user if the
password is lost.
:type db_parameter_group_name: string
:param db_parameter_group_name: The name of the DB parameter group to
apply to this DB instance. Changing this parameter does not result
in an outage and the change is applied during the next maintenance
window unless the `ApplyImmediately` parameter is set to `True` for
this request.
Default: Uses existing setting
Constraints: The DB parameter group must be in the same DB parameter
group family as this DB instance.
:type backup_retention_period: integer
:param backup_retention_period:
The number of days to retain automated backups. Setting this parameter
to a positive number enables backups. Setting this parameter to 0
disables automated backups.
Changing this parameter can result in an outage if you change from 0 to
a non-zero value or from a non-zero value to 0. These changes are
applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request. If
you change the parameter from one non-zero value to another non-
zero value, the change is asynchronously applied as soon as
possible.
Default: Uses existing setting
Constraints:
+ Must be a value from 0 to 8
+ Cannot be set to 0 if the DB instance is a master instance with read
replicas or if the DB instance is a read replica
:type preferred_backup_window: string
:param preferred_backup_window:
The daily time range during which automated backups are created if
automated backups are enabled, as determined by the
`BackupRetentionPeriod`. Changing this parameter does not result in
an outage and the change is asynchronously applied as soon as
possible.
Constraints:
+ Must be in the format hh24:mi-hh24:mi
+ Times should be Universal Time Coordinated (UTC)
+ Must not conflict with the preferred maintenance window
+ Must be at least 30 minutes
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur, which may result in an
outage. Changing this parameter does not result in an outage,
except in the following situation, and the change is asynchronously
applied as soon as possible. If there are pending actions that
cause a reboot, and the maintenance window is changed to include
the current time, then changing this parameter will cause a reboot
of the DB instance. If moving this window to the current time,
there must be at least 30 minutes between the current time and end
of the window to ensure pending changes are applied.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Changing this parameter does not result in an outage and the change
is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
Constraints: Cannot be specified if the DB instance is a read replica.
:type engine_version: string
:param engine_version: The version number of the database engine to
upgrade to. Changing this parameter results in an outage and the
change is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
For major version upgrades, if a non-default DB parameter group is
currently in use, a new DB parameter group in the DB parameter
group family for the new engine version must be specified. The new
DB parameter group can be the default for that DB parameter group
family.
Example: `5.1.42`
:type allow_major_version_upgrade: boolean
:param allow_major_version_upgrade: Indicates that major version
upgrades are allowed. Changing this parameter does not result in an
outage and the change is asynchronously applied as soon as
possible.
Constraints: This parameter must be set to true when specifying a value
for the EngineVersion parameter that is a different major version
than the DB instance's current version.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window. Changing this parameter does not result in
an outage except in the following case and the change is
asynchronously applied as soon as possible. An outage will result
if this parameter is set to `True` during the maintenance window,
and a newer minor version is available, and RDS has enabled auto
patching for that engine version.
:type iops: integer
:param iops: The new Provisioned IOPS (I/O operations per second) value
for the RDS instance. Changing this parameter does not result in an
outage and the change is applied during the next maintenance window
unless the `ApplyImmediately` parameter is set to `True` for this
request.
Default: Uses existing setting
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
Type: Integer
If you choose to migrate your DB instance from using standard storage
to using Provisioned IOPS, or from using Provisioned IOPS to using
standard storage, the process can take time. The duration of the
migration depends on several factors such as database load, storage
size, storage type (standard or Provisioned IOPS), amount of IOPS
provisioned (if any), and the number of prior scale storage
operations. Typical migration times are under 24 hours, but the
process can take up to several days in some cases. During the
migration, the DB instance will be available for use, but may
experience performance degradation. While the migration takes
place, nightly backups for the instance will be suspended. No other
Amazon RDS operations can take place for the instance, including
modifying the instance, rebooting the instance, deleting the
instance, creating a read replica for the instance, and creating a
DB snapshot of the instance.
:type option_group_name: string
:param option_group_name: Indicates that the DB instance should be
associated with the specified option group. Changing this parameter
does not result in an outage except in the following case and the
change is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request. If
the parameter change results in an option group that enables OEM,
this change can cause a brief (sub-second) period during which new
connections are rejected but existing connections are not
interrupted.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance.
:type new_db_instance_identifier: string
:param new_db_instance_identifier:
The new DB instance identifier for the DB instance when renaming a DB
Instance. This value is stored as a lowercase string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if allocated_storage is not None:
params['AllocatedStorage'] = allocated_storage
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if db_security_groups is not None:
self.build_list_params(params,
db_security_groups,
'DBSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
if master_user_password is not None:
params['MasterUserPassword'] = master_user_password
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if allow_major_version_upgrade is not None:
params['AllowMajorVersionUpgrade'] = str(
allow_major_version_upgrade).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if new_db_instance_identifier is not None:
params['NewDBInstanceIdentifier'] = new_db_instance_identifier
return self._make_request(
action='ModifyDBInstance',
verb='POST',
path='/', params=params)
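# Illustrative call (comment only): grow storage and change the instance class,
# applying the change immediately instead of waiting for the maintenance window.
# The identifier and sizes are example values.
#
#     conn.modify_db_instance('mydbinstance',
#                             allocated_storage=20,
#                             db_instance_class='db.m1.large',
#                             apply_immediately=True)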
def modify_db_parameter_group(self, db_parameter_group_name, parameters):
"""
Modifies the parameters of a DB parameter group. To modify
more than one parameter, submit a list of the following:
`ParameterName`, `ParameterValue`, and `ApplyMethod`. A
maximum of 20 parameters can be modified in a single request.
The `apply-immediate` method can be used only for dynamic
parameters; the `pending-reboot` method can be used with MySQL
and Oracle DB instances for either dynamic or static
parameters. For Microsoft SQL Server DB instances, the
`pending-reboot` method can be used only for static
parameters.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be the name of an existing DB parameter group
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type parameters: list
:param parameters:
An array of parameter names, values, and the apply method for the
parameter update. At least one parameter name, value, and apply
method must be supplied; subsequent arguments are optional. A
maximum of 20 parameters may be modified in a single request.
Valid Values (for the application method): `immediate | pending-reboot`
You can use the immediate value with dynamic parameters only. You can
use the pending-reboot value for both dynamic and static
parameters, and changes are applied when DB instance reboots.
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
return self._make_request(
action='ModifyDBParameterGroup',
verb='POST',
path='/', params=params)
def modify_db_subnet_group(self, db_subnet_group_name, subnet_ids,
db_subnet_group_description=None):
"""
Modifies an existing DB subnet group. DB subnet groups must
contain at least one subnet in at least two AZs in the region.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name for the DB subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens. Must not be "Default".
Example: `mySubnetgroup`
:type db_subnet_group_description: string
:param db_subnet_group_description: The description for the DB subnet
group.
:type subnet_ids: list
:param subnet_ids: The EC2 subnet IDs for the DB subnet group.
"""
params = {'DBSubnetGroupName': db_subnet_group_name, }
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if db_subnet_group_description is not None:
params['DBSubnetGroupDescription'] = db_subnet_group_description
return self._make_request(
action='ModifyDBSubnetGroup',
verb='POST',
path='/', params=params)
def modify_event_subscription(self, subscription_name,
sns_topic_arn=None, source_type=None,
event_categories=None, enabled=None):
"""
Modifies an existing RDS event notification subscription. Note
that you cannot modify the source identifiers using this call;
to change source identifiers for a subscription, use the
AddSourceIdentifierToSubscription and
RemoveSourceIdentifierFromSubscription calls.
You can see a list of the event categories for a given
SourceType in the `Events`_ topic in the Amazon RDS User Guide
or by using the **DescribeEventCategories** action.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
created for event notification. The ARN is created by Amazon SNS
when you create a topic and subscribe to it.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
by a DB instance, you would set this parameter to db-instance. If
this value is not specified, all events are returned.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
:type event_categories: list
:param event_categories: A list of event categories for a SourceType
that you want to subscribe to. You can see a list of the categories
for a given SourceType in the `Events`_ topic in the Amazon RDS
User Guide or by using the **DescribeEventCategories** action.
:type enabled: boolean
:param enabled: A Boolean value; set to **true** to activate the
subscription.
"""
params = {'SubscriptionName': subscription_name, }
if sns_topic_arn is not None:
params['SnsTopicArn'] = sns_topic_arn
if source_type is not None:
params['SourceType'] = source_type
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
return self._make_request(
action='ModifyEventSubscription',
verb='POST',
path='/', params=params)
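# Illustrative call (comment only): narrow an existing subscription (example
# name 'my-rds-events') to backup and failure events from DB instances.
#
#     conn.modify_event_subscription('my-rds-events',
#                                    source_type='db-instance',
#                                    event_categories=['backup', 'failure'],
#                                    enabled=True)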
def modify_option_group(self, option_group_name, options_to_include=None,
options_to_remove=None, apply_immediately=None):
"""
Modifies an existing option group.
:type option_group_name: string
:param option_group_name: The name of the option group to be modified.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance.
:type options_to_include: list
:param options_to_include: Options in this list are added to the option
group or, if already present, the specified configuration is used
to update the existing configuration.
:type options_to_remove: list
:param options_to_remove: Options in this list are removed from the
option group.
:type apply_immediately: boolean
:param apply_immediately: Indicates whether the changes should be
applied immediately, or during the next maintenance window for each
instance associated with the option group.
"""
params = {'OptionGroupName': option_group_name, }
if options_to_include is not None:
self.build_complex_list_params(
params, options_to_include,
'OptionsToInclude.member',
('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings'))
if options_to_remove is not None:
self.build_list_params(params,
options_to_remove,
'OptionsToRemove.member')
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
return self._make_request(
action='ModifyOptionGroup',
verb='POST',
path='/', params=params)
def promote_read_replica(self, db_instance_identifier,
backup_retention_period=None,
preferred_backup_window=None):
"""
Promotes a read replica DB instance to a standalone DB
instance.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier. This value
is stored as a lowercase string.
Constraints:
+ Must be the identifier for an existing read replica DB instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: mydbinstance
:type backup_retention_period: integer
:param backup_retention_period:
The number of days to retain automated backups. Setting this parameter
to a positive number enables backups. Setting this parameter to 0
disables automated backups.
Default: 1
Constraints:
+ Must be a value from 0 to 8
:type preferred_backup_window: string
:param preferred_backup_window: The daily time range during which
automated backups are created if automated backups are enabled,
using the `BackupRetentionPeriod` parameter.
Default: A 30-minute window selected at random from an 8-hour block of
time per region. See the Amazon RDS User Guide for the time blocks
for each region from which the default backup windows are assigned.
Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be
Universal Time Coordinated (UTC). Must not conflict with the
preferred maintenance window. Must be at least 30 minutes.
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
return self._make_request(
action='PromoteReadReplica',
verb='POST',
path='/', params=params)
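# Illustrative call (comment only): promote an example replica and keep a week
# of automated backups on the resulting standalone instance.
#
#     conn.promote_read_replica('mydbinstance-replica',
#                               backup_retention_period=7,
#                               preferred_backup_window='03:00-03:30')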
def purchase_reserved_db_instances_offering(self,
reserved_db_instances_offering_id,
reserved_db_instance_id=None,
db_instance_count=None,
tags=None):
"""
Purchases a reserved DB instance offering.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The ID of the Reserved DB
instance offering to purchase.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
:type reserved_db_instance_id: string
:param reserved_db_instance_id: Customer-specified identifier to track
this reservation.
Example: myreservationID
:type db_instance_count: integer
:param db_instance_count: The number of instances to reserve.
Default: `1`
:type tags: list
:param tags: A list of tags.
"""
params = {
'ReservedDBInstancesOfferingId': reserved_db_instances_offering_id,
}
if reserved_db_instance_id is not None:
params['ReservedDBInstanceId'] = reserved_db_instance_id
if db_instance_count is not None:
params['DBInstanceCount'] = db_instance_count
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='PurchaseReservedDBInstancesOffering',
verb='POST',
path='/', params=params)
def reboot_db_instance(self, db_instance_identifier, force_failover=None):
"""
Rebooting a DB instance restarts the database engine service.
A reboot also applies to the DB instance any modifications to
the associated DB parameter group that were pending. Rebooting
a DB instance results in a momentary outage of the instance,
during which the DB instance status is set to rebooting. If
the RDS instance is configured for MultiAZ, it is possible
that the reboot will be conducted through a failover. An
Amazon RDS event is created when the reboot is completed.
If your DB instance is deployed in multiple Availability
Zones, you can force a failover from one AZ to the other
during the reboot. You might force a failover to test the
availability of your DB instance deployment or to restore
operations to the original AZ after a failover occurs.
The time required to reboot is a function of the specific
database engine's crash recovery process. To improve the
reboot time, we recommend that you reduce database activities
as much as possible during the reboot process to reduce
rollback activity for in-transit transactions.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This parameter is stored as a lowercase
string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type force_failover: boolean
:param force_failover: When `True`, the reboot will be conducted
through a MultiAZ failover.
Constraint: You cannot specify `True` if the instance is not configured
for MultiAZ.
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if force_failover is not None:
params['ForceFailover'] = str(
force_failover).lower()
return self._make_request(
action='RebootDBInstance',
verb='POST',
path='/', params=params)
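    # Illustrative usage sketch (editor's addition; `conn` is an assumed RDS
    # connection and the identifier is hypothetical). A forced failover is
    # only valid for a Multi-AZ instance, per the constraint above:
    #
    #   conn.reboot_db_instance('mydbinstance', force_failover=True)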
def remove_source_identifier_from_subscription(self, subscription_name,
source_identifier):
"""
Removes a source identifier from an existing RDS event
notification subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to remove a source identifier from.
:type source_identifier: string
:param source_identifier: The source identifier to be removed from the
subscription, such as the **DB instance identifier** for a DB
instance or the name of a security group.
"""
params = {
'SubscriptionName': subscription_name,
'SourceIdentifier': source_identifier,
}
return self._make_request(
action='RemoveSourceIdentifierFromSubscription',
verb='POST',
path='/', params=params)
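    # Illustrative usage sketch (editor's addition; the subscription and
    # instance names are hypothetical, `conn` is an assumed RDS connection):
    #
    #   conn.remove_source_identifier_from_subscription('my-subscription',
    #                                                   'mydbinstance')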
def remove_tags_from_resource(self, resource_name, tag_keys):
"""
Removes metadata tags from an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see
`Tagging Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource the tags will be removed
from. This value is an Amazon Resource Name (ARN). For information
about creating an ARN, see ` Constructing an RDS Amazon Resource
Name (ARN)`_.
:type tag_keys: list
:param tag_keys: The tag key (name) of the tag to be removed.
"""
params = {'ResourceName': resource_name, }
self.build_list_params(params,
tag_keys,
'TagKeys.member')
return self._make_request(
action='RemoveTagsFromResource',
verb='POST',
path='/', params=params)
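    # Illustrative usage sketch (editor's addition; the ARN, account number
    # and tag keys are hypothetical, `conn` is an assumed RDS connection):
    #
    #   conn.remove_tags_from_resource(
    #       'arn:aws:rds:us-east-1:123456789012:db:mydbinstance',
    #       ['environment', 'owner'])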
def reset_db_parameter_group(self, db_parameter_group_name,
reset_all_parameters=None, parameters=None):
"""
Modifies the parameters of a DB parameter group to the
engine/system default value. To reset specific parameters
submit a list of the following: `ParameterName` and
`ApplyMethod`. To reset the entire DB parameter group, specify
the `DBParameterGroup` name and `ResetAllParameters`
parameters. When resetting the entire group, dynamic
parameters are updated immediately and static parameters are
set to `pending-reboot` to take effect on the next DB instance
restart or `RebootDBInstance` request.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type reset_all_parameters: boolean
:param reset_all_parameters: Specifies whether ( `True`) or not (
`False`) to reset all parameters in the DB parameter group to
default values.
Default: `True`
:type parameters: list
:param parameters: An array of parameter names, values, and the apply
method for the parameter update. At least one parameter name,
value, and apply method must be supplied; subsequent arguments are
optional. A maximum of 20 parameters may be modified in a single
request.
**MySQL**
Valid Values (for Apply method): `immediate` | `pending-reboot`
You can use the immediate value with dynamic parameters only. You can
use the `pending-reboot` value for both dynamic and static
parameters, and changes are applied when DB instance reboots.
**Oracle**
Valid Values (for Apply method): `pending-reboot`
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
if reset_all_parameters is not None:
params['ResetAllParameters'] = str(
reset_all_parameters).lower()
if parameters is not None:
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
return self._make_request(
action='ResetDBParameterGroup',
verb='POST',
path='/', params=params)
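    # Illustrative usage sketch (editor's addition; the group name is
    # hypothetical, `conn` is an assumed RDS connection). Resetting the whole
    # group is the simple case; individual parameters would be passed as
    # sequences ordered to match the field names listed in the call above:
    #
    #   conn.reset_db_parameter_group('my-db-parameter-group',
    #                                 reset_all_parameters=True)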
def restore_db_instance_from_db_snapshot(self, db_instance_identifier,
db_snapshot_identifier,
db_instance_class=None,
port=None,
availability_zone=None,
db_subnet_group_name=None,
multi_az=None,
publicly_accessible=None,
auto_minor_version_upgrade=None,
license_model=None,
db_name=None, engine=None,
iops=None,
option_group_name=None,
tags=None):
"""
Creates a new DB instance from a DB snapshot. The target
database is created from the source database restore point
with the same configuration as the original source database,
except that the new RDS instance is created with the default
security group.
:type db_instance_identifier: string
:param db_instance_identifier:
        The name of the DB instance to create from the DB snapshot. This
        parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type db_snapshot_identifier: string
    :param db_snapshot_identifier: The identifier for the DB snapshot to
        restore from.
Constraints:
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the Amazon
RDS DB instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
:type port: integer
:param port: The port number on which the database accepts connections.
Default: The same port as the original DB instance
Constraints: Value must be `1150-65535`
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
Example: `us-east-1a`
:type db_subnet_group_name: string
:param db_subnet_group_name: The DB subnet group name to use for the
new instance.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
    + **Default VPC:** true
    + **VPC:** false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window.
:type license_model: string
:param license_model: License model information for the restored DB
instance.
Default: Same as source.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type db_name: string
:param db_name:
The database name for the restored DB instance.
This parameter doesn't apply to the MySQL engine.
:type engine: string
:param engine: The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source
Example: `oracle-ee`
:type iops: integer
:param iops: Specifies the amount of provisioned IOPS for the DB
instance, expressed in I/O operations per second. If this parameter
is not specified, the IOPS value will be taken from the backup. If
this parameter is set to 0, the new instance will be converted to a
non-PIOPS instance, which will take additional time, though your DB
instance will be available for connections before the conversion
starts.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: The name of the option group to be used for
the restored DB instance.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'DBSnapshotIdentifier': db_snapshot_identifier,
}
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if port is not None:
params['Port'] = port
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if db_name is not None:
params['DBName'] = db_name
if engine is not None:
params['Engine'] = engine
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='RestoreDBInstanceFromDBSnapshot',
verb='POST',
path='/', params=params)
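    # Illustrative usage sketch (editor's addition; identifiers are
    # hypothetical, `conn` is an assumed RDS connection). The first argument
    # names the new DB instance, the second names the snapshot to restore:
    #
    #   conn.restore_db_instance_from_db_snapshot('my-restored-instance',
    #                                             'my-snapshot-id')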
def restore_db_instance_to_point_in_time(self,
source_db_instance_identifier,
target_db_instance_identifier,
restore_time=None,
use_latest_restorable_time=None,
db_instance_class=None,
port=None,
availability_zone=None,
db_subnet_group_name=None,
multi_az=None,
publicly_accessible=None,
auto_minor_version_upgrade=None,
license_model=None,
db_name=None, engine=None,
iops=None,
option_group_name=None,
tags=None):
"""
Restores a DB instance to an arbitrary point-in-time. Users
can restore to any point in time before the
latestRestorableTime for up to backupRetentionPeriod days. The
target database is created from the source database with the
same configuration as the original database except that the DB
instance is created with the default DB security group.
:type source_db_instance_identifier: string
:param source_db_instance_identifier:
The identifier of the source DB instance from which to restore.
Constraints:
+ Must be the identifier of an existing database instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type target_db_instance_identifier: string
:param target_db_instance_identifier:
The name of the new database instance to be created.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type restore_time: timestamp
:param restore_time: The date and time to restore from.
Valid Values: Value must be a UTC time
Constraints:
+ Must be before the latest restorable time for the DB instance
+ Cannot be specified if UseLatestRestorableTime parameter is true
Example: `2009-09-07T23:45:00Z`
:type use_latest_restorable_time: boolean
:param use_latest_restorable_time: Specifies whether ( `True`) or not (
`False`) the DB instance is restored from the latest backup time.
Default: `False`
Constraints: Cannot be specified if RestoreTime parameter is provided.
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the Amazon
RDS DB instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
Default: The same DBInstanceClass as the original DB instance.
:type port: integer
:param port: The port number on which the database accepts connections.
Constraints: Value must be `1150-65535`
Default: The same port as the original DB instance.
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to true.
Example: `us-east-1a`
:type db_subnet_group_name: string
:param db_subnet_group_name: The DB subnet group name to use for the
new instance.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
    + **Default VPC:** true
    + **VPC:** false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window.
:type license_model: string
:param license_model: License model information for the restored DB
instance.
Default: Same as source.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type db_name: string
:param db_name:
The database name for the restored DB instance.
This parameter is not used for the MySQL engine.
:type engine: string
:param engine: The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source
Example: `oracle-ee`
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: The name of the option group to be used for
the restored DB instance.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type tags: list
:param tags: A list of tags.
"""
params = {
'SourceDBInstanceIdentifier': source_db_instance_identifier,
'TargetDBInstanceIdentifier': target_db_instance_identifier,
}
if restore_time is not None:
params['RestoreTime'] = restore_time
if use_latest_restorable_time is not None:
params['UseLatestRestorableTime'] = str(
use_latest_restorable_time).lower()
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if port is not None:
params['Port'] = port
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if db_name is not None:
params['DBName'] = db_name
if engine is not None:
params['Engine'] = engine
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='RestoreDBInstanceToPointInTime',
verb='POST',
path='/', params=params)
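    # Illustrative usage sketch (editor's addition; instance names are
    # hypothetical, `conn` is an assumed RDS connection). Either restore_time
    # or use_latest_restorable_time=True is supplied, not both:
    #
    #   conn.restore_db_instance_to_point_in_time(
    #       'source-instance',
    #       'restored-instance',
    #       use_latest_restorable_time=True)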
def revoke_db_security_group_ingress(self, db_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_id=None,
ec2_security_group_owner_id=None):
"""
Revokes ingress from a DBSecurityGroup for previously
authorized IP ranges or EC2 or VPC Security Groups. Required
parameters for this API are one of CIDRIP, EC2SecurityGroupId
for VPC, or (EC2SecurityGroupOwnerId and either
EC2SecurityGroupName or EC2SecurityGroupId).
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to
revoke ingress from.
:type cidrip: string
:param cidrip: The IP range to revoke access from. Must be a valid CIDR
range. If `CIDRIP` is specified, `EC2SecurityGroupName`,
`EC2SecurityGroupId` and `EC2SecurityGroupOwnerId` cannot be
provided.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 security group to
revoke access from. For VPC DB security groups,
`EC2SecurityGroupId` must be provided. Otherwise,
EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or
`EC2SecurityGroupId` must be provided.
:type ec2_security_group_id: string
:param ec2_security_group_id: The id of the EC2 security group to
revoke access from. For VPC DB security groups,
`EC2SecurityGroupId` must be provided. Otherwise,
EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or
`EC2SecurityGroupId` must be provided.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS Account Number of the owner
of the EC2 security group specified in the `EC2SecurityGroupName`
parameter. The AWS Access Key ID is not an acceptable value. For
VPC DB security groups, `EC2SecurityGroupId` must be provided.
Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
"""
params = {'DBSecurityGroupName': db_security_group_name, }
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_id is not None:
params['EC2SecurityGroupId'] = ec2_security_group_id
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='RevokeDBSecurityGroupIngress',
verb='POST',
path='/', params=params)
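    # Illustrative usage sketch (editor's addition; the group name and CIDR
    # range are hypothetical, `conn` is an assumed RDS connection). When
    # CIDRIP is given, the EC2 security group arguments are omitted, per the
    # docstring above:
    #
    #   conn.revoke_db_security_group_ingress('mydbsecuritygroup',
    #                                         cidrip='192.0.2.0/24')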
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
path='/', params=params)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
| 1 | 10,841 | The convention in most of the codebase is to use `if isinstance(body, bytes):` instead. | boto-boto | py |
@@ -249,7 +249,9 @@ class ConsoleUI(wx.Frame):
if self.completionAmbiguous:
menu = wx.Menu()
for comp in completions:
- item = menu.Append(wx.ID_ANY, comp)
+                # Only show text after the last dot (so as to not keep repeating the class or module in the context menu)
+ label=comp.split('.')[-1]
+ item = menu.Append(wx.ID_ANY, label)
self.Bind(wx.EVT_MENU,
lambda evt, completion=comp: self._insertCompletion(original, completion),
item) | 1 | #pythonConsole.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2008-2013 NV Access Limited
import watchdog
"""Provides an interactive Python console which can be run from within NVDA.
To use, call L{initialize} to create a singleton instance of the console GUI. This can then be accessed externally as L{consoleUI}.
"""
import __builtin__
import os
import code
import sys
import pydoc
import re
import itertools
import rlcompleter
import wx
from baseObject import AutoPropertyObject
import speech
import queueHandler
import api
import gui
from logHandler import log
import braille
class HelpCommand(object):
"""
Emulation of the 'help' command found in the Python interactive shell.
"""
_reprMessage=_("Type help(object) to get help about object.")
def __repr__(self):
return self._reprMessage
def __call__(self,*args,**kwargs):
return pydoc.help(*args,**kwargs)
class ExitConsoleCommand(object):
"""
An object that can be used as an exit command that can close the console or print a friendly message for its repr.
"""
def __init__(self, exitFunc):
self._exitFunc = exitFunc
_reprMessage=_("Type exit() to exit the console")
def __repr__(self):
return self._reprMessage
def __call__(self):
self._exitFunc()
#: The singleton Python console UI instance.
consoleUI = None
class Completer(rlcompleter.Completer):
def _callable_postfix(self, val, word):
# Just because something is callable doesn't always mean we want to call it.
return word
class PythonConsole(code.InteractiveConsole, AutoPropertyObject):
"""An interactive Python console for NVDA which directs output to supplied functions.
This is necessary for a Python console with input/output other than stdin/stdout/stderr.
Input is always received via the L{push} method.
This console handles redirection of stdout and stderr and prevents clobbering of the gettext "_" builtin.
The console's namespace is populated with useful modules
and can be updated with a snapshot of NVDA's state using L{updateNamespaceSnapshotVars}.
"""
def __init__(self, outputFunc, setPromptFunc, exitFunc, echoFunc=None, **kwargs):
self._output = outputFunc
self._echo = echoFunc
self._setPrompt = setPromptFunc
#: The namespace available to the console. This can be updated externally.
#: @type: dict
# Populate with useful modules.
exitCmd = ExitConsoleCommand(exitFunc)
self.namespace = {
"help": HelpCommand(),
"exit": exitCmd,
"quit": exitCmd,
"sys": sys,
"os": os,
"wx": wx,
"log": log,
"api": api,
"queueHandler": queueHandler,
"speech": speech,
"braille": braille,
}
#: The variables last added to the namespace containing a snapshot of NVDA's state.
#: @type: dict
self._namespaceSnapshotVars = None
# Can't use super here because stupid code.InteractiveConsole doesn't sub-class object. Grrr!
code.InteractiveConsole.__init__(self, locals=self.namespace, **kwargs)
self.prompt = ">>>"
def _set_prompt(self, prompt):
self._prompt = prompt
self._setPrompt(prompt)
def _get_prompt(self):
return self._prompt
def write(self, data):
self._output(data)
def push(self, line):
if self._echo:
self._echo("%s %s\n" % (self.prompt, line))
# Capture stdout/stderr output as well as code interaction.
stdout, stderr = sys.stdout, sys.stderr
sys.stdout = sys.stderr = self
# Prevent this from messing with the gettext "_" builtin.
saved_ = __builtin__._
more = code.InteractiveConsole.push(self, line)
sys.stdout, sys.stderr = stdout, stderr
__builtin__._ = saved_
self.prompt = "..." if more else ">>>"
return more
def updateNamespaceSnapshotVars(self):
"""Update the console namespace with a snapshot of NVDA's current state.
This creates/updates variables for the current focus, navigator object, etc.
"""
self._namespaceSnapshotVars = {
"focus": api.getFocusObject(),
# Copy the focus ancestor list, as it gets mutated once it is replaced in api.setFocusObject.
"focusAnc": list(api.getFocusAncestors()),
"fdl": api.getFocusDifferenceLevel(),
"fg": api.getForegroundObject(),
"nav": api.getNavigatorObject(),
"review":api.getReviewPosition(),
"mouse": api.getMouseObject(),
"brlRegions": braille.handler.buffer.regions,
}
self.namespace.update(self._namespaceSnapshotVars)
def removeNamespaceSnapshotVars(self):
"""Remove the variables from the console namespace containing the last snapshot of NVDA's state.
This removes the variables added by L{updateNamespaceSnapshotVars}.
"""
if not self._namespaceSnapshotVars:
return
for key in self._namespaceSnapshotVars:
try:
del self.namespace[key]
except KeyError:
pass
self._namespaceSnapshotVars = None
class ConsoleUI(wx.Frame):
"""The NVDA Python console GUI.
"""
def __init__(self, parent):
super(ConsoleUI, self).__init__(parent, wx.ID_ANY, _("NVDA Python Console"))
self.Bind(wx.EVT_ACTIVATE, self.onActivate)
self.Bind(wx.EVT_CLOSE, self.onClose)
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.outputCtrl = wx.TextCtrl(self, wx.ID_ANY, size=(500, 500), style=wx.TE_MULTILINE | wx.TE_READONLY|wx.TE_RICH)
self.outputCtrl.Bind(wx.EVT_KEY_DOWN, self.onOutputKeyDown)
self.outputCtrl.Bind(wx.EVT_CHAR, self.onOutputChar)
mainSizer.Add(self.outputCtrl, proportion=2, flag=wx.EXPAND)
inputSizer = wx.BoxSizer(wx.HORIZONTAL)
self.promptLabel = wx.StaticText(self, wx.ID_ANY)
inputSizer.Add(self.promptLabel, flag=wx.EXPAND)
self.inputCtrl = wx.TextCtrl(self, wx.ID_ANY, style=wx.TE_DONTWRAP | wx.TE_PROCESS_TAB)
self.inputCtrl.Bind(wx.EVT_CHAR, self.onInputChar)
inputSizer.Add(self.inputCtrl, proportion=1, flag=wx.EXPAND)
mainSizer.Add(inputSizer, proportion=1, flag=wx.EXPAND)
self.SetSizer(mainSizer)
mainSizer.Fit(self)
self.console = PythonConsole(outputFunc=self.output, echoFunc=self.echo, setPromptFunc=self.setPrompt, exitFunc=self.Close)
self.completer = Completer(namespace=self.console.namespace)
self.completionAmbiguous = False
# Even the most recent line has a position in the history, so initialise with one blank line.
self.inputHistory = [""]
self.inputHistoryPos = 0
def onActivate(self, evt):
if evt.GetActive():
self.inputCtrl.SetFocus()
evt.Skip()
def onClose(self, evt):
self.Hide()
self.console.removeNamespaceSnapshotVars()
def output(self, data):
self.outputCtrl.write(data)
if data and not data.isspace():
queueHandler.queueFunction(queueHandler.eventQueue, speech.speakText, data)
def echo(self, data):
self.outputCtrl.write(data)
def setPrompt(self, prompt):
self.promptLabel.SetLabel(prompt)
queueHandler.queueFunction(queueHandler.eventQueue, speech.speakText, prompt)
def execute(self):
data = self.inputCtrl.GetValue()
watchdog.alive()
self.console.push(data)
watchdog.asleep()
if data:
# Only add non-blank lines to history.
if len(self.inputHistory) > 1 and self.inputHistory[-2] == data:
# The previous line was the same and we don't want consecutive duplicates, so trash the most recent line.
del self.inputHistory[-1]
else:
# Update the content for the most recent line of history.
self.inputHistory[-1] = data
# Start with a new, blank line.
self.inputHistory.append("")
self.inputHistoryPos = len(self.inputHistory) - 1
self.inputCtrl.ChangeValue("")
def historyMove(self, movement):
newIndex = self.inputHistoryPos + movement
if not (0 <= newIndex < len(self.inputHistory)):
# No more lines in this direction.
return False
# Update the content of the history at the current position.
self.inputHistory[self.inputHistoryPos] = self.inputCtrl.GetValue()
self.inputHistoryPos = newIndex
self.inputCtrl.ChangeValue(self.inputHistory[newIndex])
self.inputCtrl.SetInsertionPointEnd()
return True
RE_COMPLETE_UNIT = re.compile(r"[\w.]*$")
def complete(self):
try:
original = self.RE_COMPLETE_UNIT.search(self.inputCtrl.GetValue()).group(0)
except AttributeError:
return False
completions = list(self._getCompletions(original))
if self.completionAmbiguous:
menu = wx.Menu()
for comp in completions:
item = menu.Append(wx.ID_ANY, comp)
self.Bind(wx.EVT_MENU,
lambda evt, completion=comp: self._insertCompletion(original, completion),
item)
self.PopupMenu(menu)
menu.Destroy()
return True
self.completionAmbiguous = len(completions) > 1
completed = self._findBestCompletion(original, completions)
if not completed:
return False
self._insertCompletion(original, completed)
return not self.completionAmbiguous
def _getCompletions(self, original):
for state in itertools.count():
completion = self.completer.complete(original, state)
if not completion:
break
yield completion
def _findBestCompletion(self, original, completions):
if not completions:
return None
if len(completions) == 1:
return completions[0]
# Find the longest completion.
longestComp = None
longestCompLen = 0
for comp in completions:
compLen = len(comp)
if compLen > longestCompLen:
longestComp = comp
longestCompLen = compLen
# Find the longest common prefix.
for prefixLen in xrange(longestCompLen, 0, -1):
prefix = comp[:prefixLen]
for comp in completions:
if not comp.startswith(prefix):
break
else:
# This prefix is common to all completions.
if prefix == original:
# We didn't actually complete anything.
return None
return prefix
return None
def _insertCompletion(self, original, completed):
self.completionAmbiguous = False
insert = completed[len(original):]
if not insert:
return
self.inputCtrl.SetValue(self.inputCtrl.GetValue() + insert)
queueHandler.queueFunction(queueHandler.eventQueue, speech.speakText, insert)
self.inputCtrl.SetInsertionPointEnd()
def onInputChar(self, evt):
key = evt.GetKeyCode()
if key == wx.WXK_TAB:
line = self.inputCtrl.GetValue()
if line and not line.isspace():
if not self.complete():
wx.Bell()
return
# This is something other than autocompletion, so reset autocompletion state.
self.completionAmbiguous = False
if key == wx.WXK_RETURN:
self.execute()
return
elif key in (wx.WXK_UP, wx.WXK_DOWN):
if self.historyMove(-1 if key == wx.WXK_UP else 1):
return
elif key == wx.WXK_F6:
self.outputCtrl.SetFocus()
return
elif key == wx.WXK_ESCAPE:
self.Close()
return
evt.Skip()
def onOutputKeyDown(self, evt):
key = evt.GetKeyCode()
# #3763: WX 3 no longer passes escape to evt_char for richEdit fields, therefore evt_key_down is used.
if key == wx.WXK_ESCAPE:
self.Close()
return
evt.Skip()
def onOutputChar(self, evt):
key = evt.GetKeyCode()
if key == wx.WXK_F6:
self.inputCtrl.SetFocus()
return
evt.Skip()
def initialize():
"""Initialize the NVDA Python console GUI.
This creates a singleton instance of the console GUI. This is accessible as L{consoleUI}. This may be manipulated externally.
"""
global consoleUI
consoleUI = ConsoleUI(gui.mainFrame)
def activate():
"""Activate the console GUI.
This shows the GUI and brings it to the foreground if possible.
@precondition: L{initialize} has been called.
"""
global consoleUI
consoleUI.Raise()
# There is a MAXIMIZE style which can be used on the frame at construction, but it doesn't seem to work the first time it is shown,
# probably because it was in the background.
# Therefore, explicitly maximise it here.
# This also ensures that it will be maximized whenever it is activated, even if the user restored/minimised it.
consoleUI.Maximize()
consoleUI.Show()
| 1 | 19,371 | I think this would be better as: `label = comp.rsplit('.', 1)[-1]` | nvaccess-nvda | py |
@@ -57,8 +57,8 @@ public class RubyProvider {
}
@SuppressWarnings("unchecked")
- public <Element> GeneratedResult generate(
- Element element, SnippetDescriptor snippetDescriptor, RubyGapicContext context) {
+ public <Element, ContextT> GeneratedResult generate(
+ Element element, SnippetDescriptor snippetDescriptor, ContextT context) {
ImmutableMap<String, Object> globalMap =
ImmutableMap.<String, Object>builder().put("context", context).build();
RubySnippetSet<Element> snippets = | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.ruby;
import com.google.api.codegen.GeneratedResult;
import com.google.api.codegen.SnippetDescriptor;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.snippet.Doc;
import com.google.api.tools.framework.snippet.SnippetSet;
import com.google.api.tools.framework.tools.ToolUtil;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Multimap;
import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* A RubyProvider provides general Ruby code generation logic.
*/
public class RubyProvider {
/**
* The path to the root of snippet resources.
*/
static final String SNIPPET_RESOURCE_ROOT =
RubyGapicProvider.class.getPackage().getName().replace('.', '/');
public <Element> void output(
String packageRoot, String outputPath, Multimap<Element, GeneratedResult> elements)
throws IOException {
Map<String, Doc> files = new LinkedHashMap<>();
for (Map.Entry<Element, GeneratedResult> entry : elements.entries()) {
Element element = entry.getKey();
GeneratedResult generatedResult = entry.getValue();
String root;
if (element instanceof Method) {
root = ((Method) element).getFile().getFullName().replace('.', '/');
} else {
root = packageRoot;
}
files.put(root + "/" + generatedResult.getFilename(), generatedResult.getDoc());
}
ToolUtil.writeFiles(files, outputPath);
}
@SuppressWarnings("unchecked")
public <Element> GeneratedResult generate(
Element element, SnippetDescriptor snippetDescriptor, RubyGapicContext context) {
ImmutableMap<String, Object> globalMap =
ImmutableMap.<String, Object>builder().put("context", context).build();
RubySnippetSet<Element> snippets =
SnippetSet.createSnippetInterface(
RubySnippetSet.class,
SNIPPET_RESOURCE_ROOT,
snippetDescriptor.getSnippetInputName(),
globalMap);
Doc filenameDoc = snippets.generateFilename(element);
String outputFilename = filenameDoc.prettyPrint();
Doc result = snippets.generateClass(element);
return GeneratedResult.create(result, outputFilename);
}
}
| 1 | 14,953 | We have been creating a language-level context interface for each language instead of parameterizing the generate function. | googleapis-gapic-generator | java |
@@ -21,6 +21,6 @@ class UuidGenerator implements GeneratorInterface
{
$id = (string) $media->getId();
- return sprintf('%s/%04s/%02s', $media->getContext(), substr($id, 0, 4), substr($id, 4, 2));
+ return sprintf('%s/%04s/%02s', $media->getContext() ?? '', substr($id, 0, 4), substr($id, 4, 2));
}
} | 1 | <?php
declare(strict_types=1);
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Generator;
use Sonata\MediaBundle\Model\MediaInterface;
class UuidGenerator implements GeneratorInterface
{
public function generatePath(MediaInterface $media): string
{
$id = (string) $media->getId();
return sprintf('%s/%04s/%02s', $media->getContext(), substr($id, 0, 4), substr($id, 4, 2));
}
}
| 1 | 12,403 | Same thing about context here. | sonata-project-SonataMediaBundle | php |
@@ -634,9 +634,16 @@ func (handler *workflowTaskHandlerImpl) handleCommandContinueAsNewWorkflow(
return handler.failCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil)
}
+ namespaceEntry, err := handler.namespaceCache.GetNamespaceByID(handler.mutableState.GetExecutionInfo().NamespaceId)
+ if err != nil {
+ return err
+ }
+ namespace := namespaceEntry.GetInfo().Name
+
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateContinueAsNewWorkflowExecutionAttributes(
+ namespace,
attr,
handler.mutableState.GetExecutionInfo(),
) | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"fmt"
"time"
"github.com/pborman/uuid"
commandpb "go.temporal.io/api/command/v1"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
failurepb "go.temporal.io/api/failure/v1"
historypb "go.temporal.io/api/history/v1"
"go.temporal.io/api/serviceerror"
"go.temporal.io/server/common"
"go.temporal.io/server/common/backoff"
"go.temporal.io/server/common/cache"
"go.temporal.io/server/common/enums"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/payloads"
"go.temporal.io/server/common/primitives/timestamp"
"go.temporal.io/server/service/history/configs"
)
type (
commandAttrValidationFn func() error
workflowTaskHandlerImpl struct {
identity string
workflowTaskCompletedID int64
// internal state
hasBufferedEvents bool
workflowTaskFailedCause *workflowTaskFailedCause
activityNotStartedCancelled bool
continueAsNewBuilder mutableState
stopProcessing bool // should stop processing any more commands
mutableState mutableState
initiatedChildExecutionsInBatch map[string]struct{} // Set of initiated child executions in the workflow task
// validation
attrValidator *commandAttrValidator
sizeLimitChecker *workflowSizeChecker
logger log.Logger
namespaceCache cache.NamespaceCache
metricsClient metrics.Client
config *configs.Config
}
workflowTaskFailedCause struct {
failedCause enumspb.WorkflowTaskFailedCause
causeErr error
}
)
func newWorkflowTaskHandler(
identity string,
workflowTaskCompletedID int64,
mutableState mutableState,
attrValidator *commandAttrValidator,
sizeLimitChecker *workflowSizeChecker,
logger log.Logger,
namespaceCache cache.NamespaceCache,
metricsClient metrics.Client,
config *configs.Config,
) *workflowTaskHandlerImpl {
return &workflowTaskHandlerImpl{
identity: identity,
workflowTaskCompletedID: workflowTaskCompletedID,
// internal state
hasBufferedEvents: mutableState.HasBufferedEvents(),
workflowTaskFailedCause: nil,
activityNotStartedCancelled: false,
continueAsNewBuilder: nil,
stopProcessing: false,
mutableState: mutableState,
initiatedChildExecutionsInBatch: make(map[string]struct{}),
// validation
attrValidator: attrValidator,
sizeLimitChecker: sizeLimitChecker,
logger: logger,
namespaceCache: namespaceCache,
metricsClient: metricsClient,
config: config,
}
}
func (handler *workflowTaskHandlerImpl) handleCommands(
commands []*commandpb.Command,
) error {
for _, command := range commands {
err := handler.handleCommand(command)
if err != nil || handler.stopProcessing {
return err
}
}
return nil
}
func (handler *workflowTaskHandlerImpl) handleCommand(command *commandpb.Command) error {
switch command.GetCommandType() {
case enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK:
return handler.handleCommandScheduleActivity(command.GetScheduleActivityTaskCommandAttributes())
case enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION:
return handler.handleCommandCompleteWorkflow(command.GetCompleteWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION:
return handler.handleCommandFailWorkflow(command.GetFailWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION:
return handler.handleCommandCancelWorkflow(command.GetCancelWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_START_TIMER:
return handler.handleCommandStartTimer(command.GetStartTimerCommandAttributes())
case enumspb.COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK:
return handler.handleCommandRequestCancelActivity(command.GetRequestCancelActivityTaskCommandAttributes())
case enumspb.COMMAND_TYPE_CANCEL_TIMER:
return handler.handleCommandCancelTimer(command.GetCancelTimerCommandAttributes())
case enumspb.COMMAND_TYPE_RECORD_MARKER:
return handler.handleCommandRecordMarker(command.GetRecordMarkerCommandAttributes())
case enumspb.COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION:
return handler.handleCommandRequestCancelExternalWorkflow(command.GetRequestCancelExternalWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION:
return handler.handleCommandSignalExternalWorkflow(command.GetSignalExternalWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION:
return handler.handleCommandContinueAsNewWorkflow(command.GetContinueAsNewWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION:
return handler.handleCommandStartChildWorkflow(command.GetStartChildWorkflowExecutionCommandAttributes())
case enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES:
return handler.handleCommandUpsertWorkflowSearchAttributes(command.GetUpsertWorkflowSearchAttributesCommandAttributes())
default:
return serviceerror.NewInvalidArgument(fmt.Sprintf("Unknown command type: %v", command.GetCommandType()))
}
}
func (handler *workflowTaskHandlerImpl) handleCommandScheduleActivity(
attr *commandpb.ScheduleActivityTaskCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeScheduleActivityCounter,
)
executionInfo := handler.mutableState.GetExecutionInfo()
namespaceID := executionInfo.NamespaceId
targetNamespaceID := namespaceID
if attr.GetNamespace() != "" {
targetNamespaceEntry, err := handler.namespaceCache.GetNamespace(attr.GetNamespace())
if err != nil {
return serviceerror.NewInternal(fmt.Sprintf("Unable to schedule activity across namespace %v.", attr.GetNamespace()))
}
targetNamespaceID = targetNamespaceEntry.GetInfo().Id
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateActivityScheduleAttributes(
namespaceID,
targetNamespaceID,
attr,
timestamp.DurationValue(executionInfo.WorkflowRunTimeout),
)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_ACTIVITY_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK.String()),
attr.GetInput().Size(),
"ScheduleActivityTaskCommandAttributes.Input exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
enums.SetDefaultTaskQueueKind(&attr.GetTaskQueue().Kind)
_, _, err = handler.mutableState.AddActivityTaskScheduledEvent(handler.workflowTaskCompletedID, attr)
if err != nil {
if _, ok := err.(*serviceerror.InvalidArgument); ok {
return handler.failCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_SCHEDULE_ACTIVITY_DUPLICATE_ID, err)
}
return err
}
return nil
}
func (handler *workflowTaskHandlerImpl) handleCommandRequestCancelActivity(
attr *commandpb.RequestCancelActivityTaskCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeCancelActivityCounter,
)
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateActivityCancelAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
scheduleID := attr.GetScheduledEventId()
actCancelReqEvent, ai, err := handler.mutableState.AddActivityTaskCancelRequestedEvent(
handler.workflowTaskCompletedID,
scheduleID,
handler.identity,
)
if err != nil {
if _, ok := err.(*serviceerror.InvalidArgument); ok {
return handler.failCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES, err)
}
return err
}
if ai != nil {
		// If ai is nil, the activity has already been canceled/completed/timed out. The cancel request
// will be recorded in the history, but no further action will be taken.
if ai.StartedId == common.EmptyEventID {
// We haven't started the activity yet, we can cancel the activity right away and
// schedule a workflow task to ensure the workflow makes progress.
_, err = handler.mutableState.AddActivityTaskCanceledEvent(
ai.ScheduleId,
ai.StartedId,
actCancelReqEvent.GetEventId(),
payloads.EncodeString(activityCancellationMsgActivityNotStarted),
handler.identity,
)
if err != nil {
return err
}
handler.activityNotStartedCancelled = true
}
}
return nil
}
func (handler *workflowTaskHandlerImpl) handleCommandStartTimer(
attr *commandpb.StartTimerCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeStartTimerCounter,
)
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateTimerScheduleAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_TIMER_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
_, _, err := handler.mutableState.AddTimerStartedEvent(handler.workflowTaskCompletedID, attr)
if err != nil {
if _, ok := err.(*serviceerror.InvalidArgument); ok {
return handler.failCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_START_TIMER_DUPLICATE_ID, err)
}
return err
}
return nil
}
func (handler *workflowTaskHandlerImpl) handleCommandCompleteWorkflow(
attr *commandpb.CompleteWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeCompleteWorkflowCounter,
)
if handler.hasBufferedEvents {
return handler.failCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil)
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateCompleteWorkflowExecutionAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION.String()),
attr.GetResult().Size(),
"CompleteWorkflowExecutionCommandAttributes.Result exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
	// If the workflow task has more than one completion event, then just pick the first one
if !handler.mutableState.IsWorkflowExecutionRunning() {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.MultipleCompletionCommandsCounter,
)
handler.logger.Warn(
"Multiple completion commands",
tag.WorkflowCommandType(enumspb.COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION),
tag.ErrorTypeMultipleCompletionCommands,
)
return nil
}
// check if this is a cron workflow
cronBackoff, err := handler.mutableState.GetCronBackoffDuration()
if err != nil {
handler.stopProcessing = true
return err
}
if cronBackoff == backoff.NoBackoff {
// not cron, so complete this workflow execution
if _, err := handler.mutableState.AddCompletedWorkflowEvent(handler.workflowTaskCompletedID, attr); err != nil {
return serviceerror.NewInternal("Unable to add complete workflow event.")
}
return nil
}
// this is a cron workflow
startEvent, err := handler.mutableState.GetStartEvent()
if err != nil {
return err
}
startAttributes := startEvent.GetWorkflowExecutionStartedEventAttributes()
return handler.retryCronContinueAsNew(
startAttributes,
cronBackoff,
enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE,
nil,
attr.Result,
)
}
func (handler *workflowTaskHandlerImpl) handleCommandFailWorkflow(
attr *commandpb.FailWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeFailWorkflowCounter,
)
if handler.hasBufferedEvents {
return handler.failCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil)
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateFailWorkflowExecutionAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION.String()),
attr.GetFailure().Size(),
"FailWorkflowExecutionCommandAttributes.Failure exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
	// If the workflow task has more than one completion event, then just pick the first one
if !handler.mutableState.IsWorkflowExecutionRunning() {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.MultipleCompletionCommandsCounter,
)
handler.logger.Warn(
"Multiple completion commands",
tag.WorkflowCommandType(enumspb.COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION),
tag.ErrorTypeMultipleCompletionCommands,
)
return nil
}
	// below will check whether to do continue as new, based on retry backoff or cron schedule
backoffInterval, retryState := handler.mutableState.GetRetryBackoffDuration(attr.GetFailure())
continueAsNewInitiator := enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY
// first check the backoff retry
if backoffInterval == backoff.NoBackoff {
// if no backoff retry, set the backoffInterval using cron schedule
backoffInterval, err = handler.mutableState.GetCronBackoffDuration()
if err != nil {
handler.stopProcessing = true
return err
}
continueAsNewInitiator = enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE
}
// second check the backoff / cron schedule
if backoffInterval == backoff.NoBackoff {
// no retry or cron
if _, err := handler.mutableState.AddFailWorkflowEvent(handler.workflowTaskCompletedID, retryState, attr); err != nil {
return err
}
return nil
}
// this is a cron / backoff workflow
startEvent, err := handler.mutableState.GetStartEvent()
if err != nil {
return err
}
startAttributes := startEvent.GetWorkflowExecutionStartedEventAttributes()
return handler.retryCronContinueAsNew(
startAttributes,
backoffInterval,
continueAsNewInitiator,
attr.GetFailure(),
startAttributes.LastCompletionResult,
)
}
func (handler *workflowTaskHandlerImpl) handleCommandCancelTimer(
attr *commandpb.CancelTimerCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeCancelTimerCounter,
)
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateTimerCancelAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_TIMER_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
_, err := handler.mutableState.AddTimerCanceledEvent(
handler.workflowTaskCompletedID,
attr,
handler.identity)
if err != nil {
if _, ok := err.(*serviceerror.InvalidArgument); ok {
return handler.failCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_TIMER_ATTRIBUTES, err)
}
return err
}
// timer deletion is a success, we may have deleted a fired timer in
// which case we should reset hasBufferedEvents
// TODO deletion of timer fired event refreshing hasBufferedEvents
	// is not entirely correct, since new events may appear while these commands are being processed
handler.hasBufferedEvents = handler.mutableState.HasBufferedEvents()
return nil
}
func (handler *workflowTaskHandlerImpl) handleCommandCancelWorkflow(
attr *commandpb.CancelWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeCancelWorkflowCounter)
if handler.hasBufferedEvents {
return handler.failCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil)
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateCancelWorkflowExecutionAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
	// If the workflow task has more than one completion event, then just pick the first one
if !handler.mutableState.IsWorkflowExecutionRunning() {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.MultipleCompletionCommandsCounter,
)
handler.logger.Warn(
"Multiple completion commands",
tag.WorkflowCommandType(enumspb.COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION),
tag.ErrorTypeMultipleCompletionCommands,
)
return nil
}
_, err := handler.mutableState.AddWorkflowExecutionCanceledEvent(handler.workflowTaskCompletedID, attr)
return err
}
func (handler *workflowTaskHandlerImpl) handleCommandRequestCancelExternalWorkflow(
attr *commandpb.RequestCancelExternalWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeCancelExternalWorkflowCounter,
)
executionInfo := handler.mutableState.GetExecutionInfo()
namespaceID := executionInfo.NamespaceId
targetNamespaceID := namespaceID
if attr.GetNamespace() != "" {
targetNamespaceEntry, err := handler.namespaceCache.GetNamespace(attr.GetNamespace())
if err != nil {
return serviceerror.NewInternal(fmt.Sprintf("Unable to cancel workflow across namespace: %v.", attr.GetNamespace()))
}
targetNamespaceID = targetNamespaceEntry.GetInfo().Id
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateCancelExternalWorkflowExecutionAttributes(
namespaceID,
targetNamespaceID,
handler.initiatedChildExecutionsInBatch,
attr,
)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
cancelRequestID := uuid.New()
_, _, err := handler.mutableState.AddRequestCancelExternalWorkflowExecutionInitiatedEvent(
handler.workflowTaskCompletedID, cancelRequestID, attr,
)
return err
}
func (handler *workflowTaskHandlerImpl) handleCommandRecordMarker(
attr *commandpb.RecordMarkerCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeRecordMarkerCounter,
)
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateRecordMarkerAttributes(attr)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_RECORD_MARKER_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_RECORD_MARKER.String()),
common.GetPayloadsMapSize(attr.GetDetails()),
"RecordMarkerCommandAttributes.Details exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
_, err = handler.mutableState.AddRecordMarkerEvent(handler.workflowTaskCompletedID, attr)
return err
}
func (handler *workflowTaskHandlerImpl) handleCommandContinueAsNewWorkflow(
attr *commandpb.ContinueAsNewWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeContinueAsNewCounter,
)
if handler.hasBufferedEvents {
return handler.failCommand(enumspb.WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, nil)
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateContinueAsNewWorkflowExecutionAttributes(
attr,
handler.mutableState.GetExecutionInfo(),
)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION.String()),
attr.GetInput().Size(),
"ContinueAsNewWorkflowExecutionCommandAttributes. Input exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
	// If the workflow task has more than one completion event, then just pick the first one
if !handler.mutableState.IsWorkflowExecutionRunning() {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.MultipleCompletionCommandsCounter,
)
handler.logger.Warn(
"Multiple completion commands",
tag.WorkflowCommandType(enumspb.COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION),
tag.ErrorTypeMultipleCompletionCommands,
)
return nil
}
// Extract parentNamespace so it can be passed down to next run of workflow execution
var parentNamespace string
if handler.mutableState.HasParentExecution() {
parentNamespaceID := handler.mutableState.GetExecutionInfo().ParentNamespaceId
parentNamespaceEntry, err := handler.namespaceCache.GetNamespaceByID(parentNamespaceID)
if err != nil {
return err
}
parentNamespace = parentNamespaceEntry.GetInfo().Name
}
_, newStateBuilder, err := handler.mutableState.AddContinueAsNewEvent(
handler.workflowTaskCompletedID,
handler.workflowTaskCompletedID,
parentNamespace,
attr,
)
if err != nil {
return err
}
handler.continueAsNewBuilder = newStateBuilder
return nil
}
func (handler *workflowTaskHandlerImpl) handleCommandStartChildWorkflow(
attr *commandpb.StartChildWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeChildWorkflowCounter,
)
parentNamespaceEntry := handler.mutableState.GetNamespaceEntry()
parentNamespaceID := parentNamespaceEntry.GetInfo().Id
parentNamespace := parentNamespaceEntry.GetInfo().Name
targetNamespaceID := parentNamespaceID
targetNamespace := parentNamespace
if attr.GetNamespace() != "" {
targetNamespaceEntry, err := handler.namespaceCache.GetNamespace(attr.GetNamespace())
if err != nil {
return serviceerror.NewInternal(fmt.Sprintf("Unable to schedule child execution across namespace %v.", attr.GetNamespace()))
}
targetNamespace = targetNamespaceEntry.GetInfo().Name
targetNamespaceID = targetNamespaceEntry.GetInfo().Id
} else {
attr.Namespace = parentNamespace
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateStartChildExecutionAttributes(
parentNamespaceID,
targetNamespaceID,
targetNamespace,
attr,
handler.mutableState.GetExecutionInfo(),
handler.config.DefaultWorkflowTaskTimeout,
)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION.String()),
attr.GetInput().Size(),
"StartChildWorkflowExecutionCommandAttributes.Input exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
enabled := handler.config.EnableParentClosePolicy(parentNamespace)
if enabled {
enums.SetDefaultParentClosePolicy(&attr.ParentClosePolicy)
} else {
attr.ParentClosePolicy = enumspb.PARENT_CLOSE_POLICY_ABANDON
}
enums.SetDefaultWorkflowIdReusePolicy(&attr.WorkflowIdReusePolicy)
requestID := uuid.New()
_, _, err = handler.mutableState.AddStartChildWorkflowExecutionInitiatedEvent(
handler.workflowTaskCompletedID, requestID, attr,
)
if err == nil {
// Keep track of all child initiated commands in this workflow task to validate request cancel commands
handler.initiatedChildExecutionsInBatch[attr.GetWorkflowId()] = struct{}{}
}
return err
}
func (handler *workflowTaskHandlerImpl) handleCommandSignalExternalWorkflow(
attr *commandpb.SignalExternalWorkflowExecutionCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeSignalExternalWorkflowCounter,
)
executionInfo := handler.mutableState.GetExecutionInfo()
namespaceID := executionInfo.NamespaceId
targetNamespaceID := namespaceID
if attr.GetNamespace() != "" {
targetNamespaceEntry, err := handler.namespaceCache.GetNamespace(attr.GetNamespace())
if err != nil {
return serviceerror.NewInternal(fmt.Sprintf("Unable to signal workflow across namespace: %v.", attr.GetNamespace()))
}
targetNamespaceID = targetNamespaceEntry.GetInfo().Id
}
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateSignalExternalWorkflowExecutionAttributes(
namespaceID,
targetNamespaceID,
attr,
)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION.String()),
attr.GetInput().Size(),
"SignalExternalWorkflowExecutionCommandAttributes.Input exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
	signalRequestID := uuid.New() // for deduplication
_, _, err = handler.mutableState.AddSignalExternalWorkflowExecutionInitiatedEvent(
handler.workflowTaskCompletedID, signalRequestID, attr,
)
return err
}
func (handler *workflowTaskHandlerImpl) handleCommandUpsertWorkflowSearchAttributes(
attr *commandpb.UpsertWorkflowSearchAttributesCommandAttributes,
) error {
handler.metricsClient.IncCounter(
metrics.HistoryRespondWorkflowTaskCompletedScope,
metrics.CommandTypeUpsertWorkflowSearchAttributesCounter,
)
// get namespace name
executionInfo := handler.mutableState.GetExecutionInfo()
namespaceID := executionInfo.NamespaceId
namespaceEntry, err := handler.namespaceCache.GetNamespaceByID(namespaceID)
if err != nil {
return serviceerror.NewInternal(fmt.Sprintf("Unable to get namespace for namespaceID: %v.", namespaceID))
}
namespace := namespaceEntry.GetInfo().Name
	// validate search attributes for upsert
if err := handler.validateCommandAttr(
func() error {
return handler.attrValidator.validateUpsertWorkflowSearchAttributes(
namespace,
attr,
)
},
enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES,
); err != nil || handler.stopProcessing {
return err
}
// blob size limit check
failWorkflow, err := handler.sizeLimitChecker.failWorkflowIfPayloadSizeExceedsLimit(
metrics.CommandTypeTag(enumspb.COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES.String()),
searchAttributesSize(attr.GetSearchAttributes().GetIndexedFields()),
"UpsertWorkflowSearchAttributesCommandAttributes exceeds size limit.",
)
if err != nil || failWorkflow {
handler.stopProcessing = true
return err
}
_, err = handler.mutableState.AddUpsertWorkflowSearchAttributesEvent(
handler.workflowTaskCompletedID, attr,
)
return err
}
func searchAttributesSize(fields map[string]*commonpb.Payload) int {
result := 0
for k, v := range fields {
result += len(k)
result += len(v.GetData())
}
return result
}
func (handler *workflowTaskHandlerImpl) retryCronContinueAsNew(
attr *historypb.WorkflowExecutionStartedEventAttributes,
backoffInterval time.Duration,
continueAsNewInitiator enumspb.ContinueAsNewInitiator,
failure *failurepb.Failure,
lastCompletionResult *commonpb.Payloads,
) error {
continueAsNewAttributes := &commandpb.ContinueAsNewWorkflowExecutionCommandAttributes{
WorkflowType: attr.WorkflowType,
TaskQueue: attr.TaskQueue,
RetryPolicy: attr.RetryPolicy,
Input: attr.Input,
WorkflowRunTimeout: attr.WorkflowRunTimeout,
WorkflowTaskTimeout: attr.WorkflowTaskTimeout,
CronSchedule: attr.CronSchedule,
BackoffStartInterval: &backoffInterval,
Initiator: continueAsNewInitiator,
Failure: failure,
LastCompletionResult: lastCompletionResult,
Header: attr.Header,
Memo: attr.Memo,
SearchAttributes: attr.SearchAttributes,
}
_, newStateBuilder, err := handler.mutableState.AddContinueAsNewEvent(
handler.workflowTaskCompletedID,
handler.workflowTaskCompletedID,
attr.GetParentWorkflowNamespace(),
continueAsNewAttributes,
)
if err != nil {
return err
}
handler.continueAsNewBuilder = newStateBuilder
return nil
}
func (handler *workflowTaskHandlerImpl) validateCommandAttr(
validationFn commandAttrValidationFn,
failedCause enumspb.WorkflowTaskFailedCause,
) error {
if err := validationFn(); err != nil {
if _, ok := err.(*serviceerror.InvalidArgument); ok {
return handler.failCommand(failedCause, err)
}
return err
}
return nil
}
func (handler *workflowTaskHandlerImpl) failCommand(
failedCause enumspb.WorkflowTaskFailedCause,
causeErr error,
) error {
handler.workflowTaskFailedCause = NewWorkflowTaskFailedCause(failedCause, causeErr)
handler.stopProcessing = true
return nil
}
func NewWorkflowTaskFailedCause(failedCause enumspb.WorkflowTaskFailedCause, causeErr error) *workflowTaskFailedCause {
return &workflowTaskFailedCause{
failedCause: failedCause,
causeErr: causeErr,
}
}
func (wtfc *workflowTaskFailedCause) Message() string {
if wtfc.causeErr == nil {
return wtfc.failedCause.String()
}
return fmt.Sprintf("%v: %v", wtfc.failedCause, wtfc.causeErr.Error())
}
| 1 | 11,524 | there is a function to get namespace entry from mutable state directly | temporalio-temporal | go |
@@ -191,7 +191,7 @@ module Bolt
hiera_config: @hiera_config,
plan_vars: plan_vars,
# This data isn't available on the target config hash
- config: @inventory.config.transport_data_get
+ config: @inventory.transport_data_get
}
description = options[:description] || 'apply catalog' | 1 | # frozen_string_literal: true
require 'base64'
require 'bolt/apply_result'
require 'bolt/apply_target'
require 'bolt/config'
require 'bolt/error'
require 'bolt/task'
require 'bolt/util/puppet_log_level'
require 'find'
require 'json'
require 'logging'
require 'open3'
module Bolt
class Applicator
def initialize(inventory, executor, modulepath, plugin_dirs, pdb_client, hiera_config, max_compiles, apply_settings)
# lazy-load expensive gem code
require 'concurrent'
@inventory = inventory
@executor = executor
@modulepath = modulepath
@plugin_dirs = plugin_dirs
@pdb_client = pdb_client
@hiera_config = hiera_config ? validate_hiera_config(hiera_config) : nil
@apply_settings = apply_settings || {}
@pool = Concurrent::ThreadPoolExecutor.new(max_threads: max_compiles)
@logger = Logging.logger[self]
@plugin_tarball = Concurrent::Delay.new do
build_plugin_tarball do |mod|
search_dirs = []
search_dirs << mod.plugins if mod.plugins?
search_dirs << mod.pluginfacts if mod.pluginfacts?
search_dirs << mod.files if mod.files?
search_dirs
end
end
end
private def libexec
@libexec ||= File.join(Gem::Specification.find_by_name('bolt').gem_dir, 'libexec')
end
def custom_facts_task
@custom_facts_task ||= begin
path = File.join(libexec, 'custom_facts.rb')
file = { 'name' => 'custom_facts.rb', 'path' => path }
metadata = { 'supports_noop' => true, 'input_method' => 'stdin',
'implementations' => [
{ 'name' => 'custom_facts.rb' },
{ 'name' => 'custom_facts.rb', 'remote' => true }
] }
Bolt::Task.new('apply_helpers::custom_facts', metadata, [file])
end
end
def catalog_apply_task
@catalog_apply_task ||= begin
path = File.join(libexec, 'apply_catalog.rb')
file = { 'name' => 'apply_catalog.rb', 'path' => path }
metadata = { 'supports_noop' => true, 'input_method' => 'stdin',
'implementations' => [
{ 'name' => 'apply_catalog.rb' },
{ 'name' => 'apply_catalog.rb', 'remote' => true }
] }
Bolt::Task.new('apply_helpers::apply_catalog', metadata, [file])
end
end
def query_resources_task
@query_resources_task ||= begin
path = File.join(libexec, 'query_resources.rb')
file = { 'name' => 'query_resources.rb', 'path' => path }
metadata = { 'supports_noop' => true, 'input_method' => 'stdin',
'implementations' => [
{ 'name' => 'query_resources.rb' },
{ 'name' => 'query_resources.rb', 'remote' => true }
] }
Bolt::Task.new('apply_helpers::query_resources', metadata, [file])
end
end
def compile(target, catalog_input)
# This simplified Puppet node object is what .local uses to determine the
# certname of the target
node = Puppet::Node.from_data_hash('name' => target.name,
'parameters' => { 'clientcert' => target.name })
trusted = Puppet::Context::TrustedInformation.local(node)
catalog_input[:target] = {
name: target.name,
facts: @inventory.facts(target).merge('bolt' => true),
variables: @inventory.vars(target),
trusted: trusted.to_h
}
bolt_catalog_exe = File.join(libexec, 'bolt_catalog')
old_path = ENV['PATH']
ENV['PATH'] = "#{RbConfig::CONFIG['bindir']}#{File::PATH_SEPARATOR}#{old_path}"
out, err, stat = Open3.capture3('ruby', bolt_catalog_exe, 'compile', stdin_data: catalog_input.to_json)
ENV['PATH'] = old_path
# Any messages logged by Puppet will be on stderr as JSON hashes, so we
# parse those and store them here. Any message on stderr that is not
# properly JSON formatted is assumed to be an error message. If
# compilation was successful, we print the logs as they may include
# important warnings. If compilation failed, we don't print the logs as
# they are likely redundant with the error that caused the failure, which
# will be handled separately.
logs = err.lines.map do |line|
JSON.parse(line)
rescue JSON::ParserError
{ 'level' => 'err', 'message' => line }
end
result = JSON.parse(out)
if stat.success?
logs.each do |log|
bolt_level = Bolt::Util::PuppetLogLevel::MAPPING[log['level'].to_sym]
message = log['message'].chomp
@logger.send(bolt_level, "#{target.name}: #{message}")
end
result
else
raise ApplyError.new(target.name, result['message'])
end
end
def validate_hiera_config(hiera_config)
if File.exist?(File.path(hiera_config))
data = File.open(File.path(hiera_config), "r:UTF-8") { |f| YAML.safe_load(f.read, [Symbol]) }
if data.nil?
return nil
elsif data['version'] != 5
raise Bolt::ParseError, "Hiera v5 is required, found v#{data['version'] || 3} in #{hiera_config}"
end
hiera_config
end
end
def apply(args, apply_body, scope)
raise(ArgumentError, 'apply requires a TargetSpec') if args.empty?
type0 = Puppet.lookup(:pal_script_compiler).type('TargetSpec')
Puppet::Pal.assert_type(type0, args[0], 'apply targets')
@executor.report_function_call('apply')
options = {}
if args.count > 1
type1 = Puppet.lookup(:pal_script_compiler).type('Hash[String, Data]')
Puppet::Pal.assert_type(type1, args[1], 'apply options')
options = args[1].transform_keys { |k| k.sub(/^_/, '').to_sym }
end
plan_vars = scope.to_hash(true, true)
%w[trusted server_facts facts].each { |k| plan_vars.delete(k) }
targets = @inventory.get_targets(args[0])
apply_ast(apply_body, targets, options, plan_vars)
end
# Count the number of top-level statements in the AST.
def count_statements(ast)
case ast
when Puppet::Pops::Model::Program
count_statements(ast.body)
when Puppet::Pops::Model::BlockExpression
ast.statements.count
else
1
end
end
def apply_ast(raw_ast, targets, options, plan_vars = {})
ast = Puppet::Pops::Serialization::ToDataConverter.convert(raw_ast, rich_data: true, symbol_to_string: true)
# Serialize as pcore for *Result* objects
plan_vars = Puppet::Pops::Serialization::ToDataConverter.convert(plan_vars,
rich_data: true,
symbol_as_string: true,
type_by_reference: true,
local_reference: false)
scope = {
code_ast: ast,
modulepath: @modulepath,
pdb_config: @pdb_client.config.to_hash,
hiera_config: @hiera_config,
plan_vars: plan_vars,
# This data isn't available on the target config hash
config: @inventory.config.transport_data_get
}
description = options[:description] || 'apply catalog'
r = @executor.log_action(description, targets) do
futures = targets.map do |target|
Concurrent::Future.execute(executor: @pool) do
@executor.with_node_logging("Compiling manifest block", [target]) do
compile(target, scope)
end
end
end
result_promises = targets.zip(futures).flat_map do |target, future|
@executor.queue_execute([target]) do |transport, batch|
@executor.with_node_logging("Applying manifest block", batch) do
catalog = future.value
if future.rejected?
batch.map do |batch_target|
# If an unhandled exception occurred, wrap it in an ApplyError
error = if future.reason.is_a?(Bolt::ApplyError)
future.reason
else
Bolt::ApplyError.new(batch_target, future.reason.message)
end
result = Bolt::ApplyResult.new(batch_target, error: error.to_h)
@executor.publish_event(type: :node_result, result: result)
result
end
else
arguments = {
'catalog' => Puppet::Pops::Types::PSensitiveType::Sensitive.new(catalog),
'plugins' => Puppet::Pops::Types::PSensitiveType::Sensitive.new(plugins),
'apply_settings' => @apply_settings,
'_task' => catalog_apply_task.name,
'_noop' => options[:noop]
}
callback = proc do |event|
if event[:type] == :node_result
event = event.merge(result: ApplyResult.from_task_result(event[:result]))
end
@executor.publish_event(event)
end
# Respect the run_as default set on the executor
options[:run_as] = @executor.run_as if @executor.run_as && !options.key?(:run_as)
results = transport.batch_task(batch, catalog_apply_task, arguments, options, &callback)
Array(results).map { |result| ApplyResult.from_task_result(result) }
end
end
end
end
@executor.await_results(result_promises)
end
# Allow for report to exclude event metrics (apply_result doesn't require it to be present)
resource_counts = r.ok_set.map { |result| result.event_metrics&.fetch('total') }.compact
@executor.report_apply(count_statements(raw_ast), resource_counts)
if !r.ok && !options[:catch_errors]
raise Bolt::ApplyFailure, r
end
r
end
def plugins
@plugin_tarball.value ||
raise(Bolt::Error.new("Failed to pack module plugins: #{@plugin_tarball.reason}", 'bolt/plugin-error'))
end
def build_plugin_tarball
# lazy-load expensive gem code
require 'minitar'
require 'zlib'
start_time = Time.now
sio = StringIO.new
output = Minitar::Output.new(Zlib::GzipWriter.new(sio))
Puppet.lookup(:current_environment).override_with(modulepath: @plugin_dirs).modules.each do |mod|
search_dirs = yield mod
parent = Pathname.new(mod.path).parent
files = Find.find(*search_dirs).select { |file| File.file?(file) }
files.each do |file|
tar_path = Pathname.new(file).relative_path_from(parent)
@logger.debug("Packing plugin #{file} to #{tar_path}")
stat = File.stat(file)
content = File.binread(file)
output.tar.add_file_simple(
tar_path.to_s,
data: content,
size: content.size,
mode: stat.mode & 0o777,
mtime: stat.mtime
)
end
end
duration = Time.now - start_time
@logger.debug("Packed plugins in #{duration * 1000} ms")
output.close
Base64.encode64(sio.string)
ensure
output&.close
end
end
end
| 1 | 14,119 | The `Transport::Config` objects don't serialize properly. We probably want to just turn them into hashes at this point. | puppetlabs-bolt | rb |
@@ -251,11 +251,11 @@ const ConfigInstructions = `
# These values specify the destination directory for ddev ssh and the
# directory in which commands passed into ddev exec are run.
-# omit_containers: ["dba", "ddev-ssh-agent"]
-# would omit the dba (phpMyAdmin) and ddev-ssh-agent containers. Currently
-# only those two containers can be omitted here.
-# Note that these containers can also be omitted globally in the
-# ~/.ddev/global_config.yaml or with the "ddev config global" command.
+# omit_containers: ["db", dba", "ddev-ssh-agent"]
+# Currently only these containers are supported. Some containers can also be
+# omitted globally in the ~/.ddev/global_config.yaml. Note that if you omit
+# the "db" container, several standard features of ddev that access the
+# database container will be unusuable.
# nfs_mount_enabled: false
# Great performance improvement but requires host configuration first. | 1 | package ddevapp
// DDevComposeTemplate is used to create the main docker-compose.yaml
// file for a ddev site.
const DDevComposeTemplate = `version: '{{ .ComposeVersion }}'
{{ .DdevGenerated }}
services:
{{if not .OmitDB }}
db:
container_name: {{ .Plugin }}-${DDEV_SITENAME}-db
build:
context: '{{ .DBBuildContext }}'
dockerfile: '{{ .DBBuildDockerfile }}'
args:
BASE_IMAGE: $DDEV_DBIMAGE
username: '{{ .Username }}'
uid: '{{ .UID }}'
gid: '{{ .GID }}'
image: ${DDEV_DBIMAGE}-${DDEV_SITENAME}-built
stop_grace_period: 60s
volumes:
- type: "volume"
source: mariadb-database
target: "/var/lib/mysql"
volume:
nocopy: true
- type: "bind"
source: "."
target: "/mnt/ddev_config"
- ddev-global-cache:/mnt/ddev-global-cache
restart: "no"
user: "$DDEV_UID:$DDEV_GID"
hostname: {{ .Name }}-db
ports:
- "{{ .DockerIP }}:$DDEV_HOST_DB_PORT:3306"
labels:
com.ddev.site-name: ${DDEV_SITENAME}
com.ddev.platform: {{ .Plugin }}
com.ddev.app-type: {{ .AppType }}
com.ddev.approot: $DDEV_APPROOT
environment:
- COLUMNS=$COLUMNS
- LINES=$LINES
- TZ={{ .Timezone }}
- DDEV_PROJECT={{ .Name }}
command: "$DDEV_MARIADB_LOCAL_COMMAND"
healthcheck:
interval: 1s
retries: 30
start_period: 20s
timeout: 120s
{{end}}
web:
container_name: {{ .Plugin }}-${DDEV_SITENAME}-web
build:
context: '{{ .WebBuildContext }}'
dockerfile: '{{ .WebBuildDockerfile }}'
args:
BASE_IMAGE: $DDEV_WEBIMAGE
username: '{{ .Username }}'
uid: '{{ .UID }}'
gid: '{{ .GID }}'
image: ${DDEV_WEBIMAGE}-${DDEV_SITENAME}-built
cap_add:
- SYS_PTRACE
volumes:
- type: {{ .MountType }}
source: {{ .WebMount }}
target: /var/www/html
{{ if eq .MountType "volume" }}
volume:
nocopy: true
{{ else }}
consistency: cached
{{ end }}
- ".:/mnt/ddev_config:ro"
- ddev-global-cache:/mnt/ddev-global-cache
{{ if not .OmitSSHAgent }}
- ddev-ssh-agent_socket_dir:/home/.ssh-agent
{{ end }}
restart: "no"
user: "$DDEV_UID:$DDEV_GID"
hostname: {{ .Name }}-web
{{if not .OmitDB }}
links:
- db:db
{{end}}
# ports is list of exposed *container* ports
ports:
- "{{ .DockerIP }}:$DDEV_HOST_WEBSERVER_PORT:80"
- "{{ .DockerIP }}:$DDEV_HOST_HTTPS_PORT:443"
environment:
- DOCROOT=$DDEV_DOCROOT
- DDEV_PHP_VERSION=$DDEV_PHP_VERSION
- DDEV_WEBSERVER_TYPE=$DDEV_WEBSERVER_TYPE
- DDEV_PROJECT_TYPE=$DDEV_PROJECT_TYPE
- DDEV_ROUTER_HTTP_PORT=$DDEV_ROUTER_HTTP_PORT
- DDEV_ROUTER_HTTPS_PORT=$DDEV_ROUTER_HTTPS_PORT
- DDEV_XDEBUG_ENABLED=$DDEV_XDEBUG_ENABLED
- IS_DDEV_PROJECT=true
- DOCKER_IP={{ .DockerIP }}
- HOST_DOCKER_INTERNAL_IP={{ .HostDockerInternalIP }}
- DEPLOY_NAME=local
- VIRTUAL_HOST=$DDEV_HOSTNAME
- COLUMNS=$COLUMNS
- LINES=$LINES
- TZ={{ .Timezone }}
# HTTP_EXPOSE allows for ports accepting HTTP traffic to be accessible from <site>.ddev.site:<port>
# To expose a container port to a different host port, define the port as hostPort:containerPort
- HTTP_EXPOSE=${DDEV_ROUTER_HTTP_PORT}:80,${DDEV_MAILHOG_PORT}:{{ .MailhogPort }}
# You can optionally expose an HTTPS port option for any ports defined in HTTP_EXPOSE.
# To expose an HTTPS port, define the port as securePort:containerPort.
- HTTPS_EXPOSE=${DDEV_ROUTER_HTTPS_PORT}:80,${DDEV_MAILHOG_HTTPS_PORT}:{{ .MailhogPort }}
- SSH_AUTH_SOCK=/home/.ssh-agent/socket
- DDEV_PROJECT={{ .Name }}
labels:
com.ddev.site-name: ${DDEV_SITENAME}
com.ddev.platform: {{ .Plugin }}
com.ddev.app-type: {{ .AppType }}
com.ddev.approot: $DDEV_APPROOT
{{ if .HostDockerInternalIP }}
extra_hosts: [ "host.docker.internal:{{ .HostDockerInternalIP }}" ]
{{ end }}
external_links:
{{ range $hostname := .Hostnames }}- "ddev-router:{{ $hostname }}"
{{ end }}
healthcheck:
interval: 1s
retries: 10
start_period: 10s
timeout: 120s
{{ if not .OmitDBA }}
dba:
container_name: ddev-${DDEV_SITENAME}-dba
image: $DDEV_DBAIMAGE
restart: "no"
labels:
com.ddev.site-name: ${DDEV_SITENAME}
com.ddev.platform: {{ .Plugin }}
com.ddev.app-type: {{ .AppType }}
com.ddev.approot: $DDEV_APPROOT
links:
- db:db
ports:
- "80"
hostname: {{ .Name }}-dba
environment:
- PMA_USER=db
- PMA_PASSWORD=db
- VIRTUAL_HOST=$DDEV_HOSTNAME
- TZ={{ .Timezone }}
# HTTP_EXPOSE allows for ports accepting HTTP traffic to be accessible from <site>.ddev.site:<port>
- HTTP_EXPOSE=${DDEV_PHPMYADMIN_PORT}:{{ .DBAPort }}
- HTTPS_EXPOSE=${DDEV_PHPMYADMIN_HTTPS_PORT}:{{ .DBAPort }}
healthcheck:
interval: 120s
timeout: 2s
retries: 1
{{end}}
networks:
default:
external:
name: ddev_default
volumes:
{{if not .OmitDB }}
mariadb-database:
name: "${DDEV_SITENAME}-mariadb"
{{end}}
{{ if not .OmitSSHAgent }}
ddev-ssh-agent_socket_dir:
external: true
{{ end }}
ddev-global-cache:
name: ddev-global-cache
{{ if .NFSMountEnabled }}
nfsmount:
driver: local
driver_opts:
type: nfs
o: "addr={{ if .HostDockerInternalIP }}{{ .HostDockerInternalIP }}{{ else }}host.docker.internal{{end}},hard,nolock,rw"
device: ":{{ .NFSSource }}"
{{ end }}
`
// ConfigInstructions is used to add example hooks usage
const ConfigInstructions = `
# Key features of ddev's config.yaml:
# name: <projectname> # Name of the project, automatically provides
# http://projectname.ddev.site and https://projectname.ddev.site
# type: <projecttype> # drupal6/7/8, backdrop, typo3, wordpress, php
# docroot: <relative_path> # Relative path to the directory containing index.php.
# php_version: "7.3" # PHP version to use, "5.6", "7.0", "7.1", "7.2", "7.3", "7.4"
# You can explicitly specify the webimage, dbimage, dbaimage lines but this
# is not recommended, as the images are often closely tied to ddev's behavior,
# so this can break upgrades.
# webimage: <docker_image> # nginx/php docker image.
# dbimage: <docker_image> # mariadb docker image.
# dbaimage: <docker_image>
# mariadb_version and mysql_version
# ddev can use many versions of mariadb and mysql
# However these directives are mutually exclusive
# mariadb_version: 10.2
# mysql_version: 8.0
# router_http_port: <port> # Port to be used for http (defaults to port 80)
# router_https_port: <port> # Port for https (defaults to 443)
# xdebug_enabled: false # Set to true to enable xdebug and "ddev start" or "ddev restart"
# Note that for most people the commands
# "ddev exec enable_xdebug" and "ddev exec disable_xdebug" work better,
# as leaving xdebug enabled all the time is a big performance hit.
# webserver_type: nginx-fpm # Can be set to apache-fpm or apache-cgi as well
# timezone: Europe/Berlin
# This is the timezone used in the containers and by PHP;
# it can be set to any valid timezone,
# see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# For example Europe/Dublin or MST7MDT
# additional_hostnames:
# - somename
# - someothername
# would provide http and https URLs for "somename.ddev.site"
# and "someothername.ddev.site".
# additional_fqdns:
# - example.com
# - sub1.example.com
# would provide http and https URLs for "example.com" and "sub1.example.com"
# Please take care with this because it can cause great confusion.
# upload_dir: custom/upload/dir
# would set the destination path for ddev import-files to custom/upload/dir.
# working_dir:
# web: /var/www/html
# db: /home
# would set the default working directory for the web and db services.
# These values specify the destination directory for ddev ssh and the
# directory in which commands passed into ddev exec are run.
# omit_containers: ["dba", "ddev-ssh-agent"]
# would omit the dba (phpMyAdmin) and ddev-ssh-agent containers. Currently
# only those two containers can be omitted here.
# Note that these containers can also be omitted globally in the
# ~/.ddev/global_config.yaml or with the "ddev config global" command.
# nfs_mount_enabled: false
# Great performance improvement but requires host configuration first.
# See https://ddev.readthedocs.io/en/stable/users/performance/#using-nfs-to-mount-the-project-into-the-container
# host_https_port: "59002"
# The host port binding for https can be explicitly specified. It is
# dynamic unless otherwise specified.
# This is not used by most people, most people use the *router* instead
# of the localhost port.
# host_webserver_port: "59001"
# The host port binding for the ddev-webserver can be explicitly specified. It is
# dynamic unless otherwise specified.
# This is not used by most people, most people use the *router* instead
# of the localhost port.
# host_db_port: "59002"
# The host port binding for the ddev-dbserver can be explicitly specified. It is dynamic
# unless explicitly specified.
# phpmyadmin_port: "8036"
# phpmyadmin_https_port: "8037{"
# The PHPMyAdmin ports can be changed from the default 8036 and 8037
# mailhog_port: "8025"
# mailhog_https_port: "8026"
# The MailHog ports can be changed from the default 8025 and 8026
# webimage_extra_packages: [php-yaml, php7.3-ldap]
# Extra Debian packages that are needed in the webimage can be added here
# dbimage_extra_packages: [telnet,netcat]
# Extra Debian packages that are needed in the dbimage can be added here
# use_dns_when_possible: true
# If the host has internet access and the domain configured can
# successfully be looked up, DNS will be used for hostname resolution
# instead of editing /etc/hosts
# Defaults to true
# project_tld: ddev.site
# The top-level domain used for project URLs
# The default "ddev.site" allows DNS lookup via a wildcard
# If you prefer you can change this to "ddev.local" to preserve
# pre-v1.9 behavior.
# ngrok_args: --subdomain mysite --auth username:pass
# Provide extra flags to the "ngrok http" command, see
# https://ngrok.com/docs#http or run "ngrok http -h"
# disable_settings_management: false
# If true, ddev will not create CMS-specific settings files like
# Drupal's settings.php/settings.ddev.php or TYPO3's AdditionalSettings.php
# In this case the user must provide all such settings.
# provider: default # Currently either "default" or "pantheon"
#
# Many ddev commands can be extended to run tasks before or after the
# ddev command is executed, for example "post-start", "post-import-db",
# "pre-composer", "post-composer"
# See https://ddev.readthedocs.io/en/stable/users/extending-commands/ for more
# information on the commands that can be extended and the tasks you can define
# for them. Example:
#hooks:
`
// SequelproTemplate is the template for Sequelpro config.
var SequelproTemplate = `<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>ContentFilters</key>
<dict/>
<key>auto_connect</key>
<true/>
<key>data</key>
<dict>
<key>connection</key>
<dict>
<key>database</key>
<string>%s</string>
<key>host</key>
<string>%s</string>
<key>name</key>
<string>drud/%s</string>
<key>password</key>
<string>%s</string>
<key>port</key>
<integer>%s</integer>
<key>rdbms_type</key>
<string>mysql</string>
<key>sslCACertFileLocation</key>
<string></string>
<key>sslCACertFileLocationEnabled</key>
<integer>0</integer>
<key>sslCertificateFileLocation</key>
<string></string>
<key>sslCertificateFileLocationEnabled</key>
<integer>0</integer>
<key>sslKeyFileLocation</key>
<string></string>
<key>sslKeyFileLocationEnabled</key>
<integer>0</integer>
<key>type</key>
<string>SPTCPIPConnection</string>
<key>useSSL</key>
<integer>0</integer>
<key>user</key>
<string>%s</string>
</dict>
</dict>
<key>encrypted</key>
<false/>
<key>format</key>
<string>connection</string>
<key>queryFavorites</key>
<array/>
<key>queryHistory</key>
<array/>
<key>rdbms_type</key>
<string>mysql</string>
<key>rdbms_version</key>
<string>5.5.44</string>
<key>version</key>
<integer>1</integer>
</dict>
</plist>`
// DdevRouterTemplate is the template for the generic router container.
const DdevRouterTemplate = `version: '{{ .compose_version }}'
services:
ddev-router:
image: {{ .router_image }}:{{ .router_tag }}
container_name: ddev-router
ports:{{ $dockerIP := .dockerIP }}{{ if not .router_bind_all_interfaces }}{{ range $port := .ports }}
- "{{ $dockerIP }}:{{ $port }}:{{ $port }}"{{ end }}{{ else }}{{ range $port := .ports }}
- "{{ $port }}:{{ $port }}"{{ end }}{{ end }}
volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro
- ddev-global-cache:/mnt/ddev-global-cache:rw
restart: "no"
healthcheck:
interval: 1s
retries: 10
start_period: 10s
timeout: 120s
networks:
default:
external:
name: ddev_default
volumes:
ddev-global-cache:
name: ddev-global-cache
`
const DdevSSHAuthTemplate = `version: '{{ .compose_version }}'
volumes:
dot_ssh:
socket_dir:
services:
ddev-ssh-agent:
container_name: ddev-ssh-agent
hostname: ddev-ssh-agent
build:
context: '{{ .BuildContext }}'
args:
BASE_IMAGE: {{ .ssh_auth_image }}:{{ .ssh_auth_tag }}
username: '{{ .Username }}'
uid: '{{ .UID }}'
gid: '{{ .GID }}'
image: {{ .ssh_auth_image }}:{{ .ssh_auth_tag }}-built
user: "$DDEV_UID:$DDEV_GID"
volumes:
- "dot_ssh:/tmp/.ssh"
- "socket_dir:/tmp/.ssh-agent"
environment:
- SSH_AUTH_SOCK=/tmp/.ssh-agent/socket
healthcheck:
interval: 1s
retries: 2
start_period: 10s
timeout: 62s
networks:
default:
external:
name: ddev_default
`
| 1 | 14,124 | Sorry, typo s/unusuable/unusable/ | drud-ddev | go |
@@ -242,6 +242,13 @@ public class SparkSessionCatalog<T extends TableCatalog & SupportsNamespaces>
@Override
public final void initialize(String name, CaseInsensitiveStringMap options) {
+ if (options.containsKey("type") && options.get("type").equalsIgnoreCase("hive") && options.containsKey("uri")) {
+ throw new UnsupportedOperationException("Cannot set an alternative uri when" +
+ " using the SparkSessionCatalog, " +
+ "make an alternative SparkCatalog if the " +
+ "Spark Session catalog and Iceberg " +
+ "catalog should contact different metastores.");
+ }
this.catalogName = name;
this.icebergCatalog = buildSparkCatalog(name, options);
if (icebergCatalog instanceof StagingTableCatalog) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark;
import java.util.Map;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.spark.sql.catalyst.analysis.NamespaceAlreadyExistsException;
import org.apache.spark.sql.catalyst.analysis.NoSuchNamespaceException;
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException;
import org.apache.spark.sql.connector.catalog.CatalogExtension;
import org.apache.spark.sql.connector.catalog.CatalogPlugin;
import org.apache.spark.sql.connector.catalog.Identifier;
import org.apache.spark.sql.connector.catalog.NamespaceChange;
import org.apache.spark.sql.connector.catalog.StagedTable;
import org.apache.spark.sql.connector.catalog.StagingTableCatalog;
import org.apache.spark.sql.connector.catalog.SupportsNamespaces;
import org.apache.spark.sql.connector.catalog.Table;
import org.apache.spark.sql.connector.catalog.TableCatalog;
import org.apache.spark.sql.connector.catalog.TableChange;
import org.apache.spark.sql.connector.expressions.Transform;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.util.CaseInsensitiveStringMap;
/**
* A Spark catalog that can also load non-Iceberg tables.
*
* @param <T> CatalogPlugin class to avoid casting to TableCatalog and SupportsNamespaces.
*/
public class SparkSessionCatalog<T extends TableCatalog & SupportsNamespaces>
extends BaseCatalog implements CatalogExtension {
private static final String[] DEFAULT_NAMESPACE = new String[] {"default"};
private String catalogName = null;
private TableCatalog icebergCatalog = null;
private StagingTableCatalog asStagingCatalog = null;
private T sessionCatalog = null;
private boolean createParquetAsIceberg = false;
private boolean createAvroAsIceberg = false;
private boolean createOrcAsIceberg = false;
/**
* Build a {@link SparkCatalog} to be used for Iceberg operations.
* <p>
* The default implementation creates a new SparkCatalog with the session catalog's name and options.
*
* @param name catalog name
* @param options catalog options
* @return a SparkCatalog to be used for Iceberg tables
*/
protected TableCatalog buildSparkCatalog(String name, CaseInsensitiveStringMap options) {
SparkCatalog newCatalog = new SparkCatalog();
newCatalog.initialize(name, options);
return newCatalog;
}
@Override
public String[] defaultNamespace() {
return DEFAULT_NAMESPACE;
}
@Override
public String[][] listNamespaces() throws NoSuchNamespaceException {
return getSessionCatalog().listNamespaces();
}
@Override
public String[][] listNamespaces(String[] namespace) throws NoSuchNamespaceException {
return getSessionCatalog().listNamespaces(namespace);
}
@Override
public Map<String, String> loadNamespaceMetadata(String[] namespace) throws NoSuchNamespaceException {
return getSessionCatalog().loadNamespaceMetadata(namespace);
}
@Override
public void createNamespace(String[] namespace, Map<String, String> metadata) throws NamespaceAlreadyExistsException {
getSessionCatalog().createNamespace(namespace, metadata);
}
@Override
public void alterNamespace(String[] namespace, NamespaceChange... changes) throws NoSuchNamespaceException {
getSessionCatalog().alterNamespace(namespace, changes);
}
@Override
public boolean dropNamespace(String[] namespace) throws NoSuchNamespaceException {
return getSessionCatalog().dropNamespace(namespace);
}
@Override
public Identifier[] listTables(String[] namespace) throws NoSuchNamespaceException {
// delegate to the session catalog because all tables share the same namespace
return getSessionCatalog().listTables(namespace);
}
@Override
public Table loadTable(Identifier ident) throws NoSuchTableException {
try {
return icebergCatalog.loadTable(ident);
} catch (NoSuchTableException e) {
return getSessionCatalog().loadTable(ident);
}
}
@Override
public Table createTable(Identifier ident, StructType schema, Transform[] partitions,
Map<String, String> properties)
throws TableAlreadyExistsException, NoSuchNamespaceException {
String provider = properties.get("provider");
if (useIceberg(provider)) {
return icebergCatalog.createTable(ident, schema, partitions, properties);
} else {
// delegate to the session catalog
return getSessionCatalog().createTable(ident, schema, partitions, properties);
}
}
@Override
public StagedTable stageCreate(Identifier ident, StructType schema, Transform[] partitions,
Map<String, String> properties)
throws TableAlreadyExistsException, NoSuchNamespaceException {
String provider = properties.get("provider");
TableCatalog catalog;
if (useIceberg(provider)) {
if (asStagingCatalog != null) {
return asStagingCatalog.stageCreate(ident, schema, partitions, properties);
}
catalog = icebergCatalog;
} else {
catalog = getSessionCatalog();
}
// create the table with the session catalog, then wrap it in a staged table that will delete to roll back
Table table = catalog.createTable(ident, schema, partitions, properties);
return new RollbackStagedTable(catalog, ident, table);
}
@Override
public StagedTable stageReplace(Identifier ident, StructType schema, Transform[] partitions,
Map<String, String> properties)
throws NoSuchNamespaceException, NoSuchTableException {
String provider = properties.get("provider");
TableCatalog catalog;
if (useIceberg(provider)) {
if (asStagingCatalog != null) {
return asStagingCatalog.stageReplace(ident, schema, partitions, properties);
}
catalog = icebergCatalog;
} else {
catalog = getSessionCatalog();
}
// attempt to drop the table and fail if it doesn't exist
if (!catalog.dropTable(ident)) {
throw new NoSuchTableException(ident);
}
try {
// create the table with the session catalog, then wrap it in a staged table that will delete to roll back
Table table = catalog.createTable(ident, schema, partitions, properties);
return new RollbackStagedTable(catalog, ident, table);
} catch (TableAlreadyExistsException e) {
// the table was deleted, but now already exists again. retry the replace.
return stageReplace(ident, schema, partitions, properties);
}
}
@Override
public StagedTable stageCreateOrReplace(Identifier ident, StructType schema, Transform[] partitions,
Map<String, String> properties) throws NoSuchNamespaceException {
String provider = properties.get("provider");
TableCatalog catalog;
if (useIceberg(provider)) {
if (asStagingCatalog != null) {
return asStagingCatalog.stageCreateOrReplace(ident, schema, partitions, properties);
}
catalog = icebergCatalog;
} else {
catalog = getSessionCatalog();
}
// drop the table if it exists
catalog.dropTable(ident);
try {
// create the table with the session catalog, then wrap it in a staged table that will delete to roll back
Table sessionCatalogTable = catalog.createTable(ident, schema, partitions, properties);
return new RollbackStagedTable(catalog, ident, sessionCatalogTable);
} catch (TableAlreadyExistsException e) {
// the table was deleted, but now already exists again. retry the replace.
return stageCreateOrReplace(ident, schema, partitions, properties);
}
}
@Override
public Table alterTable(Identifier ident, TableChange... changes) throws NoSuchTableException {
if (icebergCatalog.tableExists(ident)) {
return icebergCatalog.alterTable(ident, changes);
} else {
return getSessionCatalog().alterTable(ident, changes);
}
}
@Override
public boolean dropTable(Identifier ident) {
// no need to check table existence to determine which catalog to use. if a table doesn't exist then both are
// required to return false.
return icebergCatalog.dropTable(ident) || getSessionCatalog().dropTable(ident);
}
@Override
public void renameTable(Identifier from, Identifier to) throws NoSuchTableException, TableAlreadyExistsException {
// rename is not supported by HadoopCatalog. to avoid UnsupportedOperationException for session catalog tables,
// check table existence first to ensure that the table belongs to the Iceberg catalog.
if (icebergCatalog.tableExists(from)) {
icebergCatalog.renameTable(from, to);
} else {
getSessionCatalog().renameTable(from, to);
}
}
@Override
public final void initialize(String name, CaseInsensitiveStringMap options) {
this.catalogName = name;
this.icebergCatalog = buildSparkCatalog(name, options);
if (icebergCatalog instanceof StagingTableCatalog) {
this.asStagingCatalog = (StagingTableCatalog) icebergCatalog;
}
this.createParquetAsIceberg = options.getBoolean("parquet-enabled", createParquetAsIceberg);
this.createAvroAsIceberg = options.getBoolean("avro-enabled", createAvroAsIceberg);
this.createOrcAsIceberg = options.getBoolean("orc-enabled", createOrcAsIceberg);
}
@Override
@SuppressWarnings("unchecked")
public void setDelegateCatalog(CatalogPlugin sparkSessionCatalog) {
if (sparkSessionCatalog instanceof TableCatalog && sparkSessionCatalog instanceof SupportsNamespaces) {
this.sessionCatalog = (T) sparkSessionCatalog;
} else {
throw new IllegalArgumentException("Invalid session catalog: " + sparkSessionCatalog);
}
}
@Override
public String name() {
return catalogName;
}
private boolean useIceberg(String provider) {
if (provider == null || "iceberg".equalsIgnoreCase(provider)) {
return true;
} else if (createParquetAsIceberg && "parquet".equalsIgnoreCase(provider)) {
return true;
} else if (createAvroAsIceberg && "avro".equalsIgnoreCase(provider)) {
return true;
} else if (createOrcAsIceberg && "orc".equalsIgnoreCase(provider)) {
return true;
}
return false;
}
private T getSessionCatalog() {
Preconditions.checkNotNull(sessionCatalog, "Delegated SessionCatalog is missing. " +
"Please make sure your are replacing Spark's default catalog, named 'spark_catalog'.");
return sessionCatalog;
}
}
| 1 | 42,018 | If this is needed, should we check that the configured `uri` isn't already equal to the value of the metastore URI configured via `spark.hadoop.hive.metastore.uris` or any of the other ways of setting it. This would be a breaking change for people who have `uri` configured on the SparkSessionCatalog and have it correctly set (which I believe should be everybody as `uri` is currently required if I'm not mistaken). | apache-iceberg | java |
@@ -21,4 +21,7 @@ class ViewMultipart(base.View):
return "Multipart form", self._format(v)
def render_priority(self, data: bytes, *, content_type: Optional[str] = None, **metadata) -> float:
- return float(content_type == "multipart/form-data")
+ if content_type and content_type.startswith("multipart/form-data"):
+ return 1
+ else:
+ return 0 | 1 | from typing import Optional
from mitmproxy.coretypes import multidict
from mitmproxy.net.http import multipart
from . import base
class ViewMultipart(base.View):
name = "Multipart Form"
@staticmethod
def _format(v):
yield [("highlight", "Form data:\n")]
yield from base.format_dict(multidict.MultiDict(v))
def __call__(self, data: bytes, content_type: Optional[str] = None, **metadata):
if content_type is None:
return
v = multipart.decode(content_type, data)
if v:
return "Multipart form", self._format(v)
def render_priority(self, data: bytes, *, content_type: Optional[str] = None, **metadata) -> float:
return float(content_type == "multipart/form-data")
| 1 | 15,760 | see above - this is only used to select the correct view, we don't need to handle the boundary information here. | mitmproxy-mitmproxy | py |
@@ -555,6 +555,7 @@ func TestSign(t *testing.T) {
LastTransitionTime: &metaFixedClockStart,
}),
gen.SetCertificateRequestCertificate(certPEM),
+ gen.SetCertificateRequestCA(certPEM),
gen.AddCertificateRequestAnnotations(map[string]string{cmapi.VenafiPickupIDAnnotationKey: "test"}),
),
)), | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package venafi
import (
"context"
"crypto"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"testing"
"time"
"github.com/Venafi/vcert/v4/pkg/endpoint"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
corelisters "k8s.io/client-go/listers/core/v1"
coretesting "k8s.io/client-go/testing"
fakeclock "k8s.io/utils/clock/testing"
apiutil "github.com/jetstack/cert-manager/pkg/api/util"
"github.com/jetstack/cert-manager/pkg/apis/certmanager"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
"github.com/jetstack/cert-manager/pkg/controller/certificaterequests"
controllertest "github.com/jetstack/cert-manager/pkg/controller/test"
testpkg "github.com/jetstack/cert-manager/pkg/controller/test"
"github.com/jetstack/cert-manager/pkg/issuer/venafi/client"
"github.com/jetstack/cert-manager/pkg/issuer/venafi/client/api"
internalvenafifake "github.com/jetstack/cert-manager/pkg/issuer/venafi/client/fake"
"github.com/jetstack/cert-manager/pkg/util/pki"
"github.com/jetstack/cert-manager/test/unit/gen"
testlisters "github.com/jetstack/cert-manager/test/unit/listers"
)
var (
fixedClockStart = time.Now()
fixedClock = fakeclock.NewFakeClock(fixedClockStart)
)
func generateCSR(t *testing.T, secretKey crypto.Signer, alg x509.SignatureAlgorithm) []byte {
template := x509.CertificateRequest{
Subject: pkix.Name{
CommonName: "test-common-name",
},
DNSNames: []string{
"foo.example.com", "bar.example.com",
},
SignatureAlgorithm: alg,
}
csrBytes, err := x509.CreateCertificateRequest(rand.Reader, &template, secretKey)
if err != nil {
t.Error(err)
t.FailNow()
}
csr := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrBytes})
return csr
}
func TestSign(t *testing.T) {
rsaSK, err := pki.GenerateRSAPrivateKey(2048)
if err != nil {
t.Error(err)
t.FailNow()
}
csrPEM := generateCSR(t, rsaSK, x509.SHA1WithRSA)
tppSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test-tpp-secret",
},
Data: map[string][]byte{
"username": []byte("test-username"),
"password": []byte("test-password"),
},
}
cloudSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cloud-secret",
},
Data: map[string][]byte{
"api-key": []byte("test-api-key"),
},
}
baseIssuer := gen.Issuer("test-issuer",
gen.SetIssuerVenafi(cmapi.VenafiIssuer{}),
gen.AddIssuerCondition(cmapi.IssuerCondition{
Type: cmapi.IssuerConditionReady,
Status: cmmeta.ConditionTrue,
}),
)
tppIssuer := gen.IssuerFrom(baseIssuer,
gen.SetIssuerVenafi(cmapi.VenafiIssuer{
TPP: &cmapi.VenafiTPP{
CredentialsRef: cmmeta.LocalObjectReference{
Name: tppSecret.Name,
},
},
}),
)
cloudIssuer := gen.IssuerFrom(baseIssuer,
gen.SetIssuerVenafi(cmapi.VenafiIssuer{
Cloud: &cmapi.VenafiCloud{
APITokenSecretRef: cmmeta.SecretKeySelector{
LocalObjectReference: cmmeta.LocalObjectReference{
Name: cloudSecret.Name,
},
},
},
}),
)
baseCR := gen.CertificateRequest("test-cr",
gen.SetCertificateRequestCSR(csrPEM),
)
tppCR := gen.CertificateRequestFrom(baseCR,
gen.SetCertificateRequestIssuer(cmmeta.ObjectReference{
Group: certmanager.GroupName,
Name: tppIssuer.Name,
Kind: tppIssuer.Kind,
}),
)
tppCRWithCustomFields := gen.CertificateRequestFrom(tppCR, gen.SetCertificateRequestAnnotations(map[string]string{"venafi.cert-manager.io/custom-fields": `[{"name": "cert-manager-test", "value": "test ok"}]`}))
tppCRWithInvalidCustomFields := gen.CertificateRequestFrom(tppCR, gen.SetCertificateRequestAnnotations(map[string]string{"venafi.cert-manager.io/custom-fields": `[{"name": cert-manager-test}]`}))
tppCRWithInvalidCustomFieldType := gen.CertificateRequestFrom(tppCR, gen.SetCertificateRequestAnnotations(map[string]string{"venafi.cert-manager.io/custom-fields": `[{"name": "cert-manager-test", "value": "test ok", "type": "Bool"}]`}))
cloudCR := gen.CertificateRequestFrom(baseCR,
gen.SetCertificateRequestIssuer(cmmeta.ObjectReference{
Group: certmanager.GroupName,
Name: cloudIssuer.Name,
Kind: cloudIssuer.Kind,
}),
)
failGetSecretLister := &testlisters.FakeSecretLister{
SecretsFn: func(namespace string) corelisters.SecretNamespaceLister {
return &testlisters.FakeSecretNamespaceLister{
GetFn: func(name string) (ret *corev1.Secret, err error) {
return nil, errors.New("this is a network error")
},
}
},
}
template, err := pki.GenerateTemplateFromCertificateRequest(baseCR)
if err != nil {
t.Error(err)
t.FailNow()
}
certPEM, _, err := pki.SignCertificate(template, template, rsaSK.Public(), rsaSK)
if err != nil {
t.Error(err)
t.FailNow()
}
clientReturnsPending := &internalvenafifake.Venafi{
RequestCertificateFn: func(csrPEM []byte, duration time.Duration, customFields []api.CustomField) (string, error) {
return "test", nil
},
RetrieveCertificateFn: func(string, []byte, time.Duration, []api.CustomField) ([]byte, error) {
return nil, endpoint.ErrCertificatePending{
CertificateID: "test-cert-id",
Status: "test-status-pending",
}
},
}
clientReturnsGenericError := &internalvenafifake.Venafi{
RequestCertificateFn: func(csrPEM []byte, duration time.Duration, customFields []api.CustomField) (string, error) {
return "", errors.New("this is an error")
},
}
clientReturnsCert := &internalvenafifake.Venafi{
RequestCertificateFn: func(csrPEM []byte, duration time.Duration, customFields []api.CustomField) (string, error) {
return "test", nil
},
RetrieveCertificateFn: func(string, []byte, time.Duration, []api.CustomField) ([]byte, error) {
return certPEM, nil
},
}
clientReturnsCertIfCustomField := &internalvenafifake.Venafi{
RequestCertificateFn: func(csrPEM []byte, duration time.Duration, fields []api.CustomField) (string, error) {
if len(fields) > 0 && fields[0].Name == "cert-manager-test" && fields[0].Value == "test ok" {
return "test", nil
}
return "", errors.New("Custom field not set")
},
RetrieveCertificateFn: func(string, []byte, time.Duration, []api.CustomField) ([]byte, error) {
return certPEM, nil
},
}
clientReturnsInvalidCustomFieldType := &internalvenafifake.Venafi{
RequestCertificateFn: func(csrPEM []byte, duration time.Duration, fields []api.CustomField) (string, error) {
return "", client.ErrCustomFieldsType{Type: fields[0].Type}
},
}
metaFixedClockStart := metav1.NewTime(fixedClockStart)
tests := map[string]testT{
"tpp: if fail to build client based on missing secret then return nil and hard fail": {
certificateRequest: tppCR.DeepCopy(),
builder: &controllertest.Builder{
CertManagerObjects: []runtime.Object{tppCR.DeepCopy(), tppIssuer.DeepCopy()},
ExpectedEvents: []string{
`Normal SecretMissing Required secret resource not found: secret "test-tpp-secret" not found`,
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(tppCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: `Required secret resource not found: secret "test-tpp-secret" not found`,
LastTransitionTime: &metaFixedClockStart,
}),
),
)),
},
},
},
"tpp: if fail to build client based on secret lister transient error then return err and set pending": {
certificateRequest: tppCR.DeepCopy(),
issuer: tppIssuer,
builder: &controllertest.Builder{
CertManagerObjects: []runtime.Object{tppCR.DeepCopy(), tppIssuer.DeepCopy()},
ExpectedEvents: []string{
`Normal VenafiInitError Failed to initialise venafi client for signing: this is a network error`,
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(tppCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: "Failed to initialise venafi client for signing: this is a network error",
LastTransitionTime: &metaFixedClockStart,
}),
),
)),
},
},
fakeSecretLister: failGetSecretLister,
expectedErr: true,
},
"cloud: if fail to build client based on missing secret then return nil and hard fail": {
certificateRequest: cloudCR.DeepCopy(),
issuer: cloudIssuer,
builder: &controllertest.Builder{
CertManagerObjects: []runtime.Object{cloudCR.DeepCopy(), cloudIssuer.DeepCopy()},
ExpectedEvents: []string{
`Normal SecretMissing Required secret resource not found: secret "test-cloud-secret" not found`,
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(cloudCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: `Required secret resource not found: secret "test-cloud-secret" not found`,
LastTransitionTime: &metaFixedClockStart,
}),
),
)),
},
},
},
"cloud: if fail to build client based on secret lister transient error then return err and set pending": {
certificateRequest: cloudCR.DeepCopy(),
builder: &controllertest.Builder{
CertManagerObjects: []runtime.Object{cloudCR.DeepCopy(), cloudIssuer.DeepCopy()},
ExpectedEvents: []string{
`Normal VenafiInitError Failed to initialise venafi client for signing: this is a network error`,
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(cloudCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: "Failed to initialise venafi client for signing: this is a network error",
LastTransitionTime: &metaFixedClockStart,
}),
),
)),
},
},
fakeSecretLister: failGetSecretLister,
expectedErr: true,
},
"should exit nil and set status pending if referenced issuer is not ready": {
certificateRequest: cloudCR.DeepCopy(),
builder: &testpkg.Builder{
KubeObjects: []runtime.Object{},
CertManagerObjects: []runtime.Object{cloudCR.DeepCopy(),
gen.Issuer(cloudIssuer.DeepCopy().Name,
gen.SetIssuerVenafi(cmapi.VenafiIssuer{}),
)},
ExpectedEvents: []string{
"Normal IssuerNotReady Referenced issuer does not have a Ready status condition",
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(cloudCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: "Pending",
Message: "Referenced issuer does not have a Ready status condition",
LastTransitionTime: &metaFixedClockStart,
}),
),
)),
},
},
},
"tpp: if sign returns pending error then set pending and return err": {
certificateRequest: tppCR.DeepCopy(),
builder: &controllertest.Builder{
KubeObjects: []runtime.Object{tppSecret},
CertManagerObjects: []runtime.Object{cloudCR.DeepCopy(), tppIssuer.DeepCopy()},
ExpectedEvents: []string{
"Normal IssuancePending Venafi certificate is requested",
"Normal IssuancePending Venafi certificate still in a pending state, the request will be retried: Issuance is pending. You may try retrieving the certificate later using Pickup ID: test-cert-id\n\tStatus: test-status-pending",
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(cloudCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: "Venafi certificate is requested",
LastTransitionTime: &metaFixedClockStart,
}),
gen.AddCertificateRequestAnnotations(map[string]string{cmapi.VenafiPickupIDAnnotationKey: "test"}),
),
)),
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(cloudCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: "Venafi certificate still in a pending state, the request will be retried: Issuance is pending. You may try retrieving the certificate later using Pickup ID: test-cert-id\n\tStatus: test-status-pending",
LastTransitionTime: &metaFixedClockStart,
}),
gen.AddCertificateRequestAnnotations(map[string]string{cmapi.VenafiPickupIDAnnotationKey: "test"}),
),
)),
},
},
fakeSecretLister: failGetSecretLister,
fakeClient: clientReturnsPending,
expectedErr: true,
},
"cloud: if sign returns pending error then set pending and return err": {
certificateRequest: cloudCR.DeepCopy(),
builder: &controllertest.Builder{
KubeObjects: []runtime.Object{cloudSecret},
CertManagerObjects: []runtime.Object{cloudCR.DeepCopy(), cloudIssuer.DeepCopy()},
ExpectedEvents: []string{
"Normal IssuancePending Venafi certificate is requested",
"Normal IssuancePending Venafi certificate still in a pending state, the request will be retried: Issuance is pending. You may try retrieving the certificate later using Pickup ID: test-cert-id\n\tStatus: test-status-pending",
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(cloudCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: "Venafi certificate is requested",
LastTransitionTime: &metaFixedClockStart,
}),
gen.AddCertificateRequestAnnotations(map[string]string{cmapi.VenafiPickupIDAnnotationKey: "test"}),
),
)),
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(cloudCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: "Venafi certificate still in a pending state, the request will be retried: Issuance is pending. You may try retrieving the certificate later using Pickup ID: test-cert-id\n\tStatus: test-status-pending",
LastTransitionTime: &metaFixedClockStart,
}),
gen.AddCertificateRequestAnnotations(map[string]string{cmapi.VenafiPickupIDAnnotationKey: "test"}),
),
)),
},
},
fakeSecretLister: failGetSecretLister,
fakeClient: clientReturnsPending,
expectedErr: true,
},
"tpp: if sign returns generic error then set pending and return error": {
certificateRequest: tppCR.DeepCopy(),
builder: &controllertest.Builder{
KubeObjects: []runtime.Object{tppSecret},
CertManagerObjects: []runtime.Object{tppCR.DeepCopy(), tppIssuer.DeepCopy()},
ExpectedEvents: []string{
"Warning RequestError Failed to request venafi certificate: this is an error",
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(tppCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonFailed,
Message: "Failed to request venafi certificate: this is an error",
LastTransitionTime: &metaFixedClockStart,
}),
gen.SetCertificateRequestFailureTime(metaFixedClockStart),
),
)),
},
},
fakeSecretLister: failGetSecretLister,
fakeClient: clientReturnsGenericError,
expectedErr: true,
skipSecondSignCall: false,
},
"cloud: if sign returns generic error then set pending and return error": {
certificateRequest: cloudCR.DeepCopy(),
builder: &controllertest.Builder{
KubeObjects: []runtime.Object{cloudSecret},
CertManagerObjects: []runtime.Object{tppCR.DeepCopy(), cloudIssuer.DeepCopy()},
ExpectedEvents: []string{
"Warning RequestError Failed to request venafi certificate: this is an error",
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(cloudCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonFailed,
Message: "Failed to request venafi certificate: this is an error",
LastTransitionTime: &metaFixedClockStart,
}),
gen.SetCertificateRequestFailureTime(metaFixedClockStart),
),
)),
},
},
fakeSecretLister: failGetSecretLister,
fakeClient: clientReturnsGenericError,
expectedErr: true,
skipSecondSignCall: false,
},
"tpp: if sign returns cert then return cert and not failed": {
certificateRequest: tppCR.DeepCopy(),
builder: &controllertest.Builder{
KubeObjects: []runtime.Object{tppSecret},
CertManagerObjects: []runtime.Object{tppCR.DeepCopy(), tppIssuer.DeepCopy()},
ExpectedEvents: []string{
"Normal IssuancePending Venafi certificate is requested",
"Normal CertificateIssued Certificate fetched from issuer successfully",
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(tppCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: "Venafi certificate is requested",
LastTransitionTime: &metaFixedClockStart,
}),
gen.AddCertificateRequestAnnotations(map[string]string{cmapi.VenafiPickupIDAnnotationKey: "test"}),
),
)),
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(tppCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionTrue,
Reason: cmapi.CertificateRequestReasonIssued,
Message: "Certificate fetched from issuer successfully",
LastTransitionTime: &metaFixedClockStart,
}),
gen.SetCertificateRequestCertificate(certPEM),
gen.AddCertificateRequestAnnotations(map[string]string{cmapi.VenafiPickupIDAnnotationKey: "test"}),
),
)),
},
},
fakeSecretLister: failGetSecretLister,
fakeClient: clientReturnsCert,
},
"cloud: if sign returns cert then return cert and not failed": {
certificateRequest: cloudCR.DeepCopy(),
builder: &controllertest.Builder{
KubeObjects: []runtime.Object{cloudSecret},
CertManagerObjects: []runtime.Object{cloudCR.DeepCopy(), cloudIssuer.DeepCopy()},
ExpectedEvents: []string{
`Normal IssuancePending Venafi certificate is requested`,
"Normal CertificateIssued Certificate fetched from issuer successfully",
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(cloudCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: "Venafi certificate is requested",
LastTransitionTime: &metaFixedClockStart,
}),
gen.AddCertificateRequestAnnotations(map[string]string{cmapi.VenafiPickupIDAnnotationKey: "test"}),
),
)),
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(cloudCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionTrue,
Reason: cmapi.CertificateRequestReasonIssued,
Message: "Certificate fetched from issuer successfully",
LastTransitionTime: &metaFixedClockStart,
}),
gen.SetCertificateRequestCertificate(certPEM),
gen.AddCertificateRequestAnnotations(map[string]string{cmapi.VenafiPickupIDAnnotationKey: "test"}),
),
)),
},
},
fakeSecretLister: failGetSecretLister,
fakeClient: clientReturnsCert,
},
"annotations: Custom Fields": {
certificateRequest: tppCRWithCustomFields.DeepCopy(),
issuer: tppIssuer,
builder: &controllertest.Builder{
CertManagerObjects: []runtime.Object{tppCRWithCustomFields.DeepCopy(), tppIssuer.DeepCopy()},
ExpectedEvents: []string{
"Normal IssuancePending Venafi certificate is requested",
"Normal CertificateIssued Certificate fetched from issuer successfully",
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(tppCRWithCustomFields,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: "Venafi certificate is requested",
LastTransitionTime: &metaFixedClockStart,
}),
gen.AddCertificateRequestAnnotations(map[string]string{cmapi.VenafiPickupIDAnnotationKey: "test"}),
),
)),
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(tppCRWithCustomFields,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionTrue,
Reason: cmapi.CertificateRequestReasonIssued,
Message: "Certificate fetched from issuer successfully",
LastTransitionTime: &metaFixedClockStart,
}),
gen.SetCertificateRequestCertificate(certPEM),
gen.AddCertificateRequestAnnotations(map[string]string{cmapi.VenafiPickupIDAnnotationKey: "test"}),
),
)),
},
},
fakeSecretLister: failGetSecretLister,
fakeClient: clientReturnsCertIfCustomField,
expectedErr: false,
},
"annotations: Error on invalid JSON in custom fields": {
certificateRequest: tppCRWithInvalidCustomFields.DeepCopy(),
issuer: tppIssuer,
builder: &controllertest.Builder{
CertManagerObjects: []runtime.Object{tppCRWithInvalidCustomFields.DeepCopy(), tppIssuer.DeepCopy()},
ExpectedEvents: []string{
`Warning CustomFieldsError Failed to parse "venafi.cert-manager.io/custom-fields" annotation: invalid character 'c' looking for beginning of value`,
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(tppCRWithInvalidCustomFields,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonFailed,
Message: "Failed to parse \"venafi.cert-manager.io/custom-fields\" annotation: invalid character 'c' looking for beginning of value",
LastTransitionTime: &metaFixedClockStart,
}),
gen.SetCertificateRequestFailureTime(metaFixedClockStart),
),
)),
},
},
fakeSecretLister: failGetSecretLister,
fakeClient: clientReturnsPending,
skipSecondSignCall: true,
expectedErr: false,
},
"annotations: Error on invalid type in custom fields": {
certificateRequest: tppCRWithInvalidCustomFieldType.DeepCopy(),
issuer: tppIssuer,
builder: &controllertest.Builder{
CertManagerObjects: []runtime.Object{tppCRWithInvalidCustomFieldType.DeepCopy(), tppIssuer.DeepCopy()},
ExpectedEvents: []string{
`Warning CustomFieldsError certificate request contains an invalid Venafi custom fields type: "Bool": certificate request contains an invalid Venafi custom fields type: "Bool"`,
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(tppCRWithInvalidCustomFieldType,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonFailed,
Message: "certificate request contains an invalid Venafi custom fields type: \"Bool\": certificate request contains an invalid Venafi custom fields type: \"Bool\"",
LastTransitionTime: &metaFixedClockStart,
}),
gen.SetCertificateRequestFailureTime(metaFixedClockStart),
),
)),
},
},
fakeSecretLister: failGetSecretLister,
fakeClient: clientReturnsInvalidCustomFieldType,
expectedErr: false,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
fixedClock.SetTime(fixedClockStart)
test.builder.Clock = fixedClock
runTest(t, test)
})
}
}
type testT struct {
builder *controllertest.Builder
certificateRequest *cmapi.CertificateRequest
issuer cmapi.GenericIssuer
fakeClient *internalvenafifake.Venafi
expectedErr bool
skipSecondSignCall bool
fakeSecretLister *testlisters.FakeSecretLister
}
func runTest(t *testing.T, test testT) {
test.builder.T = t
test.builder.Init()
defer test.builder.Stop()
v := NewVenafi(test.builder.Context)
if test.fakeSecretLister != nil {
v.secretsLister = test.fakeSecretLister
}
if test.fakeClient != nil {
v.clientBuilder = func(namespace string, secretsLister corelisters.SecretLister,
issuer cmapi.GenericIssuer) (client.Interface, error) {
return test.fakeClient, nil
}
}
controller := certificaterequests.New(apiutil.IssuerVenafi, v)
controller.Register(test.builder.Context)
test.builder.Start()
// Deep copy the certificate request to prevent pulling condition state across tests
err := controller.Sync(context.Background(), test.certificateRequest)
if err == nil && test.fakeClient != nil && test.fakeClient.RetrieveCertificateFn != nil && !test.skipSecondSignCall {
// request state is ok! simulating a 2nd sync to fetch the cert
metav1.SetMetaDataAnnotation(&test.certificateRequest.ObjectMeta, cmapi.VenafiPickupIDAnnotationKey, "test")
err = controller.Sync(context.Background(), test.certificateRequest)
}
if err != nil && !test.expectedErr {
t.Errorf("expected to not get an error, but got: %v", err)
}
if err == nil && test.expectedErr {
t.Errorf("expected to get an error but did not get one")
}
test.builder.CheckAndFinish(err)
}
| 1 | 24,623 | These tests use a self-signed cert so the CA *is* the cert. I considered making a proper chain to use in the tests, but wasn't sure it was necessary to test here, since we test it in the E2E tests anyway. | jetstack-cert-manager | go |
@@ -212,6 +212,10 @@ function diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChil
if (newVNode.type===null) {
if (oldProps !== newProps) {
+ if (excessDomChildren!=null) {
+ const index = excessDomChildren.indexOf(dom);
+ if (index > -1) excessDomChildren[index] = null;
+ }
dom.data = newProps;
}
} | 1 | import { EMPTY_OBJ, EMPTY_ARR } from '../constants';
import { Component, enqueueRender } from '../component';
import { coerceToVNode, Fragment } from '../create-element';
import { diffChildren, toChildArray } from './children';
import { diffProps } from './props';
import { assign, removeNode } from '../util';
import options from '../options';
/**
* Diff two virtual nodes and apply proper changes to the DOM
* @param {import('../internal').PreactElement} parentDom The parent of the DOM element
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this element is an SVG node
* @param {Array<import('../internal').PreactElement>} excessDomChildren
* @param {Array<import('../internal').Component>} mounts A list of newly
* mounted components
* @param {Element | Text} oldDom The current attached DOM
* element any new dom elements should be placed around. Likely `null` on first
* render (except when hydrating). Can be a sibling DOM element when diffing
* Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`.
* @param {boolean} isHydrating Whether or not we are in hydration
*/
export function diff(parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, force, oldDom, isHydrating) {
let tmp, newType = newVNode.type;
// When passing through createElement it assigns the object
	// constructor as undefined. This is to prevent JSON injection.
if (newVNode.constructor !== undefined) return null;
if (tmp = options._diff) tmp(newVNode);
try {
outer: if (typeof newType==='function') {
let c, isNew, oldProps, oldState, snapshot, clearProcessingException;
let newProps = newVNode.props;
// Necessary for createContext api. Setting this property will pass
// the context value as `this.context` just for this component.
tmp = newType.contextType;
let provider = tmp && context[tmp._id];
let cctx = tmp ? (provider ? provider.props.value : tmp._defaultValue) : context;
// Get component and set it to `c`
if (oldVNode._component) {
c = newVNode._component = oldVNode._component;
clearProcessingException = c._processingException = c._pendingError;
}
else {
// Instantiate the new component
if ('prototype' in newType && newType.prototype.render) {
newVNode._component = c = new newType(newProps, cctx); // eslint-disable-line new-cap
}
else {
newVNode._component = c = new Component(newProps, cctx);
c.constructor = newType;
c.render = doRender;
}
if (provider) provider.sub(c);
c.props = newProps;
if (!c.state) c.state = {};
c.context = cctx;
c._context = context;
isNew = c._dirty = true;
c._renderCallbacks = [];
}
// Invoke getDerivedStateFromProps
if (c._nextState==null) {
c._nextState = c.state;
}
if (newType.getDerivedStateFromProps!=null) {
assign(c._nextState==c.state ? (c._nextState = assign({}, c._nextState)) : c._nextState, newType.getDerivedStateFromProps(newProps, c._nextState));
}
// Invoke pre-render lifecycle methods
if (isNew) {
if (newType.getDerivedStateFromProps==null && c.componentWillMount!=null) c.componentWillMount();
if (c.componentDidMount!=null) mounts.push(c);
}
else {
if (newType.getDerivedStateFromProps==null && force==null && c.componentWillReceiveProps!=null) {
c.componentWillReceiveProps(newProps, cctx);
}
if (!force && c.shouldComponentUpdate!=null && c.shouldComponentUpdate(newProps, c._nextState, cctx)===false) {
c.props = newProps;
c.state = c._nextState;
c._dirty = false;
c._vnode = newVNode;
newVNode._dom = oldDom!=null ? oldVNode._dom : null;
newVNode._children = oldVNode._children;
break outer;
}
if (c.componentWillUpdate!=null) {
c.componentWillUpdate(newProps, c._nextState, cctx);
}
}
oldProps = c.props;
oldState = c.state;
c.context = cctx;
c.props = newProps;
c.state = c._nextState;
if (tmp = options._render) tmp(newVNode);
c._dirty = false;
c._vnode = newVNode;
c._parentDom = parentDom;
tmp = c.render(c.props, c.state, c.context);
let isTopLevelFragment = tmp != null && tmp.type == Fragment && tmp.key == null;
toChildArray(isTopLevelFragment ? tmp.props.children : tmp, newVNode._children=[], coerceToVNode, true);
if (c.getChildContext!=null) {
context = assign(assign({}, context), c.getChildContext());
}
if (!isNew && c.getSnapshotBeforeUpdate!=null) {
snapshot = c.getSnapshotBeforeUpdate(oldProps, oldState);
}
diffChildren(parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, oldDom, isHydrating);
c.base = newVNode._dom;
while (tmp=c._renderCallbacks.pop()) tmp.call(c);
// Don't call componentDidUpdate on mount or when we bailed out via
// `shouldComponentUpdate`
if (!isNew && oldProps!=null && c.componentDidUpdate!=null) {
c.componentDidUpdate(oldProps, oldState, snapshot);
}
if (clearProcessingException) {
c._pendingError = c._processingException = null;
}
}
else {
newVNode._dom = diffElementNodes(oldVNode._dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, isHydrating);
}
if (tmp = options.diffed) tmp(newVNode);
}
catch (e) {
options._catchError(e, newVNode, oldVNode);
}
return newVNode._dom;
}
export function commitRoot(mounts, root) {
let c;
while ((c = mounts.pop())) {
try {
c.componentDidMount();
}
catch (e) {
options._catchError(e, c._vnode);
}
}
if (options._commit) options._commit(root);
}
/**
* Diff two virtual nodes representing DOM element
* @param {import('../internal').PreactElement} dom The DOM element representing
* the virtual nodes being diffed
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this DOM node is an SVG node
* @param {*} excessDomChildren
* @param {Array<import('../internal').Component>} mounts An array of newly
* mounted components
* @param {boolean} isHydrating Whether or not we are in hydration
* @returns {import('../internal').PreactElement}
*/
function diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, isHydrating) {
let i;
let oldProps = oldVNode.props;
let newProps = newVNode.props;
// Tracks entering and exiting SVG namespace when descending through the tree.
isSvg = newVNode.type==='svg' || isSvg;
if (dom==null && excessDomChildren!=null) {
for (i=0; i<excessDomChildren.length; i++) {
const child = excessDomChildren[i];
if (child!=null && (newVNode.type===null ? child.nodeType===3 : child.localName===newVNode.type)) {
dom = child;
excessDomChildren[i] = null;
break;
}
}
}
if (dom==null) {
if (newVNode.type===null) {
return document.createTextNode(newProps);
}
dom = isSvg ? document.createElementNS('http://www.w3.org/2000/svg', newVNode.type) : document.createElement(newVNode.type);
// we created a new parent, so none of the previously attached children can be reused:
excessDomChildren = null;
}
if (newVNode.type===null) {
if (oldProps !== newProps) {
dom.data = newProps;
}
}
else if (newVNode!==oldVNode) {
if (excessDomChildren!=null) {
excessDomChildren = EMPTY_ARR.slice.call(dom.childNodes);
}
oldProps = oldVNode.props || EMPTY_OBJ;
let oldHtml = oldProps.dangerouslySetInnerHTML;
let newHtml = newProps.dangerouslySetInnerHTML;
// During hydration, props are not diffed at all (including dangerouslySetInnerHTML)
// @TODO we should warn in debug mode when props don't match here.
if (!isHydrating) {
if (newHtml || oldHtml) {
				// Avoid re-applying the same '__html' if it did not change between re-renders
if (!newHtml || !oldHtml || newHtml.__html!=oldHtml.__html) {
dom.innerHTML = newHtml && newHtml.__html || '';
}
}
}
diffProps(dom, newProps, oldProps, isSvg, isHydrating);
// If the new vnode didn't have dangerouslySetInnerHTML, diff its children
if (!newHtml) {
diffChildren(dom, newVNode, oldVNode, context, newVNode.type==='foreignObject' ? false : isSvg, excessDomChildren, mounts, EMPTY_OBJ, isHydrating);
}
// (as above, don't diff props during hydration)
if (!isHydrating) {
if (('value' in newProps) && newProps.value!==undefined && newProps.value !== dom.value) dom.value = newProps.value==null ? '' : newProps.value;
if (('checked' in newProps) && newProps.checked!==undefined && newProps.checked !== dom.checked) dom.checked = newProps.checked;
}
}
return dom;
}
/**
* Invoke or update a ref, depending on whether it is a function or object ref.
* @param {object|function} ref
* @param {any} value
* @param {import('../internal').VNode} vnode
*/
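// Illustrative usage only (the calls below are made up, not part of this file);
// both ref styles end up routed through this helper:
//   applyRef(function (node) { this.input = node; }, domNode, vnode); // function ref is invoked
//   applyRef({ current: null }, domNode, vnode);                      // object ref gets .current set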
export function applyRef(ref, value, vnode) {
try {
if (typeof ref=='function') ref(value);
else ref.current = value;
}
catch (e) {
options._catchError(e, vnode);
}
}
/**
* Unmount a virtual node from the tree and apply DOM changes
* @param {import('../internal').VNode} vnode The virtual node to unmount
* @param {import('../internal').VNode} parentVNode The parent of the VNode that
* initiated the unmount
* @param {boolean} [skipRemove] Flag that indicates that a parent node of the
* current element is already detached from the DOM.
*/
export function unmount(vnode, parentVNode, skipRemove) {
let r;
if (options.unmount) options.unmount(vnode);
if (r = vnode.ref) {
applyRef(r, null, parentVNode);
}
let dom;
if (!skipRemove && typeof vnode.type !== 'function') {
skipRemove = (dom = vnode._dom)!=null;
}
vnode._dom = vnode._lastDomChild = null;
if ((r = vnode._component)!=null) {
if (r.componentWillUnmount) {
try {
r.componentWillUnmount();
}
catch (e) {
options._catchError(e, parentVNode);
}
}
r.base = r._parentDom = null;
}
if (r = vnode._children) {
for (let i = 0; i < r.length; i++) {
if (r[i]) unmount(r[i], parentVNode, skipRemove);
}
}
if (dom!=null) removeNode(dom);
}
/** The `.render()` method for a PFC backing instance. */
function doRender(props, state, context) {
return this.constructor(props, context);
}
/**
* Find the closest error boundary to a thrown error and call it
* @param {object} error The thrown value
* @param {import('../internal').VNode} vnode The vnode that threw
* the error that was caught (except for unmounting when this parameter
* is the highest parent that was being unmounted)
* @param {import('../internal').VNode} oldVNode The oldVNode of the vnode
* that threw, if this VNode threw while diffing
*/
(options)._catchError = function (error, vnode, oldVNode) {
/** @type {import('../internal').Component} */
let component;
for (; vnode = vnode._parent;) {
if ((component = vnode._component) && !component._processingException) {
try {
if (component.constructor && component.constructor.getDerivedStateFromError!=null) {
component.setState(component.constructor.getDerivedStateFromError(error));
}
else if (component.componentDidCatch!=null) {
component.componentDidCatch(error);
}
else {
continue;
}
return enqueueRender(component._pendingError = component);
}
catch (e) {
error = e;
}
}
}
throw error;
};
1 | 14,115 | We could, maybe, just directly do `excessDomChildren[excessDomChildren.indexOf(dom)] = null;`. Would this improve the size in any way? This will end up with a stray property at `excessDomChildren["-1"]`, but maybe we could live with that? | preactjs-preact | js
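A quick sketch of the edge case raised in that comment (illustrative only; the objects below are stand-ins rather than real Preact vnodes or DOM nodes):

```js
// Plain array behaviour behind the review comment above.
var dom = { nodeType: 3 };
var excessDomChildren = [{ nodeType: 1 }, { nodeType: 1 }];

// Guarded form (as in the patch): nothing happens when `dom` is absent.
var index = excessDomChildren.indexOf(dom);
if (index > -1) excessDomChildren[index] = null;

// Unguarded one-liner: indexOf returns -1, so the assignment creates a stray
// "-1" property on the array object instead of nulling any element.
excessDomChildren[excessDomChildren.indexOf(dom)] = null;
console.log(excessDomChildren.length);  // still 2
console.log(excessDomChildren['-1']);   // null
```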
@@ -1278,6 +1278,11 @@ module.service('gridUtil', ['$log', '$window', '$document', '$http', '$templateC
for ( var i = mouseWheeltoBind.length; i; ) {
$elm.on(mouseWheeltoBind[--i], cbs[fn]);
}
+ $elm.on('$destroy', function unbindEvents() {
+ for ( var i = mouseWheeltoBind.length; i; ) {
+ $elm.off(mouseWheeltoBind[--i], cbs[fn]);
+ }
+ });
};
s.off.mousewheel = function (elm, fn) {
var $elm = angular.element(elm); | 1 | (function() {
var module = angular.module('ui.grid');
var bindPolyfill;
if (typeof Function.prototype.bind !== "function") {
bindPolyfill = function() {
var slice = Array.prototype.slice;
return function(context) {
var fn = this,
args = slice.call(arguments, 1);
if (args.length) {
return function() {
return arguments.length ? fn.apply(context, args.concat(slice.call(arguments))) : fn.apply(context, args);
};
}
return function() {
return arguments.length ? fn.apply(context, arguments) : fn.call(context);
};
};
};
}
function getStyles (elem) {
var e = elem;
if (typeof(e.length) !== 'undefined' && e.length) {
e = elem[0];
}
return e.ownerDocument.defaultView.getComputedStyle(e, null);
}
var rnumnonpx = new RegExp( "^(" + (/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/).source + ")(?!px)[a-z%]+$", "i" ),
// swappable if display is none or starts with table except "table", "table-cell", or "table-caption"
// see here for display values: https://developer.mozilla.org/en-US/docs/CSS/display
rdisplayswap = /^(block|none|table(?!-c[ea]).+)/,
cssShow = { position: "absolute", visibility: "hidden", display: "block" };
function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) {
var i = extra === ( isBorderBox ? 'border' : 'content' ) ?
// If we already have the right measurement, avoid augmentation
4 :
// Otherwise initialize for horizontal or vertical properties
name === 'width' ? 1 : 0,
val = 0;
var sides = ['Top', 'Right', 'Bottom', 'Left'];
for ( ; i < 4; i += 2 ) {
var side = sides[i];
// dump('side', side);
// both box models exclude margin, so add it if we want it
if ( extra === 'margin' ) {
var marg = parseFloat(styles[extra + side]);
if (!isNaN(marg)) {
val += marg;
}
}
// dump('val1', val);
if ( isBorderBox ) {
// border-box includes padding, so remove it if we want content
if ( extra === 'content' ) {
var padd = parseFloat(styles['padding' + side]);
if (!isNaN(padd)) {
val -= padd;
// dump('val2', val);
}
}
// at this point, extra isn't border nor margin, so remove border
if ( extra !== 'margin' ) {
var bordermarg = parseFloat(styles['border' + side + 'Width']);
if (!isNaN(bordermarg)) {
val -= bordermarg;
// dump('val3', val);
}
}
}
else {
// at this point, extra isn't content, so add padding
var nocontentPad = parseFloat(styles['padding' + side]);
if (!isNaN(nocontentPad)) {
val += nocontentPad;
// dump('val4', val);
}
// at this point, extra isn't content nor padding, so add border
if ( extra !== 'padding') {
var nocontentnopad = parseFloat(styles['border' + side + 'Width']);
if (!isNaN(nocontentnopad)) {
val += nocontentnopad;
// dump('val5', val);
}
}
}
}
// dump('augVal', val);
return val;
}
function getWidthOrHeight( elem, name, extra ) {
// Start with offset property, which is equivalent to the border-box value
var valueIsBorderBox = true,
val, // = name === 'width' ? elem.offsetWidth : elem.offsetHeight,
styles = getStyles(elem),
isBorderBox = styles['boxSizing'] === 'border-box';
// some non-html elements return undefined for offsetWidth, so check for null/undefined
// svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285
// MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668
if ( val <= 0 || val == null ) {
// Fall back to computed then uncomputed css if necessary
val = styles[name];
if ( val < 0 || val == null ) {
val = elem.style[ name ];
}
// Computed unit is not pixels. Stop here and return.
if ( rnumnonpx.test(val) ) {
return val;
}
// we need the check for style in case a browser which returns unreliable values
// for getComputedStyle silently falls back to the reliable elem.style
valueIsBorderBox = isBorderBox &&
( true || val === elem.style[ name ] ); // use 'true' instead of 'support.boxSizingReliable()'
// Normalize "", auto, and prepare for extra
val = parseFloat( val ) || 0;
}
// use the active box-sizing model to add/subtract irrelevant styles
var ret = ( val +
augmentWidthOrHeight(
elem,
name,
extra || ( isBorderBox ? "border" : "content" ),
valueIsBorderBox,
styles
)
);
// dump('ret', ret, val);
return ret;
}
function getLineHeight(elm) {
elm = angular.element(elm)[0];
var parent = elm.parentElement;
if (!parent) {
parent = document.getElementsByTagName('body')[0];
}
return parseInt( getStyles(parent).fontSize ) || parseInt( getStyles(elm).fontSize ) || 16;
}
var uid = ['0', '0', '0', '0'];
var uidPrefix = 'uiGrid-';
/**
* @ngdoc service
* @name ui.grid.service:GridUtil
*
* @description Grid utility functions
*/
module.service('gridUtil', ['$log', '$window', '$document', '$http', '$templateCache', '$timeout', '$interval', '$injector', '$q', '$interpolate', 'uiGridConstants',
function ($log, $window, $document, $http, $templateCache, $timeout, $interval, $injector, $q, $interpolate, uiGridConstants) {
var s = {
augmentWidthOrHeight: augmentWidthOrHeight,
getStyles: getStyles,
/**
* @ngdoc method
* @name createBoundedWrapper
* @methodOf ui.grid.service:GridUtil
*
* @param {object} Object to bind 'this' to
* @param {method} Method to bind
* @returns {Function} The wrapper that performs the binding
*
* @description
* Binds given method to given object.
*
* By means of a wrapper, ensures that ``method`` is always bound to
* ``object`` regardless of its calling environment.
* Iow, inside ``method``, ``this`` always points to ``object``.
*
* See http://alistapart.com/article/getoutbindingsituations
*
*/
createBoundedWrapper: function(object, method) {
return function() {
return method.apply(object, arguments);
};
},
/**
* @ngdoc method
* @name readableColumnName
* @methodOf ui.grid.service:GridUtil
*
* @param {string} columnName Column name as a string
* @returns {string} Column name appropriately capitalized and split apart
*
@example
<example module="app">
<file name="app.js">
var app = angular.module('app', ['ui.grid']);
app.controller('MainCtrl', ['$scope', 'gridUtil', function ($scope, gridUtil) {
$scope.name = 'firstName';
$scope.columnName = function(name) {
return gridUtil.readableColumnName(name);
};
}]);
</file>
<file name="index.html">
<div ng-controller="MainCtrl">
<strong>Column name:</strong> <input ng-model="name" />
<br>
<strong>Output:</strong> <span ng-bind="columnName(name)"></span>
</div>
</file>
</example>
*/
readableColumnName: function (columnName) {
// Convert underscores to spaces
if (typeof(columnName) === 'undefined' || columnName === undefined || columnName === null) { return columnName; }
if (typeof(columnName) !== 'string') {
columnName = String(columnName);
}
return columnName.replace(/_+/g, ' ')
// Replace a completely all-capsed word with a first-letter-capitalized version
.replace(/^[A-Z]+$/, function (match) {
return angular.lowercase(angular.uppercase(match.charAt(0)) + match.slice(1));
})
// Capitalize the first letter of words
.replace(/([\w\u00C0-\u017F]+)/g, function (match) {
return angular.uppercase(match.charAt(0)) + match.slice(1);
})
          // Put a space in between words that have partial capitalizations (i.e. 'firstName' becomes 'First Name')
// .replace(/([A-Z]|[A-Z]\w+)([A-Z])/g, "$1 $2");
// .replace(/(\w+?|\w)([A-Z])/g, "$1 $2");
.replace(/(\w+?(?=[A-Z]))/g, '$1 ');
},
/**
* @ngdoc method
* @name getColumnsFromData
* @methodOf ui.grid.service:GridUtil
* @description Return a list of column names, given a data set
*
* @param {string} data Data array for grid
* @returns {Object} Column definitions with field accessor and column name
*
* @example
<pre>
var data = [
{ firstName: 'Bob', lastName: 'Jones' },
{ firstName: 'Frank', lastName: 'Smith' }
];
var columnDefs = GridUtil.getColumnsFromData(data, excludeProperties);
columnDefs == [
{
field: 'firstName',
name: 'First Name'
},
{
field: 'lastName',
name: 'Last Name'
}
];
</pre>
*/
getColumnsFromData: function (data, excludeProperties) {
var columnDefs = [];
if (!data || typeof(data[0]) === 'undefined' || data[0] === undefined) { return []; }
if (angular.isUndefined(excludeProperties)) { excludeProperties = []; }
var item = data[0];
angular.forEach(item,function (prop, propName) {
if ( excludeProperties.indexOf(propName) === -1){
columnDefs.push({
name: propName
});
}
});
return columnDefs;
},
/**
* @ngdoc method
* @name newId
* @methodOf ui.grid.service:GridUtil
* @description Return a unique ID string
*
* @returns {string} Unique string
*
* @example
<pre>
var id = GridUtil.newId();
# 1387305700482;
</pre>
*/
newId: (function() {
var seedId = new Date().getTime();
return function() {
return seedId += 1;
};
})(),
/**
* @ngdoc method
* @name getTemplate
* @methodOf ui.grid.service:GridUtil
     * @description Gets template from cache / element / url
*
* @param {string|element|promise} Either a string representing the template id, a string representing the template url,
     * a jQuery/Angular element, or a promise that returns the template contents to use.
* @returns {object} a promise resolving to template contents
*
* @example
<pre>
GridUtil.getTemplate(url).then(function (contents) {
alert(contents);
})
</pre>
*/
getTemplate: function (template) {
// Try to fetch the template out of the templateCache
if ($templateCache.get(template)) {
return s.postProcessTemplate($templateCache.get(template));
}
// See if the template is itself a promise
if (angular.isFunction(template.then)) {
return template.then(s.postProcessTemplate);
}
// If the template is an element, return the element
try {
if (angular.element(template).length > 0) {
return $q.when(template).then(s.postProcessTemplate);
}
}
catch (err){
//do nothing; not valid html
}
s.logDebug('fetching url', template);
// Default to trying to fetch the template as a url with $http
return $http({ method: 'GET', url: template})
.then(
function (result) {
var templateHtml = result.data.trim();
//put in templateCache for next call
$templateCache.put(template, templateHtml);
return templateHtml;
},
function (err) {
throw new Error("Could not get template " + template + ": " + err);
}
)
.then(s.postProcessTemplate);
},
//
postProcessTemplate: function (template) {
var startSym = $interpolate.startSymbol(),
endSym = $interpolate.endSymbol();
// If either of the interpolation symbols have been changed, we need to alter this template
if (startSym !== '{{' || endSym !== '}}') {
template = template.replace(/\{\{/g, startSym);
template = template.replace(/\}\}/g, endSym);
}
return $q.when(template);
},
/**
* @ngdoc method
* @name guessType
* @methodOf ui.grid.service:GridUtil
* @description guesses the type of an argument
*
* @param {string/number/bool/object} item variable to examine
* @returns {string} one of the following
* - 'string'
* - 'boolean'
* - 'number'
* - 'date'
* - 'object'
*/
guessType : function (item) {
var itemType = typeof(item);
// Check for numbers and booleans
switch (itemType) {
case "number":
case "boolean":
case "string":
return itemType;
default:
if (angular.isDate(item)) {
return "date";
}
return "object";
}
},
/**
* @ngdoc method
* @name elementWidth
* @methodOf ui.grid.service:GridUtil
*
* @param {element} element DOM element
* @param {string} [extra] Optional modifier for calculation. Use 'margin' to account for margins on element
*
* @returns {number} Element width in pixels, accounting for any borders, etc.
*/
elementWidth: function (elem) {
},
/**
* @ngdoc method
* @name elementHeight
* @methodOf ui.grid.service:GridUtil
*
* @param {element} element DOM element
* @param {string} [extra] Optional modifier for calculation. Use 'margin' to account for margins on element
*
* @returns {number} Element height in pixels, accounting for any borders, etc.
*/
elementHeight: function (elem) {
},
// Thanks to http://stackoverflow.com/a/13382873/888165
getScrollbarWidth: function() {
var outer = document.createElement("div");
outer.style.visibility = "hidden";
outer.style.width = "100px";
outer.style.msOverflowStyle = "scrollbar"; // needed for WinJS apps
document.body.appendChild(outer);
var widthNoScroll = outer.offsetWidth;
// force scrollbars
outer.style.overflow = "scroll";
// add innerdiv
var inner = document.createElement("div");
inner.style.width = "100%";
outer.appendChild(inner);
var widthWithScroll = inner.offsetWidth;
// remove divs
outer.parentNode.removeChild(outer);
return widthNoScroll - widthWithScroll;
},
swap: function( elem, options, callback, args ) {
var ret, name,
old = {};
// Remember the old values, and insert the new ones
for ( name in options ) {
old[ name ] = elem.style[ name ];
elem.style[ name ] = options[ name ];
}
ret = callback.apply( elem, args || [] );
// Revert the old values
for ( name in options ) {
elem.style[ name ] = old[ name ];
}
return ret;
},
fakeElement: function( elem, options, callback, args ) {
var ret, name,
newElement = angular.element(elem).clone()[0];
for ( name in options ) {
newElement.style[ name ] = options[ name ];
}
angular.element(document.body).append(newElement);
ret = callback.call( newElement, newElement );
angular.element(newElement).remove();
return ret;
},
/**
* @ngdoc method
* @name normalizeWheelEvent
* @methodOf ui.grid.service:GridUtil
*
* @param {event} event A mouse wheel event
*
* @returns {event} A normalized event
*
* @description
* Given an event from this list:
*
* `wheel, mousewheel, DomMouseScroll, MozMousePixelScroll`
*
* "normalize" it
* so that it stays consistent no matter what browser it comes from (i.e. scale it correctly and make sure the direction is right.)
*/
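      // Illustrative usage (assumed wiring; `elm` and `gridUtil` are stand-ins for
      // however the caller obtained them):
      //   elm.addEventListener('wheel', function (evt) {
      //     var n = gridUtil.normalizeWheelEvent(evt);
      //     // n.delta, n.deltaX and n.deltaY are whole numbers with a consistent
      //     // sign convention regardless of which wheel event the browser fired.
      //   });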
normalizeWheelEvent: function (event) {
// var toFix = ['wheel', 'mousewheel', 'DOMMouseScroll', 'MozMousePixelScroll'];
// var toBind = 'onwheel' in document || document.documentMode >= 9 ? ['wheel'] : ['mousewheel', 'DomMouseScroll', 'MozMousePixelScroll'];
var lowestDelta, lowestDeltaXY;
var orgEvent = event || window.event,
args = [].slice.call(arguments, 1),
delta = 0,
deltaX = 0,
deltaY = 0,
absDelta = 0,
absDeltaXY = 0,
fn;
// event = $.event.fix(orgEvent);
// event.type = 'mousewheel';
// NOTE: jQuery masks the event and stores it in the event as originalEvent
if (orgEvent.originalEvent) {
orgEvent = orgEvent.originalEvent;
}
// Old school scrollwheel delta
if ( orgEvent.wheelDelta ) { delta = orgEvent.wheelDelta; }
if ( orgEvent.detail ) { delta = orgEvent.detail * -1; }
// At a minimum, setup the deltaY to be delta
deltaY = delta;
// Firefox < 17 related to DOMMouseScroll event
if ( orgEvent.axis !== undefined && orgEvent.axis === orgEvent.HORIZONTAL_AXIS ) {
deltaY = 0;
deltaX = delta * -1;
}
// New school wheel delta (wheel event)
if ( orgEvent.deltaY ) {
deltaY = orgEvent.deltaY * -1;
delta = deltaY;
}
if ( orgEvent.deltaX ) {
deltaX = orgEvent.deltaX;
delta = deltaX * -1;
}
// Webkit
if ( orgEvent.wheelDeltaY !== undefined ) { deltaY = orgEvent.wheelDeltaY; }
if ( orgEvent.wheelDeltaX !== undefined ) { deltaX = orgEvent.wheelDeltaX; }
// Look for lowest delta to normalize the delta values
absDelta = Math.abs(delta);
if ( !lowestDelta || absDelta < lowestDelta ) { lowestDelta = absDelta; }
absDeltaXY = Math.max(Math.abs(deltaY), Math.abs(deltaX));
if ( !lowestDeltaXY || absDeltaXY < lowestDeltaXY ) { lowestDeltaXY = absDeltaXY; }
// Get a whole value for the deltas
fn = delta > 0 ? 'floor' : 'ceil';
delta = Math[fn](delta / lowestDelta);
deltaX = Math[fn](deltaX / lowestDeltaXY);
deltaY = Math[fn](deltaY / lowestDeltaXY);
return {
delta: delta,
deltaX: deltaX,
deltaY: deltaY
};
},
// Stolen from Modernizr
    // TODO: make this, and everything that flows from it, robust
//http://www.stucox.com/blog/you-cant-detect-a-touchscreen/
isTouchEnabled: function() {
var bool;
if (('ontouchstart' in $window) || $window.DocumentTouch && $document instanceof DocumentTouch) {
bool = true;
}
return bool;
},
isNullOrUndefined: function(obj) {
if (obj === undefined || obj === null) {
return true;
}
return false;
},
endsWith: function(str, suffix) {
if (!str || !suffix || typeof str !== "string") {
return false;
}
return str.indexOf(suffix, str.length - suffix.length) !== -1;
},
arrayContainsObjectWithProperty: function(array, propertyName, propertyValue) {
var found = false;
angular.forEach(array, function (object) {
if (object[propertyName] === propertyValue) {
found = true;
}
});
return found;
},
//// Shim requestAnimationFrame
//requestAnimationFrame: $window.requestAnimationFrame && $window.requestAnimationFrame.bind($window) ||
// $window.webkitRequestAnimationFrame && $window.webkitRequestAnimationFrame.bind($window) ||
// function(fn) {
// return $timeout(fn, 10, false);
// },
numericAndNullSort: function (a, b) {
if (a === null) { return 1; }
if (b === null) { return -1; }
if (a === null && b === null) { return 0; }
return a - b;
},
// Disable ngAnimate animations on an element
disableAnimations: function (element) {
var $animate;
try {
$animate = $injector.get('$animate');
// See: http://brianhann.com/angular-1-4-breaking-changes-to-be-aware-of/#animate
if (angular.version.major > 1 || (angular.version.major === 1 && angular.version.minor >= 4)) {
$animate.enabled(element, false);
} else {
$animate.enabled(false, element);
}
}
catch (e) {}
},
enableAnimations: function (element) {
var $animate;
try {
$animate = $injector.get('$animate');
// See: http://brianhann.com/angular-1-4-breaking-changes-to-be-aware-of/#animate
if (angular.version.major > 1 || (angular.version.major === 1 && angular.version.minor >= 4)) {
$animate.enabled(element, true);
} else {
$animate.enabled(true, element);
}
return $animate;
}
catch (e) {}
},
// Blatantly stolen from Angular as it isn't exposed (yet. 2.0 maybe?)
nextUid: function nextUid() {
var index = uid.length;
var digit;
while (index) {
index--;
digit = uid[index].charCodeAt(0);
if (digit === 57 /*'9'*/) {
uid[index] = 'A';
return uidPrefix + uid.join('');
}
if (digit === 90 /*'Z'*/) {
uid[index] = '0';
} else {
uid[index] = String.fromCharCode(digit + 1);
return uidPrefix + uid.join('');
}
}
uid.unshift('0');
return uidPrefix + uid.join('');
},
// Blatantly stolen from Angular as it isn't exposed (yet. 2.0 maybe?)
hashKey: function hashKey(obj) {
var objType = typeof obj,
key;
if (objType === 'object' && obj !== null) {
if (typeof (key = obj.$$hashKey) === 'function') {
// must invoke on object to keep the right this
key = obj.$$hashKey();
}
else if (typeof(obj.$$hashKey) !== 'undefined' && obj.$$hashKey) {
key = obj.$$hashKey;
}
else if (key === undefined) {
key = obj.$$hashKey = s.nextUid();
}
}
else {
key = obj;
}
return objType + ':' + key;
},
resetUids: function () {
uid = ['0', '0', '0'];
},
/**
* @ngdoc method
* @methodOf ui.grid.service:GridUtil
* @name logError
* @description wraps the $log method, allowing us to choose different
* treatment within ui-grid if we so desired. At present we only log
* error messages if uiGridConstants.LOG_ERROR_MESSAGES is set to true
* @param {string} logMessage message to be logged to the console
*
*/
logError: function( logMessage ){
if ( uiGridConstants.LOG_ERROR_MESSAGES ){
$log.error( logMessage );
}
},
/**
* @ngdoc method
* @methodOf ui.grid.service:GridUtil
* @name logWarn
* @description wraps the $log method, allowing us to choose different
* treatment within ui-grid if we so desired. At present we only log
* warning messages if uiGridConstants.LOG_WARN_MESSAGES is set to true
* @param {string} logMessage message to be logged to the console
*
*/
logWarn: function( logMessage ){
if ( uiGridConstants.LOG_WARN_MESSAGES ){
$log.warn( logMessage );
}
},
/**
* @ngdoc method
* @methodOf ui.grid.service:GridUtil
* @name logDebug
* @description wraps the $log method, allowing us to choose different
* treatment within ui-grid if we so desired. At present we only log
* debug messages if uiGridConstants.LOG_DEBUG_MESSAGES is set to true
*
*/
logDebug: function() {
if ( uiGridConstants.LOG_DEBUG_MESSAGES ){
$log.debug.apply($log, arguments);
}
}
};
/**
* @ngdoc object
* @name focus
* @propertyOf ui.grid.service:GridUtil
   * @description Provides a set of methods to set the document focus inside the grid.
* See {@link ui.grid.service:GridUtil.focus} for more information.
*/
/**
* @ngdoc object
* @name ui.grid.service:GridUtil.focus
   * @description Provides a set of methods to set the document focus inside the grid.
* Timeouts are utilized to ensure that the focus is invoked after any other event has been triggered.
* e.g. click events that need to run before the focus or
* inputs elements that are in a disabled state but are enabled when those events
* are triggered.
*/
s.focus = {
queue: [],
//http://stackoverflow.com/questions/25596399/set-element-focus-in-angular-way
/**
* @ngdoc method
* @methodOf ui.grid.service:GridUtil.focus
* @name byId
* @description Sets the focus of the document to the given id value.
* If provided with the grid object it will automatically append the grid id.
* This is done to encourage unique dom id's as it allows for multiple grids on a
* page.
* @param {String} id the id of the dom element to set the focus on
* @param {Object=} Grid the grid object for this grid instance. See: {@link ui.grid.class:Grid}
* @param {Number} Grid.id the unique id for this grid. Already set on an initialized grid object.
      * @returns {Promise} The `$timeout` promise that will be resolved once focus is set. If another focus is requested before this request is evaluated,
* then the promise will fail with the `'canceled'` reason.
*/
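      // Illustrative usage (ids below are made up):
      //   gridUtil.focus.byId('cell-editor', grid); // focuses the element with id '<grid.id>-cell-editor'
      //   gridUtil.focus.byId('cell-editor');       // focuses the element with id 'cell-editor' as-is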
byId: function (id, Grid) {
this._purgeQueue();
var promise = $timeout(function() {
var elementID = (Grid && Grid.id ? Grid.id + '-' : '') + id;
var element = $window.document.getElementById(elementID);
if (element) {
element.focus();
} else {
s.logWarn('[focus.byId] Element id ' + elementID + ' was not found.');
}
}, 0, false);
this.queue.push(promise);
return promise;
},
/**
* @ngdoc method
* @methodOf ui.grid.service:GridUtil.focus
* @name byElement
* @description Sets the focus of the document to the given dom element.
* @param {(element|angular.element)} element the DOM element to set the focus on
      * @returns {Promise} The `$timeout` promise that will be resolved once focus is set. If another focus is requested before this request is evaluated,
* then the promise will fail with the `'canceled'` reason.
*/
byElement: function(element){
if (!angular.isElement(element)){
s.logWarn("Trying to focus on an element that isn\'t an element.");
return $q.reject('not-element');
}
element = angular.element(element);
this._purgeQueue();
var promise = $timeout(function(){
if (element){
element[0].focus();
}
}, 0, false);
this.queue.push(promise);
return promise;
},
/**
* @ngdoc method
* @methodOf ui.grid.service:GridUtil.focus
* @name bySelector
* @description Sets the focus of the document to the given dom element.
* @param {(element|angular.element)} parentElement the parent/ancestor of the dom element that you are selecting using the query selector
* @param {String} querySelector finds the dom element using the {@link http://www.w3schools.com/jsref/met_document_queryselector.asp querySelector}
      * @param {boolean} [aSync=false] If true then the selector will be queried inside of a timeout. Otherwise the selector will be queried immediately
* then the focus will be called.
      * @returns {Promise} The `$timeout` promise that will be resolved once focus is set. If another focus is requested before this request is evaluated,
* then the promise will fail with the `'canceled'` reason.
*/
bySelector: function(parentElement, querySelector, aSync){
var self = this;
if (!angular.isElement(parentElement)){
throw new Error("The parent element is not an element.");
}
// Ensure that this is an angular element.
// It is fine if this is already an angular element.
parentElement = angular.element(parentElement);
var focusBySelector = function(){
var element = parentElement[0].querySelector(querySelector);
return self.byElement(element);
};
this._purgeQueue();
        if (aSync){ //Do this asynchronously
var promise = $timeout(focusBySelector, 0, false);
this.queue.push($timeout(focusBySelector), 0, false);
return promise;
} else {
return focusBySelector();
}
},
_purgeQueue: function(){
this.queue.forEach(function(element){
$timeout.cancel(element);
});
this.queue = [];
}
};
['width', 'height'].forEach(function (name) {
var capsName = angular.uppercase(name.charAt(0)) + name.substr(1);
s['element' + capsName] = function (elem, extra) {
var e = elem;
if (e && typeof(e.length) !== 'undefined' && e.length) {
e = elem[0];
}
if (e && e !== null) {
var styles = getStyles(e);
return e.offsetWidth === 0 && rdisplayswap.test(styles.display) ?
s.swap(e, cssShow, function() {
return getWidthOrHeight(e, name, extra );
}) :
getWidthOrHeight( e, name, extra );
}
else {
return null;
}
};
s['outerElement' + capsName] = function (elem, margin) {
return elem ? s['element' + capsName].call(this, elem, margin ? 'margin' : 'border') : null;
};
});
// http://stackoverflow.com/a/24107550/888165
s.closestElm = function closestElm(el, selector) {
if (typeof(el.length) !== 'undefined' && el.length) {
el = el[0];
}
var matchesFn;
// find vendor prefix
['matches','webkitMatchesSelector','mozMatchesSelector','msMatchesSelector','oMatchesSelector'].some(function(fn) {
if (typeof document.body[fn] === 'function') {
matchesFn = fn;
return true;
}
return false;
});
// traverse parents
var parent;
while (el !== null) {
parent = el.parentElement;
if (parent !== null && parent[matchesFn](selector)) {
return parent;
}
el = parent;
}
return null;
};
s.type = function (obj) {
var text = Function.prototype.toString.call(obj.constructor);
return text.match(/function (.*?)\(/)[1];
};
s.getBorderSize = function getBorderSize(elem, borderType) {
if (typeof(elem.length) !== 'undefined' && elem.length) {
elem = elem[0];
}
var styles = getStyles(elem);
// If a specific border is supplied, like 'top', read the 'borderTop' style property
if (borderType) {
borderType = 'border' + borderType.charAt(0).toUpperCase() + borderType.slice(1);
}
else {
borderType = 'border';
}
borderType += 'Width';
var val = parseInt(styles[borderType], 10);
if (isNaN(val)) {
return 0;
}
else {
return val;
}
};
// http://stackoverflow.com/a/22948274/888165
// TODO: Opera? Mobile?
s.detectBrowser = function detectBrowser() {
var userAgent = $window.navigator.userAgent;
var browsers = {chrome: /chrome/i, safari: /safari/i, firefox: /firefox/i, ie: /internet explorer|trident\//i};
for (var key in browsers) {
if (browsers[key].test(userAgent)) {
return key;
}
}
return 'unknown';
};
// Borrowed from https://github.com/othree/jquery.rtl-scroll-type
// Determine the scroll "type" this browser is using for RTL
s.rtlScrollType = function rtlScrollType() {
if (rtlScrollType.type) {
return rtlScrollType.type;
}
var definer = angular.element('<div dir="rtl" style="font-size: 14px; width: 1px; height: 1px; position: absolute; top: -1000px; overflow: scroll">A</div>')[0],
type = 'reverse';
document.body.appendChild(definer);
if (definer.scrollLeft > 0) {
type = 'default';
}
else {
definer.scrollLeft = 1;
if (definer.scrollLeft === 0) {
type = 'negative';
}
}
angular.element(definer).remove();
rtlScrollType.type = type;
return type;
};
/**
* @ngdoc method
* @name normalizeScrollLeft
* @methodOf ui.grid.service:GridUtil
*
* @param {element} element The element to get the `scrollLeft` from.
* @param {grid} grid - grid used to normalize (uses the rtl property)
*
* @returns {number} A normalized scrollLeft value for the current browser.
*
* @description
    * Browsers currently handle RTL in different ways, resulting in inconsistent scrollLeft values. This method normalizes them.
*/
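    // Illustrative usage (assumed viewport element; `viewportElm` is a stand-in):
    //   var left = gridUtil.normalizeScrollLeft(viewportElm, grid);
    //   // For an RTL grid, `left` is always measured from the same logical edge,
    //   // whether the browser reports 'default', 'negative' or 'reverse' scrollLeft values.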
s.normalizeScrollLeft = function normalizeScrollLeft(element, grid) {
if (typeof(element.length) !== 'undefined' && element.length) {
element = element[0];
}
var scrollLeft = element.scrollLeft;
if (grid.isRTL()) {
switch (s.rtlScrollType()) {
case 'default':
return element.scrollWidth - scrollLeft - element.clientWidth;
case 'negative':
return Math.abs(scrollLeft);
case 'reverse':
return scrollLeft;
}
}
return scrollLeft;
};
/**
* @ngdoc method
* @name denormalizeScrollLeft
* @methodOf ui.grid.service:GridUtil
*
* @param {element} element The element to normalize the `scrollLeft` value for
* @param {number} scrollLeft The `scrollLeft` value to denormalize.
* @param {grid} grid The grid that owns the scroll event.
*
* @returns {number} A normalized scrollLeft value for the current browser.
*
* @description
* Browsers currently handle RTL in different ways, resulting in inconsistent scrollLeft values. This method denormalizes a value for the current browser.
*/
s.denormalizeScrollLeft = function denormalizeScrollLeft(element, scrollLeft, grid) {
if (typeof(element.length) !== 'undefined' && element.length) {
element = element[0];
}
if (grid.isRTL()) {
switch (s.rtlScrollType()) {
case 'default':
// Get the max scroll for the element
var maxScrollLeft = element.scrollWidth - element.clientWidth;
// Subtract the current scroll amount from the max scroll
return maxScrollLeft - scrollLeft;
case 'negative':
return scrollLeft * -1;
case 'reverse':
return scrollLeft;
}
}
return scrollLeft;
};
/**
* @ngdoc method
* @name preEval
* @methodOf ui.grid.service:GridUtil
*
* @param {string} path Path to evaluate
*
* @returns {string} A path that is normalized.
*
* @description
* Takes a field path and converts it to bracket notation to allow for special characters in path
* @example
* <pre>
* gridUtil.preEval('property') == 'property'
* gridUtil.preEval('nested.deep.prop-erty') = "nested['deep']['prop-erty']"
* </pre>
*/
s.preEval = function (path) {
var m = uiGridConstants.BRACKET_REGEXP.exec(path);
if (m) {
return (m[1] ? s.preEval(m[1]) : m[1]) + m[2] + (m[3] ? s.preEval(m[3]) : m[3]);
} else {
path = path.replace(uiGridConstants.APOS_REGEXP, '\\\'');
var parts = path.split(uiGridConstants.DOT_REGEXP);
var preparsed = [parts.shift()]; // first item must be var notation, thus skip
angular.forEach(parts, function (part) {
preparsed.push(part.replace(uiGridConstants.FUNC_REGEXP, '\']$1'));
});
return preparsed.join('[\'');
}
};
/**
* @ngdoc method
* @name debounce
* @methodOf ui.grid.service:GridUtil
*
* @param {function} func function to debounce
* @param {number} wait milliseconds to delay
* @param {boolean} immediate execute before delay
*
* @returns {function} A function that can be executed as debounced function
*
* @description
* Copied from https://github.com/shahata/angular-debounce
* Takes a function, decorates it to execute only 1 time after multiple calls, and returns the decorated function
* @example
* <pre>
* var debouncedFunc = gridUtil.debounce(function(){alert('debounced');}, 500);
* debouncedFunc();
* debouncedFunc();
* debouncedFunc();
* </pre>
*/
s.debounce = function (func, wait, immediate) {
var timeout, args, context, result;
function debounce() {
/* jshint validthis:true */
context = this;
args = arguments;
var later = function () {
timeout = null;
if (!immediate) {
result = func.apply(context, args);
}
};
var callNow = immediate && !timeout;
if (timeout) {
$timeout.cancel(timeout);
}
timeout = $timeout(later, wait, false);
if (callNow) {
result = func.apply(context, args);
}
return result;
}
debounce.cancel = function () {
$timeout.cancel(timeout);
timeout = null;
};
return debounce;
};
/**
* @ngdoc method
* @name throttle
* @methodOf ui.grid.service:GridUtil
*
* @param {function} func function to throttle
* @param {number} wait milliseconds to delay after first trigger
   * @param {Object} options Params object to use in throttle.
*
* @returns {function} A function that can be executed as throttled function
*
* @description
* Adapted from debounce function (above)
* Potential keys for Params Object are:
* trailing (bool) - whether to trigger after throttle time ends if called multiple times
* Updated to use $interval rather than $timeout, as protractor (e2e tests) is able to work with $interval,
* but not with $timeout
*
* Note that when using throttle, you need to use throttle to create a new function upfront, then use the function
* return from that call each time you need to call throttle. If you call throttle itself repeatedly, the lastCall
* variable will get overwritten and the throttling won't work
*
* @example
* <pre>
* var throttledFunc = gridUtil.throttle(function(){console.log('throttled');}, 500, {trailing: true});
* throttledFunc(); //=> logs throttled
* throttledFunc(); //=> queues attempt to log throttled for ~500ms (since trailing param is truthy)
* throttledFunc(); //=> updates arguments to keep most-recent request, but does not do anything else.
* </pre>
*/
s.throttle = function(func, wait, options){
options = options || {};
var lastCall = 0, queued = null, context, args;
function runFunc(endDate){
lastCall = +new Date();
func.apply(context, args);
$interval(function(){queued = null; }, 0, 1, false);
}
return function(){
/* jshint validthis:true */
context = this;
args = arguments;
if (queued === null){
var sinceLast = +new Date() - lastCall;
if (sinceLast > wait){
runFunc();
}
else if (options.trailing){
queued = $interval(runFunc, wait - sinceLast, 1, false);
}
}
};
};
s.on = {};
s.off = {};
s._events = {};
s.addOff = function (eventName) {
s.off[eventName] = function (elm, fn) {
var idx = s._events[eventName].indexOf(fn);
      if (idx >= 0) {
        s._events[eventName].splice(idx, 1);
}
};
};
var mouseWheeltoBind = ( 'onwheel' in document || document.documentMode >= 9 ) ? ['wheel'] : ['mousewheel', 'DomMouseScroll', 'MozMousePixelScroll'],
nullLowestDeltaTimeout,
lowestDelta;
s.on.mousewheel = function (elm, fn) {
if (!elm || !fn) { return; }
var $elm = angular.element(elm);
// Store the line height and page height for this particular element
$elm.data('mousewheel-line-height', getLineHeight($elm));
$elm.data('mousewheel-page-height', s.elementHeight($elm));
if (!$elm.data('mousewheel-callbacks')) { $elm.data('mousewheel-callbacks', {}); }
var cbs = $elm.data('mousewheel-callbacks');
cbs[fn] = (Function.prototype.bind || bindPolyfill).call(mousewheelHandler, $elm[0], fn);
    // Bind all the mousewheel events
for ( var i = mouseWheeltoBind.length; i; ) {
$elm.on(mouseWheeltoBind[--i], cbs[fn]);
}
};
s.off.mousewheel = function (elm, fn) {
var $elm = angular.element(elm);
var cbs = $elm.data('mousewheel-callbacks');
var handler = cbs[fn];
if (handler) {
for ( var i = mouseWheeltoBind.length; i; ) {
$elm.off(mouseWheeltoBind[--i], handler);
}
}
delete cbs[fn];
if (Object.keys(cbs).length === 0) {
$elm.removeData('mousewheel-line-height');
$elm.removeData('mousewheel-page-height');
$elm.removeData('mousewheel-callbacks');
}
};
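  // Illustrative sketch (hypothetical usage, not from the original source): the mousewheel
  // helpers hang off the generic s.on/s.off maps. The same function reference must be passed
  // to both calls, because it is the key into the element's 'mousewheel-callbacks' data when
  // unbinding. `$viewport` and `handler` are assumed names used only for illustration.
  //
  //   var handler = function (event) { console.log(event.deltaX, event.deltaY); };
  //   s.on.mousewheel($viewport, handler);   // bind, with deltas normalized across browsers
  //   s.off.mousewheel($viewport, handler);  // unbind and clean up the cached element data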
function mousewheelHandler(fn, event) {
var $elm = angular.element(this);
var delta = 0,
deltaX = 0,
deltaY = 0,
absDelta = 0,
offsetX = 0,
offsetY = 0;
// jQuery masks events
if (event.originalEvent) { event = event.originalEvent; }
if ( 'detail' in event ) { deltaY = event.detail * -1; }
if ( 'wheelDelta' in event ) { deltaY = event.wheelDelta; }
if ( 'wheelDeltaY' in event ) { deltaY = event.wheelDeltaY; }
if ( 'wheelDeltaX' in event ) { deltaX = event.wheelDeltaX * -1; }
// Firefox < 17 horizontal scrolling related to DOMMouseScroll event
if ( 'axis' in event && event.axis === event.HORIZONTAL_AXIS ) {
deltaX = deltaY * -1;
deltaY = 0;
}
    // Set delta to be deltaY or deltaX if deltaY is 0 for backwards compatibility
delta = deltaY === 0 ? deltaX : deltaY;
// New school wheel delta (wheel event)
if ( 'deltaY' in event ) {
deltaY = event.deltaY * -1;
delta = deltaY;
}
if ( 'deltaX' in event ) {
deltaX = event.deltaX;
if ( deltaY === 0 ) { delta = deltaX * -1; }
}
// No change actually happened, no reason to go any further
if ( deltaY === 0 && deltaX === 0 ) { return; }
// Need to convert lines and pages to pixels if we aren't already in pixels
// There are three delta modes:
// * deltaMode 0 is by pixels, nothing to do
// * deltaMode 1 is by lines
// * deltaMode 2 is by pages
if ( event.deltaMode === 1 ) {
var lineHeight = $elm.data('mousewheel-line-height');
delta *= lineHeight;
deltaY *= lineHeight;
deltaX *= lineHeight;
}
else if ( event.deltaMode === 2 ) {
var pageHeight = $elm.data('mousewheel-page-height');
delta *= pageHeight;
deltaY *= pageHeight;
deltaX *= pageHeight;
}
// Store lowest absolute delta to normalize the delta values
absDelta = Math.max( Math.abs(deltaY), Math.abs(deltaX) );
if ( !lowestDelta || absDelta < lowestDelta ) {
lowestDelta = absDelta;
// Adjust older deltas if necessary
if ( shouldAdjustOldDeltas(event, absDelta) ) {
lowestDelta /= 40;
}
}
// Get a whole, normalized value for the deltas
delta = Math[ delta >= 1 ? 'floor' : 'ceil' ](delta / lowestDelta);
deltaX = Math[ deltaX >= 1 ? 'floor' : 'ceil' ](deltaX / lowestDelta);
deltaY = Math[ deltaY >= 1 ? 'floor' : 'ceil' ](deltaY / lowestDelta);
// Normalise offsetX and offsetY properties
// if ($elm[0].getBoundingClientRect ) {
// var boundingRect = $(elm)[0].getBoundingClientRect();
// offsetX = event.clientX - boundingRect.left;
// offsetY = event.clientY - boundingRect.top;
// }
// event.deltaX = deltaX;
// event.deltaY = deltaY;
// event.deltaFactor = lowestDelta;
var newEvent = {
originalEvent: event,
deltaX: deltaX,
deltaY: deltaY,
deltaFactor: lowestDelta,
preventDefault: function () { event.preventDefault(); },
stopPropagation: function () { event.stopPropagation(); }
};
    // Clear out lowestDelta after some time to better
// handle multiple device types that give
// a different lowestDelta
// Ex: trackpad = 3 and mouse wheel = 120
if (nullLowestDeltaTimeout) { clearTimeout(nullLowestDeltaTimeout); }
nullLowestDeltaTimeout = setTimeout(nullLowestDelta, 200);
fn.call($elm[0], newEvent);
}
function nullLowestDelta() {
lowestDelta = null;
}
function shouldAdjustOldDeltas(orgEvent, absDelta) {
    // If this is an older event and the delta is divisible by 120,
// then we are assuming that the browser is treating this as an
// older mouse wheel event and that we should divide the deltas
// by 40 to try and get a more usable deltaFactor.
// Side note, this actually impacts the reported scroll distance
// in older browsers and can cause scrolling to be slower than native.
// Turn this off by setting $.event.special.mousewheel.settings.adjustOldDeltas to false.
return orgEvent.type === 'mousewheel' && absDelta % 120 === 0;
}
return s;
}]);
// Add 'px' to the end of a number string if it doesn't have it already
module.filter('px', function() {
return function(str) {
if (str.match(/^[\d\.]+$/)) {
return str + 'px';
}
else {
return str;
}
};
});
})();
| 1 | 11,759 | this looks like it may be something that needs additional refactoring | angular-ui-ui-grid | js |
@@ -300,7 +300,7 @@ class Resource(object):
for resource in yielder.iter():
res = resource
new_stack = stack + [self]
-
+
# Parallelization for resource subtrees.
if res.should_dispatch():
callback = partial(res.try_accept, visitor, new_stack) | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Crawler implementation for gcp resources."""
# pylint: disable=too-many-lines, no-self-use, bad-docstring-quotes
import ctypes
from functools import partial
import json
import os
from google.cloud.forseti.common.gcp_api import errors as api_errors
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.services.inventory.base.gcp import (
ResourceNotSupported)
from google.cloud.forseti.services.inventory.base import iam_helpers
LOGGER = logger.get_logger(__name__)
def size_t_hash(key):
"""Hash the key using size_t.
Args:
key (str): The key to hash.
Returns:
str: The hashed key.
"""
return '%u' % ctypes.c_size_t(hash(key)).value
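# Illustrative sketch (hypothetical value, not from the original module): size_t_hash is used
# where a resource has no globally unique id, turning arbitrary key data into a stable
# unsigned decimal string. The exact digits depend on the interpreter's hash implementation
# and the platform word size.
#
#   size_t_hash('projects/my-project/topics/my-topic')  # -> e.g. '13595383164469249910'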
def from_root_id(client, root_id):
"""Start the crawling from root if the root type is supported.
Args:
client (object): GCP API client.
root_id (str): id of the root.
Returns:
Resource: the root resource instance.
Raises:
Exception: Unsupported root id.
"""
root_map = {
'organizations': ResourceManagerOrganization.fetch,
'projects': ResourceManagerProject.fetch,
'folders': ResourceManagerFolder.fetch,
}
for prefix, func in root_map.iteritems():
if root_id.startswith(prefix):
return func(client, root_id)
raise Exception(
'Unsupported root id, must be one of {}'.format(
','.join(root_map.keys())))
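# Illustrative sketch (hypothetical usage): from_root_id selects the fetcher by prefix, so the
# root id must start with 'organizations/', 'folders/' or 'projects/'. `client` and `crawler`
# stand in for a real GCP API client and crawler instance.
#
#   root = from_root_id(client, 'organizations/1234567890')
#   root.accept(crawler)  # start crawling from the organization node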
def cached(field_name):
"""Decorator to perform caching.
Args:
field_name (str): The name of the attribute to cache.
Returns:
wrapper: Function wrapper to perform caching.
"""
field_name = '__cached_{}'.format(field_name)
def _cached(f):
"""Cache wrapper.
Args:
f (func): function to be decorated.
Returns:
wrapper: Function wrapper to perform caching.
"""
def wrapper(*args, **kwargs):
"""Function wrapper to perform caching.
Args:
*args: args to be passed to the function.
**kwargs: kwargs to be passed to the function.
Returns:
object: Results of executing f.
"""
if hasattr(args[0], field_name):
return getattr(args[0], field_name)
result = f(*args, **kwargs)
setattr(args[0], field_name, result)
return result
return wrapper
return _cached
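# Illustrative sketch (hypothetical class, not from the original module): @cached memoizes the
# first result on the instance under a '__cached_<field_name>' attribute, so repeated calls
# during a single crawl do not re-hit the API.
#
#   class Example(object):
#       @cached('iam_policy')
#       def get_iam_policy(self, client=None):
#           return client.fetch_iam_policy()  # executed only on the first call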
class ResourceFactory(object):
"""ResourceFactory for visitor pattern."""
def __init__(self, attributes):
"""Initialize.
Args:
attributes (dict): attributes for a specific type of resource.
"""
self.attributes = attributes
def create_new(self, data, root=False):
"""Create a new instance of a Resource type.
Args:
data (str): raw data.
root (Resource): root of this resource.
Returns:
Resource: Resource instance.
"""
attrs = self.attributes
cls = attrs['cls']
return cls(data, root, **attrs)
# pylint: disable=too-many-instance-attributes, too-many-public-methods
class Resource(object):
"""The base Resource class."""
def __init__(self, data, root=False, contains=None, **kwargs):
"""Initialize.
Args:
data (str): raw data.
root (Resource): the root of this crawling.
contains (list): child types to crawl.
**kwargs (dict): arguments.
"""
del kwargs # Unused.
self._data = data
self._root = root
self._stack = None
self._visitor = None
self._contains = [] if contains is None else contains
self._warning = []
self._timestamp = self._utcnow()
self._inventory_key = None
@staticmethod
def _utcnow():
"""Wrapper for datetime.datetime.now() injection.
Returns:
datatime: the datetime.
"""
return date_time.get_utc_now_datetime()
def __getitem__(self, key):
"""Get Item.
Args:
key (str): key of this resource.
Returns:
str: data of this resource.
Raises:
KeyError: 'key: {}, data: {}'
"""
try:
return self._data[key]
except KeyError:
raise KeyError('key: {}, data: {}'.format(key, self._data))
def __setitem__(self, key, value):
"""Set the value of an item.
Args:
key (str): key of this resource.
value (str): value to set on this resource.
"""
self._data[key] = value
def set_inventory_key(self, key):
"""Set the inventory unique id for the resource.
Args:
key (int): The unique id for the resource from the storage.
"""
self._inventory_key = key
def inventory_key(self):
"""Gets the inventory key for this resource, if set.
Returns:
int: The unique id for the resource in storage.
"""
return self._inventory_key
@staticmethod
def type():
"""Get type of this resource.
Raises:
NotImplementedError: method not implemented.
"""
raise NotImplementedError()
def data(self):
"""Get data on this resource.
Returns:
str: raw data.
"""
return self._data
def parent(self):
"""Get parent of this resource.
Returns:
Resource: parent of this resource.
"""
if self._root:
return self
try:
return self._stack[-1]
except IndexError:
return None
def key(self):
"""Get key of this resource.
Raises:
NotImplementedError: key method not implemented.
"""
raise NotImplementedError('Class: {}'.format(self.__class__.__name__))
def add_warning(self, warning):
"""Add warning on this resource.
Args:
warning (str): warning to be added.
"""
self._warning.append(str(warning))
def get_warning(self):
"""Get warning on this resource.
Returns:
str: warning message.
"""
return '\n'.join(self._warning)
# pylint: disable=broad-except
def try_accept(self, visitor, stack=None):
"""Handle exceptions on the call the accept.
Args:
visitor (object): The class implementing the visitor pattern.
stack (list): The resource stack from the root to immediate parent
of this resource.
"""
try:
self.accept(visitor, stack)
except Exception as e:
LOGGER.exception(e)
self.parent().add_warning(e)
visitor.update(self.parent())
visitor.on_child_error(e)
def accept(self, visitor, stack=None):
"""Accept of resource in visitor pattern.
Args:
visitor (Crawler): visitor instance.
stack (list): resource hierarchy stack.
"""
stack = [] if not stack else stack
self._stack = stack
self._visitor = visitor
visitor.visit(self)
for yielder_cls in self._contains:
yielder = yielder_cls(self, visitor.get_client())
try:
for resource in yielder.iter():
res = resource
new_stack = stack + [self]
# Parallelization for resource subtrees.
if res.should_dispatch():
callback = partial(res.try_accept, visitor, new_stack)
visitor.dispatch(callback)
else:
res.try_accept(visitor, new_stack)
except Exception as e:
LOGGER.exception(e)
self.add_warning(e)
visitor.on_child_error(e)
if self._warning:
visitor.update(self)
# pylint: enable=broad-except
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""Get iam policy template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('gcs_policy')
def get_gcs_policy(self, client=None):
"""Get gcs policy template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('sql_policy')
def get_cloudsql_policy(self, client=None):
"""Get cloudsql policy template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('dataset_policy')
def get_dataset_policy(self, client=None):
"""Get dataset policy template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('group_members')
def get_group_members(self, client=None):
"""Get group member template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('billing_info')
def get_billing_info(self, client=None):
"""Get billing info template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('enabled_apis')
def get_enabled_apis(self, client=None):
"""Get enabled apis template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('service_config')
def get_kubernetes_service_config(self, client=None):
"""Get kubernetes service config method template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
def get_timestamp(self):
"""Template for timestamp when the resource object.
Returns:
str: a string timestamp when the resource object was created.
"""
return self._timestamp.strftime(string_formats.TIMESTAMP_UTC_OFFSET)
def stack(self):
"""Get resource hierarchy stack of this resource.
Returns:
list: resource hierarchy stack of this resource.
Raises:
Exception: 'Stack not initialized yet'.
"""
if self._stack is None:
raise Exception('Stack not initialized yet')
return self._stack
def visitor(self):
"""Get visitor on this resource.
Returns:
Crawler: visitor on this resource.
Raises:
Exception: 'Visitor not initialized yet'.
"""
if self._visitor is None:
raise Exception('Visitor not initialized yet')
return self._visitor
def should_dispatch(self):
"""Whether resources should run in parallel threads.
Returns:
bool: whether this resource should run in parallel threads.
"""
return False
def __repr__(self):
"""String Representation.
Returns:
str: Resource representation.
"""
return ('{}<data="{}", parent_resource_type="{}", '
'parent_resource_id="{}">').format(
self.__class__.__name__,
json.dumps(self._data, sort_keys=True),
self.parent().type(),
self.parent().key())
# pylint: enable=too-many-instance-attributes, too-many-public-methods
def resource_class_factory(resource_type, key_field, hash_key=False):
"""Factory function to generate Resource subclasses.
Args:
resource_type (str): The static resource type for this subclass.
key_field (str): The field in the resource data to use as the resource
unique key.
hash_key (bool): If true, use a hash of the key field data instead of
the value of the key field.
Returns:
class: A new class object.
"""
class ResourceSubclass(Resource):
"""Subclass of Resource."""
@staticmethod
def type():
"""Get type of this resource.
Returns:
str: The static resource type for this subclass.
"""
return resource_type
def key(self):
"""Get key of this resource.
Returns:
str: key of this resource.
"""
if hash_key:
# Resource does not have a globally unique ID, use size_t hash
# of key data.
return size_t_hash(self[key_field])
return self[key_field]
return ResourceSubclass
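# Illustrative sketch (hypothetical resource type): resource_class_factory is how most of the
# leaf resource classes below are declared; subclasses only add behaviour when they need it.
#
#   class ExampleWidget(resource_class_factory('example_widget', 'selfLink', hash_key=True)):
#       """The Resource implementation for a hypothetical Example Widget."""
#
# ExampleWidget.type() returns 'example_widget' and key() returns a size_t hash of the
# resource's 'selfLink' value.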
# Resource Manager resource classes
class ResourceManagerOrganization(resource_class_factory('organization', None)):
"""The Resource implementation for Organization."""
@classmethod
def fetch(cls, client, resource_key):
"""Get Organization.
Saves ApiExecutionErrors as warnings.
Args:
client (object): GCP API client.
resource_key (str): resource key to fetch.
Returns:
Organization: Organization resource.
"""
try:
data = client.fetch_crm_organization(resource_key)
return FACTORIES['organization'].create_new(data, root=True)
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Unable to fetch Organization %s: %s', resource_key, e)
data = {'name': resource_key}
resource = FACTORIES['organization'].create_new(data, root=True)
resource.add_warning(e)
return resource
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""Get iam policy for this organization.
Args:
client (object): GCP API client.
Returns:
dict: organization IAM Policy.
"""
try:
return client.fetch_crm_organization_iam_policy(self['name'])
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get IAM policy: %s', e)
self.add_warning(e)
return None
def has_directory_resource_id(self):
"""Whether this organization has a directoryCustomerId.
Returns:
bool: True if the data exists, else False.
"""
return ('owner' in self._data and
'directoryCustomerId' in self['owner'])
def key(self):
"""Get key of this resource.
Returns:
str: key of this resource.
"""
return self['name'].split('/', 1)[-1]
class ResourceManagerOrgPolicy(resource_class_factory('crm_org_policy', None)):
"""The Resource implementation for Resource Manager Organization Policy."""
def key(self):
"""Get key of this resource.
Returns:
str: key of this resource
"""
unique_key = '/'.join([self.parent().type(),
self.parent().key(),
self['constraint']])
return '%u' % ctypes.c_size_t(hash(unique_key)).value
class ResourceManagerFolder(resource_class_factory('folder', None)):
"""The Resource implementation for Folder."""
@classmethod
def fetch(cls, client, resource_key):
"""Get Folder.
Args:
client (object): GCP API client.
resource_key (str): resource key to fetch.
Returns:
Folder: Folder resource.
"""
try:
data = client.fetch_crm_folder(resource_key)
return FACTORIES['folder'].create_new(data, root=True)
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Unable to fetch Folder %s: %s', resource_key, e)
data = {'name': resource_key}
resource = FACTORIES['folder'].create_new(data, root=True)
resource.add_warning(e)
return resource
def key(self):
"""Get key of this resource.
Returns:
str: key of this resource.
"""
return self['name'].split('/', 1)[-1]
def should_dispatch(self):
"""Folder resources should run in parallel threads.
Returns:
bool: whether folder resources should run in parallel threads.
"""
return True
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""Get iam policy for this folder.
Args:
client (object): GCP API client.
Returns:
dict: Folder IAM Policy.
"""
try:
return client.fetch_crm_folder_iam_policy(self['name'])
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get IAM policy: %s', e)
self.add_warning(e)
return None
class ResourceManagerProject(resource_class_factory('project', 'projectId')):
"""The Resource implementation for Project."""
def __init__(self, data, root=False, contains=None, **kwargs):
"""Initialize.
Args:
data (str): raw data.
root (Resource): the root of this crawling.
contains (list): child types to crawl.
**kwargs (dict): arguments.
"""
super(ResourceManagerProject, self).__init__(data, root, contains,
**kwargs)
self._enabled_service_names = None
@classmethod
def fetch(cls, client, resource_key):
"""Get Project.
Args:
client (object): GCP API client.
resource_key (str): resource key to fetch.
Returns:
Project: created project.
"""
try:
project_number = resource_key.split('/', 1)[-1]
data = client.fetch_crm_project(project_number)
return FACTORIES['project'].create_new(data, root=True)
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Unable to fetch Project %s: %s', resource_key, e)
data = {'name': resource_key}
resource = FACTORIES['project'].create_new(data, root=True)
resource.add_warning(e)
return resource
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""Get iam policy for this project.
Args:
client (object): GCP API client.
Returns:
dict: Project IAM Policy.
"""
if self.enumerable():
try:
return client.fetch_crm_project_iam_policy(
project_number=self['projectNumber'])
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get IAM policy: %s', e)
self.add_warning(e)
return None
return {}
@cached('billing_info')
def get_billing_info(self, client=None):
"""Get billing info.
Args:
client (object): GCP API client.
Returns:
dict: Project Billing Info resource.
"""
if self.enumerable():
try:
return client.fetch_billing_project_info(
project_number=self['projectNumber'])
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get Billing Info: %s', e)
self.add_warning(e)
return None
return {}
@cached('enabled_apis')
def get_enabled_apis(self, client=None):
"""Get project enabled API services.
Args:
client (object): GCP API client.
Returns:
list: A list of ManagedService resource dicts.
"""
enabled_apis = []
if self.enumerable():
try:
enabled_apis = client.fetch_services_enabled_apis(
project_number=self['projectNumber'])
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get Enabled APIs: %s', e)
self.add_warning(e)
self._enabled_service_names = frozenset(
(api.get('serviceName') for api in enabled_apis))
return enabled_apis
def should_dispatch(self):
"""Project resources should run in parallel threads.
Returns:
bool: whether project resources should run in parallel threads.
"""
return True
def enumerable(self):
"""Check if this project is enumerable.
Returns:
bool: if this project is enumerable.
"""
return self['lifecycleState'] == 'ACTIVE'
def billing_enabled(self):
"""Check if billing is configured.
Returns:
bool: if billing is enabled on the project.
"""
if self.get_billing_info():
return self.get_billing_info().get('billingEnabled', False)
# If status is unknown, always return True so other APIs aren't blocked.
return True
def is_api_enabled(self, service_name):
"""Returns True if the API service is enabled on the project.
Args:
service_name (str): The API service name to check.
Returns:
bool: whether a service api is enabled
"""
if self._enabled_service_names:
return service_name in self._enabled_service_names
# If status is unknown, always return True so other APIs aren't blocked.
return True
def bigquery_api_enabled(self):
"""Check if the bigquery api is enabled.
Returns:
bool: if this API service is enabled on the project.
"""
# Bigquery API depends on billing being enabled
return (self.billing_enabled() and
self.is_api_enabled('bigquery-json.googleapis.com'))
def compute_api_enabled(self):
"""Check if the compute api is enabled.
Returns:
bool: if this API service is enabled on the project.
"""
# Compute API depends on billing being enabled
return (self.billing_enabled() and
self.is_api_enabled('compute.googleapis.com'))
def container_api_enabled(self):
"""Check if the container api is enabled.
Returns:
bool: if this API service is enabled on the project.
"""
        # Container API depends on billing being enabled
return (self.billing_enabled() and
self.is_api_enabled('container.googleapis.com'))
def storage_api_enabled(self):
"""whether storage api is enabled.
Returns:
bool: if this API service is enabled on the project.
"""
return self.is_api_enabled('storage-component.googleapis.com')
class ResourceManagerLien(resource_class_factory('lien', None)):
"""The Resource implementation for Resource Manager Lien."""
def key(self):
"""Get key of this resource.
Returns:
str: key of this resource
"""
return self['name'].split('/')[-1]
# AppEngine resource classes
class AppEngineApp(resource_class_factory('appengine_app', 'name',
hash_key=True)):
"""The Resource implementation for AppEngine App."""
class AppEngineService(resource_class_factory('appengine_service', 'name',
hash_key=True)):
"""The Resource implementation for AppEngine Service."""
class AppEngineVersion(resource_class_factory('appengine_version', 'name',
hash_key=True)):
"""The Resource implementation for AppEngine Version."""
class AppEngineInstance(resource_class_factory('appengine_instance', 'name',
hash_key=True)):
"""The Resource implementation for AppEngine Instance."""
# Bigquery resource classes
class BigqueryDataSet(resource_class_factory('dataset', 'id')):
"""The Resource implementation for Bigquery DataSet."""
def _set_cache(self, field_name, value):
"""Manually set a cache value if it isn't already set.
Args:
field_name (str): The name of the attribute to cache.
value (str): The value to cache.
"""
field_name = '__cached_{}'.format(field_name)
if not hasattr(self, field_name) or getattr(self, field_name) is None:
setattr(self, field_name, value)
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""IAM policy for this Dataset.
Args:
client (object): GCP API client.
Returns:
dict: Dataset Policy.
"""
try:
iam_policy = client.fetch_bigquery_iam_policy(
self.parent()['projectId'],
self.parent()['projectNumber'],
self['datasetReference']['datasetId'])
dataset_policy = iam_helpers.convert_iam_to_bigquery_policy(
iam_policy)
self._set_cache('dataset_policy', dataset_policy)
return iam_policy
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get Dataset IAM Policy: %s', e)
self.add_warning(e)
return None
@cached('dataset_policy')
def get_dataset_policy(self, client=None):
"""Dataset policy for this Dataset.
Args:
client (object): GCP API client.
Returns:
dict: Dataset Policy.
"""
try:
dataset_policy = client.fetch_bigquery_dataset_policy(
self.parent()['projectId'],
self.parent()['projectNumber'],
self['datasetReference']['datasetId'])
iam_policy = iam_helpers.convert_bigquery_policy_to_iam(
dataset_policy, self.parent()['projectId'])
self._set_cache('iam_policy', iam_policy)
return dataset_policy
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get Dataset Policy: %s', e)
self.add_warning(e)
return None
# BigqueryTable resource classes
class BigqueryTable(resource_class_factory('bigquery_table', 'id')):
"""The Resource implementation for bigquery table."""
# Billing resource classes
class BillingAccount(resource_class_factory('billing_account', None)):
"""The Resource implementation for BillingAccount."""
def key(self):
"""Get key of this resource.
Returns:
str: key of this resource.
"""
return self['name'].split('/', 1)[-1]
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""Get iam policy for this folder.
Args:
client (object): GCP API client.
Returns:
dict: Billing Account IAM Policy.
"""
try:
return client.fetch_billing_account_iam_policy(self['name'])
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get IAM policy: %s', e)
self.add_warning(e)
return None
# CloudSQL resource classes
class CloudSqlInstance(resource_class_factory('cloudsqlinstance', 'name')):
"""The Resource implementation for CloudSQL Instance."""
# Compute Engine resource classes
class ComputeAutoscaler(resource_class_factory('compute_autoscaler', 'id')):
"""The Resource implementation for Compute Autoscaler."""
class ComputeBackendBucket(resource_class_factory('compute_backendbucket',
'id')):
"""The Resource implementation for Compute Backend Bucket."""
class ComputeBackendService(resource_class_factory('backendservice', 'id')):
"""The Resource implementation for Compute Backend Service."""
class ComputeDisk(resource_class_factory('disk', 'id')):
"""The Resource implementation for Compute Disk."""
class ComputeFirewall(resource_class_factory('firewall', 'id')):
"""The Resource implementation for Compute Firewall."""
class ComputeForwardingRule(resource_class_factory('forwardingrule', 'id')):
"""The Resource implementation for Compute Forwarding Rule."""
class ComputeHealthCheck(resource_class_factory('compute_healthcheck', 'id')):
"""The Resource implementation for Compute HealthCheck."""
class ComputeHttpHealthCheck(resource_class_factory('compute_httphealthcheck',
'id')):
"""The Resource implementation for Compute HTTP HealthCheck."""
class ComputeHttpsHealthCheck(resource_class_factory('compute_httpshealthcheck',
'id')):
"""The Resource implementation for Compute HTTPS HealthCheck."""
class ComputeImage(resource_class_factory('image', 'id')):
"""The Resource implementation for Compute Image."""
class ComputeInstance(resource_class_factory('instance', 'id')):
"""The Resource implementation for Compute Instance."""
class ComputeInstanceGroup(resource_class_factory('instancegroup', 'id')):
"""The Resource implementation for Compute InstanceGroup."""
class ComputeInstanceGroupManager(resource_class_factory('instancegroupmanager',
'id')):
"""The Resource implementation for Compute InstanceGroupManager."""
class ComputeInstanceTemplate(resource_class_factory('instancetemplate', 'id')):
"""The Resource implementation for Compute InstanceTemplate."""
class ComputeLicense(resource_class_factory('compute_license', 'id')):
"""The Resource implementation for Compute License."""
class ComputeNetwork(resource_class_factory('network', 'id')):
"""The Resource implementation for Compute Network."""
class ComputeProject(resource_class_factory('compute_project', 'id')):
"""The Resource implementation for Compute Project."""
class ComputeRouter(resource_class_factory('compute_router', 'id')):
"""The Resource implementation for Compute Router."""
class ComputeSnapshot(resource_class_factory('snapshot', 'id')):
"""The Resource implementation for Compute Snapshot."""
class ComputeSslCertificate(resource_class_factory('compute_sslcertificate',
'id')):
"""The Resource implementation for Compute SSL Certificate."""
class ComputeSubnetwork(resource_class_factory('subnetwork', 'id')):
"""The Resource implementation for Compute Subnetwork."""
class ComputeTargetHttpProxy(resource_class_factory('compute_targethttpproxy',
'id')):
"""The Resource implementation for Compute TargetHttpProxy."""
class ComputeTargetHttpsProxy(resource_class_factory('compute_targethttpsproxy',
'id')):
"""The Resource implementation for Compute TargetHttpsProxy."""
class ComputeTargetInstance(resource_class_factory('compute_targetinstance',
'id')):
"""The Resource implementation for Compute TargetInstance."""
class ComputeTargetPool(resource_class_factory('compute_targetpool', 'id')):
"""The Resource implementation for Compute TargetPool."""
class ComputeTargetSslProxy(resource_class_factory('compute_targetsslproxy',
'id')):
"""The Resource implementation for Compute TargetSslProxy."""
class ComputeTargetTcpProxy(resource_class_factory('compute_targettcpproxy',
'id')):
"""The Resource implementation for Compute TargetTcpProxy."""
class ComputeTargetVpnGateway(resource_class_factory('compute_targetvpngateway',
'id')):
"""The Resource implementation for Compute TargetVpnGateway."""
class ComputeUrlMap(resource_class_factory('compute_urlmap', 'id')):
"""The Resource implementation for Compute UrlMap."""
class ComputeVpnTunnel(resource_class_factory('compute_vpntunnel', 'id')):
"""The Resource implementation for Compute VpnTunnel."""
# Cloud Dataproc resource classes
class DataprocCluster(resource_class_factory('dataproc_cluster',
'clusterUuid')):
"""The Resource implementation for Dataproc Cluster."""
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""Dataproc Cluster IAM policy.
Args:
client (object): GCP API client.
Returns:
dict: Dataproc Cluster IAM policy.
"""
try:
# Dataproc resource does not contain a direct reference to the
# region name except in an embedded label.
region = self['labels']['goog-dataproc-location']
cluster = 'projects/{}/regions/{}/clusters/{}'.format(
self['projectId'], region, self['clusterName'])
return client.fetch_dataproc_cluster_iam_policy(cluster)
except (api_errors.ApiExecutionError,
ResourceNotSupported,
KeyError,
TypeError) as e:
if isinstance(e, TypeError):
e = 'Cluster has no labels.'
LOGGER.warn('Could not get IAM policy: %s', e)
self.add_warning('Could not get IAM policy: %s' % e)
return None
# Cloud DNS resource classes
class DnsManagedZone(resource_class_factory('dns_managedzone', 'id')):
"""The Resource implementation for Cloud DNS ManagedZone."""
class DnsPolicy(resource_class_factory('dns_policy', 'id')):
"""The Resource implementation for Cloud DNS Policy."""
# IAM resource classes
class IamCuratedRole(resource_class_factory('role', 'name')):
"""The Resource implementation for IAM Curated Roles."""
def parent(self):
"""Curated roles have no parent."""
return None
class IamRole(resource_class_factory('role', 'name')):
"""The Resource implementation for IAM Roles."""
class IamServiceAccount(resource_class_factory('serviceaccount', 'uniqueId')):
"""The Resource implementation for IAM ServiceAccount."""
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""Service Account IAM policy for this service account.
Args:
client (object): GCP API client.
Returns:
dict: Service Account IAM policy.
"""
try:
return client.fetch_iam_serviceaccount_iam_policy(
self['name'], self['uniqueId'])
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get IAM policy: %s', e)
self.add_warning(e)
return None
class IamServiceAccountKey(resource_class_factory('serviceaccount_key', None)):
"""The Resource implementation for IAM ServiceAccountKey."""
def key(self):
"""Get key of this resource.
Key name is in the format:
projects/{project_id}/serviceAccounts/{service_account}/keys/{key_id}
Returns:
str: id key of this resource
"""
return self['name'].split('/')[-1]
# Key Management Service resource classes
class KmsCryptoKey(resource_class_factory('kms_cryptokey', 'name',
hash_key=True)):
"""The Resource implementation for KMS CryptoKey."""
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""KMS CryptoKey IAM policy.
Args:
client (object): GCP API client.
Returns:
dict: CryptoKey IAM policy.
"""
try:
return client.fetch_kms_cryptokey_iam_policy(self['name'])
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get IAM policy: %s', e)
self.add_warning(e)
return None
class KmsCryptoKeyVersion(resource_class_factory('kms_cryptokeyversion', 'name',
hash_key=True)):
"""The Resource implementation for KMS CryptoKeyVersion."""
class KmsKeyRing(resource_class_factory('kms_keyring', 'name',
hash_key=True)):
"""The Resource implementation for KMS KeyRing."""
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""KMS Keyring IAM policy.
Args:
client (object): GCP API client.
Returns:
dict: Keyring IAM policy.
"""
try:
return client.fetch_kms_keyring_iam_policy(self['name'])
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get IAM policy: %s', e)
self.add_warning(e)
return None
# Kubernetes Engine resource classes
class KubernetesCluster(resource_class_factory('kubernetes_cluster',
'selfLink',
hash_key=True)):
"""The Resource implementation for Kubernetes Cluster."""
@cached('service_config')
def get_kubernetes_service_config(self, client=None):
"""Get service config for KubernetesCluster.
Args:
client (object): GCP API client.
Returns:
            dict: Kubernetes Engine Cluster service config.
"""
try:
return client.fetch_container_serviceconfig(
self.parent().key(), zone=self.zone(), location=self.location())
except ValueError:
LOGGER.exception('Cluster has no zone or location: %s',
self._data)
return {}
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get Service Config: %s', e)
self.add_warning(e)
return None
def location(self):
"""Get KubernetesCluster location.
Returns:
str: KubernetesCluster location.
"""
try:
self_link_parts = self['selfLink'].split('/')
return self_link_parts[self_link_parts.index('locations') + 1]
except (KeyError, ValueError):
LOGGER.debug('selfLink not found or contains no locations: %s',
self._data)
return None
def zone(self):
"""Get KubernetesCluster zone.
Returns:
str: KubernetesCluster zone.
"""
try:
self_link_parts = self['selfLink'].split('/')
return self_link_parts[self_link_parts.index('zones') + 1]
except (KeyError, ValueError):
LOGGER.debug('selfLink not found or contains no zones: %s',
self._data)
return None
# Stackdriver Logging resource classes
class LoggingSink(resource_class_factory('sink', None)):
"""The Resource implementation for Stackdriver Logging sink."""
def key(self):
"""Get key of this resource.
Returns:
str: key of this resource
"""
sink_name = '/'.join([self.parent().type(), self.parent().key(),
self.type(), self['name']])
return sink_name
# GSuite resource classes
class GsuiteUser(resource_class_factory('gsuite_user', 'id')):
"""The Resource implementation for GSuite User."""
class GsuiteGroup(resource_class_factory('gsuite_group', 'id')):
"""The Resource implementation for GSuite User."""
def should_dispatch(self):
"""GSuite Groups should always dispatch to another thread.
Returns:
bool: Always returns True.
"""
return True
class GsuiteUserMember(resource_class_factory('gsuite_user_member', 'id')):
"""The Resource implementation for GSuite User."""
class GsuiteGroupMember(resource_class_factory('gsuite_group_member', 'id')):
"""The Resource implementation for GSuite User."""
# Cloud Pub/Sub resource classes
class PubsubSubscription(resource_class_factory('pubsub_subscription', 'name',
hash_key=True)):
"""The Resource implementation for PubSub Subscription."""
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""Get IAM policy for this Pubsub Subscription.
Args:
client (object): GCP API client.
Returns:
dict: Pubsub Subscription IAM policy.
"""
try:
return client.fetch_pubsub_subscription_iam_policy(self['name'])
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get IAM policy: %s', e)
self.add_warning(e)
return None
class PubsubTopic(resource_class_factory('pubsub_topic', 'name',
hash_key=True)):
"""The Resource implementation for PubSub Topic."""
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""Get IAM policy for this Pubsub Topic.
Args:
client (object): GCP API client.
Returns:
dict: Pubsub Topic IAM policy.
"""
try:
return client.fetch_pubsub_topic_iam_policy(self['name'])
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get IAM policy: %s', e)
self.add_warning(e)
return None
# Cloud Spanner resource classes
class SpannerDatabase(resource_class_factory('spanner_database', 'name',
hash_key=True)):
"""The Resource implementation for Spanner Database."""
class SpannerInstance(resource_class_factory('spanner_instance', 'name',
hash_key=True)):
"""The Resource implementation for Spanner Instance."""
# Cloud storage resource classes
class StorageBucket(resource_class_factory('bucket', 'id')):
"""The Resource implementation for Storage Bucket."""
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""Get IAM policy for this Storage bucket.
Args:
client (object): GCP API client.
Returns:
dict: bucket IAM policy.
"""
try:
return client.fetch_storage_bucket_iam_policy(self.key())
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get IAM policy: %s', e)
self.add_warning(e)
return None
@cached('gcs_policy')
def get_gcs_policy(self, client=None):
"""Get Bucket Access Control policy for this storage bucket.
Args:
client (object): GCP API client.
Returns:
list: bucket access controls.
"""
try:
# Full projection returns GCS policy with the resource.
return self['acl']
except KeyError:
pass
try:
return client.fetch_storage_bucket_acls(
self.key(),
self.parent()['projectId'],
self['projectNumber'])
except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
LOGGER.warn('Could not get bucket Access Control policy: %s', e)
self.add_warning(e)
return None
class StorageObject(resource_class_factory('storage_object', 'id')):
"""The Resource implementation for Storage Object."""
def get_gcs_policy(self, client=None):
"""Full projection returns GCS policy with the resource.
Args:
client (object): GCP API client.
Returns:
dict: Object acl.
"""
try:
return self['acl']
except KeyError:
return []
class ResourceIterator(object):
"""The Resource iterator template."""
def __init__(self, resource, client):
"""Initialize.
Args:
resource (Resource): The parent resource.
client (object): GCP API Client.
"""
self.resource = resource
self.client = client
def iter(self):
"""Resource iterator.
Raises:
NotImplementedError: Abstract class method not implemented.
"""
raise NotImplementedError()
def resource_iter_class_factory(api_method_name,
resource_name,
api_method_arg_key=None,
additional_arg_keys=None,
resource_validation_method_name=None,
**kwargs):
"""Factory function to generate ResourceIterator subclasses.
Args:
api_method_name (str): The method to call on the API client class to
iterate resources.
resource_name (str): The name of the resource to create from the
resource factory.
api_method_arg_key (str): An optional key from the resource dict to
lookup for the value to send to the api method.
additional_arg_keys (list): An optional list of additional keys from the
resource dict to lookup for the values to send to the api method.
resource_validation_method_name (str): An optional method name to call
to validate that the resource supports iterating resources of this
type.
**kwargs (dict): Additional keyword args to send to the api method.
Returns:
class: A new class object.
"""
def always_true():
"""Helper function that always returns True.
Returns:
bool: True
"""
return True
class ResourceIteratorSubclass(ResourceIterator):
"""Subclass of ResourceIterator."""
def iter(self):
"""Resource iterator.
Yields:
Resource: resource returned from client.
"""
gcp = self.client
if resource_validation_method_name:
resource_validation_check = getattr(
self.resource, resource_validation_method_name)
else:
# Always return True if no check is configured.
resource_validation_check = always_true
if resource_validation_check():
try:
iter_method = getattr(gcp, api_method_name)
args = []
if api_method_arg_key:
args.append(self.resource[api_method_arg_key])
if additional_arg_keys:
args.extend(
self.resource[key] for key in additional_arg_keys)
for data in iter_method(*args, **kwargs):
yield FACTORIES[resource_name].create_new(data)
except ResourceNotSupported as e:
# API client doesn't support this resource, ignore.
LOGGER.debug(e)
return ResourceIteratorSubclass
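# Illustrative sketch (hypothetical iterator): most of the iterators below are one-line
# declarations built from this factory. The api_method_name must exist on the API client;
# 'iter_example_widgets' and 'example_widget' are assumed names used only for illustration.
#
#   class ExampleWidgetIterator(resource_iter_class_factory(
#           api_method_name='iter_example_widgets',
#           resource_name='example_widget',
#           api_method_arg_key='projectNumber',
#           resource_validation_method_name='enumerable')):
#       """The Resource iterator implementation for Example Widget."""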
class ResourceManagerFolderIterator(resource_iter_class_factory(
api_method_name='iter_crm_folders', resource_name='folder',
api_method_arg_key='name')):
"""The Resource iterator implementation for Resource Manager Folder."""
class ResourceManagerFolderOrgPolicyIterator(resource_iter_class_factory(
api_method_name='iter_crm_folder_org_policies',
resource_name='crm_org_policy',
api_method_arg_key='name')):
"""The Resource iterator implementation for CRM Folder Org Policies."""
class ResourceManagerOrganizationOrgPolicyIterator(resource_iter_class_factory(
api_method_name='iter_crm_organization_org_policies',
resource_name='crm_org_policy',
api_method_arg_key='name')):
"""The Resource iterator for CRM Organization Org Policies."""
# Project iterator requires looking up parent type, so cannot use class factory.
class ResourceManagerProjectIterator(ResourceIterator):
"""The Resource iterator implementation for Resource Manager Project."""
def iter(self):
"""Resource iterator.
Yields:
Resource: Project created
"""
gcp = self.client
parent_type = self.resource.type()
parent_id = self.resource.key()
try:
for data in gcp.iter_crm_projects(
parent_type=parent_type, parent_id=parent_id):
yield FACTORIES['project'].create_new(data)
except ResourceNotSupported as e:
# API client doesn't support this resource, ignore.
LOGGER.debug(e)
class ResourceManagerProjectOrgPolicyIterator(resource_iter_class_factory(
api_method_name='iter_crm_project_org_policies',
resource_name='crm_org_policy',
api_method_arg_key='projectNumber')):
"""The Resource iterator implementation for CRM Project Org Policies."""
# AppEngine iterators do not support using the class factory.
class AppEngineAppIterator(ResourceIterator):
"""The Resource iterator implementation for AppEngineApp"""
def iter(self):
"""Resource iterator.
Yields:
Resource: AppEngineApp created
"""
gcp = self.client
if self.resource.enumerable():
try:
data = gcp.fetch_gae_app(project_id=self.resource['projectId'])
if data:
yield FACTORIES['appengine_app'].create_new(data)
except ResourceNotSupported as e:
# API client doesn't support this resource, ignore.
LOGGER.debug(e)
class AppEngineServiceIterator(ResourceIterator):
"""The Resource iterator implementation for AppEngineService"""
def iter(self):
"""Resource iterator.
Yields:
Resource: AppEngineService created
"""
gcp = self.client
try:
for data in gcp.iter_gae_services(project_id=self.resource['id']):
yield FACTORIES['appengine_service'].create_new(data)
except ResourceNotSupported as e:
# API client doesn't support this resource, ignore.
LOGGER.debug(e)
class AppEngineVersionIterator(ResourceIterator):
"""The Resource iterator implementation for AppEngineVersion"""
def iter(self):
"""Resource iterator.
Yields:
Resource: AppEngineVersion created
"""
gcp = self.client
try:
for data in gcp.iter_gae_versions(
project_id=self.resource.parent()['id'],
service_id=self.resource['id']):
yield FACTORIES['appengine_version'].create_new(data)
except ResourceNotSupported as e:
# API client doesn't support this resource, ignore.
LOGGER.debug(e)
class AppEngineInstanceIterator(ResourceIterator):
"""The Resource iterator implementation for AppEngineInstance"""
def iter(self):
"""Resource iterator.
Yields:
Resource: AppEngineInstance created
"""
gcp = self.client
try:
for data in gcp.iter_gae_instances(
project_id=self.resource.parent().parent()['id'],
service_id=self.resource.parent()['id'],
version_id=self.resource['id']):
yield FACTORIES['appengine_instance'].create_new(data)
except ResourceNotSupported as e:
# API client doesn't support this resource, ignore.
LOGGER.debug(e)
class BigqueryDataSetIterator(resource_iter_class_factory(
api_method_name='iter_bigquery_datasets',
resource_name='bigquery_dataset',
api_method_arg_key='projectNumber')):
"""The Resource iterator implementation for Bigquery Dataset."""
class BigqueryTableIterator(resource_iter_class_factory(
api_method_name='iter_bigquery_tables',
resource_name='bigquery_table',
api_method_arg_key='datasetReference')):
"""The Resource iterator implementation for Bigquery Table."""
class BillingAccountIterator(resource_iter_class_factory(
api_method_name='iter_billing_accounts',
resource_name='billing_account')):
"""The Resource iterator implementation for Billing Account."""
class CloudSqlInstanceIterator(resource_iter_class_factory(
api_method_name='iter_cloudsql_instances',
resource_name='cloudsql_instance',
api_method_arg_key='projectNumber',
resource_validation_method_name='enumerable')):
"""The Resource iterator implementation for CloudSQL Instance."""
def compute_iter_class_factory(api_method_name, resource_name):
"""Factory function to generate ResourceIterator subclasses for Compute.
Args:
api_method_name (str): The method to call on the API client class to
iterate resources.
resource_name (str): The name of the resource to create from the
resource factory.
Returns:
class: A new class object.
"""
return resource_iter_class_factory(
api_method_name, resource_name, api_method_arg_key='projectNumber',
resource_validation_method_name='compute_api_enabled')
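# Illustrative note (equivalent expansion, hypothetical resource name): the Compute iterators
# below only fix the common arguments, e.g.
#
#   compute_iter_class_factory('iter_compute_examples', 'compute_example')
#   # is the same as
#   resource_iter_class_factory('iter_compute_examples', 'compute_example',
#                               api_method_arg_key='projectNumber',
#                               resource_validation_method_name='compute_api_enabled')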
class ComputeAutoscalerIterator(compute_iter_class_factory(
api_method_name='iter_compute_autoscalers',
resource_name='compute_autoscaler')):
"""The Resource iterator implementation for Compute Autoscaler."""
class ComputeBackendBucketIterator(compute_iter_class_factory(
api_method_name='iter_compute_backendbuckets',
resource_name='compute_backendbucket')):
"""The Resource iterator implementation for Compute BackendBucket."""
class ComputeBackendServiceIterator(compute_iter_class_factory(
api_method_name='iter_compute_backendservices',
resource_name='compute_backendservice')):
"""The Resource iterator implementation for Compute BackendService."""
class ComputeDiskIterator(compute_iter_class_factory(
api_method_name='iter_compute_disks',
resource_name='compute_disk')):
"""The Resource iterator implementation for Compute Disk."""
class ComputeFirewallIterator(compute_iter_class_factory(
api_method_name='iter_compute_firewalls',
resource_name='compute_firewall')):
"""The Resource iterator implementation for Compute Firewall."""
class ComputeForwardingRuleIterator(compute_iter_class_factory(
api_method_name='iter_compute_forwardingrules',
resource_name='compute_forwardingrule')):
"""The Resource iterator implementation for Compute ForwardingRule."""
class ComputeHealthCheckIterator(compute_iter_class_factory(
api_method_name='iter_compute_healthchecks',
resource_name='compute_healthcheck')):
"""The Resource iterator implementation for Compute HealthCheck."""
class ComputeHttpHealthCheckIterator(compute_iter_class_factory(
api_method_name='iter_compute_httphealthchecks',
resource_name='compute_httphealthcheck')):
"""The Resource iterator implementation for Compute HttpHealthCheck."""
class ComputeHttpsHealthCheckIterator(compute_iter_class_factory(
api_method_name='iter_compute_httpshealthchecks',
resource_name='compute_httpshealthcheck')):
"""The Resource iterator implementation for Compute HttpsHealthCheck."""
class ComputeImageIterator(compute_iter_class_factory(
api_method_name='iter_compute_images',
resource_name='compute_image')):
"""The Resource iterator implementation for Compute Image."""
# TODO: Refactor IAP scanner to not expect additional data to be included
# with the instancegroup resource.
class ComputeInstanceGroupIterator(ResourceIterator):
"""The Resource iterator implementation for Compute InstanceGroup."""
def iter(self):
"""Compute InstanceGroup iterator.
Yields:
Resource: Compute InstanceGroup resource.
"""
gcp = self.client
if self.resource.compute_api_enabled():
try:
for data in gcp.iter_compute_instancegroups(
self.resource['projectNumber']):
# IAP Scanner expects instance URLs to be included with the
# instance groups.
try:
data['instance_urls'] = gcp.fetch_compute_ig_instances(
self.resource['projectNumber'],
data['name'],
zone=os.path.basename(data.get('zone', '')),
region=os.path.basename(data.get('region', '')))
except ResourceNotSupported as e:
# API client doesn't support this resource, ignore.
LOGGER.debug(e)
yield FACTORIES['compute_instancegroup'].create_new(data)
except ResourceNotSupported as e:
# API client doesn't support this resource, ignore.
LOGGER.debug(e)
class ComputeInstanceGroupManagerIterator(compute_iter_class_factory(
api_method_name='iter_compute_ig_managers',
resource_name='compute_instancegroupmanager')):
"""The Resource iterator implementation for Compute InstanceGroupManager."""
class ComputeInstanceIterator(compute_iter_class_factory(
api_method_name='iter_compute_instances',
resource_name='compute_instance')):
"""The Resource iterator implementation for Compute Instance."""
class ComputeInstanceTemplateIterator(compute_iter_class_factory(
api_method_name='iter_compute_instancetemplates',
resource_name='compute_instancetemplate')):
"""The Resource iterator implementation for Compute InstanceTemplate."""
class ComputeLicenseIterator(compute_iter_class_factory(
api_method_name='iter_compute_licenses',
resource_name='compute_license')):
"""The Resource iterator implementation for Compute License."""
class ComputeNetworkIterator(compute_iter_class_factory(
api_method_name='iter_compute_networks',
resource_name='compute_network')):
"""The Resource iterator implementation for Compute Network."""
class ComputeProjectIterator(compute_iter_class_factory(
api_method_name='iter_compute_project',
resource_name='compute_project')):
"""The Resource iterator implementation for Compute Project."""
class ComputeRouterIterator(compute_iter_class_factory(
api_method_name='iter_compute_routers',
resource_name='compute_router')):
"""The Resource iterator implementation for Compute Router."""
class ComputeSnapshotIterator(compute_iter_class_factory(
api_method_name='iter_compute_snapshots',
resource_name='compute_snapshot')):
"""The Resource iterator implementation for Compute Snapshot."""
class ComputeSslCertificateIterator(compute_iter_class_factory(
api_method_name='iter_compute_sslcertificates',
resource_name='compute_sslcertificate')):
"""The Resource iterator implementation for Compute SSL Certificate."""
class ComputeSubnetworkIterator(compute_iter_class_factory(
api_method_name='iter_compute_subnetworks',
resource_name='compute_subnetwork')):
"""The Resource iterator implementation for Compute Subnetwork."""
class ComputeTargetHttpProxyIterator(compute_iter_class_factory(
api_method_name='iter_compute_targethttpproxies',
resource_name='compute_targethttpproxy')):
"""The Resource iterator implementation for Compute TargetHttpProxy."""
class ComputeTargetHttpsProxyIterator(compute_iter_class_factory(
api_method_name='iter_compute_targethttpsproxies',
resource_name='compute_targethttpsproxy')):
"""The Resource iterator implementation for Compute TargetHttpsProxy."""
class ComputeTargetInstanceIterator(compute_iter_class_factory(
api_method_name='iter_compute_targetinstances',
resource_name='compute_targetinstance')):
"""The Resource iterator implementation for Compute TargetInstance."""
class ComputeTargetPoolIterator(compute_iter_class_factory(
api_method_name='iter_compute_targetpools',
resource_name='compute_targetpool')):
"""The Resource iterator implementation for Compute TargetPool."""
class ComputeTargetSslProxyIterator(compute_iter_class_factory(
api_method_name='iter_compute_targetsslproxies',
resource_name='compute_targetsslproxy')):
"""The Resource iterator implementation for Compute TargetSslProxy."""
class ComputeTargetTcpProxyIterator(compute_iter_class_factory(
api_method_name='iter_compute_targettcpproxies',
resource_name='compute_targettcpproxy')):
"""The Resource iterator implementation for Compute TargetTcpProxy."""
class ComputeTargetVpnGatewayIterator(compute_iter_class_factory(
api_method_name='iter_compute_targetvpngateways',
resource_name='compute_targetvpngateway')):
"""The Resource iterator implementation for Compute TargetVpnGateway."""
class ComputeUrlMapIterator(compute_iter_class_factory(
api_method_name='iter_compute_urlmaps',
resource_name='compute_urlmap')):
"""The Resource iterator implementation for Compute UrlMap."""
class ComputeVpnTunnelIterator(compute_iter_class_factory(
api_method_name='iter_compute_vpntunnels',
resource_name='compute_vpntunnel')):
"""The Resource iterator implementation for Compute VpnTunnel."""
class DataprocClusterIterator(resource_iter_class_factory(
api_method_name='iter_dataproc_clusters',
resource_name='dataproc_cluster',
api_method_arg_key='projectId',
resource_validation_method_name='enumerable')):
"""The Resource iterator implementation for Cloud Dataproc Cluster."""
class DnsManagedZoneIterator(resource_iter_class_factory(
api_method_name='iter_dns_managedzones',
resource_name='dns_managedzone',
api_method_arg_key='projectNumber',
resource_validation_method_name='enumerable')):
"""The Resource iterator implementation for Cloud DNS ManagedZone."""
class DnsPolicyIterator(resource_iter_class_factory(
api_method_name='iter_dns_policies',
resource_name='dns_policy',
api_method_arg_key='projectNumber',
resource_validation_method_name='enumerable')):
"""The Resource iterator implementation for Cloud DNS Policy."""
# GSuite iterators do not support using the class factory.
class GsuiteGroupIterator(ResourceIterator):
"""The Resource iterator implementation for Gsuite Group"""
def iter(self):
"""Resource iterator.
Yields:
Resource: GsuiteGroup created
"""
gsuite = self.client
if self.resource.has_directory_resource_id():
try:
for data in gsuite.iter_gsuite_groups(
self.resource['owner']['directoryCustomerId']):
yield FACTORIES['gsuite_group'].create_new(data)
except ResourceNotSupported as e:
# API client doesn't support this resource, ignore.
LOGGER.debug(e)
class GsuiteMemberIterator(ResourceIterator):
"""The Resource iterator implementation for Gsuite Member"""
def iter(self):
"""Resource iterator.
Yields:
Resource: GsuiteUserMember or GsuiteGroupMember created
"""
gsuite = self.client
try:
for data in gsuite.iter_gsuite_group_members(self.resource['id']):
if data['type'] == 'USER':
yield FACTORIES['gsuite_user_member'].create_new(data)
elif data['type'] == 'GROUP':
yield FACTORIES['gsuite_group_member'].create_new(data)
except ResourceNotSupported as e:
# API client doesn't support this resource, ignore.
LOGGER.debug(e)
class GsuiteUserIterator(ResourceIterator):
"""The Resource iterator implementation for Gsuite User"""
def iter(self):
"""Resource iterator.
Yields:
Resource: GsuiteUser created
"""
gsuite = self.client
if self.resource.has_directory_resource_id():
try:
for data in gsuite.iter_gsuite_users(
self.resource['owner']['directoryCustomerId']):
yield FACTORIES['gsuite_user'].create_new(data)
except ResourceNotSupported as e:
# API client doesn't support this resource, ignore.
LOGGER.debug(e)
class IamOrganizationCuratedRoleIterator(resource_iter_class_factory(
api_method_name='iter_iam_curated_roles',
resource_name='iam_curated_role')):
"""The Resource iterator implementation for Organization Curated Role."""
class IamOrganizationRoleIterator(resource_iter_class_factory(
api_method_name='iter_iam_organization_roles',
resource_name='iam_role',
api_method_arg_key='name')):
"""The Resource iterator implementation for IAM Organization Role."""
class IamProjectRoleIterator(resource_iter_class_factory(
api_method_name='iter_iam_project_roles',
resource_name='iam_role',
api_method_arg_key='projectId',
additional_arg_keys=['projectNumber'],
resource_validation_method_name='enumerable')):
"""The Resource iterator implementation for IAM Project Role."""
class IamServiceAccountIterator(resource_iter_class_factory(
api_method_name='iter_iam_serviceaccounts',
resource_name='iam_serviceaccount',
api_method_arg_key='projectId',
additional_arg_keys=['projectNumber'],
resource_validation_method_name='enumerable')):
"""The Resource iterator implementation for IAM ServiceAccount."""
class IamServiceAccountKeyIterator(resource_iter_class_factory(
api_method_name='iter_iam_serviceaccount_exported_keys',
resource_name='iam_serviceaccount_key',
api_method_arg_key='name')):
"""The Resource iterator implementation for IAM ServiceAccount Key."""
class KmsKeyRingIterator(resource_iter_class_factory(
api_method_name='iter_kms_keyrings',
resource_name='kms_keyring',
api_method_arg_key='projectId',
resource_validation_method_name='enumerable')):
"""The Resource iterator implementation for KMS KeyRing."""
class KmsCryptoKeyIterator(resource_iter_class_factory(
api_method_name='iter_kms_cryptokeys',
resource_name='kms_cryptokey',
api_method_arg_key='name')):
"""The Resource iterator implementation for KMS CryptoKey."""
class KmsCryptoKeyVersionIterator(resource_iter_class_factory(
api_method_name='iter_kms_cryptokeyversions',
resource_name='kms_cryptokeyversion',
api_method_arg_key='name')):
"""The Resource iterator implementation for KMS CryptoKeyVersion."""
class KubernetesClusterIterator(resource_iter_class_factory(
api_method_name='iter_container_clusters',
resource_name='kubernetes_cluster',
api_method_arg_key='projectNumber',
resource_validation_method_name='container_api_enabled')):
"""The Resource iterator implementation for Kubernetes Cluster."""
class LoggingBillingAccountSinkIterator(resource_iter_class_factory(
api_method_name='iter_stackdriver_billing_account_sinks',
resource_name='logging_sink',
api_method_arg_key='name')):
"""The Resource iterator implementation for Logging Billing Account Sink."""
class LoggingFolderSinkIterator(resource_iter_class_factory(
api_method_name='iter_stackdriver_folder_sinks',
resource_name='logging_sink',
api_method_arg_key='name')):
"""The Resource iterator implementation for Logging Folder Sink."""
class LoggingOrganizationSinkIterator(resource_iter_class_factory(
api_method_name='iter_stackdriver_organization_sinks',
resource_name='logging_sink',
api_method_arg_key='name')):
"""The Resource iterator implementation for Logging Organization Sink"""
class LoggingProjectSinkIterator(resource_iter_class_factory(
api_method_name='iter_stackdriver_project_sinks',
resource_name='logging_sink',
api_method_arg_key='projectNumber',
resource_validation_method_name='enumerable')):
"""The Resource iterator implementation for Logging Project Sink."""
class PubsubSubscriptionIterator(resource_iter_class_factory(
api_method_name='iter_pubsub_subscriptions',
resource_name='pubsub_subscription',
api_method_arg_key='projectId',
additional_arg_keys=['projectNumber'],
resource_validation_method_name='enumerable')):
"""The Resource iterator implementation for PubSub Subscription."""
class PubsubTopicIterator(resource_iter_class_factory(
api_method_name='iter_pubsub_topics',
resource_name='pubsub_topic',
api_method_arg_key='projectId',
additional_arg_keys=['projectNumber'],
resource_validation_method_name='enumerable')):
"""The Resource iterator implementation for PubSub Topic."""
class ResourceManagerProjectLienIterator(resource_iter_class_factory(
api_method_name='iter_crm_project_liens',
resource_name='crm_lien',
api_method_arg_key='projectNumber',
resource_validation_method_name='enumerable')):
"""The Resource iterator implementation for Resource Manager Lien."""
class SpannerDatabaseIterator(resource_iter_class_factory(
api_method_name='iter_spanner_databases',
resource_name='spanner_database',
api_method_arg_key='name')):
"""The Resource iterator implementation for Cloud DNS ManagedZone."""
class SpannerInstanceIterator(resource_iter_class_factory(
api_method_name='iter_spanner_instances',
resource_name='spanner_instance',
api_method_arg_key='projectNumber',
resource_validation_method_name='enumerable')):
"""The Resource iterator implementation for Cloud DNS Policy."""
class StorageBucketIterator(resource_iter_class_factory(
api_method_name='iter_storage_buckets',
resource_name='storage_bucket',
api_method_arg_key='projectNumber',
resource_validation_method_name='storage_api_enabled')):
"""The Resource iterator implementation for Storage Bucket."""
class StorageObjectIterator(resource_iter_class_factory(
api_method_name='iter_storage_objects',
resource_name='storage_object',
api_method_arg_key='id')):
"""The Resource iterator implementation for Storage Object."""
FACTORIES = {
'organization': ResourceFactory({
'dependsOn': [],
'cls': ResourceManagerOrganization,
'contains': [
BillingAccountIterator,
GsuiteGroupIterator,
GsuiteUserIterator,
IamOrganizationCuratedRoleIterator,
IamOrganizationRoleIterator,
LoggingOrganizationSinkIterator,
ResourceManagerOrganizationOrgPolicyIterator,
ResourceManagerFolderIterator,
ResourceManagerProjectIterator,
]}),
'folder': ResourceFactory({
'dependsOn': ['organization'],
'cls': ResourceManagerFolder,
'contains': [
LoggingFolderSinkIterator,
ResourceManagerFolderOrgPolicyIterator,
ResourceManagerFolderIterator,
ResourceManagerProjectIterator,
]}),
'project': ResourceFactory({
'dependsOn': ['organization', 'folder'],
'cls': ResourceManagerProject,
'contains': [
AppEngineAppIterator,
BigqueryDataSetIterator,
CloudSqlInstanceIterator,
ComputeAutoscalerIterator,
ComputeBackendBucketIterator,
ComputeBackendServiceIterator,
ComputeDiskIterator,
ComputeFirewallIterator,
ComputeForwardingRuleIterator,
ComputeHealthCheckIterator,
ComputeHttpHealthCheckIterator,
ComputeHttpsHealthCheckIterator,
ComputeImageIterator,
ComputeInstanceGroupIterator,
ComputeInstanceGroupManagerIterator,
ComputeInstanceIterator,
ComputeInstanceTemplateIterator,
ComputeLicenseIterator,
ComputeNetworkIterator,
ComputeProjectIterator,
ComputeRouterIterator,
ComputeSnapshotIterator,
ComputeSslCertificateIterator,
ComputeSubnetworkIterator,
ComputeTargetHttpProxyIterator,
ComputeTargetHttpsProxyIterator,
ComputeTargetInstanceIterator,
ComputeTargetPoolIterator,
ComputeTargetSslProxyIterator,
ComputeTargetTcpProxyIterator,
ComputeTargetVpnGatewayIterator,
ComputeUrlMapIterator,
ComputeVpnTunnelIterator,
DataprocClusterIterator,
DnsManagedZoneIterator,
DnsPolicyIterator,
IamProjectRoleIterator,
IamServiceAccountIterator,
KmsKeyRingIterator,
KubernetesClusterIterator,
LoggingProjectSinkIterator,
PubsubSubscriptionIterator,
PubsubTopicIterator,
ResourceManagerProjectLienIterator,
ResourceManagerProjectOrgPolicyIterator,
SpannerInstanceIterator,
StorageBucketIterator,
]}),
'appengine_app': ResourceFactory({
'dependsOn': ['project'],
'cls': AppEngineApp,
'contains': [
AppEngineServiceIterator,
]}),
'appengine_service': ResourceFactory({
'dependsOn': ['appengine_app'],
'cls': AppEngineService,
'contains': [
AppEngineVersionIterator,
]}),
'appengine_version': ResourceFactory({
'dependsOn': ['appengine_service'],
'cls': AppEngineVersion,
'contains': [
AppEngineInstanceIterator,
]}),
'appengine_instance': ResourceFactory({
'dependsOn': ['appengine_version'],
'cls': AppEngineInstance,
'contains': []}),
'billing_account': ResourceFactory({
'dependsOn': ['organization'],
'cls': BillingAccount,
'contains': [
LoggingBillingAccountSinkIterator,
]}),
'bigquery_dataset': ResourceFactory({
'dependsOn': ['project'],
'cls': BigqueryDataSet,
'contains': [
BigqueryTableIterator
]}),
'bigquery_table': ResourceFactory({
'dependsOn': ['bigquery_dataset'],
'cls': BigqueryTable,
'contains': []}),
'cloudsql_instance': ResourceFactory({
'dependsOn': ['project'],
'cls': CloudSqlInstance,
'contains': []}),
'compute_autoscaler': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeAutoscaler,
'contains': []}),
'compute_backendservice': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeBackendService,
'contains': []}),
'compute_backendbucket': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeBackendBucket,
'contains': []}),
'compute_disk': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeDisk,
'contains': []}),
'compute_firewall': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeFirewall,
'contains': []}),
'compute_forwardingrule': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeForwardingRule,
'contains': []}),
'compute_healthcheck': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeHealthCheck,
'contains': []}),
'compute_httphealthcheck': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeHttpHealthCheck,
'contains': []}),
'compute_httpshealthcheck': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeHttpsHealthCheck,
'contains': []}),
'compute_image': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeImage,
'contains': []}),
'compute_instancegroup': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeInstanceGroup,
'contains': []}),
'compute_instancegroupmanager': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeInstanceGroupManager,
'contains': []}),
'compute_instance': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeInstance,
'contains': []}),
'compute_instancetemplate': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeInstanceTemplate,
'contains': []}),
'compute_license': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeLicense,
'contains': []}),
'compute_network': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeNetwork,
'contains': []}),
'compute_project': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeProject,
'contains': []}),
'compute_router': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeRouter,
'contains': []}),
'compute_snapshot': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeSnapshot,
'contains': []}),
'compute_sslcertificate': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeSslCertificate,
'contains': []}),
'compute_subnetwork': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeSubnetwork,
'contains': []}),
'compute_targethttpproxy': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetHttpProxy,
'contains': []}),
'compute_targethttpsproxy': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetHttpsProxy,
'contains': []}),
'compute_targetinstance': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetInstance,
'contains': []}),
'compute_targetpool': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetPool,
'contains': []}),
'compute_targetsslproxy': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetSslProxy,
'contains': []}),
'compute_targettcpproxy': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetTcpProxy,
'contains': []}),
'compute_targetvpngateway': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetVpnGateway,
'contains': []}),
'compute_urlmap': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeUrlMap,
'contains': []}),
'compute_vpntunnel': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeVpnTunnel,
'contains': []}),
'crm_lien': ResourceFactory({
'dependsOn': ['project'],
'cls': ResourceManagerLien,
'contains': []}),
'crm_org_policy': ResourceFactory({
'dependsOn': ['folder', 'organization', 'project'],
'cls': ResourceManagerOrgPolicy,
'contains': []}),
'dataproc_cluster': ResourceFactory({
'dependsOn': ['project'],
'cls': DataprocCluster,
'contains': []}),
'dns_managedzone': ResourceFactory({
'dependsOn': ['project'],
'cls': DnsManagedZone,
'contains': []}),
'dns_policy': ResourceFactory({
'dependsOn': ['project'],
'cls': DnsPolicy,
'contains': []}),
'gsuite_group': ResourceFactory({
'dependsOn': ['organization'],
'cls': GsuiteGroup,
'contains': [
GsuiteMemberIterator,
]}),
'gsuite_group_member': ResourceFactory({
'dependsOn': ['gsuite_group'],
'cls': GsuiteGroupMember,
'contains': []}),
'gsuite_user': ResourceFactory({
'dependsOn': ['organization'],
'cls': GsuiteUser,
'contains': []}),
'gsuite_user_member': ResourceFactory({
'dependsOn': ['gsuite_group'],
'cls': GsuiteUserMember,
'contains': []}),
'iam_curated_role': ResourceFactory({
'dependsOn': [],
'cls': IamCuratedRole,
'contains': []}),
'iam_role': ResourceFactory({
'dependsOn': ['organization', 'project'],
'cls': IamRole,
'contains': []}),
'iam_serviceaccount': ResourceFactory({
'dependsOn': ['project'],
'cls': IamServiceAccount,
'contains': [
IamServiceAccountKeyIterator
]}),
'iam_serviceaccount_key': ResourceFactory({
'dependsOn': ['iam_serviceaccount'],
'cls': IamServiceAccountKey,
'contains': []}),
'kms_keyring': ResourceFactory({
'dependsOn': ['project'],
'cls': KmsKeyRing,
'contains': [
KmsCryptoKeyIterator
]}),
'kms_cryptokey': ResourceFactory({
'dependsOn': ['kms_keyring'],
'cls': KmsCryptoKey,
'contains': [
KmsCryptoKeyVersionIterator
]}),
'kms_cryptokeyversion': ResourceFactory({
'dependsOn': ['kms_cryptokey'],
'cls': KmsCryptoKeyVersion,
'contains': []}),
'kubernetes_cluster': ResourceFactory({
'dependsOn': ['project'],
'cls': KubernetesCluster,
'contains': []}),
'logging_sink': ResourceFactory({
'dependsOn': ['organization', 'folder', 'project'],
'cls': LoggingSink,
'contains': []}),
'pubsub_subscription': ResourceFactory({
'dependsOn': ['project'],
'cls': PubsubSubscription,
'contains': []}),
'pubsub_topic': ResourceFactory({
'dependsOn': ['project'],
'cls': PubsubTopic,
'contains': []}),
'spanner_database': ResourceFactory({
'dependsOn': ['project'],
'cls': SpannerDatabase,
'contains': []}),
'spanner_instance': ResourceFactory({
'dependsOn': ['project'],
'cls': SpannerInstance,
'contains': [
SpannerDatabaseIterator
]}),
'storage_bucket': ResourceFactory({
'dependsOn': ['project'],
'cls': StorageBucket,
'contains': [
# StorageObjectIterator
]}),
'storage_object': ResourceFactory({
'dependsOn': ['bucket'],
'cls': StorageObject,
'contains': []}),
}
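# ---------------------------------------------------------------------------
# Illustrative usage sketch only -- NOT part of the original module. It shows
# how the FACTORIES map above could drive a depth-first crawl of the resource
# hierarchy. It assumes each iterator class is constructed with the parent
# resource and the API client (matching the self.resource / self.client usage
# in the iterators above), that ResourceFactory keeps its definition dict in a
# 'props' attribute, and that Resource exposes a type() method; all three are
# assumptions, not confirmed by this file.
def _sketch_crawl(resource, client, visit):
    """Hypothetical depth-first crawl starting from a single resource.

    Args:
        resource (Resource): the root resource, e.g. an organization.
        client (object): API client handed to every iterator.
        visit (Callable): callback invoked once per discovered resource.
    """
    visit(resource)
    factory = FACTORIES.get(resource.type())
    if not factory:
        return
    for iterator_cls in factory.props.get('contains', []):
        for child in iterator_cls(resource, client).iter():
            _sketch_crawl(child, client, visit)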
| 1 | 33,421 | Nit: Remove whitespace (and below) | forseti-security-forseti-security | py |
@@ -205,7 +205,10 @@ return [
'error_moving_file' => 'Error moving file :file',
'error_moving_directory' => 'Error moving directory :dir',
'error_deleting_directory' => 'Error deleting the original directory :dir',
- 'path' => 'Path'
+ 'path' => 'Path',
+ 'unzip' => 'Unzip',
+ 'is_a_directory' => 'Only files are allowed. You\'ve selected a directory.',
+ 'error_open_zip' => 'Error open the zip file'
],
'component' => [
'menu_label' => 'Components', | 1 | <?php
return [
'cms_object' => [
'invalid_file' => 'Invalid file name: :name. File names can contain only alphanumeric symbols, underscores, dashes and dots. Some examples of correct file names: page.htm, page, subdirectory/page',
'invalid_property' => "The property ':name' cannot be set",
'file_already_exists' => "File ':name' already exists.",
'error_saving' => "Error saving file ':name'. Please check write permissions.",
'error_creating_directory' => 'Error creating directory :name. Please check write permissions.',
'invalid_file_extension'=>'Invalid file extension: :invalid. Allowed extensions are: :allowed.',
'error_deleting' => "Error deleting the template file ':name'. Please check write permissions.",
'delete_success' => 'Templates were successfully deleted: :count.',
'file_name_required' => 'The File Name field is required.'
],
'theme' => [
'not_found_name' => "The theme ':name' is not found.",
'active' => [
'not_set' => 'The active theme is not set.',
'not_found' => 'The active theme is not found.'
],
'edit' => [
'not_set' => 'The edit theme is not set.',
'not_found' => 'The edit theme is not found.',
'not_match' => "The object you're trying to access doesn't belong to the theme being edited. Please reload the page."
],
'settings_menu' => 'Front-end theme',
'settings_menu_description' => 'Preview the list of installed themes and select an active theme.',
'default_tab' => 'Properties',
'name_label' => 'Name',
'name_create_placeholder' => 'New theme name',
'author_label' => 'Author',
'author_placeholder' => 'Person or company name',
'description_label' => 'Description',
'description_placeholder' => 'Theme description',
'homepage_label' => 'Homepage',
'homepage_placeholder' => 'Website URL',
'code_label' => 'Code',
'code_placeholder' => 'A unique code for this theme used for distribution',
'dir_name_label' => 'Directory name',
'dir_name_create_label' => 'The destination theme directory',
'theme_label' => 'Theme',
'theme_title' => 'Themes',
'activate_button' => 'Activate',
'active_button' => 'Activate',
'customize_theme' => 'Customize Theme',
'customize_button' => 'Customize',
'duplicate_button' => 'Duplicate',
'duplicate_title' => 'Duplicate theme',
'duplicate_theme_success' => 'Duplicated theme successfully!',
'manage_button' => 'Manage',
'manage_title' => 'Manage theme',
'edit_properties_title' => 'Theme',
'edit_properties_button' => 'Edit properties',
'save_properties' => 'Save properties',
'import_button' => 'Import',
'import_title' => 'Import theme',
'import_theme_success' => 'Imported theme successfully!',
'import_uploaded_file' => 'Theme archive file',
'import_overwrite_label' => 'Overwrite existing files',
'import_overwrite_comment' => 'Untick this box to only import new files',
'import_folders_label' => 'Folders',
'import_folders_comment' => 'Please select the theme folders you would like to import',
'export_button' => 'Export',
'export_title' => 'Export theme',
'export_folders_label' => 'Folders',
'export_folders_comment' => 'Please select the theme folders you would like to export',
'delete_button' => 'Delete',
'delete_confirm' => 'Are you sure you want to delete this theme? It cannot be undone!',
'delete_active_theme_failed' => 'Cannot delete the active theme, try making another theme active first.',
'delete_theme_success' => 'Deleted theme successfully!',
'create_title' => 'Create theme',
'create_button' => 'Create',
'create_new_blank_theme' => 'Create a new blank theme',
'create_theme_success' => 'Created theme successfully!',
'create_theme_required_name' => 'Please specify a name for the theme.',
'new_directory_name_label' => 'Theme directory',
'new_directory_name_comment' => 'Provide a new directory name for the duplicated theme.',
'dir_name_invalid' => 'Name can contain only digits, Latin letters and the following symbols: _-',
'dir_name_taken' => 'Desired theme directory already exists.',
'find_more_themes' => 'Find more themes',
'saving' => 'Saving theme...',
'return' => 'Return to themes list',
],
'maintenance' => [
'settings_menu' => 'Maintenance mode',
'settings_menu_description' => 'Configure the maintenance mode page and toggle the setting.',
'is_enabled' => 'Enable maintenance mode',
'is_enabled_comment' => 'When activated website visitors will see the page chosen below.'
],
'page' => [
'not_found_name' => "The page ':name' is not found",
'not_found' => [
'label' => 'Page not found',
'help' => 'The requested page cannot be found.'
],
'custom_error' => [
'label' => 'Page error',
'help' => "We're sorry, but something went wrong and the page cannot be displayed."
],
'menu_label' => 'Pages',
'unsaved_label' => 'Unsaved page(s)',
'no_list_records' => 'No pages found',
'new' => 'New page',
'invalid_url' => 'Invalid URL format. The URL should start with the forward slash symbol and can contain digits, Latin letters and the following symbols: ._-[]:?|/+*^$',
'delete_confirm_multiple' => 'Do you really want to delete selected pages?',
'delete_confirm_single' => 'Do you really want to delete this page?',
'no_layout' => '-- no layout --'
],
'layout' => [
'not_found_name' => "The layout ':name' is not found",
'menu_label' => 'Layouts',
'unsaved_label' => 'Unsaved layout(s)',
'no_list_records' => 'No layouts found',
'new' => 'New layout',
'delete_confirm_multiple' => 'Do you really want to delete selected layouts?',
'delete_confirm_single' => 'Do you really want to delete this layout?'
],
'partial' => [
'not_found_name' => "The partial ':name' is not found.",
'invalid_name' => 'Invalid partial name: :name.',
'menu_label' => 'Partials',
'unsaved_label' => 'Unsaved partial(s)',
'no_list_records' => 'No partials found',
'delete_confirm_multiple' => 'Do you really want to delete selected partials?',
'delete_confirm_single' => 'Do you really want to delete this partial?',
'new' => 'New partial'
],
'content' => [
'not_found_name' => "The content file ':name' is not found.",
'menu_label' => 'Content',
'unsaved_label' => 'Unsaved content',
'no_list_records' => 'No content files found',
'delete_confirm_multiple' => 'Do you really want to delete selected content files or directories?',
'delete_confirm_single' => 'Do you really want to delete this content file?',
'new' => 'New content file'
],
'ajax_handler' => [
'invalid_name' => 'Invalid AJAX handler name: :name.',
'not_found' => "AJAX handler ':name' was not found."
],
'cms' => [
'menu_label' => 'CMS'
],
'sidebar' => [
'add' => 'Add',
'search' => 'Search...'
],
'editor' => [
'settings' => 'Settings',
'title' => 'Title',
'new_title' => 'New page title',
'url' => 'URL',
'filename' => 'File Name',
'layout' => 'Layout',
'description' => 'Description',
'preview' => 'Preview',
'meta' => 'Meta',
'meta_title' => 'Meta Title',
'meta_description' => 'Meta Description',
'markup' => 'Markup',
'code' => 'Code',
'content' => 'Content',
'hidden' => 'Hidden',
'hidden_comment' => 'Hidden pages are accessible only by logged-in back-end users.',
'enter_fullscreen' => 'Enter fullscreen mode',
'exit_fullscreen' => 'Exit fullscreen mode'
],
'asset' => [
'menu_label' => 'Assets',
'unsaved_label' => 'Unsaved asset(s)',
'drop_down_add_title' => 'Add...',
'drop_down_operation_title' => 'Action...',
'upload_files' => 'Upload file(s)',
'create_file' => 'Create file',
'create_directory' => 'Create directory',
'directory_popup_title' => 'New directory',
'directory_name' => 'Directory name',
'rename' => 'Rename',
'delete' => 'Delete',
'move' => 'Move',
'select' => 'Select',
'new' => 'New file',
'rename_popup_title' => 'Rename',
'rename_new_name' => 'New name',
'invalid_path' => 'Path can contain only digits, Latin letters, spaces and the following symbols: ._-/',
'error_deleting_file' => 'Error deleting file :name.',
'error_deleting_dir_not_empty' => 'Error deleting directory :name. The directory is not empty.',
'error_deleting_dir' => 'Error deleting directory :name.',
'invalid_name' => 'Name can contain only digits, Latin letters, spaces and the following symbols: ._-',
'original_not_found' => 'Original file or directory not found',
'already_exists' => 'File or directory with this name already exists',
'error_renaming' => 'Error renaming the file or directory',
'name_cant_be_empty' => 'The name cannot be empty',
'too_large' => 'The uploaded file is too large. The maximum allowed file size is :max_size',
'type_not_allowed' => 'Only the following file types are allowed: :allowed_types',
'file_not_valid' => 'File is not valid',
'error_uploading_file' => "Error uploading file ':name': :error",
'move_please_select' => 'please select',
'move_destination' => 'Destination directory',
'move_popup_title' => 'Move assets',
'move_button' => 'Move',
'selected_files_not_found' => 'Selected files not found',
'select_destination_dir' => 'Please select a destination directory',
'destination_not_found' => 'Destination directory is not found',
'error_moving_file' => 'Error moving file :file',
'error_moving_directory' => 'Error moving directory :dir',
'error_deleting_directory' => 'Error deleting the original directory :dir',
'path' => 'Path'
],
'component' => [
'menu_label' => 'Components',
'unnamed' => 'Unnamed',
'no_description' => 'No description provided',
'alias' => 'Alias',
'alias_description' => 'A unique name given to this component when using it in the page or layout code.',
'validation_message' => 'Component aliases are required and can contain only Latin symbols, digits, and underscores. The aliases should start with a Latin symbol.',
'invalid_request' => 'The template cannot be saved because of invalid component data.',
'no_records' => 'No components found',
'not_found' => "The component ':name' is not found.",
'method_not_found' => "The component ':name' does not contain a method ':method'."
],
'template' => [
'invalid_type' => 'Unknown template type.',
'not_found' => 'The requested template was not found.',
'saved'=> 'The template has been successfully saved.'
],
'permissions' => [
'name' => 'Cms',
'manage_content' => 'Manage content',
'manage_assets' => 'Manage assets',
'manage_pages' => 'Manage pages',
'manage_layouts' => 'Manage layouts',
'manage_partials' => 'Manage partials',
'manage_themes' => 'Manage themes',
'manage_media' => 'Manage media'
],
'mediafinder' => [
'default_prompt' => 'Click the %s button to find a media item'
],
'media' => [
'invalid_path' => "Invalid file path specified: ':path'.",
'menu_label' => 'Media',
'upload' => 'Upload',
'move' => 'Move',
'delete' => 'Delete',
'add_folder' => 'Add folder',
'search' => 'Search',
'display' => 'Display',
'filter_everything' => 'Everything',
'filter_images' => 'Images',
'filter_video' => 'Video',
'filter_audio' => 'Audio',
'filter_documents' => 'Documents',
'library' => 'Library',
'folder_size_items' => 'item(s)',
'size' => 'Size',
'title' => 'Title',
'last_modified' => 'Last modified',
'public_url' => 'Public URL',
'click_here' => 'Click here',
'thumbnail_error' => 'Error generating thumbnail.',
'return_to_parent' => 'Return to the parent folder',
'return_to_parent_label' => 'Go up ..',
'nothing_selected' => 'Nothing is selected.',
'multiple_selected' => 'Multiple items selected.',
'uploading_file_num' => 'Uploading :number file(s)...',
'uploading_complete' => 'Upload complete',
'uploading_error' => 'Upload failed',
'order_by' => 'Order by',
'folder' => 'Folder',
'no_files_found' => 'No files found by your request.',
'delete_empty' => 'Please select items to delete.',
'delete_confirm' => 'Do you really want to delete the selected item(s)?',
'error_renaming_file' => 'Error renaming the item.',
'new_folder_title' => 'New folder',
'folder_name' => 'Folder name',
'error_creating_folder' => 'Error creating folder',
'folder_or_file_exist' => 'A folder or file with the specified name already exists.',
'move_empty' => 'Please select items to move.',
'move_popup_title' => 'Move files or folders',
'move_destination' => 'Destination folder',
'please_select_move_dest' => 'Please select a destination folder.',
'move_dest_src_match' => 'Please select another destination folder.',
'empty_library' => 'The Media Library is empty. Upload files or create folders to get started.',
'insert' => 'Insert',
'crop_and_insert' => 'Crop & Insert',
'select_single_image' => 'Please select a single image.',
'selection_not_image' => 'The selected item is not an image.',
'restore' => 'Undo all changes',
'resize' => 'Resize...',
'selection_mode_normal' => 'Normal',
'selection_mode_fixed_ratio' => 'Fixed ratio',
'selection_mode_fixed_size' => 'Fixed size',
'height' => 'Height',
'width' => 'Width',
'selection_mode' => 'Selection mode',
'resize_image' => 'Resize image',
'image_size' => 'Image size:',
'selected_size' => 'Selected:'
]
];
| 1 | 11,401 | "Failed opening" or "Failed to open". | octobercms-october | php |
@@ -131,8 +131,12 @@ func (d *Disk) validate(ctx context.Context, s *Step) DError {
if _, err := s.w.images.regUse(d.SourceImage, s); err != nil {
errs = addErrs(errs, Errf("%s: can't use image %q: %v", pre, d.SourceImage, err))
}
+ } else if d.SourceSnapshot != "" {
+ if _, err := s.w.snapshots.regUse(d.SourceSnapshot, s); err != nil {
+ errs = addErrs(errs, Errf("%s: can't use snapshot %q: %v", pre, d.SourceSnapshot, err))
+ }
} else if d.Disk.SizeGb == 0 {
- errs = addErrs(errs, Errf("%s: SizeGb and SourceImage not set", pre))
+ errs = addErrs(errs, Errf("%s: SizeGb, SourceSnapshot or SourceImage not set", pre))
}
// Register creation. | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daisy
import (
"context"
"encoding/json"
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
daisyCompute "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
)
var (
diskURLRgx = regexp.MustCompile(fmt.Sprintf(`^(projects/(?P<project>%[1]s)/)?zones/(?P<zone>%[2]s)/disks/(?P<disk>%[2]s)(/resize)?$`, projectRgxStr, rfc1035))
deviceNameURLRgx = regexp.MustCompile(fmt.Sprintf(`^(projects/(?P<project>%[1]s)/)?zones/(?P<zone>%[2]s)/devices/(?P<disk>%[2]s)$`, projectRgxStr, rfc1035))
)
// diskExists should only be used during validation for existing GCE disks
// and should not be relied on or populated for daisy-created resources.
func (w *Workflow) diskExists(project, zone, disk string) (bool, DError) {
return w.diskCache.resourceExists(func(project, zone string, opts ...daisyCompute.ListCallOption) (interface{}, error) {
return w.ComputeClient.ListDisks(project, zone)
}, project, zone, disk)
}
// isDiskAttached should only be used during validation for existing attached GCE disks
// and should not be relied or populated for daisy created resources.
func isDiskAttached(client daisyCompute.Client, deviceName, project, zone, instance string) (bool, DError) {
i, err := client.GetInstance(project, zone, instance)
if err != nil {
return false, Errf("failed to get instance info for checking attached disks: %v", err)
}
parts := strings.Split(deviceName, "/")
realName := parts[len(parts)-1]
for _, d := range i.Disks {
if d.DeviceName == realName {
return true, nil
}
}
return false, nil
}
// Disk is used to create a GCE disk in a project.
type Disk struct {
compute.Disk
Resource
// If this is enabled, then WINDOWS will be added to the
// disk's guestOsFeatures. This is a string since daisy
// replaces variables after JSON has been parsed.
// (If it were boolean, the JSON marshaller throws
// an error when it sees something like `${is_windows}`)
IsWindows string `json:"isWindows,omitempty"`
// Size of this disk.
SizeGb string `json:"sizeGb,omitempty"`
// Fallback to pd-standard when quota is not enough for higher-level pd
FallbackToPdStandard bool `json:"fallbackToPdStandard,omitempty"`
}
// MarshalJSON is a hacky workaround to prevent Disk from using compute.Disk's implementation.
func (d *Disk) MarshalJSON() ([]byte, error) {
return json.Marshal(*d)
}
func (d *Disk) populate(ctx context.Context, s *Step) DError {
var errs DError
d.Name, d.Zone, errs = d.Resource.populateWithZone(ctx, s, d.Name, d.Zone)
d.Description = strOr(d.Description, fmt.Sprintf("Disk created by Daisy in workflow %q on behalf of %s.", s.w.Name, s.w.username))
if d.SizeGb != "" {
size, err := strconv.ParseInt(d.SizeGb, 10, 64)
if err != nil {
errs = addErrs(errs, Errf("cannot parse SizeGb: %s, err: %v", d.SizeGb, err))
}
d.Disk.SizeGb = size
}
if d.IsWindows != "" {
isWindows, err := strconv.ParseBool(d.IsWindows)
if err != nil {
errs = addErrs(errs, Errf("cannot parse IsWindows as boolean: %s, err: %v", d.IsWindows, err))
}
if isWindows {
d.GuestOsFeatures = CombineGuestOSFeatures(d.GuestOsFeatures, "WINDOWS")
}
}
if imageURLRgx.MatchString(d.SourceImage) {
d.SourceImage = extendPartialURL(d.SourceImage, d.Project)
}
if d.Type == "" {
d.Type = fmt.Sprintf("projects/%s/zones/%s/diskTypes/pd-standard", d.Project, d.Zone)
} else if diskTypeURLRgx.MatchString(d.Type) {
d.Type = extendPartialURL(d.Type, d.Project)
} else {
d.Type = fmt.Sprintf("projects/%s/zones/%s/diskTypes/%s", d.Project, d.Zone, d.Type)
}
d.link = fmt.Sprintf("projects/%s/zones/%s/disks/%s", d.Project, d.Zone, d.Name)
return errs
}
func (d *Disk) validate(ctx context.Context, s *Step) DError {
pre := fmt.Sprintf("cannot create disk %q", d.daisyName)
errs := d.Resource.validateWithZone(ctx, s, d.Zone, pre)
if !diskTypeURLRgx.MatchString(d.Type) {
errs = addErrs(errs, Errf("%s: bad disk type: %q", pre, d.Type))
}
if d.SourceImage != "" {
if _, err := s.w.images.regUse(d.SourceImage, s); err != nil {
errs = addErrs(errs, Errf("%s: can't use image %q: %v", pre, d.SourceImage, err))
}
} else if d.Disk.SizeGb == 0 {
errs = addErrs(errs, Errf("%s: SizeGb and SourceImage not set", pre))
}
// Register creation.
errs = addErrs(errs, s.w.disks.regCreate(d.daisyName, &d.Resource, s, false))
return errs
}
type diskAttachment struct {
mode string
attacher, detacher *Step
}
type diskRegistry struct {
baseResourceRegistry
attachments map[string]map[string]*diskAttachment // map (disk, instance) -> attachment
testDetachHelper func(dName, iName string, s *Step) DError
}
func newDiskRegistry(w *Workflow) *diskRegistry {
dr := &diskRegistry{baseResourceRegistry: baseResourceRegistry{w: w, typeName: "disk", urlRgx: diskURLRgx}}
dr.baseResourceRegistry.deleteFn = dr.deleteFn
dr.init()
return dr
}
func (dr *diskRegistry) init() {
dr.baseResourceRegistry.init()
dr.attachments = map[string]map[string]*diskAttachment{}
}
func (dr *diskRegistry) deleteFn(res *Resource) DError {
m := NamedSubexp(diskURLRgx, res.link)
err := dr.w.ComputeClient.DeleteDisk(m["project"], m["zone"], m["disk"])
if gErr, ok := err.(*googleapi.Error); ok && gErr.Code == http.StatusNotFound {
return typedErr(resourceDNEError, "failed to delete disk", err)
}
return newErr("failed to delete disk", err)
}
// detachHelper marks s as the detacher between dName and iName.
// Returns an error if the detacher doesn't depend on the attacher.
func (dr *diskRegistry) detachHelper(dName, iName string, isAttached bool, s *Step) DError {
if dr.testDetachHelper != nil {
return dr.testDetachHelper(dName, iName, s)
}
// if the disk has already been attached before workflow is executed, skip validating its attacher
if isAttached {
return nil
}
pre := fmt.Sprintf("step %q cannot detach disk %q from instance %q", s.name, dName, iName)
var att *diskAttachment
if im, _ := dr.attachments[dName]; im == nil {
return Errf("%s: not attached", pre)
} else if att, _ = im[iName]; att == nil {
return Errf("%s: not attached", pre)
} else if att.detacher != nil {
return Errf("%s: already detached or concurrently detached by step %q", pre, att.detacher.name)
} else if !s.nestedDepends(att.attacher) {
return Errf("%s: step %q does not depend on attaching step %q", pre, s.name, att.attacher.name)
}
att.detacher = s
return nil
}
// registerAttachment is called by Instance.regCreate and AttachDisks.validate and marks a disk as attached to an instance by Step s.
func (dr *diskRegistry) regAttach(dName, iName, mode string, s *Step) DError {
dr.mx.Lock()
defer dr.mx.Unlock()
pre := fmt.Sprintf("step %q cannot attach disk %q to instance %q", s.name, dName, iName)
var errs DError
// Iterate over disk's attachments. Check for concurrent conflicts.
// Step s is concurrent with other attachments if the attachment detacher == nil
// or s does not depend on the detacher.
// If this is a repeat attachment (same disk and instance already attached), do nothing and return.
for attIName, att := range dr.attachments[dName] {
// Is this a concurrent attachment?
if att.detacher == nil || !s.nestedDepends(att.detacher) {
if attIName == iName {
errs = addErrs(errs, Errf("%s: concurrently attached by step %q", pre, att.attacher.name))
return nil // this is a repeat attachment to the same instance -- does nothing
} else if strIn(diskModeRW, []string{mode, att.mode}) {
// Can't have concurrent attachment in RW mode.
return Errf(
"%s: concurrent RW attachment of disk %q between instances %q (%s) and %q (%s)",
pre, dName, iName, mode, attIName, att.mode)
}
}
}
var im map[string]*diskAttachment
if im, _ = dr.attachments[dName]; im == nil {
im = map[string]*diskAttachment{}
dr.attachments[dName] = im
}
im[iName] = &diskAttachment{mode: mode, attacher: s}
return nil
}
// regDetach marks s as the detacher for the dName disk and iName instance.
// Returns an error if dName or iName don't exist or if detachHelper returns an error.
func (dr *diskRegistry) regDetach(dName, iName string, isAttached bool, s *Step) DError {
dr.mx.Lock()
defer dr.mx.Unlock()
return dr.detachHelper(dName, iName, isAttached, s)
}
// regDetachAll is called by Instance.regDelete and registers Step s as the detacher for all disks currently attached to iName.
func (dr *diskRegistry) regDetachAll(iName string, s *Step) DError {
dr.mx.Lock()
defer dr.mx.Unlock()
var errs DError
// For every disk.
for dName, im := range dr.attachments {
// Check if instance attached.
if att, _ := im[iName]; att == nil || att.detacher != nil {
continue
}
// If yes, detach.
errs = addErrs(dr.detachHelper(dName, iName, false, s))
}
return errs
}
| 1 | 10,368 | and -> or | GoogleCloudPlatform-compute-image-tools | go |