patch (string, 17 to 31.2k chars) | y (int64, 1 to 1) | oldf (string, 0 to 2.21M chars) | idx (int64, 1 to 1) | id (int64, 4.29k to 68.4k) | msg (string, 8 to 843 chars) | proj (string, 212 classes) | lang (string, 9 classes)
---|---|---|---|---|---|---|---|
@@ -1,9 +1,7 @@
# coding: utf-8
"""Comparison of `binary` and `xentropy` objectives.
-
BLUF: The `xentropy` objective does logistic regression and generalizes
to the case where labels are probabilistic (i.e. numbers between 0 and 1).
-
Details: Both `binary` and `xentropy` minimize the log loss and use
`boost_from_average = TRUE` by default. Possibly the only difference
between them with default settings is that `binary` may achieve a slight | 1 | # coding: utf-8
"""Comparison of `binary` and `xentropy` objectives.
BLUF: The `xentropy` objective does logistic regression and generalizes
to the case where labels are probabilistic (i.e. numbers between 0 and 1).
Details: Both `binary` and `xentropy` minimize the log loss and use
`boost_from_average = TRUE` by default. Possibly the only difference
between them with default settings is that `binary` may achieve a slight
speed improvement by assuming that the labels are binary instead of
probabilistic.
"""
import time
import numpy as np
import pandas as pd
from scipy.special import expit
import lightgbm as lgb
#################
# Simulate some binary data with a single categorical and
# single continuous predictor
np.random.seed(0)
N = 1000
X = pd.DataFrame({
'continuous': range(N),
'categorical': np.repeat([0, 1, 2, 3, 4], N / 5)
})
CATEGORICAL_EFFECTS = [-1, -1, -2, -2, 2]
LINEAR_TERM = np.array([
-0.5 + 0.01 * X['continuous'][k]
+ CATEGORICAL_EFFECTS[X['categorical'][k]] for k in range(X.shape[0])
]) + np.random.normal(0, 1, X.shape[0])
TRUE_PROB = expit(LINEAR_TERM)
Y = np.random.binomial(1, TRUE_PROB, size=N)
DATA = {
'X': X,
'probability_labels': TRUE_PROB,
'binary_labels': Y,
'lgb_with_binary_labels': lgb.Dataset(X, Y),
'lgb_with_probability_labels': lgb.Dataset(X, TRUE_PROB),
}
#################
# Set up a couple of utilities for our experiments
def log_loss(preds, labels):
"""Logarithmic loss with non-necessarily-binary labels."""
log_likelihood = np.sum(labels * np.log(preds)) / len(preds)
return -log_likelihood
def experiment(objective, label_type, data):
"""Measure performance of an objective.
Parameters
----------
objective : string 'binary' or 'xentropy'
Objective function.
label_type : string 'binary' or 'probability'
Type of the label.
data : dict
Data for training.
Returns
-------
result : dict
Experiment summary stats.
"""
np.random.seed(0)
nrounds = 5
lgb_data = data['lgb_with_' + label_type + '_labels']
params = {
'objective': objective,
'feature_fraction': 1,
'bagging_fraction': 1,
'verbose': -1
}
time_zero = time.time()
gbm = lgb.train(params, lgb_data, num_boost_round=nrounds)
y_fitted = gbm.predict(data['X'])
y_true = data[label_type + '_labels']
duration = time.time() - time_zero
return {
'time': duration,
'correlation': np.corrcoef(y_fitted, y_true)[0, 1],
'logloss': log_loss(y_fitted, y_true)
}
#################
# Observe the behavior of `binary` and `xentropy` objectives
print('Performance of `binary` objective with binary labels:')
print(experiment('binary', label_type='binary', data=DATA))
print('Performance of `xentropy` objective with binary labels:')
print(experiment('xentropy', label_type='binary', data=DATA))
print('Performance of `xentropy` objective with probability labels:')
print(experiment('xentropy', label_type='probability', data=DATA))
# Trying this throws an error on non-binary values of y:
# experiment('binary', label_type='probability', DATA)
# The speed of `binary` is not drastically different than
# `xentropy`. `xentropy` runs faster than `binary` in many cases, although
# there are reasons to suspect that `binary` should run faster when the
# label is an integer instead of a float
K = 10
A = [experiment('binary', label_type='binary', data=DATA)['time']
for k in range(K)]
B = [experiment('xentropy', label_type='binary', data=DATA)['time']
for k in range(K)]
print('Best `binary` time: ' + str(min(A)))
print('Best `xentropy` time: ' + str(min(B)))
| 1 | 30,630 | please revert all of these unrelated whitespace changes | microsoft-LightGBM | cpp |
@@ -0,0 +1,15 @@
+package com.fsck.k9.mail;
+
+/**
+ * Enumeration of the different possible encryption protocol that can be used.
+ */
+public enum EncryptionType {
+
+ NONE,
+
+ INLINE,
+
+ PGP_MIME,
+
+ S_MIME
+} | 1 | 1 | 13,276 | PGP_INLINE is clearer IMHO | k9mail-k-9 | java |
|
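A minimal sketch of how the new `EncryptionType` enum above might look with the reviewer's naming suggestion applied. `PGP_INLINE` is the reviewer's proposal, not code from the diff; everything else is unchanged from the patch.

```java
package com.fsck.k9.mail;

/**
 * Enumeration of the different possible encryption protocols that can be used.
 */
public enum EncryptionType {

    NONE,

    PGP_INLINE,

    PGP_MIME,

    S_MIME
}
```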
@@ -16,6 +16,8 @@ import (
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
+ "context"
+
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/action"
"github.com/iotexproject/iotex-core/config" | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package actpool
import (
"fmt"
"math/big"
"strings"
"testing"
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/action"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/iotxaddress"
"github.com/iotexproject/iotex-core/proto"
"github.com/iotexproject/iotex-core/test/mock/mock_blockchain"
"github.com/iotexproject/iotex-core/testutil"
)
const (
pubkeyA = "2c9ccbeb9ee91271f7e5c2103753be9c9edff847e1a51227df6a6b0765f31a4b424e84027b44a663950f013a88b8fd8cdc53b1eda1d4b73f9d9dc12546c8c87d68ff1435a0f8a006"
prikeyA = "b5affb30846a00ef5aa39b57f913d70cd8cf6badd587239863cb67feacf6b9f30c34e800"
pubkeyB = "881504d84a0659e14dcba59f24a98e71cda55b139615342668840c64678f1514941bbd053c7492fb9b719e6050cfa972efa491b79e11a1713824dda5f638fc0d9fa1b68be3c0f905"
prikeyB = "b89c1ec0fb5b192c8bb8f6fcf9a871e4a67ef462f40d2b8ff426da1d1eaedd9696dc9d00"
pubkeyC = "252fc7bc9a993b68dd7b13a00213c9cf4befe80da49940c52220f93c7147771ba2d783045cf0fbf2a86b32a62848befb96c0f38c0487a5ccc806ff28bb06d9faf803b93dda107003"
prikeyC = "3e05de562a27fb6e25ac23ff8bcaa1ada0c253fa8ff7c6d15308f65d06b6990f64ee9601"
pubkeyD = "29aa28cc21c3ee3cc658d3a322997ceb8d5d352f45d052192d3ab57cd196d3375af558067f5a2cfe5fc65d5249cc07f991bab683468382a3acaa4c8b7af35156b46aeda00620f307"
prikeyD = "d4b7b441382751d9a1955152b46a69f3c9f9559c6205757af928f5181ff207060d0dab00"
pubkeyE = "64dc2d5f445a78b884527252a3dba1f72f52251c97ec213dda99868882024d4d1442f100c8f1f833d0c687871a959ee97665dea24de1a627cce6c970d9db5859da9e4295bb602e04"
prikeyE = "53a827f7c5b4b4040b22ae9b12fcaa234e8362fa022480f50b8643981806ed67c7f77a00"
)
const (
maxNumActsPerPool = 8192
maxNumActsPerAcct = 256
)
var (
addr1 = testutil.ConstructAddress(pubkeyA, prikeyA)
addr2 = testutil.ConstructAddress(pubkeyB, prikeyB)
addr3 = testutil.ConstructAddress(pubkeyC, prikeyC)
addr4 = testutil.ConstructAddress(pubkeyD, prikeyD)
addr5 = testutil.ConstructAddress(pubkeyE, prikeyE)
)
func TestActPool_validateTsf(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
require.NoError(bc.GetFactory().CommitStateChanges(0, nil, nil, nil))
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
// Case I: Coinbase transfer
coinbaseTsf := action.Transfer{IsCoinbase: true}
err = ap.validateTsf(&coinbaseTsf)
require.Equal(ErrTransfer, errors.Cause(err))
// Case II: Oversized data
tmpPayload := [32769]byte{}
payload := tmpPayload[:]
tsf := action.Transfer{Payload: payload}
err = ap.validateTsf(&tsf)
require.Equal(ErrActPool, errors.Cause(err))
// Case III: Negative amount
tsf = action.Transfer{Amount: big.NewInt(-100)}
err = ap.validateTsf(&tsf)
require.Equal(ErrBalance, errors.Cause(err))
// Case IV: Invalid address
tsf = action.Transfer{Sender: addr1.RawAddress, Recipient: "io1qyqsyqcyq5narhapakcsrhksfajfcpl24us3xp38zwvsep", Amount: big.NewInt(1)}
err = ap.validateTsf(&tsf)
require.Error(err)
require.True(strings.Contains(err.Error(), "error when validating recipient's address"))
// Case V: Signature verification fails
unsignedTsf, err := action.NewTransfer(uint64(1), big.NewInt(1), addr1.RawAddress, addr1.RawAddress, []byte{}, uint64(100000), big.NewInt(10))
require.NoError(err)
err = ap.validateTsf(unsignedTsf)
require.Equal(action.ErrTransferError, errors.Cause(err))
// Case VI: Nonce is too low
prevTsf, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(prevTsf)
require.NoError(err)
err = bc.CommitStateChanges(0, []*action.Transfer{prevTsf}, nil, nil)
require.NoError(err)
ap.Reset()
nTsf, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(60), []byte{}, uint64(100000), big.NewInt(10))
err = ap.validateTsf(nTsf)
require.Equal(ErrNonce, errors.Cause(err))
}
func TestActPool_validateVote(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(100))
require.NoError(err)
require.NoError(bc.GetFactory().CommitStateChanges(0, nil, nil, nil))
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
// Case I: Oversized data
tmpSelfPubKey := [32769]byte{}
selfPubKey := tmpSelfPubKey[:]
vote := action.Vote{
ActionPb: &iproto.ActionPb{
Action: &iproto.ActionPb_Vote{
Vote: &iproto.VotePb{
SelfPubkey: selfPubKey},
},
},
}
err = ap.validateVote(&vote)
require.Equal(ErrActPool, errors.Cause(err))
// Case II: Invalid voter's public key
vote = action.Vote{
ActionPb: &iproto.ActionPb{
Action: &iproto.ActionPb_Vote{
Vote: &iproto.VotePb{},
},
},
}
err = ap.validateVote(&vote)
require.Error(err)
require.True(strings.Contains(err.Error(), "failed to get voter's public key"))
// Case III: Invalid address
vote = action.Vote{
ActionPb: &iproto.ActionPb{
Action: &iproto.ActionPb_Vote{
Vote: &iproto.VotePb{
SelfPubkey: addr1.PublicKey[:],
VoterAddress: addr1.RawAddress,
VoteeAddress: "123",
},
},
},
}
err = ap.validateVote(&vote)
require.Error(err)
require.True(strings.Contains(err.Error(), "error when validating votee's address"))
// Case IV: Signature verification fails
unsignedVote, err := action.NewVote(1, addr1.RawAddress, addr2.RawAddress, uint64(100000), big.NewInt(10))
unsignedVote.GetVote().SelfPubkey = addr1.PublicKey[:]
require.NoError(err)
err = ap.validateVote(unsignedVote)
require.Equal(action.ErrVoteError, errors.Cause(err))
// Case V: Nonce is too low
prevTsf, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(prevTsf)
require.NoError(err)
err = bc.CommitStateChanges(0, []*action.Transfer{prevTsf}, nil, nil)
require.NoError(err)
ap.Reset()
nVote, _ := signedVote(addr1, addr1, uint64(1), uint64(100000), big.NewInt(10))
err = ap.validateVote(nVote)
require.Equal(ErrNonce, errors.Cause(err))
// Case VI: Votee is not a candidate
vote2, _ := signedVote(addr1, addr2, uint64(2), uint64(100000), big.NewInt(10))
err = ap.validateVote(vote2)
require.Equal(ErrVotee, errors.Cause(err))
}
func TestActPool_AddActs(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(10))
require.NoError(err)
require.NoError(bc.GetFactory().CommitStateChanges(0, nil, nil, nil))
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
// Test actpool status after adding a sequence of Tsfs/votes: need to check confirmed nonce, pending nonce, and pending balance
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
tsf2, _ := signedTransfer(addr1, addr1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
tsf3, _ := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
vote4, _ := signedVote(addr1, addr1, uint64(4), uint64(100000), big.NewInt(10))
tsf5, _ := signedTransfer(addr1, addr1, uint64(5), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
tsf6, _ := signedTransfer(addr2, addr2, uint64(1), big.NewInt(5), []byte{}, uint64(100000), big.NewInt(10))
tsf7, _ := signedTransfer(addr2, addr2, uint64(3), big.NewInt(1), []byte{}, uint64(100000), big.NewInt(10))
tsf8, _ := signedTransfer(addr2, addr2, uint64(4), big.NewInt(5), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(tsf1)
require.NoError(err)
err = ap.AddTsf(tsf2)
require.NoError(err)
err = ap.AddTsf(tsf3)
require.NoError(err)
err = ap.AddVote(vote4)
require.NoError(err)
err = ap.AddTsf(tsf5)
require.Equal(ErrBalance, errors.Cause(err))
err = ap.AddTsf(tsf6)
require.NoError(err)
err = ap.AddTsf(tsf7)
require.NoError(err)
err = ap.AddTsf(tsf8)
require.NoError(err)
pBalance1, _ := ap.getPendingBalance(addr1.RawAddress)
require.Equal(uint64(40), pBalance1.Uint64())
pNonce1, _ := ap.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(5), pNonce1)
pBalance2, _ := ap.getPendingBalance(addr2.RawAddress)
require.Equal(uint64(5), pBalance2.Uint64())
pNonce2, _ := ap.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(2), pNonce2)
tsf9, _ := signedTransfer(addr2, addr2, uint64(2), big.NewInt(3), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(tsf9)
require.NoError(err)
pBalance2, _ = ap.getPendingBalance(addr2.RawAddress)
require.Equal(uint64(1), pBalance2.Uint64())
pNonce2, _ = ap.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(4), pNonce2)
// Error Case Handling
// Case I: Action already exists in pool
err = ap.AddTsf(tsf1)
require.Equal(fmt.Errorf("existed transfer: %x", tsf1.Hash()), err)
err = ap.AddVote(vote4)
require.Equal(fmt.Errorf("existed vote: %x", vote4.Hash()), err)
// Case II: Pool space is full
mockBC := mock_blockchain.NewMockBlockchain(ctrl)
Ap2, err := NewActPool(mockBC, apConfig)
require.NoError(err)
ap2, ok := Ap2.(*actPool)
require.True(ok)
for i := uint64(0); i < ap2.cfg.MaxNumActsPerPool; i++ {
nTsf := action.Transfer{Amount: big.NewInt(int64(i))}
nAction := nTsf.ConvertToActionPb()
ap2.allActions[nTsf.Hash()] = nAction
}
mockBC.EXPECT().Nonce(gomock.Any()).Times(2).Return(uint64(0), nil)
mockBC.EXPECT().StateByAddr(gomock.Any()).Times(1).Return(nil, nil)
err = ap2.AddTsf(tsf1)
require.Equal(ErrActPool, errors.Cause(err))
err = ap2.AddVote(vote4)
require.Equal(ErrActPool, errors.Cause(err))
// Case III: Nonce already exists
replaceTsf, _ := signedTransfer(addr1, addr2, uint64(1), big.NewInt(1), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(replaceTsf)
require.Equal(ErrNonce, errors.Cause(err))
replaceVote, err := action.NewVote(4, addr1.RawAddress, "", uint64(100000), big.NewInt(10))
require.NoError(err)
replaceVote, _ = replaceVote.Sign(addr1)
err = ap.AddVote(replaceVote)
require.Equal(ErrNonce, errors.Cause(err))
// Case IV: Nonce is too large
outOfBoundsTsf, _ := signedTransfer(addr1, addr1, ap.cfg.MaxNumActsPerAcct+1, big.NewInt(1), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(outOfBoundsTsf)
require.Equal(ErrNonce, errors.Cause(err))
// Case V: Insufficient balance
overBalTsf, _ := signedTransfer(addr2, addr2, uint64(4), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(overBalTsf)
require.Equal(ErrBalance, errors.Cause(err))
// Case VI: over gas limit
creationExecution, err := action.NewExecution(addr1.RawAddress, action.EmptyAddress, uint64(5), big.NewInt(int64(0)), blockchain.GasLimit+100, big.NewInt(10), []byte{})
require.NoError(err)
err = ap.AddExecution(creationExecution)
require.Equal(ErrGasHigherThanLimit, errors.Cause(err))
// Case VII: insufficient gas
creationExecution.GasLimit = 10
tmpData := [1234]byte{}
creationExecution.Data = tmpData[:]
err = ap.AddExecution(creationExecution)
require.Equal(ErrInsufficientGas, errors.Cause(err))
}
func TestActPool_PickActs(t *testing.T) {
createActPool := func(cfg config.ActPool) (*actPool, []*action.Transfer, []*action.Vote, []*action.Execution) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(10))
require.NoError(err)
require.NoError(bc.GetFactory().CommitStateChanges(0, nil, nil, nil))
// Create actpool
Ap, err := NewActPool(bc, cfg)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
tsf2, _ := signedTransfer(addr1, addr1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
tsf3, _ := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
tsf4, _ := signedTransfer(addr1, addr1, uint64(4), big.NewInt(40), []byte{}, uint64(100000), big.NewInt(10))
tsf5, _ := signedTransfer(addr1, addr1, uint64(5), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
vote6, _ := signedVote(addr1, addr1, uint64(6), uint64(100000), big.NewInt(10))
vote7, _ := signedVote(addr2, addr2, uint64(1), uint64(100000), big.NewInt(10))
tsf8, _ := signedTransfer(addr2, addr2, uint64(3), big.NewInt(5), []byte{}, uint64(100000), big.NewInt(10))
tsf9, _ := signedTransfer(addr2, addr2, uint64(4), big.NewInt(1), []byte{}, uint64(100000), big.NewInt(10))
tsf10, _ := signedTransfer(addr2, addr2, uint64(5), big.NewInt(5), []byte{}, uint64(100000), big.NewInt(10))
err = ap.AddTsf(tsf1)
require.NoError(err)
err = ap.AddTsf(tsf2)
require.NoError(err)
err = ap.AddTsf(tsf3)
require.NoError(err)
err = ap.AddTsf(tsf4)
require.NoError(err)
err = ap.AddTsf(tsf5)
require.Equal(ErrBalance, errors.Cause(err))
err = ap.AddVote(vote6)
require.NoError(err)
err = ap.AddVote(vote7)
require.NoError(err)
err = ap.AddTsf(tsf8)
require.NoError(err)
err = ap.AddTsf(tsf9)
require.NoError(err)
err = ap.AddTsf(tsf10)
require.NoError(err)
return ap, []*action.Transfer{tsf1, tsf2, tsf3, tsf4}, []*action.Vote{vote7}, []*action.Execution{}
}
t.Run("no-limit", func(t *testing.T) {
apConfig := getActPoolCfg()
ap, transfers, votes, executions := createActPool(apConfig)
pickedTsfs, pickedVotes, pickedExecutions := ap.PickActs()
require.Equal(t, transfers, pickedTsfs)
require.Equal(t, votes, pickedVotes)
require.Equal(t, executions, pickedExecutions)
})
t.Run("enough-limit", func(t *testing.T) {
apConfig := getActPoolCfg()
apConfig.MaxNumActsToPick = 10
ap, transfers, votes, executions := createActPool(apConfig)
pickedTsfs, pickedVotes, pickedExecutions := ap.PickActs()
require.Equal(t, transfers, pickedTsfs)
require.Equal(t, votes, pickedVotes)
require.Equal(t, executions, pickedExecutions)
})
t.Run("low-limit", func(t *testing.T) {
apConfig := getActPoolCfg()
apConfig.MaxNumActsToPick = 3
ap, _, _, _ := createActPool(apConfig)
pickedTsfs, pickedVotes, pickedExecutions := ap.PickActs()
require.Equal(t, 3, len(pickedTsfs)+len(pickedVotes)+len(pickedExecutions))
})
}
func TestActPool_removeConfirmedActs(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
require.NoError(bc.GetFactory().CommitStateChanges(0, nil, nil, nil))
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
tsf2, _ := signedTransfer(addr1, addr1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
tsf3, _ := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
vote4, _ := signedVote(addr1, addr1, uint64(4), uint64(100000), big.NewInt(10))
err = ap.AddTsf(tsf1)
require.NoError(err)
err = ap.AddTsf(tsf2)
require.NoError(err)
err = ap.AddTsf(tsf3)
require.NoError(err)
err = ap.AddVote(vote4)
require.NoError(err)
require.Equal(4, len(ap.allActions))
require.NotNil(ap.accountActs[addr1.RawAddress])
err = bc.CommitStateChanges(0, []*action.Transfer{tsf1, tsf2, tsf3}, []*action.Vote{vote4}, []*action.Execution{})
require.NoError(err)
ap.removeConfirmedActs()
require.Equal(0, len(ap.allActions))
require.Nil(ap.accountActs[addr1.RawAddress])
}
func TestActPool_Reset(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(200))
require.NoError(err)
_, err = bc.CreateState(addr3.RawAddress, uint64(300))
require.NoError(err)
require.NoError(bc.GetFactory().CommitStateChanges(0, nil, nil, nil))
apConfig := getActPoolCfg()
Ap1, err := NewActPool(bc, apConfig)
require.NoError(err)
ap1, ok := Ap1.(*actPool)
require.True(ok)
Ap2, err := NewActPool(bc, apConfig)
require.NoError(err)
ap2, ok := Ap2.(*actPool)
require.True(ok)
// Tsfs to be added to ap1
tsf1, _ := signedTransfer(addr1, addr2, uint64(1), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
tsf2, _ := signedTransfer(addr1, addr3, uint64(2), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
tsf3, _ := signedTransfer(addr1, addr2, uint64(3), big.NewInt(60), []byte{}, uint64(100000), big.NewInt(10))
tsf4, _ := signedTransfer(addr2, addr1, uint64(1), big.NewInt(100), []byte{}, uint64(100000), big.NewInt(10))
tsf5, _ := signedTransfer(addr2, addr3, uint64(2), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
tsf6, _ := signedTransfer(addr2, addr1, uint64(3), big.NewInt(60), []byte{}, uint64(100000), big.NewInt(10))
tsf7, _ := signedTransfer(addr3, addr1, uint64(1), big.NewInt(100), []byte{}, uint64(100000), big.NewInt(10))
tsf8, _ := signedTransfer(addr3, addr2, uint64(2), big.NewInt(100), []byte{}, uint64(100000), big.NewInt(10))
tsf9, _ := signedTransfer(addr3, addr1, uint64(4), big.NewInt(100), []byte{}, uint64(100000), big.NewInt(10))
err = ap1.AddTsf(tsf1)
require.NoError(err)
err = ap1.AddTsf(tsf2)
require.NoError(err)
err = ap1.AddTsf(tsf3)
require.Equal(ErrBalance, errors.Cause(err))
err = ap1.AddTsf(tsf4)
require.NoError(err)
err = ap1.AddTsf(tsf5)
require.NoError(err)
err = ap1.AddTsf(tsf6)
require.Equal(ErrBalance, errors.Cause(err))
err = ap1.AddTsf(tsf7)
require.NoError(err)
err = ap1.AddTsf(tsf8)
require.NoError(err)
err = ap1.AddTsf(tsf9)
require.NoError(err)
// Tsfs to be added to ap2 only
tsf10, _ := signedTransfer(addr1, addr2, uint64(3), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
tsf11, _ := signedTransfer(addr1, addr3, uint64(4), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
tsf12, _ := signedTransfer(addr2, addr3, uint64(2), big.NewInt(70), []byte{}, uint64(100000), big.NewInt(10))
tsf13, _ := signedTransfer(addr3, addr1, uint64(1), big.NewInt(200), []byte{}, uint64(100000), big.NewInt(10))
tsf14, _ := signedTransfer(addr3, addr2, uint64(2), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
err = ap2.AddTsf(tsf1)
require.NoError(err)
err = ap2.AddTsf(tsf2)
require.NoError(err)
err = ap2.AddTsf(tsf10)
require.NoError(err)
err = ap2.AddTsf(tsf11)
require.Equal(ErrBalance, errors.Cause(err))
err = ap2.AddTsf(tsf4)
require.NoError(err)
err = ap2.AddTsf(tsf12)
require.NoError(err)
err = ap2.AddTsf(tsf13)
require.NoError(err)
err = ap2.AddTsf(tsf14)
require.NoError(err)
err = ap2.AddTsf(tsf9)
require.Equal(ErrBalance, errors.Cause(err))
// Check confirmed nonce, pending nonce, and pending balance after adding Tsfs above for each account
// ap1
// Addr1
ap1PNonce1, _ := ap1.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(3), ap1PNonce1)
ap1PBalance1, _ := ap1.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(20).Uint64(), ap1PBalance1.Uint64())
// Addr2
ap1PNonce2, _ := ap1.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(3), ap1PNonce2)
ap1PBalance2, _ := ap1.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(50).Uint64(), ap1PBalance2.Uint64())
// Addr3
ap1PNonce3, _ := ap1.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(3), ap1PNonce3)
ap1PBalance3, _ := ap1.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(100).Uint64(), ap1PBalance3.Uint64())
// ap2
// Addr1
ap2PNonce1, _ := ap2.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(4), ap2PNonce1)
ap2PBalance1, _ := ap2.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(0).Uint64(), ap2PBalance1.Uint64())
// Addr2
ap2PNonce2, _ := ap2.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(3), ap2PNonce2)
ap2PBalance2, _ := ap2.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(30).Uint64(), ap2PBalance2.Uint64())
// Addr3
ap2PNonce3, _ := ap2.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(3), ap2PNonce3)
ap2PBalance3, _ := ap2.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(50).Uint64(), ap2PBalance3.Uint64())
// Let ap1 be BP's actpool
pickedTsfs, pickedVotes, pickedExecutions := ap1.PickActs()
// ap1 commits update of accounts to trie
err = bc.CommitStateChanges(0, pickedTsfs, pickedVotes, pickedExecutions)
require.NoError(err)
//Reset
ap1.Reset()
ap2.Reset()
// Check confirmed nonce, pending nonce, and pending balance after resetting actpool for each account
// ap1
// Addr1
ap1PNonce1, _ = ap1.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(3), ap1PNonce1)
ap1PBalance1, _ = ap1.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(220).Uint64(), ap1PBalance1.Uint64())
// Addr2
ap1PNonce2, _ = ap1.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(3), ap1PNonce2)
ap1PBalance2, _ = ap1.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(200).Uint64(), ap1PBalance2.Uint64())
// Addr3
ap1PNonce3, _ = ap1.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(3), ap1PNonce3)
ap1PBalance3, _ = ap1.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(180).Uint64(), ap1PBalance3.Uint64())
// ap2
// Addr1
ap2PNonce1, _ = ap2.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(4), ap2PNonce1)
ap2PBalance1, _ = ap2.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(200).Uint64(), ap2PBalance1.Uint64())
// Addr2
ap2PNonce2, _ = ap2.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(3), ap2PNonce2)
ap2PBalance2, _ = ap2.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(200).Uint64(), ap2PBalance2.Uint64())
// Addr3
ap2PNonce3, _ = ap2.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(3), ap2PNonce3)
ap2PBalance3, _ = ap2.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(180).Uint64(), ap2PBalance3.Uint64())
// Add more Tsfs after resetting
// Tsfs To be added to ap1 only
tsf15, _ := signedTransfer(addr3, addr2, uint64(3), big.NewInt(80), []byte{}, uint64(100000), big.NewInt(10))
// Tsfs To be added to ap2 only
tsf16, _ := signedTransfer(addr1, addr2, uint64(4), big.NewInt(150), []byte{}, uint64(100000), big.NewInt(10))
tsf17, _ := signedTransfer(addr2, addr1, uint64(3), big.NewInt(90), []byte{}, uint64(100000), big.NewInt(10))
tsf18, _ := signedTransfer(addr2, addr3, uint64(4), big.NewInt(100), []byte{}, uint64(100000), big.NewInt(10))
tsf19, _ := signedTransfer(addr2, addr1, uint64(5), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(10))
tsf20, _ := signedTransfer(addr3, addr2, uint64(3), big.NewInt(200), []byte{}, uint64(100000), big.NewInt(10))
err = ap1.AddTsf(tsf15)
require.NoError(err)
err = ap2.AddTsf(tsf16)
require.NoError(err)
err = ap2.AddTsf(tsf17)
require.NoError(err)
err = ap2.AddTsf(tsf18)
require.NoError(err)
err = ap2.AddTsf(tsf19)
require.Equal(ErrBalance, errors.Cause(err))
err = ap2.AddTsf(tsf20)
require.Equal(ErrBalance, errors.Cause(err))
// Check confirmed nonce, pending nonce, and pending balance after adding Tsfs above for each account
// ap1
// Addr1
ap1PNonce1, _ = ap1.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(3), ap1PNonce1)
ap1PBalance1, _ = ap1.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(220).Uint64(), ap1PBalance1.Uint64())
// Addr2
ap1PNonce2, _ = ap1.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(3), ap1PNonce2)
ap1PBalance2, _ = ap1.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(200).Uint64(), ap1PBalance2.Uint64())
// Addr3
ap1PNonce3, _ = ap1.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(5), ap1PNonce3)
ap1PBalance3, _ = ap1.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(0).Uint64(), ap1PBalance3.Uint64())
// ap2
// Addr1
ap2PNonce1, _ = ap2.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(5), ap2PNonce1)
ap2PBalance1, _ = ap2.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(50).Uint64(), ap2PBalance1.Uint64())
// Addr2
ap2PNonce2, _ = ap2.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(5), ap2PNonce2)
ap2PBalance2, _ = ap2.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(10).Uint64(), ap2PBalance2.Uint64())
// Addr3
ap2PNonce3, _ = ap2.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(3), ap2PNonce3)
ap2PBalance3, _ = ap2.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(180).Uint64(), ap2PBalance3.Uint64())
// Let ap2 be BP's actpool
pickedTsfs, pickedVotes, pickedExecutions = ap2.PickActs()
// ap2 commits update of accounts to trie
err = bc.CommitStateChanges(0, pickedTsfs, pickedVotes, pickedExecutions)
require.NoError(err)
//Reset
ap1.Reset()
ap2.Reset()
// Check confirmed nonce, pending nonce, and pending balance after resetting actpool for each account
// ap1
// Addr1
ap1PNonce1, _ = ap1.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(5), ap1PNonce1)
ap1PBalance1, _ = ap1.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(140).Uint64(), ap1PBalance1.Uint64())
// Addr2
ap1PNonce2, _ = ap1.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(5), ap1PNonce2)
ap1PBalance2, _ = ap1.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(180).Uint64(), ap1PBalance2.Uint64())
// Addr3
ap1PNonce3, _ = ap1.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(5), ap1PNonce3)
ap1PBalance3, _ = ap1.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(100).Uint64(), ap1PBalance3.Uint64())
// ap2
// Addr1
ap2PNonce1, _ = ap2.getPendingNonce(addr1.RawAddress)
require.Equal(uint64(5), ap2PNonce1)
ap2PBalance1, _ = ap2.getPendingBalance(addr1.RawAddress)
require.Equal(big.NewInt(140).Uint64(), ap2PBalance1.Uint64())
// Addr2
ap2PNonce2, _ = ap2.getPendingNonce(addr2.RawAddress)
require.Equal(uint64(5), ap2PNonce2)
ap2PBalance2, _ = ap2.getPendingBalance(addr2.RawAddress)
require.Equal(big.NewInt(180).Uint64(), ap2PBalance2.Uint64())
// Addr3
ap2PNonce3, _ = ap2.getPendingNonce(addr3.RawAddress)
require.Equal(uint64(3), ap2PNonce3)
ap2PBalance3, _ = ap2.getPendingBalance(addr3.RawAddress)
require.Equal(big.NewInt(280).Uint64(), ap2PBalance3.Uint64())
// Add two more players
_, err = bc.CreateState(addr4.RawAddress, uint64(10))
require.NoError(err)
_, err = bc.CreateState(addr5.RawAddress, uint64(20))
require.NoError(err)
require.NoError(bc.GetFactory().CommitStateChanges(1, nil, nil, nil))
tsf21, _ := signedTransfer(addr4, addr5, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
vote22, _ := signedVote(addr4, addr4, uint64(2), uint64(100000), big.NewInt(10))
vote23, _ := action.NewVote(3, addr4.RawAddress, "", uint64(100000), big.NewInt(10))
vote23, _ = vote23.Sign(addr4)
vote24, _ := signedVote(addr5, addr5, uint64(1), uint64(100000), big.NewInt(10))
tsf25, _ := signedTransfer(addr5, addr4, uint64(2), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
vote26, _ := action.NewVote(3, addr5.RawAddress, "", uint64(100000), big.NewInt(10))
vote26, _ = vote26.Sign(addr5)
err = ap1.AddTsf(tsf21)
require.NoError(err)
err = ap1.AddVote(vote22)
require.NoError(err)
err = ap1.AddVote(vote23)
require.NoError(err)
err = ap1.AddVote(vote24)
require.NoError(err)
err = ap1.AddTsf(tsf25)
require.NoError(err)
err = ap1.AddVote(vote26)
require.NoError(err)
// Check confirmed nonce, pending nonce, and pending balance after adding actions above for account4 and account5
// ap1
// Addr4
ap1PNonce4, _ := ap1.getPendingNonce(addr4.RawAddress)
require.Equal(uint64(4), ap1PNonce4)
ap1PBalance4, _ := ap1.getPendingBalance(addr4.RawAddress)
require.Equal(big.NewInt(0).Uint64(), ap1PBalance4.Uint64())
// Addr5
ap1PNonce5, _ := ap1.getPendingNonce(addr5.RawAddress)
require.Equal(uint64(4), ap1PNonce5)
ap1PBalance5, _ := ap1.getPendingBalance(addr5.RawAddress)
require.Equal(big.NewInt(10).Uint64(), ap1PBalance5.Uint64())
// Let ap1 be BP's actpool
pickedTsfs, pickedVotes, pickedExecutions = ap1.PickActs()
// ap1 commits update of accounts to trie
err = bc.CommitStateChanges(0, pickedTsfs, pickedVotes, pickedExecutions)
require.NoError(err)
//Reset
ap1.Reset()
// Check confirmed nonce, pending nonce, and pending balance after resetting actpool for each account
// ap1
// Addr4
ap1PNonce4, _ = ap1.getPendingNonce(addr4.RawAddress)
require.Equal(uint64(4), ap1PNonce4)
ap1PBalance4, _ = ap1.getPendingBalance(addr4.RawAddress)
require.Equal(big.NewInt(10).Uint64(), ap1PBalance4.Uint64())
// Addr5
ap1PNonce5, _ = ap1.getPendingNonce(addr5.RawAddress)
require.Equal(uint64(4), ap1PNonce5)
ap1PBalance5, _ = ap1.getPendingBalance(addr5.RawAddress)
require.Equal(big.NewInt(20).Uint64(), ap1PBalance5.Uint64())
}
func TestActPool_removeInvalidActs(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
require.NoError(bc.GetFactory().CommitStateChanges(0, nil, nil, nil))
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
tsf2, _ := signedTransfer(addr1, addr1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
tsf3, _ := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
vote4, _ := signedVote(addr1, addr1, uint64(4), uint64(100000), big.NewInt(10))
err = ap.AddTsf(tsf1)
require.NoError(err)
err = ap.AddTsf(tsf2)
require.NoError(err)
err = ap.AddTsf(tsf3)
require.NoError(err)
err = ap.AddVote(vote4)
require.NoError(err)
hash1 := tsf1.Hash()
action1 := tsf1.ConvertToActionPb()
hash2 := vote4.Hash()
action2 := vote4.ConvertToActionPb()
acts := []*iproto.ActionPb{action1, action2}
require.NotNil(ap.allActions[hash1])
require.NotNil(ap.allActions[hash2])
ap.removeInvalidActs(acts)
require.Nil(ap.allActions[hash1])
require.Nil(ap.allActions[hash2])
}
func TestActPool_GetPendingNonce(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(100))
require.NoError(err)
require.NoError(bc.GetFactory().CommitStateChanges(0, nil, nil, nil))
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
tsf3, _ := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
vote4, _ := signedVote(addr1, addr1, uint64(4), uint64(100000), big.NewInt(10))
err = ap.AddTsf(tsf1)
require.NoError(err)
err = ap.AddTsf(tsf3)
require.NoError(err)
err = ap.AddVote(vote4)
require.NoError(err)
nonce, err := ap.GetPendingNonce(addr2.RawAddress)
require.NoError(err)
require.Equal(uint64(1), nonce)
nonce, err = ap.GetPendingNonce(addr1.RawAddress)
require.NoError(err)
require.Equal(uint64(2), nonce)
}
func TestActPool_GetUnconfirmedActs(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(100))
require.NoError(err)
require.NoError(bc.GetFactory().CommitStateChanges(0, nil, nil, nil))
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
act1 := tsf1.ConvertToActionPb()
tsf3, _ := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
act3 := tsf3.ConvertToActionPb()
vote4, _ := signedVote(addr1, addr1, uint64(4), uint64(100000), big.NewInt(10))
act4 := vote4.ConvertToActionPb()
err = ap.AddTsf(tsf1)
require.NoError(err)
err = ap.AddTsf(tsf3)
require.NoError(err)
err = ap.AddVote(vote4)
require.NoError(err)
acts := ap.GetUnconfirmedActs(addr2.RawAddress)
require.Equal([]*iproto.ActionPb{}, acts)
acts = ap.GetUnconfirmedActs(addr1.RawAddress)
require.Equal([]*iproto.ActionPb{act1, act3, act4}, acts)
}
func TestActPool_GetActionByHash(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
_, err = bc.CreateState(addr2.RawAddress, uint64(100))
require.NoError(err)
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, _ := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
hash1 := tsf1.Hash()
act1 := tsf1.ConvertToActionPb()
vote2, _ := signedVote(addr1, addr1, uint64(2), uint64(100000), big.NewInt(10))
hash2 := vote2.Hash()
act2 := vote2.ConvertToActionPb()
ap.allActions[hash1] = act1
act, err := ap.GetActionByHash(hash1)
require.NoError(err)
require.Equal(act1, act)
act, err = ap.GetActionByHash(hash2)
require.Equal(ErrHash, errors.Cause(err))
require.Nil(act)
ap.allActions[hash2] = act2
act, err = ap.GetActionByHash(hash2)
require.NoError(err)
require.Equal(act2, act)
}
func TestActPool_GetCapacity(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
require.Equal(uint64(maxNumActsPerPool), ap.GetCapacity())
}
func TestActPool_GetSize(t *testing.T) {
require := require.New(t)
bc := blockchain.NewBlockchain(&config.Default, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption())
_, err := bc.CreateState(addr1.RawAddress, uint64(100))
require.NoError(err)
require.NoError(bc.CommitStateChanges(0, nil, nil, nil))
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(bc, apConfig)
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
require.Zero(ap.GetSize())
tsf1, err := signedTransfer(addr1, addr1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(10))
require.NoError(err)
tsf2, err := signedTransfer(addr1, addr1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(10))
require.NoError(err)
tsf3, err := signedTransfer(addr1, addr1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(10))
require.NoError(err)
vote4, err := signedVote(addr1, addr1, uint64(4), uint64(100000), big.NewInt(10))
require.NoError(err)
require.NoError(ap.AddTsf(tsf1))
require.NoError(ap.AddTsf(tsf2))
require.NoError(ap.AddTsf(tsf3))
require.NoError(ap.AddVote(vote4))
require.Equal(uint64(4), ap.GetSize())
require.NoError(bc.CommitStateChanges(0,
[]*action.Transfer{tsf1, tsf2, tsf3}, []*action.Vote{vote4}, nil))
ap.removeConfirmedActs()
require.Equal(uint64(0), ap.GetSize())
}
// Helper function to return the correct pending nonce just in case of empty queue
func (ap *actPool) getPendingNonce(addr string) (uint64, error) {
if queue, ok := ap.accountActs[addr]; ok {
return queue.PendingNonce(), nil
}
committedNonce, err := ap.bc.Nonce(addr)
pendingNonce := committedNonce + 1
return pendingNonce, err
}
// Helper function to return the correct pending balance just in case of empty queue
func (ap *actPool) getPendingBalance(addr string) (*big.Int, error) {
if queue, ok := ap.accountActs[addr]; ok {
return queue.PendingBalance(), nil
}
return ap.bc.Balance(addr)
}
// Helper function to return a signed transfer
func signedTransfer(sender *iotxaddress.Address, recipient *iotxaddress.Address, nonce uint64, amount *big.Int, payload []byte, gasLimit uint64, gasPrice *big.Int) (*action.Transfer, error) {
transfer, err := action.NewTransfer(nonce, amount, sender.RawAddress, recipient.RawAddress, payload, gasLimit, gasPrice)
if err != nil {
return nil, err
}
return transfer.Sign(sender)
}
// Helper function to return a signed vote
func signedVote(voter *iotxaddress.Address, votee *iotxaddress.Address, nonce uint64, gasLimit uint64, gasPrice *big.Int) (*action.Vote, error) {
vote, err := action.NewVote(nonce, voter.RawAddress, votee.RawAddress, gasLimit, gasPrice)
if err != nil {
return nil, err
}
return vote.Sign(voter)
}
func getActPoolCfg() config.ActPool {
return config.ActPool{
MaxNumActsPerPool: maxNumActsPerPool,
MaxNumActsPerAcct: maxNumActsPerAcct,
}
}
| 1 | 11,911 | Please group imports | iotexproject-iotex-core | go |
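The comment above asks for grouped imports. Below is a sketch (an import fragment only, not a compilable file) of how the test file's import block could be grouped, following the common Go convention of standard library, third-party, and project-local groups separated by blank lines; the exact grouping is the authors' call, and `goimports` preserves groups as written.

```go
import (
	"context"
	"fmt"
	"math/big"
	"strings"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/require"

	"github.com/iotexproject/iotex-core/blockchain"
	"github.com/iotexproject/iotex-core/blockchain/action"
	"github.com/iotexproject/iotex-core/config"
	"github.com/iotexproject/iotex-core/iotxaddress"
	"github.com/iotexproject/iotex-core/proto"
	"github.com/iotexproject/iotex-core/test/mock/mock_blockchain"
	"github.com/iotexproject/iotex-core/testutil"
)
```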
@@ -102,7 +102,7 @@ public class TestHiveMetastore {
.transportFactory(new TTransportFactory())
.protocolFactory(new TBinaryProtocol.Factory())
.minWorkerThreads(3)
- .maxWorkerThreads(5);
+ .maxWorkerThreads(10);
return new TThreadPoolServer(args);
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hive;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.RetryingHMSHandler;
import org.apache.hadoop.hive.metastore.TSetIpAddressProcessor;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TTransportFactory;
import static java.nio.file.Files.createTempDirectory;
import static java.nio.file.attribute.PosixFilePermissions.asFileAttribute;
import static java.nio.file.attribute.PosixFilePermissions.fromString;
public class TestHiveMetastore {
private File hiveLocalDir;
private HiveConf hiveConf;
private ExecutorService executorService;
private TServer server;
public void start() {
try {
hiveLocalDir = createTempDirectory("hive", asFileAttribute(fromString("rwxrwxrwx"))).toFile();
File derbyLogFile = new File(hiveLocalDir, "derby.log");
System.setProperty("derby.stream.error.file", derbyLogFile.getAbsolutePath());
setupMetastoreDB("jdbc:derby:" + getDerbyPath() + ";create=true");
TServerSocket socket = new TServerSocket(0);
int port = socket.getServerSocket().getLocalPort();
hiveConf = newHiveConf(port);
server = newThriftServer(socket, hiveConf);
executorService = Executors.newSingleThreadExecutor();
executorService.submit(() -> server.serve());
} catch (Exception e) {
throw new RuntimeException("Cannot start TestHiveMetastore", e);
}
}
public void stop() {
if (server != null) {
server.stop();
}
if (executorService != null) {
executorService.shutdown();
}
if (hiveLocalDir != null) {
hiveLocalDir.delete();
}
}
public HiveConf hiveConf() {
return hiveConf;
}
public String getDatabasePath(String dbName) {
File dbDir = new File(hiveLocalDir, dbName + ".db");
return dbDir.getPath();
}
private TServer newThriftServer(TServerSocket socket, HiveConf conf) throws Exception {
HiveConf serverConf = new HiveConf(conf);
serverConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:" + getDerbyPath() + ";create=true");
HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", serverConf);
IHMSHandler handler = RetryingHMSHandler.getProxy(serverConf, baseHandler, false);
TThreadPoolServer.Args args = new TThreadPoolServer.Args(socket)
.processor(new TSetIpAddressProcessor<>(handler))
.transportFactory(new TTransportFactory())
.protocolFactory(new TBinaryProtocol.Factory())
.minWorkerThreads(3)
.maxWorkerThreads(5);
return new TThreadPoolServer(args);
}
private HiveConf newHiveConf(int port) {
HiveConf newHiveConf = new HiveConf(new Configuration(), TestHiveMetastore.class);
newHiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, "thrift://localhost:" + port);
newHiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, "file:" + hiveLocalDir.getAbsolutePath());
return newHiveConf;
}
private void setupMetastoreDB(String dbURL) throws SQLException, IOException {
Connection connection = DriverManager.getConnection(dbURL);
ScriptRunner scriptRunner = new ScriptRunner(connection, true, true);
ClassLoader classLoader = ClassLoader.getSystemClassLoader();
InputStream inputStream = classLoader.getResourceAsStream("hive-schema-3.1.0.derby.sql");
try (Reader reader = new InputStreamReader(inputStream)) {
scriptRunner.runScript(reader);
}
}
private String getDerbyPath() {
File metastoreDB = new File(hiveLocalDir, "metastore_db");
return metastoreDB.getPath();
}
}
| 1 | 15,227 | Would it make sense to expose this as a config option in `TestHiveMetastore`? A small number of worker threads helped us to catch a few connection leaks early on. | apache-iceberg | java |
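The reviewer suggests exposing the worker-thread count as a config option so tests can deliberately run with a small pool and surface connection leaks. A hypothetical sketch of one way to do that in `TestHiveMetastore`; the constructor parameter and constant names are invented for illustration and the actual change may look different.

```java
public class TestHiveMetastoreSketch {
  // Hypothetical default matching the value before this patch.
  private static final int DEFAULT_MAX_WORKER_THREADS = 5;

  private final int maxWorkerThreads;

  public TestHiveMetastoreSketch() {
    this(DEFAULT_MAX_WORKER_THREADS);
  }

  // Tests that want to catch connection leaks early can pass a small pool size.
  public TestHiveMetastoreSketch(int maxWorkerThreads) {
    this.maxWorkerThreads = maxWorkerThreads;
  }

  // newThriftServer(...) would then call .maxWorkerThreads(maxWorkerThreads)
  // instead of a hard-coded literal.
}
```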
@@ -10,9 +10,12 @@ void im2col_cpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
+ const int dilation_h, const int dilation_w,
Dtype* data_col) {
- const int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
- const int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
+ const int height_col = (height + 2 * pad_h -
+ (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
+ const int width_col = (width + 2 * pad_w -
+ (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
const int channels_col = channels * kernel_h * kernel_w;
for (int c_col = 0; c_col < channels_col; ++c_col) {
int w_offset = c_col % kernel_w; | 1 | #include <vector>
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void im2col_cpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
Dtype* data_col) {
const int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
const int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
const int channels_col = channels * kernel_h * kernel_w;
for (int c_col = 0; c_col < channels_col; ++c_col) {
int w_offset = c_col % kernel_w;
int h_offset = (c_col / kernel_w) % kernel_h;
int c_im = c_col / kernel_h / kernel_w;
for (int h_col = 0; h_col < height_col; ++h_col) {
for (int w_col = 0; w_col < width_col; ++w_col) {
int h_im = h_col * stride_h - pad_h + h_offset;
int w_im = w_col * stride_w - pad_w + w_offset;
data_col[(c_col * height_col + h_col) * width_col + w_col] =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im[(c_im * height + h_im) * width + w_im] : 0;
}
}
}
}
// Explicit instantiation
template void im2col_cpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, float* data_col);
template void im2col_cpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, double* data_col);
template <typename Dtype>
inline void im2col_nd_core_cpu(const Dtype* data_input, const bool im2col,
const int num_spatial_axes, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
Dtype* data_output) {
if (!im2col) {
int im_size = im_shape[0];
for (int i = 0; i < num_spatial_axes; ++i) {
im_size *= im_shape[1 + i];
}
caffe_set(im_size, Dtype(0), data_output);
}
int kernel_size = 1;
for (int i = 0; i < num_spatial_axes; ++i) {
kernel_size *= kernel_shape[i];
}
const int channels_col = col_shape[0];
vector<int> d_offset(num_spatial_axes, 0);
vector<int> d_iter(num_spatial_axes, 0);
for (int c_col = 0; c_col < channels_col; ++c_col) {
// Loop over spatial axes in reverse order to compute a per-axis offset.
int offset = c_col;
for (int d_i = num_spatial_axes - 1; d_i >= 0; --d_i) {
if (d_i < num_spatial_axes - 1) {
offset /= kernel_shape[d_i + 1];
}
d_offset[d_i] = offset % kernel_shape[d_i];
}
for (bool incremented = true; incremented; ) {
// Loop over spatial axes in forward order to compute the indices in the
// image and column, and whether the index lies in the padding.
int index_col = c_col;
int index_im = c_col / kernel_size;
bool is_padding = false;
for (int d_i = 0; d_i < num_spatial_axes; ++d_i) {
const int d = d_iter[d_i];
const int d_im = d * stride[d_i] - pad[d_i] + d_offset[d_i];
is_padding |= d_im < 0 || d_im >= im_shape[d_i + 1];
index_col *= col_shape[d_i + 1];
index_col += d;
index_im *= im_shape[d_i + 1];
index_im += d_im;
}
if (im2col) {
if (is_padding) {
data_output[index_col] = 0;
} else {
data_output[index_col] = data_input[index_im];
}
} else if (!is_padding) { // col2im
data_output[index_im] += data_input[index_col];
}
// Loop over spatial axes in reverse order to choose an index,
// like counting.
incremented = false;
for (int d_i = num_spatial_axes - 1; d_i >= 0; --d_i) {
const int d_max = col_shape[d_i + 1];
DCHECK_LT(d_iter[d_i], d_max);
if (d_iter[d_i] == d_max - 1) {
d_iter[d_i] = 0;
} else { // d_iter[d_i] < d_max - 1
++d_iter[d_i];
incremented = true;
break;
}
}
} // while(incremented) {
} // for (int c = 0; c < channels_col; ++c) {
}
template <typename Dtype>
void im2col_nd_cpu(const Dtype* data_im, const int num_spatial_axes,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
Dtype* data_col) {
const bool kIm2Col = true;
im2col_nd_core_cpu(data_im, kIm2Col, num_spatial_axes, im_shape, col_shape,
kernel_shape, pad, stride, data_col);
}
// Explicit instantiation
template void im2col_nd_cpu<float>(const float* data_im,
const int num_spatial_axes,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
float* data_col);
template void im2col_nd_cpu<double>(const double* data_im,
const int num_spatial_axes,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
double* data_col);
template <typename Dtype>
void col2im_cpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
Dtype* data_im) {
caffe_set(height * width * channels, Dtype(0), data_im);
const int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
const int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
const int channels_col = channels * kernel_h * kernel_w;
for (int c_col = 0; c_col < channels_col; ++c_col) {
int w_offset = c_col % kernel_w;
int h_offset = (c_col / kernel_w) % kernel_h;
int c_im = c_col / kernel_h / kernel_w;
for (int h_col = 0; h_col < height_col; ++h_col) {
for (int w_col = 0; w_col < width_col; ++w_col) {
int h_im = h_col * stride_h - pad_h + h_offset;
int w_im = w_col * stride_w - pad_w + w_offset;
if (h_im >= 0 && h_im < height && w_im >= 0 && w_im < width)
data_im[(c_im * height + h_im) * width + w_im] +=
data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
}
}
// Explicit instantiation
template void col2im_cpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, float* data_im);
template void col2im_cpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, double* data_im);
template <typename Dtype>
void col2im_nd_cpu(const Dtype* data_col, const int num_spatial_axes,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
Dtype* data_im) {
const bool kIm2Col = false;
im2col_nd_core_cpu(data_col, kIm2Col, num_spatial_axes, im_shape, col_shape,
kernel_shape, pad, stride, data_im);
}
// Explicit instantiation
template void col2im_nd_cpu<float>(const float* data_col,
const int num_spatial_axes,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
float* data_im);
template void col2im_nd_cpu<double>(const double* data_col,
const int num_spatial_axes,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
double* data_im);
} // namespace caffe
| 1 | 35,877 | Let's fix the extra spaces above. | BVLC-caffe | cpp |
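The patched formula replaces the plain kernel size with the dilated ("effective") kernel size, dilation * (kernel - 1) + 1. A small standalone sketch (not part of Caffe) that evaluates the same output-size formula, useful as a sanity check of the arithmetic:

```cpp
#include <cstdio>

// Output length along one spatial axis, matching the patched im2col_cpu above.
int conv_out_size(int in, int pad, int kernel, int dilation, int stride) {
  const int effective_kernel = dilation * (kernel - 1) + 1;  // e.g. k=3, d=2 -> 5
  return (in + 2 * pad - effective_kernel) / stride + 1;
}

int main() {
  // With dilation = 1 this reduces to the original (in + 2*pad - kernel) / stride + 1.
  std::printf("%d\n", conv_out_size(7, 0, 3, 1, 1));  // prints 5
  std::printf("%d\n", conv_out_size(7, 0, 3, 2, 1));  // prints 3: effective kernel is 5
  return 0;
}
```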
@@ -27,7 +27,7 @@ public interface SearchContext {
* @return A list of all {@link WebElement}s, or an empty list if nothing matches
* @see org.openqa.selenium.By
*/
- List<WebElement> findElements(By by);
+ <T extends WebElement> List<T> findElements(By by);
/** | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium;
import java.util.List;
public interface SearchContext {
/**
* Find all elements within the current context using the given mechanism.
*
* @param by The locating mechanism to use
* @return A list of all {@link WebElement}s, or an empty list if nothing matches
* @see org.openqa.selenium.By
*/
List<WebElement> findElements(By by);
/**
* Find the first {@link WebElement} using the given method.
*
* @param by The locating mechanism
* @return The first matching element on the current context
* @throws NoSuchElementException If no matching elements are found
*/
WebElement findElement(By by);
}
| 1 | 19,273 | should a similar change be done for findElement? | SeleniumHQ-selenium | java |
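A minimal sketch of the analogous change the reviewer asks about, assuming `findElement` gets the same self-bounded generic treatment the patch gives `findElements`; the exact signature is an assumption, not something stated in the record:

```java
package org.openqa.selenium;

import java.util.List;

public interface SearchContext {
  // Patched signature from the diff above.
  <T extends WebElement> List<T> findElements(By by);

  // Hypothetical matching change for the single-element lookup.
  <T extends WebElement> T findElement(By by);
}
```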
@@ -15,11 +15,17 @@ import (
var _ keystore.Service = (*Service)(nil)
+// Service is the memory-based keystore.Service implementation.
+//
+// Keys are stored in the in-memory map, where the key is the name of the
+// private key, and value is structure where actual private key and password are
+// stored.
type Service struct {
m map[string]key
mu sync.Mutex
}
+// New creates new memory-based keystore.Service implementation.
func New() *Service {
return &Service{
m: make(map[string]key), | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mem
import (
"crypto/ecdsa"
"fmt"
"sync"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/keystore"
)
var _ keystore.Service = (*Service)(nil)
type Service struct {
m map[string]key
mu sync.Mutex
}
func New() *Service {
return &Service{
m: make(map[string]key),
}
}
func (s *Service) Exists(name string) (bool, error) {
s.mu.Lock()
defer s.mu.Unlock()
_, ok := s.m[name]
return ok, nil
}
func (s *Service) Key(name, password string) (pk *ecdsa.PrivateKey, created bool, err error) {
s.mu.Lock()
defer s.mu.Unlock()
k, ok := s.m[name]
if !ok {
pk, err := crypto.GenerateSecp256k1Key()
if err != nil {
return nil, false, fmt.Errorf("generate secp256k1 key: %w", err)
}
s.m[name] = key{
pk: pk,
password: password,
}
return pk, true, nil
}
if k.password != password {
return nil, false, keystore.ErrInvalidPassword
}
return k.pk, created, nil
}
type key struct {
pk *ecdsa.PrivateKey
password string
}
| 1 | 12,838 | // Keys are stored in an in-memory map, where the key is the name of the // private key, and the value is the structure where the actual private key and the password are // stored. | ethersphere-bee | go |
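A small sketch of the `Service` doc comment with the reviewer's suggested wording applied; the placement is assumed and the struct body is unchanged from the file above:

```go
// Service is the memory-based keystore.Service implementation.
//
// Keys are stored in an in-memory map, where the key is the name of the
// private key, and the value is the structure where the actual private key
// and the password are stored.
type Service struct {
	m  map[string]key
	mu sync.Mutex
}
```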
@@ -59,6 +59,9 @@ var _ = duck.VerifyType(&PullSubscription{}, &duckv1.Conditions{})
// PullSubscriptionSpec defines the desired state of the PullSubscription.
type PullSubscriptionSpec struct {
+ // This brings in CloudEventOverrides and Sink.
+ duckv1.SourceSpec
+
// Secret is the credential to use to create and poll the PullSubscription
// Subscription. The value of the secret entry must be a service account
// key in the JSON format (see https://cloud.google.com/iam/docs/creating-managing-service-account-keys). | 1 | /*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/pkg/apis"
"knative.dev/pkg/apis/duck"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/apis/v1alpha1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PullSubscription is the Schema for the gcppullSubscriptions API.
// +k8s:openapi-gen=true
type PullSubscription struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec PullSubscriptionSpec `json:"spec,omitempty"`
Status PullSubscriptionStatus `json:"status,omitempty"`
}
// PubSubMode returns the mode currently set for PullSubscription.
func (p *PullSubscription) PubSubMode() ModeType {
return p.Spec.Mode
}
// Check that PullSubscription can be validated and can be defaulted.
var _ runtime.Object = (*PullSubscription)(nil)
// Check that PullSubscription will be checked for immutable fields.
var _ apis.Immutable = (*PullSubscription)(nil)
// Check that PullSubscription implements the Conditions duck type.
var _ = duck.VerifyType(&PullSubscription{}, &duckv1.Conditions{})
// PullSubscriptionSpec defines the desired state of the PullSubscription.
type PullSubscriptionSpec struct {
// Secret is the credential to use to create and poll the PullSubscription
// Subscription. The value of the secret entry must be a service account
// key in the JSON format (see https://cloud.google.com/iam/docs/creating-managing-service-account-keys).
// +optional
Secret *corev1.SecretKeySelector `json:"secret,omitempty"`
// Project is the ID of the Google Cloud Project that the PullSubscription
// Topic exists in.
// +optional
Project string `json:"project,omitempty"`
// Topic is the ID of the PullSubscription Topic to Subscribe to. It must
// be in the form of the unique identifier within the project, not the
// entire name. E.g. it must be 'laconia', not
// 'projects/my-proj/topics/laconia'.
Topic string `json:"topic,omitempty"`
// AckDeadline is the default maximum time after a subscriber receives a
// message before the subscriber should acknowledge the message. Defaults
// to 30 seconds ('30s').
// +optional
AckDeadline *string `json:"ackDeadline,omitempty"`
// RetainAckedMessages defines whether to retain acknowledged messages. If
// true, acknowledged messages will not be expunged until they fall out of
// the RetentionDuration window.
RetainAckedMessages bool `json:"retainAckedMessages,omitempty"`
// RetentionDuration defines how long to retain messages in backlog, from
// the time of publish. If RetainAckedMessages is true, this duration
// affects the retention of acknowledged messages, otherwise only
// unacknowledged messages are retained. Cannot be longer than 7 days or
// shorter than 10 minutes. Defaults to 7 days ('7d').
// +optional
RetentionDuration *string `json:"retentionDuration,omitempty"`
// Sink is a reference to an object that will resolve to a domain name or a
// URI directly to use as the sink.
Sink v1alpha1.Destination `json:"sink"`
// Transformer is a reference to an object that will resolve to a domain
// name or a URI directly to use as the transformer or a URI directly.
// +optional
Transformer *v1alpha1.Destination `json:"transformer,omitempty"`
// Mode defines the encoding and structure of the payload of when the
// PullSubscription invokes the sink.
// +optional
Mode ModeType `json:"mode,omitempty"`
// CloudEventOverrides defines overrides to control modifications of the
// event sent to the sink.
// +optional
CloudEventOverrides *CloudEventOverrides `json:"ceOverrides,omitempty"`
}
// CloudEventOverrides defines arguments for a Source that control the output
// format of the CloudEvents produced by the Source.
type CloudEventOverrides struct {
// Extensions specify what attribute are added or overridden on the
// outbound event. Each `Extensions` key-value pair are set on the event as
// an attribute extension independently.
// +optional
Extensions map[string]string `json:"extensions,omitempty"`
}
// GetAckDeadline parses AckDeadline and returns the default if an error occurs.
func (ps PullSubscriptionSpec) GetAckDeadline() time.Duration {
if ps.AckDeadline != nil {
if duration, err := time.ParseDuration(*ps.AckDeadline); err == nil {
return duration
}
}
return defaultAckDeadline
}
// GetRetentionDuration parses RetentionDuration and returns the default if an error occurs.
func (ps PullSubscriptionSpec) GetRetentionDuration() time.Duration {
if ps.RetentionDuration != nil {
if duration, err := time.ParseDuration(*ps.RetentionDuration); err == nil {
return duration
}
}
return defaultRetentionDuration
}
type ModeType string
const (
// ModeCloudEventsBinary will use CloudEvents binary HTTP mode with
// flattened Pub/Sub payload.
ModeCloudEventsBinary ModeType = "CloudEventsBinary"
// ModeCloudEventsStructured will use CloudEvents structured HTTP mode with
// flattened Pub/Sub payload.
ModeCloudEventsStructured ModeType = "CloudEventsStructured"
// ModePushCompatible will use CloudEvents binary HTTP mode with expanded
// Pub/Sub payload that matches how Cloud Pub/Sub delivers a push message.
ModePushCompatible ModeType = "PushCompatible"
)
// PubSubEventSource returns the Cloud Pub/Sub CloudEvent source value.
func PubSubEventSource(googleCloudProject, topic string) string {
return fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", googleCloudProject, topic)
}
const (
// PullSubscription CloudEvent type
PubSubPublish = "com.google.cloud.pubsub.topic.publish"
)
const (
// PullSubscriptionConditionReady has status True when the PullSubscription is
// ready to send events.
PullSubscriptionConditionReady = apis.ConditionReady
// PullSubscriptionConditionSinkProvided has status True when the PullSubscription
// has been configured with a sink target.
PullSubscriptionConditionSinkProvided apis.ConditionType = "SinkProvided"
// PullSubscriptionConditionDeployed has status True when the PullSubscription has
// had its receive adapter deployment created.
PullSubscriptionConditionDeployed apis.ConditionType = "Deployed"
// PullSubscriptionConditionSubscribed has status True when a Google Cloud
// Pub/Sub Subscription has been created pointing at the created receive
// adapter deployment.
PullSubscriptionConditionSubscribed apis.ConditionType = "Subscribed"
// PullSubscriptionConditionTransformerProvided has status True when the
// PullSubscription has been configured with a transformer target.
PullSubscriptionConditionTransformerProvided apis.ConditionType = "TransformerProvided"
// PullSubscriptionConditionEventTypesProvided has status True when the
// PullSubscription has been configured with event types.
PullSubscriptionConditionEventTypesProvided apis.ConditionType = "EventTypesProvided"
)
var pullSubscriptionCondSet = apis.NewLivingConditionSet(
PullSubscriptionConditionSinkProvided,
PullSubscriptionConditionDeployed,
PullSubscriptionConditionSubscribed,
)
// PullSubscriptionStatus defines the observed state of PullSubscription.
type PullSubscriptionStatus struct {
// inherits duck/v1beta1 Status, which currently provides:
// * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller.
// * Conditions - the latest available observations of a resource's current state.
duckv1.Status `json:",inline"`
// SinkURI is the current active sink URI that has been configured for the
// PullSubscription.
// +optional
SinkURI string `json:"sinkUri,omitempty"`
// TransformerURI is the current active transformer URI that has been
// configured for the PullSubscription.
// +optional
TransformerURI string `json:"transformerUri,omitempty"`
// ProjectID is the resolved project ID in use by the PullSubscription.
// +optional
ProjectID string `json:"projectId,omitempty"`
// SubscriptionID is the created subscription ID used by the PullSubscription.
// +optional
SubscriptionID string `json:"subscriptionId,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PullSubscriptionList contains a list of PubSubs.
type PullSubscriptionList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []PullSubscription `json:"items"`
}
// GetGroupVersionKind returns the GroupVersionKind.
func (s *PullSubscription) GetGroupVersionKind() schema.GroupVersionKind {
return SchemeGroupVersion.WithKind("PullSubscription")
}
| 1 | 9,627 | I think this needs a json inline tag here | google-knative-gcp | go |
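A hedged sketch of the json inline tag the reviewer asks for on the embedded `duckv1.SourceSpec` field; the `,inline` tag is the usual Kubernetes API convention for embedded specs, and the remaining spec fields are elided here:

```go
// PullSubscriptionSpec defines the desired state of the PullSubscription.
type PullSubscriptionSpec struct {
	// This brings in CloudEventOverrides and Sink.
	duckv1.SourceSpec `json:",inline"`

	// ... remaining fields (Secret, Project, Topic, ...) unchanged ...
}
```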
@@ -510,7 +510,7 @@ string t_erl_generator::render_member_type(t_field * field) {
} else if (type->is_struct() || type->is_xception()) {
return "#" + uncapitalize(type->get_name()) + "{}";
} else if (type->is_map()) {
- return "dict()";
+ return "dict:dict()";
} else if (type->is_set()) {
return "set()";
} else if (type->is_list()) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <string>
#include <fstream>
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sstream>
#include "t_generator.h"
#include "platform.h"
#include "version.h"
using std::map;
using std::ofstream;
using std::ostream;
using std::ostringstream;
using std::string;
using std::stringstream;
using std::vector;
static const std::string endl = "\n"; // avoid ostream << std::endl flushes
/**
* Erlang code generator.
*
*/
class t_erl_generator : public t_generator {
public:
t_erl_generator(
t_program* program,
const std::map<std::string, std::string>& parsed_options,
const std::string& option_string)
: t_generator(program)
{
(void) parsed_options;
(void) option_string;
program_name_[0] = tolower(program_name_[0]);
service_name_[0] = tolower(service_name_[0]);
out_dir_base_ = "gen-erl";
}
/**
* Init and close methods
*/
void init_generator();
void close_generator();
/**
* Program-level generation functions
*/
void generate_typedef (t_typedef* ttypedef);
void generate_enum (t_enum* tenum);
void generate_const (t_const* tconst);
void generate_struct (t_struct* tstruct);
void generate_xception (t_struct* txception);
void generate_service (t_service* tservice);
void generate_member_type(std::ostream & out, t_type* type);
void generate_member_value(std::ostream & out, t_type* type, t_const_value* value);
std::string render_member_type(t_field * field);
std::string render_member_value(t_field * field);
std::string render_member_requiredness(t_field * field);
// std::string render_default_value(t_type* type);
std::string render_default_value(t_field * field);
std::string render_const_value(t_type* type, t_const_value* value);
std::string render_type_term(t_type* ttype, bool expand_structs, bool extended_info = false);
/**
* Struct generation code
*/
void generate_erl_struct(t_struct* tstruct, bool is_exception);
void generate_erl_struct_definition(std::ostream& out, t_struct* tstruct);
void generate_erl_struct_member(std::ostream& out, t_field * tmember);
void generate_erl_struct_info(std::ostream& out, t_struct* tstruct);
void generate_erl_extended_struct_info(std::ostream& out, t_struct* tstruct);
void generate_erl_function_helpers(t_function* tfunction);
/**
* Service-level generation functions
*/
void generate_service_helpers (t_service* tservice);
void generate_service_interface (t_service* tservice);
void generate_function_info (t_service* tservice, t_function* tfunction);
/**
* Helper rendering functions
*/
std::string erl_autogen_comment();
std::string erl_imports();
std::string render_includes();
std::string type_name(t_type* ttype);
std::string function_signature(t_function* tfunction, std::string prefix="");
std::string argument_list(t_struct* tstruct);
std::string type_to_enum(t_type* ttype);
std::string type_module(t_type* ttype);
std::string capitalize(std::string in) {
in[0] = toupper(in[0]);
return in;
}
std::string uncapitalize(std::string in) {
in[0] = tolower(in[0]);
return in;
}
static std::string comment(string in);
private:
bool has_default_value(t_field *);
/**
* add function to export list
*/
void export_function(t_function* tfunction, std::string prefix="");
void export_string(std::string name, int num);
void export_types_function(t_function* tfunction, std::string prefix="");
void export_types_string(std::string name, int num);
/**
* write out headers and footers for hrl files
*/
void hrl_header(std::ostream& out, std::string name);
void hrl_footer(std::ostream& out, std::string name);
/**
* stuff to spit out at the top of generated files
*/
bool export_lines_first_;
std::ostringstream export_lines_;
bool export_types_lines_first_;
std::ostringstream export_types_lines_;
/**
* File streams
*/
std::ostringstream f_info_;
std::ostringstream f_info_ext_;
std::ofstream f_types_file_;
std::ofstream f_types_hrl_file_;
std::ofstream f_consts_;
std::ostringstream f_service_;
std::ofstream f_service_file_;
std::ofstream f_service_hrl_;
};
/**
* UI for file generation by opening up the necessary file output
* streams.
*
* @param tprogram The program to generate
*/
void t_erl_generator::init_generator() {
// Make output directory
MKDIR(get_out_dir().c_str());
// setup export lines
export_lines_first_ = true;
export_types_lines_first_ = true;
// types files
string f_types_name = get_out_dir()+program_name_+"_types.erl";
string f_types_hrl_name = get_out_dir()+program_name_+"_types.hrl";
f_types_file_.open(f_types_name.c_str());
f_types_hrl_file_.open(f_types_hrl_name.c_str());
hrl_header(f_types_hrl_file_, program_name_ + "_types");
f_types_file_ <<
erl_autogen_comment() << endl <<
"-module(" << program_name_ << "_types)." << endl <<
erl_imports() << endl;
f_types_file_ <<
"-include(\"" << program_name_ << "_types.hrl\")." << endl <<
endl;
f_types_hrl_file_ << render_includes() << endl;
// consts file
string f_consts_name = get_out_dir()+program_name_+"_constants.hrl";
f_consts_.open(f_consts_name.c_str());
f_consts_ <<
erl_autogen_comment() << endl <<
erl_imports() << endl <<
"-include(\"" << program_name_ << "_types.hrl\")." << endl <<
endl;
}
/**
* Boilerplate at beginning and end of header files
*/
void t_erl_generator::hrl_header(ostream& out, string name) {
out << "-ifndef(_" << name << "_included)." << endl <<
"-define(_" << name << "_included, yeah)." << endl;
}
void t_erl_generator::hrl_footer(ostream& out, string name) {
(void) name;
out << "-endif." << endl;
}
/**
* Renders all the imports necessary for including another Thrift program
*/
string t_erl_generator::render_includes() {
const vector<t_program*>& includes = program_->get_includes();
string result = "";
for (size_t i = 0; i < includes.size(); ++i) {
result += "-include(\"" + uncapitalize(includes[i]->get_name()) + "_types.hrl\").\n";
}
if (includes.size() > 0) {
result += "\n";
}
return result;
}
/**
* Autogen'd comment
*/
string t_erl_generator::erl_autogen_comment() {
return
std::string("%%\n") +
"%% Autogenerated by Thrift Compiler (" + THRIFT_VERSION + ")\n" +
"%%\n" +
"%% DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING\n" +
"%%\n";
}
/**
* Comment out text
*/
string t_erl_generator::comment(string in)
{
size_t pos = 0;
in.insert(pos, "%% ");
while ( (pos = in.find_first_of('\n', pos)) != string::npos )
{
in.insert(++pos, "%% ");
}
return in;
}
/**
* Prints standard thrift imports
*/
string t_erl_generator::erl_imports() {
return "";
}
/**
* Closes the type files
*/
void t_erl_generator::close_generator() {
export_types_string("struct_info", 1);
export_types_string("struct_info_ext", 1);
f_types_file_ << "-export([" << export_types_lines_.str() << "])." << endl << endl;
f_types_file_ << f_info_.str();
f_types_file_ << "struct_info('i am a dummy struct') -> undefined." << endl << endl;
f_types_file_ << f_info_ext_.str();
f_types_file_ << "struct_info_ext('i am a dummy struct') -> undefined." << endl << endl;
hrl_footer(f_types_hrl_file_, string("BOGUS"));
f_types_file_.close();
f_types_hrl_file_.close();
f_consts_.close();
}
/**
* Generates a typedef. no op
*
* @param ttypedef The type definition
*/
void t_erl_generator::generate_typedef(t_typedef* ttypedef) {
(void) ttypedef;
}
/**
* Generates code for an enumerated type. Done using a class to scope
* the values.
*
* @param tenum The enumeration
*/
void t_erl_generator::generate_enum(t_enum* tenum) {
vector<t_enum_value*> constants = tenum->get_constants();
vector<t_enum_value*>::iterator c_iter;
for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) {
int value = (*c_iter)->get_value();
string name = capitalize((*c_iter)->get_name());
indent(f_types_hrl_file_) <<
"-define(" << program_name_ << "_" << tenum->get_name() << "_" << name << ", " << value << ")."<< endl;
}
f_types_hrl_file_ << endl;
}
/**
* Generate a constant value
*/
void t_erl_generator::generate_const(t_const* tconst) {
t_type* type = tconst->get_type();
string name = capitalize(tconst->get_name());
t_const_value* value = tconst->get_value();
f_consts_ << "-define(" << program_name_ << "_" << name << ", " << render_const_value(type, value) << ")." << endl << endl;
}
/**
* Prints the value of a constant with the given type. Note that type checking
* is NOT performed in this function as it is always run beforehand using the
* validate_types method in main.cc
*/
string t_erl_generator::render_const_value(t_type* type, t_const_value* value) {
type = get_true_type(type);
std::ostringstream out;
if (type->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
switch (tbase) {
case t_base_type::TYPE_STRING:
out << '"' << get_escaped_string(value) << '"';
break;
case t_base_type::TYPE_BOOL:
out << (value->get_integer() > 0 ? "true" : "false");
break;
case t_base_type::TYPE_BYTE:
case t_base_type::TYPE_I16:
case t_base_type::TYPE_I32:
case t_base_type::TYPE_I64:
out << value->get_integer();
break;
case t_base_type::TYPE_DOUBLE:
if (value->get_type() == t_const_value::CV_INTEGER) {
out << value->get_integer();
} else {
out << value->get_double();
}
break;
default:
throw "compiler error: no const of base type " + t_base_type::t_base_name(tbase);
}
} else if (type->is_enum()) {
indent(out) << value->get_integer();
} else if (type->is_struct() || type->is_xception()) {
out << "#" << uncapitalize(type->get_name()) << "{";
const vector<t_field*>& fields = ((t_struct*)type)->get_members();
vector<t_field*>::const_iterator f_iter;
const map<t_const_value*, t_const_value*>& val = value->get_map();
map<t_const_value*, t_const_value*>::const_iterator v_iter;
bool first = true;
for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) {
t_type* field_type = NULL;
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if ((*f_iter)->get_name() == v_iter->first->get_string()) {
field_type = (*f_iter)->get_type();
}
}
if (field_type == NULL) {
throw "type error: " + type->get_name() + " has no field " + v_iter->first->get_string();
}
if (first) {
first = false;
} else {
out << ",";
}
out << v_iter->first->get_string();
out << " = ";
out << render_const_value(field_type, v_iter->second);
}
indent_down();
indent(out) << "}";
} else if (type->is_map()) {
t_type* ktype = ((t_map*)type)->get_key_type();
t_type* vtype = ((t_map*)type)->get_val_type();
out << "dict:from_list([";
map<t_const_value*, t_const_value*>::const_iterator i, end = value->get_map().end();
for (i = value->get_map().begin(); i != end;) {
out << "{"
<< render_const_value(ktype, i->first) << ","
<< render_const_value(vtype, i->second) << "}";
if ( ++i != end ) {
out << ",";
}
}
out << "])";
} else if (type->is_set()) {
t_type* etype = ((t_set*)type)->get_elem_type();
out << "sets:from_list([";
vector<t_const_value*>::const_iterator i, end = value->get_list().end();
for( i = value->get_list().begin(); i != end; ) {
out << render_const_value(etype, *i) ;
if ( ++i != end ) {
out << ",";
}
}
out << "])";
} else if (type->is_list()) {
t_type* etype;
etype = ((t_list*)type)->get_elem_type();
out << "[";
bool first = true;
const vector<t_const_value*>& val = value->get_list();
vector<t_const_value*>::const_iterator v_iter;
for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) {
if (first) {
first=false;
} else {
out << ",";
}
out << render_const_value(etype, *v_iter);
}
out << "]";
} else {
throw "CANNOT GENERATE CONSTANT FOR TYPE: " + type->get_name();
}
return out.str();
}
string t_erl_generator::render_default_value(t_field* field) {
t_type *type = field->get_type();
if (type->is_struct() || type->is_xception()) {
return "#" + uncapitalize(type->get_name()) + "{}";
} else if (type->is_map()) {
return "dict:new()";
} else if (type->is_set()) {
return "sets:new()";
} else if (type->is_list()) {
return "[]";
} else {
return "undefined";
}
}
string t_erl_generator::render_member_type(t_field * field) {
t_type * type = get_true_type(field->get_type());
if (type->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
switch (tbase) {
case t_base_type::TYPE_STRING:
return "string() | binary()";
case t_base_type::TYPE_BOOL:
return "boolean()";
case t_base_type::TYPE_BYTE:
case t_base_type::TYPE_I16:
case t_base_type::TYPE_I32:
case t_base_type::TYPE_I64:
return "integer()";
case t_base_type::TYPE_DOUBLE:
return "float()";
default:
throw "compiler error: unsupported base type " + t_base_type::t_base_name(tbase);
}
} else if (type->is_enum()) {
return "integer()";
} else if (type->is_struct() || type->is_xception()) {
return "#" + uncapitalize(type->get_name()) + "{}";
} else if (type->is_map()) {
return "dict()";
} else if (type->is_set()) {
return "set()";
} else if (type->is_list()) {
return "list()";
} else {
throw "compiler error: unsupported type " + type->get_name();
}
}
string t_erl_generator::render_member_requiredness(t_field * field) {
switch(field->get_req()) {
case t_field::T_REQUIRED: return "required";
case t_field::T_OPTIONAL: return "optional";
default: return "undefined";
}
}
/**
* Generates a struct
*/
void t_erl_generator::generate_struct(t_struct* tstruct) {
generate_erl_struct(tstruct, false);
}
/**
* Generates a struct definition for a thrift exception. Basically the same
* as a struct but extends the Exception class.
*
* @param txception The struct definition
*/
void t_erl_generator::generate_xception(t_struct* txception) {
generate_erl_struct(txception, true);
}
/**
* Generates a struct
*/
void t_erl_generator::generate_erl_struct(t_struct* tstruct, bool is_exception) {
(void) is_exception;
generate_erl_struct_definition(f_types_hrl_file_, tstruct);
generate_erl_struct_info(f_info_, tstruct);
generate_erl_extended_struct_info(f_info_ext_, tstruct);
}
/**
* Generates a struct definition for a thrift data type.
*
* @param tstruct The struct definition
*/
void t_erl_generator::generate_erl_struct_definition(ostream& out, t_struct* tstruct)
{
indent(out) << "%% struct " << type_name(tstruct) << endl << endl;
std::stringstream buf;
buf << indent() << "-record(" << type_name(tstruct) << ", {";
string field_indent(buf.str().size(), ' ');
const vector<t_field*>& members = tstruct->get_members();
for (vector<t_field*>::const_iterator m_iter = members.begin(); m_iter != members.end();) {
generate_erl_struct_member(buf, *m_iter);
if ( ++m_iter != members.end() ) {
buf << "," << endl << field_indent;
}
}
buf << "}).";
out << buf.str() << endl << endl;
}
/**
* Generates the record field definition
*/
void t_erl_generator::generate_erl_struct_member(ostream & out, t_field * tmember)
{
out << uncapitalize(tmember->get_name());
if (has_default_value(tmember))
out << " = " << render_member_value(tmember);
out << " :: " << render_member_type(tmember);
}
bool t_erl_generator::has_default_value(t_field * field) {
t_type *type = field->get_type();
if (!field->get_value()) {
if ( field->get_req() == t_field::T_REQUIRED) {
if (type->is_struct() || type->is_xception() || type->is_map() ||
type->is_set() || type->is_list()) {
return true;
} else {
return false;
}
} else {
return false;
}
} else {
return true;
}
}
string t_erl_generator::render_member_value(t_field * field) {
if (!field->get_value()) {
return render_default_value(field);
} else {
return render_const_value(field->get_type(), field->get_value());
}
}
/**
* Generates the read method for a struct
*/
void t_erl_generator::generate_erl_struct_info(ostream& out, t_struct* tstruct) {
indent(out) << "struct_info('" << type_name(tstruct) << "') ->" << endl;
indent_up();
out << indent() << render_type_term(tstruct, true) << ";" << endl;
indent_down();
out << endl;
}
void t_erl_generator::generate_erl_extended_struct_info(ostream& out, t_struct* tstruct) {
indent(out) << "struct_info_ext('" << type_name(tstruct) << "') ->" << endl;
indent_up();
out << indent() << render_type_term(tstruct, true, true) << ";" << endl;
indent_down();
out << endl;
}
/**
* Generates a thrift service.
*
* @param tservice The service definition
*/
void t_erl_generator::generate_service(t_service* tservice) {
// somehow this point is reached before the constructor and it's not downcased yet
// ...awesome
service_name_[0] = tolower(service_name_[0]);
string f_service_hrl_name = get_out_dir()+service_name_+"_thrift.hrl";
string f_service_name = get_out_dir()+service_name_+"_thrift.erl";
f_service_file_.open(f_service_name.c_str());
f_service_hrl_.open(f_service_hrl_name.c_str());
// Reset service text aggregating stream streams
f_service_.str("");
export_lines_.str("");
export_lines_first_ = true;
hrl_header(f_service_hrl_, service_name_);
if (tservice->get_extends() != NULL) {
f_service_hrl_ << "-include(\"" <<
uncapitalize(tservice->get_extends()->get_name()) << "_thrift.hrl\"). % inherit " << endl;
}
f_service_hrl_ <<
"-include(\"" << program_name_ << "_types.hrl\")." << endl <<
endl;
// Generate the three main parts of the service (well, two for now in PHP)
generate_service_helpers(tservice); // cpiro: New Erlang Order
generate_service_interface(tservice);
// indent_down();
f_service_file_ <<
erl_autogen_comment() << endl <<
"-module(" << service_name_ << "_thrift)." << endl <<
"-behaviour(thrift_service)." << endl << endl <<
erl_imports() << endl;
f_service_file_ << "-include(\"" << uncapitalize(tservice->get_name()) << "_thrift.hrl\")." << endl << endl;
f_service_file_ << "-export([" << export_lines_.str() << "])." << endl << endl;
f_service_file_ << f_service_.str();
hrl_footer(f_service_hrl_, f_service_name);
// Close service file
f_service_file_.close();
f_service_hrl_.close();
}
/**
* Generates helper functions for a service.
*
* @param tservice The service to generate a header definition for
*/
void t_erl_generator::generate_service_helpers(t_service* tservice) {
vector<t_function*> functions = tservice->get_functions();
vector<t_function*>::iterator f_iter;
// indent(f_service_) <<
// "% HELPER FUNCTIONS AND STRUCTURES" << endl << endl;
export_string("struct_info", 1);
for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
generate_erl_function_helpers(*f_iter);
}
f_service_ << "struct_info('i am a dummy struct') -> undefined." << endl;
}
/**
* Generates a struct and helpers for a function.
*
* @param tfunction The function
*/
void t_erl_generator::generate_erl_function_helpers(t_function* tfunction) {
(void) tfunction;
}
/**
* Generates a service interface definition.
*
* @param tservice The service to generate a header definition for
*/
void t_erl_generator::generate_service_interface(t_service* tservice) {
export_string("function_info", 2);
vector<t_function*> functions = tservice->get_functions();
vector<t_function*>::iterator f_iter;
f_service_ << "%%% interface" << endl;
for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) {
f_service_ <<
indent() << "% " << function_signature(*f_iter) << endl;
generate_function_info(tservice, *f_iter);
}
// Inheritance - pass unknown functions to base class
if (tservice->get_extends() != NULL) {
indent(f_service_) << "function_info(Function, InfoType) ->" << endl;
indent_up();
indent(f_service_) << uncapitalize(tservice->get_extends()->get_name())
<< "_thrift:function_info(Function, InfoType)." << endl;
indent_down();
} else {
// Use a special return code for nonexistent functions
indent(f_service_) << "function_info(_Func, _Info) -> no_function." << endl;
}
indent(f_service_) << endl;
}
/**
* Generates a function_info(FunctionName, params_type) and
* function_info(FunctionName, reply_type)
*/
void t_erl_generator::generate_function_info(t_service* tservice,
t_function* tfunction) {
(void) tservice;
string name_atom = "'" + tfunction->get_name() + "'";
t_struct* xs = tfunction->get_xceptions();
t_struct* arg_struct = tfunction->get_arglist();
// function_info(Function, params_type):
indent(f_service_) <<
"function_info(" << name_atom << ", params_type) ->" << endl;
indent_up();
indent(f_service_) << render_type_term(arg_struct, true) << ";" << endl;
indent_down();
// function_info(Function, reply_type):
indent(f_service_) <<
"function_info(" << name_atom << ", reply_type) ->" << endl;
indent_up();
if (!tfunction->get_returntype()->is_void())
indent(f_service_) <<
render_type_term(tfunction->get_returntype(), false) << ";" << endl;
else if (tfunction->is_oneway())
indent(f_service_) << "oneway_void;" << endl;
else
indent(f_service_) << "{struct, []}" << ";" << endl;
indent_down();
// function_info(Function, exceptions):
indent(f_service_) <<
"function_info(" << name_atom << ", exceptions) ->" << endl;
indent_up();
indent(f_service_) << render_type_term(xs, true) << ";" << endl;
indent_down();
}
/**
* Renders a function signature of the form 'type name(args)'
*
* @param tfunction Function definition
* @return String of rendered function definition
*/
string t_erl_generator::function_signature(t_function* tfunction,
string prefix) {
return
prefix + tfunction->get_name() +
"(This" + capitalize(argument_list(tfunction->get_arglist())) + ")";
}
/**
* Add a function to the exports list
*/
void t_erl_generator::export_string(string name, int num) {
if (export_lines_first_) {
export_lines_first_ = false;
} else {
export_lines_ << ", ";
}
export_lines_ << name << "/" << num;
}
void t_erl_generator::export_types_function(t_function* tfunction,
string prefix) {
export_types_string(prefix + tfunction->get_name(),
1 // This
+ ((tfunction->get_arglist())->get_members()).size()
);
}
void t_erl_generator::export_types_string(string name, int num) {
if (export_types_lines_first_) {
export_types_lines_first_ = false;
} else {
export_types_lines_ << ", ";
}
export_types_lines_ << name << "/" << num;
}
void t_erl_generator::export_function(t_function* tfunction,
string prefix) {
export_string(prefix + tfunction->get_name(),
1 // This
+ ((tfunction->get_arglist())->get_members()).size()
);
}
/**
* Renders a field list
*/
string t_erl_generator::argument_list(t_struct* tstruct) {
string result = "";
const vector<t_field*>& fields = tstruct->get_members();
vector<t_field*>::const_iterator f_iter;
bool first = true;
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if (first) {
first = false;
result += ", "; // initial comma to compensate for initial This
} else {
result += ", ";
}
result += capitalize((*f_iter)->get_name());
}
return result;
}
string t_erl_generator::type_name(t_type* ttype) {
string prefix = "";
string name = ttype->get_name();
if (ttype->is_struct() || ttype->is_xception() || ttype->is_service()) {
name = uncapitalize(ttype->get_name());
}
return prefix + name;
}
/**
* Converts the parse type to a Erlang "type" (macro for int constants)
*/
string t_erl_generator::type_to_enum(t_type* type) {
type = get_true_type(type);
if (type->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
switch (tbase) {
case t_base_type::TYPE_VOID:
throw "NO T_VOID CONSTRUCT";
case t_base_type::TYPE_STRING:
return "?tType_STRING";
case t_base_type::TYPE_BOOL:
return "?tType_BOOL";
case t_base_type::TYPE_BYTE:
return "?tType_BYTE";
case t_base_type::TYPE_I16:
return "?tType_I16";
case t_base_type::TYPE_I32:
return "?tType_I32";
case t_base_type::TYPE_I64:
return "?tType_I64";
case t_base_type::TYPE_DOUBLE:
return "?tType_DOUBLE";
}
} else if (type->is_enum()) {
return "?tType_I32";
} else if (type->is_struct() || type->is_xception()) {
return "?tType_STRUCT";
} else if (type->is_map()) {
return "?tType_MAP";
} else if (type->is_set()) {
return "?tType_SET";
} else if (type->is_list()) {
return "?tType_LIST";
}
throw "INVALID TYPE IN type_to_enum: " + type->get_name();
}
/**
* Generate an Erlang term which represents a thrift type
*/
std::string t_erl_generator::render_type_term(t_type* type, bool expand_structs, bool extended_info) {
type = get_true_type(type);
if (type->is_base_type()) {
t_base_type::t_base tbase = ((t_base_type*)type)->get_base();
switch (tbase) {
case t_base_type::TYPE_VOID:
throw "NO T_VOID CONSTRUCT";
case t_base_type::TYPE_STRING:
return "string";
case t_base_type::TYPE_BOOL:
return "bool";
case t_base_type::TYPE_BYTE:
return "byte";
case t_base_type::TYPE_I16:
return "i16";
case t_base_type::TYPE_I32:
return "i32";
case t_base_type::TYPE_I64:
return "i64";
case t_base_type::TYPE_DOUBLE:
return "double";
}
} else if (type->is_enum()) {
return "i32";
} else if (type->is_struct() || type->is_xception()) {
if (expand_structs) {
std::stringstream buf;
buf << "{struct, [";
string field_indent(buf.str().size(), ' ');
t_struct::members_type const& fields = static_cast<t_struct*>(type)->get_members();
t_struct::members_type::const_iterator i, end = fields.end();
for( i = fields.begin(); i != end; )
{
t_struct::members_type::value_type member = *i;
int32_t key = member->get_key();
string type = render_type_term(member->get_type(), false, false); // recursive call
if ( !extended_info ) {
// Convert to format: {struct, [{Fid, Type}|...]}
buf << "{" << key << ", " << type << "}";
} else {
// Convert to format: {struct, [{Fid, Req, Type, Name, Def}|...]}
string name = uncapitalize(member->get_name());
string value = render_member_value(member);
string requiredness = render_member_requiredness(member);
buf << "{" << key << ", " << requiredness << ", " << type << ", '" << name << "'"<< ", " << value << "}";
}
if ( ++i != end ) {
buf << "," << endl << field_indent;
}
}
buf << "]}" << endl;
return buf.str();
} else {
return "{struct, {'" + type_module(type) + "', '" + type_name(type) + "'}}";
}
} else if (type->is_map()) {
// {map, KeyType, ValType}
t_type *key_type = ((t_map*)type)->get_key_type();
t_type *val_type = ((t_map*)type)->get_val_type();
return "{map, " + render_type_term(key_type, false) + ", " +
render_type_term(val_type, false) + "}";
} else if (type->is_set()) {
t_type *elem_type = ((t_set*)type)->get_elem_type();
return "{set, " + render_type_term(elem_type, false) + "}";
} else if (type->is_list()) {
t_type *elem_type = ((t_list*)type)->get_elem_type();
return "{list, " + render_type_term(elem_type, false) + "}";
}
throw "INVALID TYPE IN type_to_enum: " + type->get_name();
}
std::string t_erl_generator::type_module(t_type* ttype) {
return uncapitalize(ttype->get_program()->get_name()) + "_types";
}
THRIFT_REGISTER_GENERATOR(erl, "Erlang", "")
| 1 | 10,413 | Hi @cjimison good point, but doesn't this break compatibility with Erlang/OTP prior to 17.0? I think this could be done via a conditional macro defined in the rebar.config: `{erl_opts, [ {platform_define, "^R", pre17} ]}` plus some additions with `-ifdef(pre17). ... -else. ... -endif.` | apache-thrift | c
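A rough sketch of the conditional the reviewer describes, assuming the `pre17` macro is set by the quoted `platform_define` entry in rebar.config; the type alias name `map_t()` is made up for illustration and would have to be emitted by the generator rather than written by hand:

```erlang
%% Assumes rebar.config contains: {erl_opts, [{platform_define, "^R", pre17}]}.
-ifdef(pre17).
%% Pre-17.0 releases still accept the builtin dict() type.
-type map_t() :: dict().
-else.
%% 17.0+ deprecates dict() in favour of the remote type dict:dict().
-type map_t() :: dict:dict().
-endif.
```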
@@ -210,7 +210,7 @@ func (pl *List) removeFromUnavailablePeers(p peer.Peer) {
// Choose selects the next available peer in the round robin
func (pl *List) Choose(ctx context.Context, req *transport.Request) (peer.Peer, func(error), error) {
- if !pl.IsRunning() {
+ if err := pl.once.WaitForStart(ctx); err != nil {
return nil, nil, peer.ErrPeerListNotStarted("RoundRobinList")
}
| 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package roundrobin
import (
"context"
"fmt"
"sync"
"go.uber.org/yarpc/api/peer"
"go.uber.org/yarpc/api/transport"
yerrors "go.uber.org/yarpc/internal/errors"
"go.uber.org/yarpc/internal/introspection"
ysync "go.uber.org/yarpc/internal/sync"
)
const defaultCapacity = 10
// New creates a new round robin PeerList
func New(transport peer.Transport) *List {
rr := &List{
unavailablePeers: make(map[string]peer.Peer, defaultCapacity),
availablePeerRing: NewPeerRing(defaultCapacity),
transport: transport,
peerAvailableEvent: make(chan struct{}, 1),
}
return rr
}
// List is a PeerList which rotates which peers are to be selected in a circle
type List struct {
lock sync.Mutex
unavailablePeers map[string]peer.Peer
availablePeerRing *PeerRing
peerAvailableEvent chan struct{}
transport peer.Transport
once ysync.LifecycleOnce
}
// Update applies the additions and removals of peer Identifiers to the list
// it returns a multi-error result of every failure that happened without
// circuit breaking due to failures
func (pl *List) Update(updates peer.ListUpdates) error {
additions := updates.Additions
removals := updates.Removals
if len(additions) == 0 && len(removals) == 0 {
return nil
}
pl.lock.Lock()
defer pl.lock.Unlock()
var errs []error
for _, peerID := range removals {
if err := pl.removePeerIdentifier(peerID); err != nil {
errs = append(errs, err)
}
}
for _, peerID := range additions {
if err := pl.addPeerIdentifier(peerID); err != nil {
errs = append(errs, err)
}
}
return yerrors.MultiError(errs)
}
// Must be run inside a mutex.Lock()
func (pl *List) addPeerIdentifier(pid peer.Identifier) error {
p, err := pl.transport.RetainPeer(pid, pl)
if err != nil {
return err
}
return pl.addPeer(p)
}
// Must be run in a mutex.Lock()
func (pl *List) addPeer(p peer.Peer) error {
if p.Status().ConnectionStatus != peer.Available {
return pl.addToUnavailablePeers(p)
}
return pl.addToAvailablePeers(p)
}
// Must be run in a mutex.Lock()
func (pl *List) addToUnavailablePeers(p peer.Peer) error {
pl.unavailablePeers[p.Identifier()] = p
return nil
}
// Must be run in a mutex.Lock()
func (pl *List) addToAvailablePeers(p peer.Peer) error {
if err := pl.availablePeerRing.Add(p); err != nil {
return err
}
pl.notifyPeerAvailable()
return nil
}
// Start notifies the List that requests will start coming
func (pl *List) Start() error {
return pl.once.Start(nil)
}
// Stop notifies the List that requests will stop coming
func (pl *List) Stop() error {
return pl.once.Stop(pl.clearPeers)
}
// clearPeers will release all the peers from the list
func (pl *List) clearPeers() error {
pl.lock.Lock()
defer pl.lock.Unlock()
var errs []error
availablePeers := pl.availablePeerRing.RemoveAll()
errs = append(errs, pl.releaseAll(availablePeers)...)
	unavailablePeers := pl.removeAllUnavailable()
	errs = append(errs, pl.releaseAll(unavailablePeers)...)
return yerrors.MultiError(errs)
}
// removeAllUnavailable will clear the unavailablePeers list and
// return all the Peers in the list in a slice
// Must be run in a mutex.Lock()
func (pl *List) removeAllUnavailable() []peer.Peer {
peers := make([]peer.Peer, 0, len(pl.unavailablePeers))
for id, p := range pl.unavailablePeers {
peers = append(peers, p)
delete(pl.unavailablePeers, id)
}
return peers
}
// releaseAll will iterate through a list of peers and call release
// on the transport
func (pl *List) releaseAll(peers []peer.Peer) []error {
var errs []error
for _, p := range peers {
if err := pl.transport.ReleasePeer(p, pl); err != nil {
errs = append(errs, err)
}
}
return errs
}
// removePeerIdentifier will go remove references to the peer identifier and release
// it from the transport
// Must be run in a mutex.Lock()
func (pl *List) removePeerIdentifier(pid peer.Identifier) error {
if err := pl.removePeerIdentifierReferences(pid); err != nil {
// The peer has already been removed
return err
}
return pl.transport.ReleasePeer(pid, pl)
}
// removePeerIdentifierReferences will search through the Available and Unavailable Peers
// for the PeerID and remove it
// Must be run in a mutex.Lock()
func (pl *List) removePeerIdentifierReferences(pid peer.Identifier) error {
if p := pl.availablePeerRing.GetPeer(pid); p != nil {
return pl.availablePeerRing.Remove(p)
}
if p, ok := pl.unavailablePeers[pid.Identifier()]; ok && p != nil {
pl.removeFromUnavailablePeers(p)
return nil
}
return peer.ErrPeerRemoveNotInList(pid.Identifier())
}
// removeFromUnavailablePeers remove a peer from the Unavailable Peers list
// the Peer should already be validated as non-nil and in the Unavailable list
// Must be run in a mutex.Lock()
func (pl *List) removeFromUnavailablePeers(p peer.Peer) {
delete(pl.unavailablePeers, p.Identifier())
}
// Choose selects the next available peer in the round robin
func (pl *List) Choose(ctx context.Context, req *transport.Request) (peer.Peer, func(error), error) {
if !pl.IsRunning() {
return nil, nil, peer.ErrPeerListNotStarted("RoundRobinList")
}
for {
if nextPeer := pl.nextPeer(); nextPeer != nil {
pl.notifyPeerAvailable()
nextPeer.StartRequest()
return nextPeer, pl.getOnFinishFunc(nextPeer), nil
}
if err := pl.waitForPeerAddedEvent(ctx); err != nil {
return nil, nil, err
}
}
}
// IsRunning returns whether the peer list is running.
func (pl *List) IsRunning() bool {
return pl.once.IsRunning()
}
// nextPeer grabs the next available peer from the PeerRing and returns it,
// if there are no available peers it returns nil
func (pl *List) nextPeer() peer.Peer {
pl.lock.Lock()
p := pl.availablePeerRing.Next()
pl.lock.Unlock()
return p
}
// notifyPeerAvailable writes to a channel indicating that a Peer is currently
// available for requests
func (pl *List) notifyPeerAvailable() {
select {
case pl.peerAvailableEvent <- struct{}{}:
default:
}
}
// getOnFinishFunc creates a closure that will be run at the end of the request
func (pl *List) getOnFinishFunc(p peer.Peer) func(error) {
return func(_ error) {
p.EndRequest()
}
}
// waitForPeerAddedEvent waits until a peer is added to the peer list or the
// given context finishes.
// Must NOT be run in a mutex.Lock()
func (pl *List) waitForPeerAddedEvent(ctx context.Context) error {
if _, ok := ctx.Deadline(); !ok {
return peer.ErrChooseContextHasNoDeadline("RoundRobinList")
}
select {
case <-pl.peerAvailableEvent:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// NotifyStatusChanged when the peer's status changes
func (pl *List) NotifyStatusChanged(pid peer.Identifier) {
pl.lock.Lock()
defer pl.lock.Unlock()
if p := pl.availablePeerRing.GetPeer(pid); p != nil {
// TODO: log error
_ = pl.handleAvailablePeerStatusChange(p)
return
}
if p, ok := pl.unavailablePeers[pid.Identifier()]; ok && p != nil {
// TODO: log error
_ = pl.handleUnavailablePeerStatusChange(p)
}
// No action required
}
// handleAvailablePeerStatusChange checks the connection status of a connected peer to potentially
// move that Peer from the PeerRing to the unavailable peer map
// Must be run in a mutex.Lock()
func (pl *List) handleAvailablePeerStatusChange(p peer.Peer) error {
if p.Status().ConnectionStatus == peer.Available {
// Peer is in the proper pool, ignore
return nil
}
if err := pl.availablePeerRing.Remove(p); err != nil {
// Peer was not in list
return err
}
return pl.addToUnavailablePeers(p)
}
// handleUnavailablePeerStatusChange checks the connection status of an unavailable peer to potentially
// move that Peer from the unavailablePeerMap into the available Peer Ring
// Must be run in a mutex.Lock()
func (pl *List) handleUnavailablePeerStatusChange(p peer.Peer) error {
if p.Status().ConnectionStatus != peer.Available {
// Peer is in the proper pool, ignore
return nil
}
pl.removeFromUnavailablePeers(p)
return pl.addToAvailablePeers(p)
}
// Introspect returns a ChooserStatus with a summary of the Peers.
func (pl *List) Introspect() introspection.ChooserStatus {
state := "Stopped"
if pl.IsRunning() {
state = "Running"
}
pl.lock.Lock()
availables := pl.availablePeerRing.All()
unavailables := make([]peer.Peer, 0, len(pl.unavailablePeers))
for _, peer := range pl.unavailablePeers {
unavailables = append(unavailables, peer)
}
pl.lock.Unlock()
peersStatus := make([]introspection.PeerStatus, 0,
len(availables)+len(unavailables))
buildPeerStatus := func(peer peer.Peer) introspection.PeerStatus {
ps := peer.Status()
return introspection.PeerStatus{
Identifier: peer.Identifier(),
State: fmt.Sprintf("%s, %d pending request(s)",
ps.ConnectionStatus.String(),
ps.PendingRequestCount),
}
}
for _, peer := range availables {
peersStatus = append(peersStatus, buildPeerStatus(peer))
}
for _, peer := range unavailables {
peersStatus = append(peersStatus, buildPeerStatus(peer))
}
return introspection.ChooserStatus{
Name: "Single",
State: fmt.Sprintf("%s (%d/%d available)", state, len(availables),
len(availables)+len(unavailables)),
Peers: peersStatus,
}
}
| 1 | 12,368 | should we change/wrap the error? | yarpc-yarpc-go | go |
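One possible answer to the wrap-the-error question, sketched as a fragment of the patched `Choose` body; the wrapping format is an assumption, not something the record specifies:

```go
if err := pl.once.WaitForStart(ctx); err != nil {
	// Keep the "not started" semantics but preserve the underlying cause
	// (e.g. a context deadline) instead of discarding it.
	return nil, nil, fmt.Errorf("%v: %v",
		peer.ErrPeerListNotStarted("RoundRobinList"), err)
}
```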
@@ -109,12 +109,12 @@ final class DeFramer extends ByteToMessageDecoder {
try {
peerInfo = HelloMessage.readFrom(message).getPeerInfo();
} catch (final RLPException e) {
- LOG.debug("Received invalid HELLO message", e);
+ LOG.warn("Received invalid HELLO message, set log level to TRACE for message body", e);
connectFuture.completeExceptionally(e);
ctx.close();
return;
}
- LOG.debug("Received HELLO message: {}", peerInfo);
+ LOG.trace("Received HELLO message: {}", peerInfo);
if (peerInfo.getVersion() >= 5) {
LOG.trace("Enable compression for p2pVersion: {}", peerInfo.getVersion());
framer.enableCompression(); | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.p2p.rlpx.connections.netty;
import org.hyperledger.besu.ethereum.p2p.network.exceptions.BreachOfProtocolException;
import org.hyperledger.besu.ethereum.p2p.network.exceptions.IncompatiblePeerException;
import org.hyperledger.besu.ethereum.p2p.network.exceptions.PeerDisconnectedException;
import org.hyperledger.besu.ethereum.p2p.network.exceptions.UnexpectedPeerConnectionException;
import org.hyperledger.besu.ethereum.p2p.peers.DefaultPeer;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURLImpl;
import org.hyperledger.besu.ethereum.p2p.peers.LocalNode;
import org.hyperledger.besu.ethereum.p2p.peers.Peer;
import org.hyperledger.besu.ethereum.p2p.rlpx.connections.PeerConnection;
import org.hyperledger.besu.ethereum.p2p.rlpx.connections.PeerConnectionEventDispatcher;
import org.hyperledger.besu.ethereum.p2p.rlpx.framing.Framer;
import org.hyperledger.besu.ethereum.p2p.rlpx.framing.FramingException;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.CapabilityMultiplexer;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.MessageData;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.PeerInfo;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.SubProtocol;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.messages.DisconnectMessage;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.messages.HelloMessage;
import org.hyperledger.besu.ethereum.p2p.rlpx.wire.messages.WireMessageCodes;
import org.hyperledger.besu.ethereum.rlp.RLPException;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
import org.hyperledger.besu.plugin.services.metrics.LabelledMetric;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;
import io.netty.handler.codec.DecoderException;
import io.netty.handler.timeout.IdleStateHandler;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
final class DeFramer extends ByteToMessageDecoder {
private static final Logger LOG = LogManager.getLogger();
private final CompletableFuture<PeerConnection> connectFuture;
private final PeerConnectionEventDispatcher connectionEventDispatcher;
private final Framer framer;
private final LocalNode localNode;
// The peer we are expecting to connect to, if such a peer is known
private final Optional<Peer> expectedPeer;
private final List<SubProtocol> subProtocols;
private boolean hellosExchanged;
private final LabelledMetric<Counter> outboundMessagesCounter;
DeFramer(
final Framer framer,
final List<SubProtocol> subProtocols,
final LocalNode localNode,
final Optional<Peer> expectedPeer,
final PeerConnectionEventDispatcher connectionEventDispatcher,
final CompletableFuture<PeerConnection> connectFuture,
final MetricsSystem metricsSystem) {
this.framer = framer;
this.subProtocols = subProtocols;
this.localNode = localNode;
this.expectedPeer = expectedPeer;
this.connectFuture = connectFuture;
this.connectionEventDispatcher = connectionEventDispatcher;
this.outboundMessagesCounter =
metricsSystem.createLabelledCounter(
BesuMetricCategory.NETWORK,
"p2p_messages_outbound",
"Count of each P2P message sent outbound.",
"protocol",
"name",
"code");
}
@Override
protected void decode(final ChannelHandlerContext ctx, final ByteBuf in, final List<Object> out) {
MessageData message;
while ((message = framer.deframe(in)) != null) {
if (hellosExchanged) {
out.add(message);
} else if (message.getCode() == WireMessageCodes.HELLO) {
hellosExchanged = true;
// Decode first hello and use the payload to modify pipeline
final PeerInfo peerInfo;
try {
peerInfo = HelloMessage.readFrom(message).getPeerInfo();
} catch (final RLPException e) {
LOG.debug("Received invalid HELLO message", e);
connectFuture.completeExceptionally(e);
ctx.close();
return;
}
LOG.debug("Received HELLO message: {}", peerInfo);
if (peerInfo.getVersion() >= 5) {
LOG.trace("Enable compression for p2pVersion: {}", peerInfo.getVersion());
framer.enableCompression();
}
final CapabilityMultiplexer capabilityMultiplexer =
new CapabilityMultiplexer(
subProtocols,
localNode.getPeerInfo().getCapabilities(),
peerInfo.getCapabilities());
final Peer peer = expectedPeer.orElse(createPeer(peerInfo, ctx));
final PeerConnection connection =
new NettyPeerConnection(
ctx,
peer,
peerInfo,
capabilityMultiplexer,
connectionEventDispatcher,
outboundMessagesCounter);
// Check peer is who we expected
if (expectedPeer.isPresent()
&& !Objects.equals(expectedPeer.get().getId(), peerInfo.getNodeId())) {
String unexpectedMsg =
String.format(
"Expected id %s, but got %s", expectedPeer.get().getId(), peerInfo.getNodeId());
connectFuture.completeExceptionally(new UnexpectedPeerConnectionException(unexpectedMsg));
LOG.debug("{}. Disconnecting.", unexpectedMsg);
connection.disconnect(DisconnectMessage.DisconnectReason.UNEXPECTED_ID);
}
// Check that we have shared caps
if (capabilityMultiplexer.getAgreedCapabilities().size() == 0) {
LOG.debug("Disconnecting because no capabilities are shared: {}", peerInfo);
connectFuture.completeExceptionally(
new IncompatiblePeerException("No shared capabilities"));
connection.disconnect(DisconnectMessage.DisconnectReason.USELESS_PEER);
}
// Setup next stage
final AtomicBoolean waitingForPong = new AtomicBoolean(false);
ctx.channel()
.pipeline()
.addLast(
new IdleStateHandler(15, 0, 0),
new WireKeepAlive(connection, waitingForPong),
new ApiHandler(
capabilityMultiplexer, connection, connectionEventDispatcher, waitingForPong),
new MessageFramer(capabilityMultiplexer, framer));
connectFuture.complete(connection);
} else if (message.getCode() == WireMessageCodes.DISCONNECT) {
DisconnectMessage disconnectMessage = DisconnectMessage.readFrom(message);
LOG.debug(
"Peer disconnected before sending HELLO. Reason: " + disconnectMessage.getReason());
ctx.close();
connectFuture.completeExceptionally(
new PeerDisconnectedException(disconnectMessage.getReason()));
} else {
// Unexpected message - disconnect
LOG.debug(
"Message received before HELLO's exchanged, disconnecting. Code: {}, Data: {}",
message.getCode(),
message.getData().toString());
ctx.writeAndFlush(
new OutboundMessage(
null,
DisconnectMessage.create(
DisconnectMessage.DisconnectReason.BREACH_OF_PROTOCOL)))
.addListener((f) -> ctx.close());
connectFuture.completeExceptionally(
new BreachOfProtocolException("Message received before HELLO's exchanged"));
}
}
}
private Peer createPeer(final PeerInfo peerInfo, final ChannelHandlerContext ctx) {
final InetSocketAddress remoteAddress = ((InetSocketAddress) ctx.channel().remoteAddress());
int port = peerInfo.getPort();
return DefaultPeer.fromEnodeURL(
EnodeURLImpl.builder()
.nodeId(peerInfo.getNodeId())
.ipAddress(remoteAddress.getAddress())
.listeningPort(port)
// Discovery information is unknown, so disable it
.disableDiscovery()
.build());
}
@Override
public void exceptionCaught(final ChannelHandlerContext ctx, final Throwable throwable)
throws Exception {
final Throwable cause =
throwable instanceof DecoderException && throwable.getCause() != null
? throwable.getCause()
: throwable;
if (cause instanceof FramingException
|| cause instanceof RLPException
|| cause instanceof IllegalArgumentException) {
LOG.debug("Invalid incoming message", throwable);
if (connectFuture.isDone() && !connectFuture.isCompletedExceptionally()) {
connectFuture.get().disconnect(DisconnectMessage.DisconnectReason.BREACH_OF_PROTOCOL);
return;
}
} else if (cause instanceof IOException) {
// IO failures are routine when communicating with random peers across the network.
LOG.debug("IO error while processing incoming message", throwable);
} else {
LOG.error("Exception while processing incoming message", throwable);
}
if (connectFuture.isDone() && !connectFuture.isCompletedExceptionally()) {
connectFuture
.get()
.terminateConnection(DisconnectMessage.DisconnectReason.TCP_SUBSYSTEM_ERROR, true);
} else {
connectFuture.completeExceptionally(throwable);
ctx.close();
}
}
}
| 1 | 25,391 | Are we sure that passing this log in warning does not risk spamming the logs of a node running on the mainnet or on ropsten etc ? Do we often receive invalid messages of this type ? | hyperledger-besu | java |
@@ -397,10 +397,7 @@ class Parser
     # check for option to find list item text only
     # if skipped a line, assume a list continuation was
     # used and block content is acceptable
-    if (text_only = options[:text]) && skipped > 0
-      options.delete(:text)
-      text_only = false
-    end
+    text_only = is_text_only(options, skipped)
     parse_metadata = options.fetch(:parse_metadata, true)
     #parse_sections = options.fetch(:parse_sections, false) | 1 | module Asciidoctor
# Public: Methods to parse lines of AsciiDoc into an object hierarchy
# representing the structure of the document. All methods are class methods and
# should be invoked from the Parser class. The main entry point is ::next_block.
# No Parser instances shall be discovered running around. (Any attempt to
# instantiate a Parser will be futile).
#
# The object hierarchy created by the Parser consists of zero or more Section
# and Block objects. Section objects may be nested and a Section object
# contains zero or more Block objects. Block objects may be nested, but may
# only contain other Block objects. Block objects which represent lists may
# contain zero or more ListItem objects.
#
# Examples
#
# # Create a Reader for the AsciiDoc lines and retrieve the next block from it.
# # Parser.next_block requires a parent, so we begin by instantiating an empty Document.
#
# doc = Document.new
# reader = Reader.new lines
# block = Parser.next_block(reader, doc)
# block.class
# # => Asciidoctor::Block
class Parser
BlockMatchData = Struct.new :context, :masq, :tip, :terminator
# Public: Make sure the Parser object doesn't get initialized.
#
# Raises RuntimeError if this constructor is invoked.
def initialize
    raise 'Au contraire, mon frere. No parser instances will be running around.'
end
# Public: Parses AsciiDoc source read from the Reader into the Document
#
# This method is the main entry-point into the Parser when parsing a full document.
# It first looks for and, if found, processes the document title. It then
# proceeds to iterate through the lines in the Reader, parsing the document
# into nested Sections and Blocks.
#
# reader - the Reader holding the source lines of the document
# document - the empty Document into which the lines will be parsed
# options - a Hash of options to control processing
#
# returns the Document object
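  #
  # Example (illustrative sketch, not part of the original source; it reuses the
  # Reader and Document classes referenced above and shows the expected result
  # under default options):
  #
  #   doc = Document.new
  #   reader = Reader.new ['= Greetings', '', 'Hello there.']
  #   Parser.parse(reader, doc) == doc
  #   # => true
  #   doc.doctitle
  #   # => 'Greetings'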
def self.parse(reader, document, options = {})
block_attributes = parse_document_header(reader, document)
unless options[:header_only]
while reader.has_more_lines?
new_section, block_attributes = next_section(reader, document, block_attributes)
document << new_section if new_section
end
end
document
end
# Public: Parses the document header of the AsciiDoc source read from the Reader
#
# Reads the AsciiDoc source from the Reader until the end of the document
# header is reached. The Document object is populated with information from
# the header (document title, document attributes, etc). The document
# attributes are then saved to establish a save point to which to rollback
# after parsing is complete.
#
# This method assumes that there are no blank lines at the start of the document,
# which are automatically removed by the reader.
#
# returns the Hash of orphan block attributes captured above the header
def self.parse_document_header(reader, document)
# capture any lines of block-level metadata and plow away any comment lines
# that precede first block
block_attributes = parse_block_metadata_lines(reader, document)
# special case, block title is not allowed above document title,
# carry attributes over to the document body
if block_attributes.has_key?('title')
return document.finalize_header block_attributes, false
end
# yep, document title logic in AsciiDoc is just insanity
# definitely an area for spec refinement
assigned_doctitle = nil
unless (val = document.attributes['doctitle']).nil_or_empty?
document.title = val
assigned_doctitle = val
end
section_title = nil
# check if the first line is the document title
# if so, add a header to the document and parse the header metadata
if is_next_line_document_title?(reader, block_attributes)
source_location = reader.cursor if document.sourcemap
document.id, _, doctitle, _, single_line = parse_section_title(reader, document)
unless assigned_doctitle
document.title = doctitle
assigned_doctitle = doctitle
end
      # default to compat-mode if document uses the legacy two-line (setext-style) doctitle
document.set_attribute 'compat-mode', '' unless single_line
document.header.source_location = source_location if source_location
document.attributes['doctitle'] = section_title = doctitle
# QUESTION: should the id assignment on Document be encapsulated in the Document class?
unless document.id
document.id = block_attributes.delete('id')
end
parse_header_metadata(reader, document)
end
if !(val = document.attributes['doctitle']).nil_or_empty? &&
val != section_title
document.title = val
assigned_doctitle = val
end
# restore doctitle attribute to original assignment
if assigned_doctitle
document.attributes['doctitle'] = assigned_doctitle
end
# parse title and consume name section of manpage document
parse_manpage_header(reader, document) if document.doctype == 'manpage'
# NOTE block_attributes are the block-level attributes (not document attributes) that
# precede the first line of content (document title, first section or first block)
document.finalize_header block_attributes
end
# Public: Parses the manpage header of the AsciiDoc source read from the Reader
#
# returns Nothing
def self.parse_manpage_header(reader, document)
if (m = ManpageTitleVolnumRx.match(document.attributes['doctitle']))
document.attributes['mantitle'] = document.sub_attributes(m[1].rstrip.downcase)
document.attributes['manvolnum'] = m[2].strip
else
warn %(asciidoctor: ERROR: #{reader.prev_line_info}: malformed manpage title)
end
reader.skip_blank_lines
if is_next_line_section?(reader, {})
name_section = initialize_section(reader, document, {})
if name_section.level == 1
name_section_buffer = reader.read_lines_until(:break_on_blank_lines => true).join(' ').tr_s(' ', ' ')
if (m = ManpageNamePurposeRx.match(name_section_buffer))
document.attributes['manname'] = document.sub_attributes m[1]
document.attributes['manpurpose'] = m[2]
# TODO parse multiple man names
if document.backend == 'manpage'
document.attributes['docname'] = document.attributes['manname']
document.attributes['outfilesuffix'] = %(.#{document.attributes['manvolnum']})
end
else
warn %(asciidoctor: ERROR: #{reader.prev_line_info}: malformed name section body)
end
else
warn %(asciidoctor: ERROR: #{reader.prev_line_info}: name section title must be at level 1)
end
else
warn %(asciidoctor: ERROR: #{reader.prev_line_info}: name section expected)
end
end
# Public: Return the next section from the Reader.
#
  # This method processes block metadata, content and subsections for this
# section and returns the Section object and any orphaned attributes.
#
# If the parent is a Document and has a header (document title), then
# this method will put any non-section blocks at the start of document
# into a preamble Block. If there are no such blocks, the preamble is
# dropped.
#
# Since we are reading line-by-line, there's a chance that metadata
# that should be associated with the following block gets consumed.
# To deal with this case, the method returns a running Hash of
# "orphaned" attributes that get passed to the next Section or Block.
#
# reader - the source Reader
# parent - the parent Section or Document of this new section
# attributes - a Hash of metadata that was left orphaned from the
# previous Section.
#
# Examples
#
# source
# # => "= Greetings\n\nThis is my doc.\n\n== Salutations\n\nIt is awesome."
#
# reader = Reader.new source, nil, :normalize => true
# # create empty document to parent the section
# # and hold attributes extracted from header
# doc = Document.new
#
# Parser.next_section(reader, doc).first.title
# # => "Greetings"
#
# Parser.next_section(reader, doc).first.title
# # => "Salutations"
#
# returns a two-element Array containing the Section and Hash of orphaned attributes
def self.next_section(reader, parent, attributes = {})
preamble = false
part = false
intro = false
# FIXME if attributes[1] is a verbatim style, then don't check for section
# check if we are at the start of processing the document
# NOTE we could drop a hint in the attributes to indicate
# that we are at a section title (so we don't have to check)
if parent.context == :document && parent.blocks.empty? &&
((has_header = parent.has_header?) || attributes.delete('invalid-header') || !is_next_line_section?(reader, attributes))
doctype = parent.doctype
if has_header || (doctype == 'book' && attributes[1] != 'abstract')
preamble = intro = Block.new(parent, :preamble, :content_model => :compound)
parent << preamble
end
section = parent
current_level = 0
if parent.attributes.has_key? 'fragment'
expected_next_levels = nil
# small tweak to allow subsequent level-0 sections for book doctype
elsif doctype == 'book'
expected_next_levels = [0, 1]
else
expected_next_levels = [1]
end
else
doctype = parent.document.doctype
section = initialize_section(reader, parent, attributes)
# clear attributes, except for title which carries over
# section title to next block of content
attributes = (title = attributes['title']) ? { 'title' => title } : {}
current_level = section.level
if current_level == 0 && doctype == 'book'
part = !section.special
# subsections in preface & appendix in multipart books start at level 2
if section.special && (['preface', 'appendix'].include? section.sectname)
expected_next_levels = [current_level + 2]
else
expected_next_levels = [current_level + 1]
end
else
expected_next_levels = [current_level + 1]
end
end
reader.skip_blank_lines
# Parse lines belonging to this section and its subsections until we
# reach the end of this section level
#
# 1. first look for metadata thingies (anchor, attribute list, block title line, etc)
# 2. then look for a section, recurse if found
# 3. then process blocks
#
# We have to parse all the metadata lines before continuing with the loop,
# otherwise subsequent metadata lines get interpreted as block content
while reader.has_more_lines?
parse_block_metadata_lines(reader, section, attributes)
if (next_level = is_next_line_section? reader, attributes)
next_level += section.document.attr('leveloffset', 0).to_i
if next_level > current_level || (section.context == :document && next_level == 0)
if next_level == 0 && doctype != 'book'
warn %(asciidoctor: ERROR: #{reader.line_info}: only book doctypes can contain level 0 sections)
elsif expected_next_levels && !expected_next_levels.include?(next_level)
warn %(asciidoctor: WARNING: #{reader.line_info}: section title out of sequence: ) +
%(expected #{expected_next_levels.size > 1 ? 'levels' : 'level'} #{expected_next_levels * ' or '}, ) +
%(got level #{next_level})
end
# the attributes returned are those that are orphaned
new_section, attributes = next_section(reader, section, attributes)
section << new_section
else
if next_level == 0 && doctype != 'book'
warn %(asciidoctor: ERROR: #{reader.line_info}: only book doctypes can contain level 0 sections)
end
# close this section (and break out of the nesting) to begin a new one
break
end
else
# just take one block or else we run the risk of overrunning section boundaries
block_line_info = reader.line_info
if (new_block = next_block reader, (intro || section), attributes, :parse_metadata => false)
# REVIEW this may be doing too much
if part
if !section.blocks?
# if this block wasn't marked as [partintro], emulate behavior as if it had
if new_block.style != 'partintro'
# emulate [partintro] paragraph
if new_block.context == :paragraph
new_block.context = :open
new_block.style = 'partintro'
# emulate [partintro] open block
else
intro = Block.new section, :open, :content_model => :compound
intro.style = 'partintro'
new_block.parent = intro
section << intro
end
end
elsif section.blocks.size == 1
first_block = section.blocks[0]
# open the [partintro] open block for appending
if !intro && first_block.content_model == :compound
#new_block.parent = (intro = first_block)
warn %(asciidoctor: ERROR: #{block_line_info}: illegal block content outside of partintro block)
# rebuild [partintro] paragraph as an open block
elsif first_block.content_model != :compound
intro = Block.new section, :open, :content_model => :compound
intro.style = 'partintro'
section.blocks.shift
if first_block.style == 'partintro'
first_block.context = :paragraph
first_block.style = nil
end
first_block.parent = intro
intro << first_block
new_block.parent = intro
section << intro
end
end
end
(intro || section) << new_block
attributes = {}
#else
# # don't clear attributes if we don't find a block because they may
# # be trailing attributes that didn't get associated with a block
end
end
reader.skip_blank_lines
end
if part
unless section.blocks? && section.blocks[-1].context == :section
warn %(asciidoctor: ERROR: #{reader.line_info}: invalid part, must have at least one section (e.g., chapter, appendix, etc.))
end
# NOTE we could try to avoid creating a preamble in the first place, though
# that would require reworking assumptions in next_section since the preamble
# is treated like an untitled section
elsif preamble # implies parent == document
document = parent
if preamble.blocks?
# unwrap standalone preamble (i.e., no sections), if permissible
if Compliance.unwrap_standalone_preamble && document.blocks.size == 1 && doctype != 'book'
document.blocks.shift
while (child_block = preamble.blocks.shift)
child_block.parent = document
document << child_block
end
end
# drop the preamble if it has no content
else
document.blocks.shift
end
end
# The attributes returned here are orphaned attributes that fall at the end
    # of a section and need to be transferred to the next section
# see "trailing block attributes transfer to the following section" in
# test/attributes_test.rb for an example
[section != parent ? section : nil, attributes.dup]
end
# Public: Return the next Section or Block object from the Reader.
#
# Begins by skipping over blank lines to find the start of the next Section
# or Block. Processes each line of the reader in sequence until a Section or
# Block is found or the reader has no more lines.
#
# Uses regular expressions from the Asciidoctor module to match Section
# and Block delimiters. The ensuing lines are then processed according
# to the type of content.
#
# reader - The Reader from which to retrieve the next block
# parent - The Document, Section or Block to which the next block belongs
#
# Returns a Section or Block object holding the parsed content of the processed lines
#--
# QUESTION should next_block have an option for whether it should keep looking until
# a block is found? right now it bails when it encounters a line to be skipped
def self.next_block(reader, parent, attributes = {}, options = {})
# Skip ahead to the block content
skipped = reader.skip_blank_lines
# bail if we've reached the end of the parent block or document
return unless reader.has_more_lines?
# check for option to find list item text only
# if skipped a line, assume a list continuation was
# used and block content is acceptable
if (text_only = options[:text]) && skipped > 0
options.delete(:text)
text_only = false
end
parse_metadata = options.fetch(:parse_metadata, true)
#parse_sections = options.fetch(:parse_sections, false)
document = parent.document
if (extensions = document.extensions)
block_extensions = extensions.blocks?
block_macro_extensions = extensions.block_macros?
else
block_extensions = block_macro_extensions = false
end
#parent_context = parent.is_a?(Block) ? parent.context : nil
in_list = (parent.is_a? List)
block = nil
style = nil
explicit_style = nil
sourcemap = document.sourcemap
source_location = nil
while !block && reader.has_more_lines?
# if parsing metadata, read until there is no more to read
if parse_metadata && parse_block_metadata_line(reader, document, attributes, options)
reader.advance
next
#elsif parse_sections && !parent_context && is_next_line_section?(reader, attributes)
# block, attributes = next_section(reader, parent, attributes)
# break
end
# QUESTION should we introduce a parsing context object?
source_location = reader.cursor if sourcemap
this_line = reader.read_line
delimited_block = false
block_context = nil
cloaked_context = nil
terminator = nil
# QUESTION put this inside call to rekey attributes?
if attributes[1]
style, explicit_style = parse_style_attribute(attributes, reader)
end
if (delimited_blk_match = is_delimited_block? this_line, true)
delimited_block = true
block_context = cloaked_context = delimited_blk_match.context
terminator = delimited_blk_match.terminator
if !style
style = attributes['style'] = block_context.to_s
elsif style != block_context.to_s
if delimited_blk_match.masq.include? style
block_context = style.to_sym
elsif delimited_blk_match.masq.include?('admonition') && ADMONITION_STYLES.include?(style)
block_context = :admonition
elsif block_extensions && extensions.registered_for_block?(style, block_context)
block_context = style.to_sym
else
warn %(asciidoctor: WARNING: #{reader.prev_line_info}: invalid style for #{block_context} block: #{style})
style = block_context.to_s
end
end
end
unless delimited_block
# this loop only executes once; used for flow control
# break once a block is found or at end of loop
# returns nil if the line must be dropped
# Implementation note - while(true) is twice as fast as loop
while true
# process lines verbatim
if style && Compliance.strict_verbatim_paragraphs && VERBATIM_STYLES.include?(style)
block_context = style.to_sym
reader.unshift_line this_line
# advance to block parsing =>
break
end
# process lines normally
unless text_only
first_char = Compliance.markdown_syntax ? this_line.lstrip.chr : this_line.chr
# NOTE we're letting break lines (horizontal rule, page_break, etc) have attributes
if (LAYOUT_BREAK_LINES.has_key? first_char) && this_line.length >= 3 &&
(Compliance.markdown_syntax ? LayoutBreakLinePlusRx : LayoutBreakLineRx) =~ this_line
block = Block.new(parent, LAYOUT_BREAK_LINES[first_char], :content_model => :empty)
break
elsif this_line.end_with?(']') && (match = MediaBlockMacroRx.match(this_line))
blk_ctx = match[1].to_sym
block = Block.new(parent, blk_ctx, :content_model => :empty)
if blk_ctx == :image
posattrs = ['alt', 'width', 'height']
elsif blk_ctx == :video
posattrs = ['poster', 'width', 'height']
else
posattrs = []
end
unless !style || explicit_style
attributes['alt'] = style if blk_ctx == :image
attributes.delete('style')
style = nil
end
block.parse_attributes(match[3], posattrs,
:unescape_input => (blk_ctx == :image),
:sub_input => true,
:sub_result => false,
:into => attributes)
target = block.sub_attributes(match[2], :attribute_missing => 'drop-line')
if target.empty?
# retain as unparsed if attribute-missing is skip
if document.attributes.fetch('attribute-missing', Compliance.attribute_missing) == 'skip'
return Block.new(parent, :paragraph, :content_model => :simple, :source => [this_line])
# otherwise, drop the line
else
attributes.clear
return
end
end
attributes['target'] = target
# now done down below
#block.title = attributes.delete('title') if attributes.has_key?('title')
#if blk_ctx == :image
# if attributes.has_key? 'scaledwidth'
# # append % to scaledwidth if ends in number (no units present)
# if (48..57).include?((attributes['scaledwidth'][-1] || 0).ord)
# attributes['scaledwidth'] = %(#{attributes['scaledwidth']}%)
# end
# end
# document.register(:images, target)
# attributes['alt'] ||= ::File.basename(target, ::File.extname(target)).tr('_-', ' ')
# # QUESTION should video or audio have an auto-numbered caption?
# block.assign_caption attributes.delete('caption'), 'figure'
#end
break
# NOTE we're letting the toc macro have attributes
elsif first_char == 't' && (match = TocBlockMacroRx.match(this_line))
block = Block.new(parent, :toc, :content_model => :empty)
block.parse_attributes(match[1], [], :sub_result => false, :into => attributes)
break
elsif block_macro_extensions && (match = GenericBlockMacroRx.match(this_line)) &&
(extension = extensions.registered_for_block_macro?(match[1]))
target = match[2]
raw_attributes = match[3]
if extension.config[:content_model] == :attributes
unless raw_attributes.empty?
document.parse_attributes(raw_attributes, (extension.config[:pos_attrs] || []),
:sub_input => true, :sub_result => false, :into => attributes)
end
else
attributes['text'] = raw_attributes
end
if (default_attrs = extension.config[:default_attrs])
default_attrs.each {|k, v| attributes[k] ||= v }
end
if (block = extension.process_method[parent, target, attributes.dup])
attributes.replace block.attributes
else
attributes.clear
return
end
break
end
end
# haven't found anything yet, continue
if (match = CalloutListRx.match(this_line))
block = List.new(parent, :colist)
attributes['style'] = 'arabic'
reader.unshift_line this_line
expected_index = 1
begin
# might want to move this check to a validate method
if match[1].to_i != expected_index
# FIXME this lineno - 2 hack means we need a proper look-behind cursor
warn %(asciidoctor: WARNING: #{reader.path}: line #{reader.lineno - 2}: callout list item index: expected #{expected_index} got #{match[1]})
end
list_item = next_list_item(reader, block, match)
expected_index += 1
if list_item
block << list_item
coids = document.callouts.callout_ids(block.items.size)
if !coids.empty?
list_item.attributes['coids'] = coids
else
# FIXME this lineno - 2 hack means we need a proper look-behind cursor
warn %(asciidoctor: WARNING: #{reader.path}: line #{reader.lineno - 2}: no callouts refer to list item #{block.items.size})
end
end
end while reader.has_more_lines? && (match = CalloutListRx.match(reader.peek_line))
document.callouts.next_list
break
elsif UnorderedListRx =~ this_line
reader.unshift_line this_line
block = next_outline_list(reader, :ulist, parent)
break
elsif (match = OrderedListRx.match(this_line))
reader.unshift_line this_line
block = next_outline_list(reader, :olist, parent)
# QUESTION move this logic to next_outline_list?
if !attributes['style'] && !block.attributes['style']
marker = block.items[0].marker
if marker.start_with? '.'
# first one makes more sense, but second one is AsciiDoc-compliant
#attributes['style'] = (ORDERED_LIST_STYLES[block.level - 1] || ORDERED_LIST_STYLES[0]).to_s
attributes['style'] = (ORDERED_LIST_STYLES[marker.length - 1] || ORDERED_LIST_STYLES[0]).to_s
else
style = ORDERED_LIST_STYLES.detect{|s| OrderedListMarkerRxMap[s] =~ marker }
attributes['style'] = (style || ORDERED_LIST_STYLES[0]).to_s
end
end
break
elsif (match = DefinitionListRx.match(this_line))
reader.unshift_line this_line
block = next_labeled_list(reader, match, parent)
break
elsif (style == 'float' || style == 'discrete') &&
is_section_title?(this_line, (Compliance.underline_style_section_titles ? reader.peek_line(true) : nil))
reader.unshift_line this_line
float_id, float_reftext, float_title, float_level, _ = parse_section_title(reader, document)
attributes['reftext'] = float_reftext if float_reftext
float_id ||= attributes['id'] if attributes.has_key?('id')
block = Block.new(parent, :floating_title, :content_model => :empty)
if float_id.nil_or_empty?
# FIXME remove hack of creating throwaway Section to get at the generate_id method
tmp_sect = Section.new(parent)
tmp_sect.title = float_title
block.id = tmp_sect.generate_id
else
block.id = float_id
end
block.level = float_level
block.title = float_title
break
# FIXME create another set for "passthrough" styles
# FIXME make this more DRY!
elsif style && style != 'normal'
if PARAGRAPH_STYLES.include?(style)
block_context = style.to_sym
cloaked_context = :paragraph
reader.unshift_line this_line
# advance to block parsing =>
break
elsif ADMONITION_STYLES.include?(style)
block_context = :admonition
cloaked_context = :paragraph
reader.unshift_line this_line
# advance to block parsing =>
break
elsif block_extensions && extensions.registered_for_block?(style, :paragraph)
block_context = style.to_sym
cloaked_context = :paragraph
reader.unshift_line this_line
# advance to block parsing =>
break
else
warn %(asciidoctor: WARNING: #{reader.prev_line_info}: invalid style for paragraph: #{style})
style = nil
# continue to process paragraph
end
end
break_at_list = (skipped == 0 && in_list)
          # a literal paragraph is contiguous lines starting with at least one space
if style != 'normal' && LiteralParagraphRx =~ this_line
# So we need to actually include this one in the read_lines group
reader.unshift_line this_line
lines = reader.read_lines_until(
:break_on_blank_lines => true,
:break_on_list_continuation => true,
:preserve_last_line => true) {|line|
# a preceding blank line (skipped > 0) indicates we are in a list continuation
# and therefore we should not break at a list item
# (this won't stop breaking on item of same level since we've already parsed them out)
# QUESTION can we turn this block into a lambda or function call?
(break_at_list && AnyListRx =~ line) ||
(Compliance.block_terminates_paragraph && (is_delimited_block?(line) || BlockAttributeLineRx =~ line))
}
reset_block_indent! lines
block = Block.new(parent, :literal, :content_model => :verbatim, :source => lines, :attributes => attributes)
# a literal gets special meaning inside of a definition list
# TODO this feels hacky, better way to distinguish from explicit literal block?
block.set_option('listparagraph') if in_list
# a paragraph is contiguous nonblank/noncontinuation lines
else
reader.unshift_line this_line
lines = reader.read_lines_until(
:break_on_blank_lines => true,
:break_on_list_continuation => true,
:preserve_last_line => true,
:skip_line_comments => true) {|line|
# a preceding blank line (skipped > 0) indicates we are in a list continuation
# and therefore we should not break at a list item
# (this won't stop breaking on item of same level since we've already parsed them out)
# QUESTION can we turn this block into a lambda or function call?
(break_at_list && AnyListRx =~ line) ||
(Compliance.block_terminates_paragraph && (is_delimited_block?(line) || BlockAttributeLineRx =~ line))
}
# NOTE we need this logic because we've asked the reader to skip
# line comments, which may leave us w/ an empty buffer if those
# were the only lines found
if lines.empty?
# call advance since the reader preserved the last line
reader.advance
return
end
catalog_inline_anchors(lines.join(EOL), document)
first_line = lines[0]
if !text_only && (admonition_match = AdmonitionParagraphRx.match(first_line))
lines[0] = admonition_match.post_match.lstrip
attributes['style'] = admonition_match[1]
attributes['name'] = admonition_name = admonition_match[1].downcase
attributes['caption'] ||= document.attributes[%(#{admonition_name}-caption)]
block = Block.new(parent, :admonition, :content_model => :simple, :source => lines, :attributes => attributes)
elsif !text_only && Compliance.markdown_syntax && first_line.start_with?('> ')
lines.map! {|line|
if line == '>'
line[1..-1]
elsif line.start_with? '> '
line[2..-1]
else
line
end
}
if lines[-1].start_with? '-- '
attribution, citetitle = lines.pop[3..-1].split(', ', 2)
lines.pop while lines[-1].empty?
else
attribution, citetitle = nil
end
attributes['style'] = 'quote'
attributes['attribution'] = attribution if attribution
attributes['citetitle'] = citetitle if citetitle
# NOTE will only detect headings that are floating titles (not section titles)
# TODO could assume a floating title when inside a block context
# FIXME Reader needs to be created w/ line info
block = build_block(:quote, :compound, false, parent, Reader.new(lines), attributes)
elsif !text_only && lines.size > 1 && first_line.start_with?('"') &&
lines[-1].start_with?('-- ') && lines[-2].end_with?('"')
lines[0] = first_line[1..-1]
attribution, citetitle = lines.pop[3..-1].split(', ', 2)
lines.pop while lines[-1].empty?
# strip trailing quote
lines[-1] = lines[-1].chop
attributes['style'] = 'quote'
attributes['attribution'] = attribution if attribution
attributes['citetitle'] = citetitle if citetitle
block = Block.new(parent, :quote, :content_model => :simple, :source => lines, :attributes => attributes)
else
# if [normal] is used over an indented paragraph, unindent it
if style == 'normal' && ((first_char = lines[0].chr) == ' ' || first_char == TAB)
first_line = lines[0]
first_line_shifted = first_line.lstrip
indent = line_length(first_line) - line_length(first_line_shifted)
lines[0] = first_line_shifted
# QUESTION should we fix the rest of the lines, since in XML output it's insignificant?
lines.size.times do |i|
lines[i] = lines[i][indent..-1] if i > 0
end
end
block = Block.new(parent, :paragraph, :content_model => :simple, :source => lines, :attributes => attributes)
end
end
# forbid loop from executing more than once
break
end
end
# either delimited block or styled paragraph
if !block && block_context
# abstract and partintro should be handled by open block
# FIXME kind of hackish...need to sort out how to generalize this
block_context = :open if block_context == :abstract || block_context == :partintro
case block_context
when :admonition
attributes['name'] = admonition_name = style.downcase
attributes['caption'] ||= document.attributes[%(#{admonition_name}-caption)]
block = build_block(block_context, :compound, terminator, parent, reader, attributes)
when :comment
build_block(block_context, :skip, terminator, parent, reader, attributes)
return
when :example
block = build_block(block_context, :compound, terminator, parent, reader, attributes)
when :listing, :fenced_code, :source
if block_context == :fenced_code
style = attributes['style'] = 'source'
language, linenums = this_line[3..-1].split(',', 2)
if language && !(language = language.strip).empty?
attributes['language'] = language
attributes['linenums'] = '' if linenums && !linenums.strip.empty?
elsif (default_language = document.attributes['source-language'])
attributes['language'] = default_language
end
terminator = terminator[0..2]
elsif block_context == :source
AttributeList.rekey(attributes, [nil, 'language', 'linenums'])
unless attributes.has_key? 'language'
if (default_language = document.attributes['source-language'])
attributes['language'] = default_language
end
end
end
block = build_block(:listing, :verbatim, terminator, parent, reader, attributes)
when :literal
block = build_block(block_context, :verbatim, terminator, parent, reader, attributes)
when :pass
block = build_block(block_context, :raw, terminator, parent, reader, attributes)
when :stem, :latexmath, :asciimath
if block_context == :stem
attributes['style'] = if (explicit_stem_syntax = attributes[2])
explicit_stem_syntax.include?('tex') ? 'latexmath' : 'asciimath'
elsif (default_stem_syntax = document.attributes['stem']).nil_or_empty?
'asciimath'
else
default_stem_syntax
end
end
block = build_block(:stem, :raw, terminator, parent, reader, attributes)
when :open, :sidebar
block = build_block(block_context, :compound, terminator, parent, reader, attributes)
when :table
cursor = reader.cursor
block_reader = Reader.new reader.read_lines_until(:terminator => terminator, :skip_line_comments => true), cursor
case terminator.chr
when ','
attributes['format'] = 'csv'
when ':'
attributes['format'] = 'dsv'
end
block = next_table(block_reader, parent, attributes)
when :quote, :verse
AttributeList.rekey(attributes, [nil, 'attribution', 'citetitle'])
block = build_block(block_context, (block_context == :verse ? :verbatim : :compound), terminator, parent, reader, attributes)
else
if block_extensions && (extension = extensions.registered_for_block?(block_context, cloaked_context))
# TODO pass cloaked_context to extension somehow (perhaps a new instance for each cloaked_context?)
if (content_model = extension.config[:content_model]) != :skip
if !(pos_attrs = extension.config[:pos_attrs] || []).empty?
AttributeList.rekey(attributes, [nil].concat(pos_attrs))
end
if (default_attrs = extension.config[:default_attrs])
default_attrs.each {|k, v| attributes[k] ||= v }
end
end
block = build_block block_context, content_model, terminator, parent, reader, attributes, :extension => extension
unless block && content_model != :skip
attributes.clear
return
end
else
# this should only happen if there's a misconfiguration
raise %(Unsupported block type #{block_context} at #{reader.line_info})
end
end
end
end
# when looking for nested content, one or more line comments, comment
# blocks or trailing attribute lists could leave us without a block,
# so handle accordingly
# REVIEW we may no longer need this nil check
# FIXME we've got to clean this up, it's horrible!
if block
block.source_location = source_location if source_location
# REVIEW seems like there is a better way to organize this wrap-up
block.title = attributes['title'] unless block.title?
# FIXME HACK don't hardcode logic for alt, caption and scaledwidth on images down here
if block.context == :image
resolved_target = attributes['target']
block.document.register(:images, resolved_target)
attributes['alt'] ||= ::File.basename(resolved_target, ::File.extname(resolved_target)).tr('_-', ' ')
attributes['alt'] = block.sub_specialcharacters attributes['alt']
block.assign_caption attributes.delete('caption'), 'figure'
if (scaledwidth = attributes['scaledwidth'])
# append % to scaledwidth if ends in number (no units present)
if (48..57).include?((scaledwidth[-1] || 0).ord)
attributes['scaledwidth'] = %(#{scaledwidth}%)
end
end
else
block.caption ||= attributes.delete('caption')
end
      # TODO eventually remove the style attribute from the attributes hash
#block.style = attributes.delete('style')
block.style = attributes['style']
      # AsciiDoc always uses [id] as the reftext in HTML output,
# but I'd like to do better in Asciidoctor
if (block_id = (block.id ||= attributes['id']))
# TODO sub reftext
document.register(:ids, [block_id, (attributes['reftext'] || (block.title? ? block.title : nil))])
end
# FIXME remove the need for this update!
block.attributes.update(attributes) unless attributes.empty?
block.lock_in_subs
#if document.attributes.has_key? :pending_attribute_entries
# document.attributes.delete(:pending_attribute_entries).each do |entry|
# entry.save_to block.attributes
# end
#end
if block.sub? :callouts
unless (catalog_callouts block.source, document)
# No need to look for callouts if they aren't there
block.remove_sub :callouts
end
end
end
block
end
# Public: Determines whether this line is the start of any of the delimited blocks
#
# returns the match data if this line is the first line of a delimited block or nil if not
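  #
  # Example (illustrative, not in the original source; assumes the stock
  # DELIMITED_BLOCKS table, in which '----' opens a listing block):
  #
  #   Parser.is_delimited_block? '----'
  #   # => true
  #
  #   Parser.is_delimited_block?('----', true).context
  #   # => :listing
  #
  #   Parser.is_delimited_block? 'just a paragraph'
  #   # => nil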
def self.is_delimited_block? line, return_match_data = false
# highly optimized for best performance
return unless (line_len = line.length) > 1 && (DELIMITED_BLOCK_LEADERS.include? line[0..1])
# catches open block
if line_len == 2
tip = line
tl = 2
else
# catches all other delimited blocks, including fenced code
if line_len <= 4
tip = line
tl = line_len
else
tip = line[0..3]
tl = 4
end
# special case for fenced code blocks
# REVIEW review this logic
fenced_code = false
if Compliance.markdown_syntax
tip_3 = (tl == 4 ? tip.chop : tip)
if tip_3 == '```'
if tl == 4 && tip.end_with?('`')
return
end
tip = tip_3
tl = 3
fenced_code = true
end
end
# short circuit if not a fenced code block
return if tl == 3 && !fenced_code
end
if DELIMITED_BLOCKS.has_key? tip
# tip is the full line when delimiter is minimum length
if tl < 4 || tl == line_len
if return_match_data
context, masq = *DELIMITED_BLOCKS[tip]
BlockMatchData.new(context, masq, tip, tip)
else
true
end
elsif %(#{tip}#{tip[-1..-1] * (line_len - tl)}) == line
if return_match_data
context, masq = *DELIMITED_BLOCKS[tip]
BlockMatchData.new(context, masq, tip, line)
else
true
end
# only enable if/when we decide to support non-congruent block delimiters
#elsif (match = BlockDelimiterRx.match(line))
# if return_match_data
# context, masq = *DELIMITED_BLOCKS[tip]
# BlockMatchData.new(context, masq, tip, match[0])
# else
# true
# end
else
nil
end
else
nil
end
end
# whether a block supports complex content should be a config setting
  # if terminator is false, that means all the lines in the reader should be parsed
# NOTE could invoke filter in here, before and after parsing
def self.build_block(block_context, content_model, terminator, parent, reader, attributes, options = {})
if content_model == :skip || content_model == :raw
skip_processing = content_model == :skip
parse_as_content_model = :simple
else
skip_processing = false
parse_as_content_model = content_model
end
if terminator.nil?
if parse_as_content_model == :verbatim
lines = reader.read_lines_until(:break_on_blank_lines => true, :break_on_list_continuation => true)
else
content_model = :simple if content_model == :compound
lines = reader.read_lines_until(
:break_on_blank_lines => true,
:break_on_list_continuation => true,
:preserve_last_line => true,
:skip_line_comments => true,
:skip_processing => skip_processing) {|line|
Compliance.block_terminates_paragraph && (is_delimited_block?(line) || BlockAttributeLineRx =~ line)
}
# QUESTION check for empty lines after grabbing lines for simple content model?
end
block_reader = nil
elsif parse_as_content_model != :compound
lines = reader.read_lines_until(:terminator => terminator, :skip_processing => skip_processing)
block_reader = nil
# terminator is false when reader has already been prepared
elsif terminator == false
lines = nil
block_reader = reader
else
lines = nil
cursor = reader.cursor
block_reader = Reader.new reader.read_lines_until(:terminator => terminator, :skip_processing => skip_processing), cursor
end
if content_model == :skip
attributes.clear
# FIXME we shouldn't be mixing return types
return lines
end
if content_model == :verbatim && (indent = attributes['indent'])
reset_block_indent! lines, indent.to_i
end
if (extension = options[:extension])
# QUESTION do we want to delete the style?
attributes.delete('style')
if (block = extension.process_method[parent, block_reader || (Reader.new lines), attributes.dup])
attributes.replace block.attributes
# FIXME if the content model is set to compound, but we only have simple in this context, then
# forcefully set the content_model to simple to prevent parsing blocks from children
# TODO document this behavior!!
if block.content_model == :compound && !(lines = block.lines).nil_or_empty?
content_model = :compound
block_reader = Reader.new lines
end
else
# FIXME need a test to verify this returns nil at the right time
return
end
else
block = Block.new(parent, block_context, :content_model => content_model, :source => lines, :attributes => attributes)
end
# QUESTION should we have an explicit map or can we rely on check for *-caption attribute?
if (attributes.has_key? 'title') && (block.document.attr? %(#{block.context}-caption))
block.title = attributes.delete 'title'
block.assign_caption attributes.delete('caption')
end
if content_model == :compound
# we can look for blocks until there are no more lines (and not worry
# about sections) since the reader is confined within the boundaries of a
# delimited block
parse_blocks block_reader, block
end
block
end
# Public: Parse blocks from this reader until there are no more lines.
#
# This method calls Parser#next_block until there are no more lines in the
# Reader. It does not consider sections because it's assumed the Reader only
# has lines which are within a delimited block region.
#
# reader - The Reader containing the lines to process
# parent - The parent Block to which to attach the parsed blocks
#
# Returns nothing.
def self.parse_blocks(reader, parent)
while reader.has_more_lines?
block = Parser.next_block(reader, parent)
parent << block if block
end
end
# Internal: Parse and construct an outline list Block from the current position of the Reader
#
# reader - The Reader from which to retrieve the outline list
# list_type - A Symbol representing the list type (:olist for ordered, :ulist for unordered)
# parent - The parent Block to which this outline list belongs
#
# Returns the Block encapsulating the parsed outline (unordered or ordered) list
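  #
  # Example (illustrative, not in the original source; shows the shape of the
  # result one would expect for a simple two-item unordered list):
  #
  #   reader = Reader.new ['* one', '* two']
  #   list = Parser.next_outline_list(reader, :ulist, Document.new)
  #   [list.context, list.items.size]
  #   # => [:ulist, 2]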
def self.next_outline_list(reader, list_type, parent)
list_block = List.new(parent, list_type)
if parent.context == list_type
list_block.level = parent.level + 1
else
list_block.level = 1
end
#Debug.debug { "Created #{list_type} block: #{list_block}" }
while reader.has_more_lines? && (match = ListRxMap[list_type].match(reader.peek_line))
marker = resolve_list_marker(list_type, match[1])
# if we are moving to the next item, and the marker is different
# determine if we are moving up or down in nesting
if list_block.items? && marker != list_block.items[0].marker
# assume list is nested by default, but then check to see if we are
# popping out of a nested list by matching an ancestor's list marker
this_item_level = list_block.level + 1
ancestor = parent
while ancestor.context == list_type
if marker == ancestor.items[0].marker
this_item_level = ancestor.level
break
end
ancestor = ancestor.parent
end
else
this_item_level = list_block.level
end
if !list_block.items? || this_item_level == list_block.level
list_item = next_list_item(reader, list_block, match)
elsif this_item_level < list_block.level
# leave this block
break
elsif this_item_level > list_block.level
        # If this next list level is one deeper than the
        # current Block's, append it to the content of the current list item
list_block.items[-1] << next_block(reader, list_block)
end
list_block << list_item if list_item
list_item = nil
reader.skip_blank_lines
end
list_block
end
# Internal: Catalog any callouts found in the text, but don't process them
#
# text - The String of text in which to look for callouts
# document - The current document on which the callouts are stored
#
  # Returns a Boolean indicating whether callouts were found
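  #
  # Example (illustrative, not in the original source; `document` is assumed to
  # be a Document instance):
  #
  #   Parser.catalog_callouts 'puts text # <1>', document
  #   # => true (the callout <1> is also registered with document.callouts)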
def self.catalog_callouts(text, document)
found = false
if text.include? '<'
text.scan(CalloutQuickScanRx) {
# alias match for Ruby 1.8.7 compat
m = $~
if m[0].chr != '\\'
document.callouts.register(m[2])
end
# we have to mark as found even if it's escaped so it can be unescaped
found = true
}
end
found
end
# Internal: Catalog any inline anchors found in the text, but don't process them
#
# text - The String text in which to look for inline anchors
# document - The current document on which the references are stored
#
# Returns nothing
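  #
  # Example (illustrative, not in the original source; `document` is assumed to
  # be a Document instance):
  #
  #   Parser.catalog_inline_anchors 'see the [[install,Install]] section', document
  #   # registers the id 'install' (with reftext 'Install') in the document's :ids table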
def self.catalog_inline_anchors(text, document)
if text.include? '['
text.scan(InlineAnchorRx) {
# alias match for Ruby 1.8.7 compat
m = $~
next if m[0].start_with? '\\'
id = m[1] || m[3]
reftext = m[2] || m[4]
# enable if we want to allow double quoted values
#id = id.sub(DoubleQuotedRx, '\2')
#if reftext
# reftext = reftext.sub(DoubleQuotedMultiRx, '\2')
#end
document.register(:ids, [id, reftext])
}
end
nil
end
# Internal: Parse and construct a labeled (e.g., definition) list Block from the current position of the Reader
#
# reader - The Reader from which to retrieve the labeled list
# match - The Regexp match for the head of the list
# parent - The parent Block to which this labeled list belongs
#
# Returns the Block encapsulating the parsed labeled list
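  #
  # Example (illustrative, not in the original source; shows the expected item
  # count for a one-entry labeled list):
  #
  #   reader = Reader.new ['term:: definition of the term']
  #   match = DefinitionListRx.match reader.peek_line
  #   list = Parser.next_labeled_list(reader, match, Document.new)
  #   list.items.size
  #   # => 1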
def self.next_labeled_list(reader, match, parent)
list_block = List.new(parent, :dlist)
previous_pair = nil
# allows us to capture until we find a labeled item
# that uses the same delimiter (::, :::, :::: or ;;)
sibling_pattern = DefinitionListSiblingRx[match[2]]
begin
term, item = next_list_item(reader, list_block, match, sibling_pattern)
if previous_pair && !previous_pair[-1]
previous_pair.pop
previous_pair[0] << term
previous_pair << item
else
# FIXME this misses the automatic parent assignment
list_block.items << (previous_pair = [[term], item])
end
end while reader.has_more_lines? && (match = sibling_pattern.match(reader.peek_line))
list_block
end
# Internal: Parse and construct the next ListItem for the current bulleted
# (unordered or ordered) list Block, callout lists included, or the next
# term ListItem and definition ListItem pair for the labeled list Block.
#
# First collect and process all the lines that constitute the next list
# item for the parent list (according to its type). Next, parse those lines
# into blocks and associate them with the ListItem (in the case of a
# labeled list, the definition ListItem). Finally, fold the first block
# into the item's text attribute according to rules described in ListItem.
#
# reader - The Reader from which to retrieve the next list item
# list_block - The parent list Block of this ListItem. Also provides access to the list type.
# match - The match Array which contains the marker and text (first-line) of the ListItem
# sibling_trait - The list marker or the Regexp to match a sibling item
#
# Returns the next ListItem or ListItem pair (depending on the list type)
# for the parent list Block.
def self.next_list_item(reader, list_block, match, sibling_trait = nil)
if (list_type = list_block.context) == :dlist
list_term = ListItem.new(list_block, match[1])
list_item = ListItem.new(list_block, match[3])
has_text = !match[3].nil_or_empty?
else
# Create list item using first line as the text of the list item
text = match[2]
checkbox = false
if list_type == :ulist && text.start_with?('[')
if text.start_with?('[ ] ')
checkbox = true
checked = false
text = text[3..-1].lstrip
elsif text.start_with?('[x] ') || text.start_with?('[*] ')
checkbox = true
checked = true
text = text[3..-1].lstrip
end
end
list_item = ListItem.new(list_block, text)
if checkbox
# FIXME checklist never makes it into the options attribute
list_block.attributes['checklist-option'] = ''
list_item.attributes['checkbox'] = ''
list_item.attributes['checked'] = '' if checked
end
sibling_trait ||= resolve_list_marker(list_type, match[1], list_block.items.size, true, reader)
list_item.marker = sibling_trait
has_text = true
end
# first skip the line with the marker / term
reader.advance
cursor = reader.cursor
list_item_reader = Reader.new read_lines_for_list_item(reader, list_type, sibling_trait, has_text), cursor
if list_item_reader.has_more_lines?
comment_lines = list_item_reader.skip_line_comments
subsequent_line = list_item_reader.peek_line
list_item_reader.unshift_lines comment_lines unless comment_lines.empty?
if !subsequent_line.nil?
continuation_connects_first_block = subsequent_line.empty?
# if there's no continuation connecting the first block, then
# treat the lines as paragraph text (activated when has_text = false)
if !continuation_connects_first_block && list_type != :dlist
has_text = false
end
content_adjacent = !continuation_connects_first_block && !subsequent_line.empty?
else
continuation_connects_first_block = false
content_adjacent = false
end
# only relevant for :dlist
options = {:text => !has_text}
# we can look for blocks until there are no more lines (and not worry
# about sections) since the reader is confined within the boundaries of a
# list
while list_item_reader.has_more_lines?
new_block = next_block(list_item_reader, list_block, {}, options)
list_item << new_block if new_block
end
list_item.fold_first(continuation_connects_first_block, content_adjacent)
end
if list_type == :dlist
unless list_item.text? || list_item.blocks?
list_item = nil
end
[list_term, list_item]
else
list_item
end
end
# Internal: Collect the lines belonging to the current list item, navigating
# through all the rules that determine what comprises a list item.
#
# Grab lines until a sibling list item is found, or the block is broken by a
  # terminator (such as a line comment). Definition lists are greedier when
  # they lack the optional inline item text; they keep reading until they find it
#
# reader - The Reader from which to retrieve the lines.
# list_type - The Symbol context of the list (:ulist, :olist, :colist or :dlist)
# sibling_trait - A Regexp that matches a sibling of this list item or String list marker
# of the items in this list (default: nil)
# has_text - Whether the list item has text defined inline (always true except for labeled lists)
#
# Returns an Array of lines belonging to the current list item.
def self.read_lines_for_list_item(reader, list_type, sibling_trait = nil, has_text = true)
buffer = []
# three states for continuation: :inactive, :active & :frozen
# :frozen signifies we've detected sequential continuation lines &
# continuation is not permitted until reset
continuation = :inactive
# if we are within a nested list, we don't throw away the list
# continuation marks because they will be processed when grabbing
# the lines for those nested lists
within_nested_list = false
# a detached continuation is a list continuation that follows a blank line
# it gets associated with the outermost block
detached_continuation = nil
while reader.has_more_lines?
this_line = reader.read_line
# if we've arrived at a sibling item in this list, we've captured
# the complete list item and can begin processing it
# the remainder of the method determines whether we've reached
# the termination of the list
break if is_sibling_list_item?(this_line, list_type, sibling_trait)
prev_line = buffer.empty? ? nil : buffer[-1]
if prev_line == LIST_CONTINUATION
if continuation == :inactive
continuation = :active
has_text = true
buffer[-1] = '' unless within_nested_list
end
# dealing with adjacent list continuations (which is really a syntax error)
if this_line == LIST_CONTINUATION
if continuation != :frozen
continuation = :frozen
buffer << this_line
end
this_line = nil
next
end
end
# a delimited block immediately breaks the list unless preceded
# by a list continuation (they are harsh like that ;0)
if (match = is_delimited_block?(this_line, true))
if continuation == :active
buffer << this_line
# grab all the lines in the block, leaving the delimiters in place
# we're being more strict here about the terminator, but I think that's a good thing
buffer.concat reader.read_lines_until(:terminator => match.terminator, :read_last_line => true)
continuation = :inactive
else
break
end
# technically BlockAttributeLineRx only breaks if ensuing line is not a list item
# which really means BlockAttributeLineRx only breaks if it's acting as a block delimiter
# FIXME to be AsciiDoc compliant, we shouldn't break if style in attribute line is "literal" (i.e., [literal])
elsif list_type == :dlist && continuation != :active && BlockAttributeLineRx =~ this_line
break
else
if continuation == :active && !this_line.empty?
# literal paragraphs have special considerations (and this is one of
# two entry points into one)
# if we don't process it as a whole, then a line in it that looks like a
# list item will throw off the exit from it
if LiteralParagraphRx =~ this_line
reader.unshift_line this_line
buffer.concat reader.read_lines_until(
:preserve_last_line => true,
:break_on_blank_lines => true,
:break_on_list_continuation => true) {|line|
# we may be in an indented list disguised as a literal paragraph
# so we need to make sure we don't slurp up a legitimate sibling
list_type == :dlist && is_sibling_list_item?(line, list_type, sibling_trait)
}
continuation = :inactive
# let block metadata play out until we find the block
elsif BlockTitleRx =~ this_line || BlockAttributeLineRx =~ this_line || AttributeEntryRx =~ this_line
buffer << this_line
else
if nested_list_type = (within_nested_list ? [:dlist] : NESTABLE_LIST_CONTEXTS).detect {|ctx| ListRxMap[ctx] =~ this_line }
within_nested_list = true
if nested_list_type == :dlist && $~[3].nil_or_empty?
# get greedy again
has_text = false
end
end
buffer << this_line
continuation = :inactive
end
elsif !prev_line.nil? && prev_line.empty?
# advance to the next line of content
if this_line.empty?
reader.skip_blank_lines
this_line = reader.read_line
# if we hit eof or a sibling, stop reading
break if this_line.nil? || is_sibling_list_item?(this_line, list_type, sibling_trait)
end
if this_line == LIST_CONTINUATION
detached_continuation = buffer.size
buffer << this_line
else
# has_text is only relevant for dlist, which is more greedy until it has text for an item
# for all other lists, has_text is always true
# in this block, we have to see whether we stay in the list
if has_text
# TODO any way to combine this with the check after skipping blank lines?
if is_sibling_list_item?(this_line, list_type, sibling_trait)
break
elsif nested_list_type = NESTABLE_LIST_CONTEXTS.detect {|ctx| ListRxMap[ctx] =~ this_line }
buffer << this_line
within_nested_list = true
if nested_list_type == :dlist && $~[3].nil_or_empty?
# get greedy again
has_text = false
end
# slurp up any literal paragraph offset by blank lines
# NOTE we have to check for indented list items first
elsif LiteralParagraphRx =~ this_line
reader.unshift_line this_line
buffer.concat reader.read_lines_until(
:preserve_last_line => true,
:break_on_blank_lines => true,
:break_on_list_continuation => true) {|line|
# we may be in an indented list disguised as a literal paragraph
# so we need to make sure we don't slurp up a legitimate sibling
list_type == :dlist && is_sibling_list_item?(line, list_type, sibling_trait)
}
else
break
end
else # only dlist in need of item text, so slurp it up!
              # pop the blank line so it's not interpreted as a list continuation
buffer.pop unless within_nested_list
buffer << this_line
has_text = true
end
end
else
has_text = true if !this_line.empty?
if nested_list_type = (within_nested_list ? [:dlist] : NESTABLE_LIST_CONTEXTS).detect {|ctx| ListRxMap[ctx] =~ this_line }
within_nested_list = true
if nested_list_type == :dlist && $~[3].nil_or_empty?
# get greedy again
has_text = false
end
end
buffer << this_line
end
end
this_line = nil
end
reader.unshift_line this_line if this_line
if detached_continuation
buffer.delete_at detached_continuation
end
# strip trailing blank lines to prevent empty blocks
buffer.pop while !buffer.empty? && buffer[-1].empty?
    # We do need to remove the optional trailing continuation;
    # a blank line would have served the same purpose in the document
buffer.pop if !buffer.empty? && buffer[-1] == LIST_CONTINUATION
#warn "BUFFER[#{list_type},#{sibling_trait}]>#{buffer * EOL}<BUFFER"
#warn "BUFFER[#{list_type},#{sibling_trait}]>#{buffer.inspect}<BUFFER"
buffer
end
# Internal: Initialize a new Section object and assign any attributes provided
#
# The information for this section is retrieved by parsing the lines at the
# current position of the reader.
#
# reader - the source reader
# parent - the parent Section or Document of this Section
# attributes - a Hash of attributes to assign to this section (default: {})
def self.initialize_section(reader, parent, attributes = {})
document = parent.document
source_location = reader.cursor if document.sourcemap
sect_id, sect_reftext, sect_title, sect_level, _ = parse_section_title(reader, document)
attributes['reftext'] = sect_reftext if sect_reftext
section = Section.new parent, sect_level, document.attributes.has_key?('sectnums')
section.source_location = source_location if source_location
section.id = sect_id
section.title = sect_title
# parse style, id and role from first positional attribute
if attributes[1]
style, _ = parse_style_attribute attributes, reader
# handle case where only id and/or role are given (e.g., #idname.rolename)
if style
section.sectname = style
section.special = true
# HACK needs to be refactored so it's driven by config
if section.sectname == 'abstract' && document.doctype == 'book'
section.sectname = 'sect1'
section.special = false
section.level = 1
end
else
section.sectname = %(sect#{section.level})
end
elsif sect_title.downcase == 'synopsis' && document.doctype == 'manpage'
section.special = true
section.sectname = 'synopsis'
else
section.sectname = %(sect#{section.level})
end
if !section.id && (id = attributes['id'])
section.id = id
else
# generate an id if one was not *embedded* in the heading line
# or as an anchor above the section
section.id ||= section.generate_id
end
if section.id
# TODO sub reftext
section.document.register(:ids, [section.id, (attributes['reftext'] || section.title)])
end
section.update_attributes(attributes)
reader.skip_blank_lines
section
end
# Private: Get the Integer section level based on the characters
# used in the ASCII line under the section title.
#
# line - the String line from under the section title.
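#
# Example (illustrative addition, not in the original source; assumes the
# conventional SECTION_LEVELS mapping in which '=' maps to 0)
#
#   section_level '======'
#   # => 0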
def self.section_level(line)
SECTION_LEVELS[line.chr]
end
#--
# = is level 0, == is level 1, etc.
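#
# Illustrative example (added for clarity):
#
#   single_line_section_level '==='
#   # => 2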
def self.single_line_section_level(marker)
marker.length - 1
end
# Internal: Checks if the next line on the Reader is a section title
#
# reader - the source Reader
# attributes - a Hash of attributes collected above the current line
#
# returns the section level if the Reader is positioned at a section title,
# false otherwise
def self.is_next_line_section?(reader, attributes)
if !(val = attributes[1]).nil? && ((ord_0 = val[0].ord) == 100 || ord_0 == 102) && val =~ FloatingTitleStyleRx
return false
end
return false unless reader.has_more_lines?
Compliance.underline_style_section_titles ? is_section_title?(*reader.peek_lines(2)) : is_section_title?(reader.peek_line)
end
# Internal: Convenience API for checking if the next line on the Reader is the document title
#
# reader - the source Reader
# attributes - a Hash of attributes collected above the current line
#
# returns true if the Reader is positioned at the document title, false otherwise
def self.is_next_line_document_title?(reader, attributes)
is_next_line_section?(reader, attributes) == 0
end
# Public: Checks if these lines are a section title
#
# line1 - the first line as a String
# line2 - the second line as a String (default: nil)
#
# returns the section level if these lines are a section title,
# false otherwise
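#
# Examples (illustrative addition, not in the original source)
#
#   is_section_title? '== Section Title'
#   # => 1
#
#   is_section_title? 'Just a paragraph', 'with a second line'
#   # => false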
def self.is_section_title?(line1, line2 = nil)
if (level = is_single_line_section_title?(line1))
level
elsif line2 && (level = is_two_line_section_title?(line1, line2))
level
else
false
end
end
def self.is_single_line_section_title?(line1)
first_char = line1 ? line1.chr : nil
if (first_char == '=' || (Compliance.markdown_syntax && first_char == '#')) &&
(match = AtxSectionRx.match(line1))
single_line_section_level match[1]
else
false
end
end
def self.is_two_line_section_title?(line1, line2)
if line1 && line2 && SECTION_LEVELS.has_key?(line2.chr) &&
line2 =~ SetextSectionLineRx && line1 =~ SetextSectionTitleRx &&
# chomp so that a (non-visible) endline does not impact calculation
(line_length(line1) - line_length(line2)).abs <= 1
section_level line2
else
false
end
end
# Internal: Parse the section title from the current position of the reader
#
# Parse a single or double-line section title. After this method is called,
# the Reader will be positioned at the line after the section title.
#
# reader - the source reader, positioned at a section title
# document- the current document
#
# Examples
#
# reader.lines
# # => ["Foo", "~~~"]
#
# id, reftext, title, level, single = parse_section_title(reader, document)
#
# title
# # => "Foo"
# level
# # => 2
# id
# # => nil
# single
# # => false
#
# line1
# # => "==== Foo"
#
# id, reftext, title, level, single = parse_section_title(reader, document)
#
# title
# # => "Foo"
# level
# # => 3
# id
# # => nil
# single
# # => true
#
# returns an Array of [String, String, String, Integer, Boolean], representing the
# id, reftext, title, level and single-line flag of the Section.
#
#--
# NOTE for efficiency, we don't reuse methods that check for a section title
def self.parse_section_title(reader, document)
line1 = reader.read_line
sect_id = nil
sect_title = nil
sect_level = -1
sect_reftext = nil
single_line = true
first_char = line1.chr
if (first_char == '=' || (Compliance.markdown_syntax && first_char == '#')) &&
(match = AtxSectionRx.match(line1))
sect_level = single_line_section_level match[1]
sect_title = match[2]
if sect_title.end_with?(']]') && (anchor_match = InlineSectionAnchorRx.match(sect_title))
if anchor_match[2].nil?
sect_title = anchor_match[1]
sect_id = anchor_match[3]
sect_reftext = anchor_match[4]
end
end
elsif Compliance.underline_style_section_titles
if (line2 = reader.peek_line(true)) && SECTION_LEVELS.has_key?(line2.chr) && line2 =~ SetextSectionLineRx &&
(name_match = SetextSectionTitleRx.match(line1)) &&
# chomp so that a (non-visible) endline does not impact calculation
(line_length(line1) - line_length(line2)).abs <= 1
sect_title = name_match[1]
if sect_title.end_with?(']]') && (anchor_match = InlineSectionAnchorRx.match(sect_title))
if anchor_match[2].nil?
sect_title = anchor_match[1]
sect_id = anchor_match[3]
sect_reftext = anchor_match[4]
end
end
sect_level = section_level line2
single_line = false
reader.advance
end
end
if sect_level >= 0
sect_level += document.attr('leveloffset', 0).to_i
end
[sect_id, sect_reftext, sect_title, sect_level, single_line]
end
# Public: Calculate the number of unicode characters in the line, excluding the endline
#
# line - the String to calculate
#
# returns the number of unicode characters in the line
def self.line_length(line)
FORCE_UNICODE_LINE_LENGTH ? line.scan(UnicodeCharScanRx).length : line.length
end
# Public: Consume and parse the two header lines (line 1 = author info, line 2 = revision info).
#
# Returns the Hash of header metadata. If a Document object is supplied, the metadata
# is applied directly to the attributes of the Document.
#
# reader - the Reader holding the source lines of the document
# document - the Document we are building (default: nil)
#
# Examples
#
# data = ["Author Name <[email protected]>\n", "v1.0, 2012-12-21: Coincide w/ end of world.\n"]
# parse_header_metadata(Reader.new data, nil, :normalize => true)
# # => {'author' => 'Author Name', 'firstname' => 'Author', 'lastname' => 'Name', 'email' => '[email protected]',
# # 'revnumber' => '1.0', 'revdate' => '2012-12-21', 'revremark' => 'Coincide w/ end of world.'}
def self.parse_header_metadata(reader, document = nil)
# NOTE this will discard any comment lines, but will not skip blank lines
process_attribute_entries(reader, document)
metadata = {}
implicit_author = nil
implicit_authors = nil
if reader.has_more_lines? && !reader.next_line_empty?
author_metadata = process_authors reader.read_line
unless author_metadata.empty?
if document
# apply header subs and assign to document
author_metadata.each do |key, val|
unless document.attributes.has_key? key
document.attributes[key] = ((val.is_a? ::String) ? document.apply_header_subs(val) : val)
end
end
implicit_author = document.attributes['author']
implicit_authors = document.attributes['authors']
end
metadata = author_metadata
end
# NOTE this will discard any comment lines, but not skip blank lines
process_attribute_entries(reader, document)
rev_metadata = {}
if reader.has_more_lines? && !reader.next_line_empty?
rev_line = reader.read_line
if (match = RevisionInfoLineRx.match(rev_line))
rev_metadata['revdate'] = match[2].strip
rev_metadata['revnumber'] = match[1].rstrip unless match[1].nil?
rev_metadata['revremark'] = match[3].rstrip unless match[3].nil?
else
# throw it back
reader.unshift_line rev_line
end
end
unless rev_metadata.empty?
if document
# apply header subs and assign to document
rev_metadata.each do |key, val|
unless document.attributes.has_key? key
document.attributes[key] = document.apply_header_subs(val)
end
end
end
metadata.update rev_metadata
end
# NOTE this will discard any comment lines, but not skip blank lines
process_attribute_entries(reader, document)
reader.skip_blank_lines
end
if document
# process author attribute entries that override (or stand in for) the implicit author line
author_metadata = nil
if document.attributes.has_key?('author') &&
(author_line = document.attributes['author']) != implicit_author
# do not allow multiple, process as names only
author_metadata = process_authors author_line, true, false
elsif document.attributes.has_key?('authors') &&
(author_line = document.attributes['authors']) != implicit_authors
# allow multiple, process as names only
author_metadata = process_authors author_line, true
else
authors = []
author_key = %(author_#{authors.size + 1})
while document.attributes.has_key? author_key
authors << document.attributes[author_key]
author_key = %(author_#{authors.size + 1})
end
if authors.size == 1
# do not allow multiple, process as names only
author_metadata = process_authors authors[0], true, false
elsif authors.size > 1
# allow multiple, process as names only
author_metadata = process_authors authors.join('; '), true
end
end
if author_metadata
document.attributes.update author_metadata
# special case
if !document.attributes.has_key?('email') && document.attributes.has_key?('email_1')
document.attributes['email'] = document.attributes['email_1']
end
end
end
metadata
end
# Internal: Parse the author line into a Hash of author metadata
#
# author_line - the String author line
# names_only - a Boolean flag that indicates whether to process line as
# names only or names with emails (default: false)
# multiple - a Boolean flag that indicates whether to process multiple
# semicolon-separated entries in the author line (default: true)
#
# returns a Hash of author metadata
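#
# Example (illustrative addition, names-only mode)
#
#   process_authors 'Doc Writer', true
#   # => { 'author' => 'Doc Writer', 'firstname' => 'Doc', 'lastname' => 'Writer',
#   #      'authorinitials' => 'DW', 'authorcount' => 1, 'authors' => 'Doc Writer' }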
def self.process_authors(author_line, names_only = false, multiple = true)
author_metadata = {}
keys = ['author', 'authorinitials', 'firstname', 'middlename', 'lastname', 'email']
author_entries = multiple ? (author_line.split ';').map {|line| line.strip } : [author_line]
author_entries.each_with_index do |author_entry, idx|
next if author_entry.empty?
key_map = {}
if idx.zero?
keys.each do |key|
key_map[key.to_sym] = key
end
else
keys.each do |key|
key_map[key.to_sym] = %(#{key}_#{idx + 1})
end
end
segments = nil
if names_only
# splitting on ' ' will collapse repeating spaces
segments = author_entry.split(' ', 3)
elsif (match = AuthorInfoLineRx.match(author_entry))
segments = match.to_a
segments.shift
end
unless segments.nil?
author_metadata[key_map[:firstname]] = fname = segments[0].tr('_', ' ')
author_metadata[key_map[:author]] = fname
author_metadata[key_map[:authorinitials]] = fname[0, 1]
if !segments[1].nil? && !segments[2].nil?
author_metadata[key_map[:middlename]] = mname = segments[1].tr('_', ' ')
author_metadata[key_map[:lastname]] = lname = segments[2].tr('_', ' ')
author_metadata[key_map[:author]] = [fname, mname, lname].join ' '
author_metadata[key_map[:authorinitials]] = [fname[0, 1], mname[0, 1], lname[0, 1]].join
elsif !segments[1].nil?
author_metadata[key_map[:lastname]] = lname = segments[1].tr('_', ' ')
author_metadata[key_map[:author]] = [fname, lname].join ' '
author_metadata[key_map[:authorinitials]] = [fname[0, 1], lname[0, 1]].join
end
author_metadata[key_map[:email]] = segments[3] unless names_only || segments[3].nil?
else
author_metadata[key_map[:author]] = author_metadata[key_map[:firstname]] = fname = author_entry.strip.tr_s(' ', ' ')
author_metadata[key_map[:authorinitials]] = fname[0, 1]
end
author_metadata['authorcount'] = idx + 1
# only assign the _1 attributes if there are multiple authors
if idx == 1
keys.each do |key|
author_metadata[%(#{key}_1)] = author_metadata[key] if author_metadata.has_key? key
end
end
if idx.zero?
author_metadata['authors'] = author_metadata[key_map[:author]]
else
author_metadata['authors'] = %(#{author_metadata['authors']}, #{author_metadata[key_map[:author]]})
end
end
author_metadata
end
# Internal: Parse lines of metadata until a line of metadata is not found.
#
# This method processes sequential lines containing block metadata, ignoring
# blank lines and comments.
#
# reader - the source reader
# parent - the parent to which the lines belong
# attributes - a Hash of attributes in which any metadata found will be stored (default: {})
# options - a Hash of options to control processing: (default: {})
# * :text indicates that the lexer is only looking for text content
# and thus the block title should not be captured
#
# returns the Hash of attributes including any metadata found
def self.parse_block_metadata_lines(reader, parent, attributes = {}, options = {})
while parse_block_metadata_line(reader, parent, attributes, options)
# discard the line just processed
reader.advance
reader.skip_blank_lines
end
attributes
end
# Internal: Parse the next line if it contains metadata for the following block
#
# This method handles lines with the following content:
#
# * line or block comment
# * anchor
# * attribute list
# * block title
#
# Any attributes found will be inserted into the attributes argument.
# If the line contains block metadata, the method returns true, otherwise false.
#
# reader - the source reader
# parent - the parent of the current line
# attributes - a Hash of attributes in which any metadata found will be stored
# options - a Hash of options to control processing: (default: {})
# * :text indicates that the lexer is only looking for text content
# and thus the block title should not be captured
#
# returns true if the line contains metadata, otherwise false
def self.parse_block_metadata_line(reader, parent, attributes, options = {})
return false unless reader.has_more_lines?
next_line = reader.peek_line
if (commentish = next_line.start_with?('//')) && (match = CommentBlockRx.match(next_line))
terminator = match[0]
reader.read_lines_until(:skip_first_line => true, :preserve_last_line => true, :terminator => terminator, :skip_processing => true)
elsif commentish && CommentLineRx =~ next_line
# do nothing, we'll skip it
elsif !options[:text] && next_line.start_with?(':') && (match = AttributeEntryRx.match(next_line))
process_attribute_entry(reader, parent, attributes, match)
elsif (in_square_brackets = next_line.start_with?('[') && next_line.end_with?(']')) && (match = BlockAnchorRx.match(next_line))
unless match[1].nil_or_empty?
attributes['id'] = match[1]
# AsciiDoc always uses [id] as the reftext in HTML output,
# but I'd like to do better in Asciidoctor
# registration is deferred until the block or section is processed
attributes['reftext'] = match[2] unless match[2].nil?
end
elsif in_square_brackets && (match = BlockAttributeListRx.match(next_line))
parent.document.parse_attributes(match[1], [], :sub_input => true, :into => attributes)
# NOTE title doesn't apply to section, but we need to stash it for the first block
# TODO should issue an error if this is found above the document title
elsif !options[:text] && (match = BlockTitleRx.match(next_line))
attributes['title'] = match[1]
else
return false
end
true
end
def self.process_attribute_entries(reader, parent, attributes = nil)
reader.skip_comment_lines
while process_attribute_entry(reader, parent, attributes)
# discard line just processed
reader.advance
reader.skip_comment_lines
end
end
def self.process_attribute_entry(reader, parent, attributes = nil, match = nil)
match ||= (reader.has_more_lines? ? AttributeEntryRx.match(reader.peek_line) : nil)
if match
name = match[1]
unless (value = match[2] || '').empty?
if value.end_with?(line_continuation = LINE_CONTINUATION) ||
value.end_with?(line_continuation = LINE_CONTINUATION_LEGACY)
value = value.chop.rstrip
while reader.advance
break if (next_line = reader.peek_line.strip).empty?
if (keep_open = next_line.end_with? line_continuation)
next_line = next_line.chop.rstrip
end
separator = (value.end_with? LINE_BREAK) ? EOL : ' '
value = %(#{value}#{separator}#{next_line})
break unless keep_open
end
end
end
store_attribute(name, value, (parent ? parent.document : nil), attributes)
true
else
false
end
end
# Public: Store the attribute in the document and register attribute entry if accessible
#
# name - the String name of the attribute to store
# value - the String value of the attribute to store
# doc - the Document being parsed
# attrs - the attributes for the current context
#
# returns a 2-element array containing the attribute name and value
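#
# Example (illustrative addition; no Document or attribute Hash supplied)
#
#   store_attribute 'icons!', ''
#   # => ['icons', nil]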
def self.store_attribute(name, value, doc = nil, attrs = nil)
# TODO move processing of attribute value to utility method
if name.end_with?('!')
# a nil value signals the attribute should be deleted (undefined)
value = nil
name = name.chop
elsif name.start_with?('!')
# a nil value signals the attribute should be deleted (undefined)
value = nil
name = name[1..-1]
end
name = sanitize_attribute_name(name)
accessible = true
if doc
# alias numbered attribute to sectnums
if name == 'numbered'
name = 'sectnums'
# support relative leveloffset values
elsif name == 'leveloffset'
if value
case value.chr
when '+'
value = ((doc.attr 'leveloffset', 0).to_i + (value[1..-1] || 0).to_i).to_s
when '-'
value = ((doc.attr 'leveloffset', 0).to_i - (value[1..-1] || 0).to_i).to_s
end
end
end
accessible = value ? doc.set_attribute(name, value) : doc.delete_attribute(name)
end
if accessible && attrs
Document::AttributeEntry.new(name, value).save_to(attrs)
end
[name, value]
end
# Internal: Resolve the 0-index marker for this list item
#
# For ordered lists, match the marker used for this list item against the
# known list markers and determine which marker is the first (0-index) marker
# in its number series.
#
# For callout lists, return <1>.
#
# For bulleted lists, return the marker as passed to this method.
#
# list_type - The Symbol context of the list
# marker - The String marker for this list item
# ordinal - The position of this list item in the list
# validate - Whether to validate the value of the marker
#
# Returns the String 0-index marker for this list item
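#
# Examples (illustrative addition)
#
#   resolve_list_marker :colist, '<3>'
#   # => '<1>'
#
#   resolve_list_marker :ulist, '*'
#   # => '*'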
def self.resolve_list_marker(list_type, marker, ordinal = 0, validate = false, reader = nil)
if list_type == :olist && !marker.start_with?('.')
resolve_ordered_list_marker(marker, ordinal, validate, reader)
elsif list_type == :colist
'<1>'
else
marker
end
end
# Internal: Resolve the 0-index marker for this ordered list item
#
# Match the marker used for this ordered list item against the
# known ordered list markers and determine which marker is
# the first (0-index) marker in its number series.
#
# The purpose of this method is to normalize the implicit numbered markers
# so that they can be compared against other list items.
#
# marker - The marker used for this list item
# ordinal - The 0-based index of the list item (default: 0)
# validate - Perform validation that the marker provided is the proper
# marker in the sequence (default: false)
#
# Examples
#
# marker = 'B.'
# Parser.resolve_ordered_list_marker(marker, 1, true)
# # => 'A.'
#
# Returns the String of the first marker in this number series
def self.resolve_ordered_list_marker(marker, ordinal = 0, validate = false, reader = nil)
number_style = ORDERED_LIST_STYLES.detect {|s| OrderedListMarkerRxMap[s] =~ marker }
expected = actual = nil
case number_style
when :arabic
if validate
expected = ordinal + 1
actual = marker.to_i
end
marker = '1.'
when :loweralpha
if validate
expected = ('a'[0].ord + ordinal).chr
actual = marker.chomp('.')
end
marker = 'a.'
when :upperalpha
if validate
expected = ('A'[0].ord + ordinal).chr
actual = marker.chomp('.')
end
marker = 'A.'
when :lowerroman
if validate
# TODO report this in roman numerals; see https://github.com/jamesshipton/roman-numeral/blob/master/lib/roman_numeral.rb
expected = ordinal + 1
actual = roman_numeral_to_int(marker.chomp(')'))
end
marker = 'i)'
when :upperroman
if validate
# TODO report this in roman numerals; see https://github.com/jamesshipton/roman-numeral/blob/master/lib/roman_numeral.rb
expected = ordinal + 1
actual = roman_numeral_to_int(marker.chomp(')'))
end
marker = 'I)'
end
if validate && expected != actual
warn %(asciidoctor: WARNING: #{reader.line_info}: list item index: expected #{expected}, got #{actual})
end
marker
end
# Internal: Determine whether this line is a sibling list item
# according to the list type and trait (marker) provided.
#
# line - The String line to check
# list_type - The context of the list (:olist, :ulist, :colist, :dlist)
# sibling_trait - The String marker for the list or the Regexp to match a sibling
#
# Returns a Boolean indicating whether this line is a sibling list item given
# the criteria provided
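#
# Illustrative example (assumes ListRxMap[:ulist] captures the list marker in group 1)
#
#   is_sibling_list_item? '* another item', :ulist, '*'
#   # => true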
def self.is_sibling_list_item?(line, list_type, sibling_trait)
if sibling_trait.is_a? ::Regexp
matcher = sibling_trait
expected_marker = false
else
matcher = ListRxMap[list_type]
expected_marker = sibling_trait
end
if (m = matcher.match(line))
if expected_marker
expected_marker == resolve_list_marker(list_type, m[1])
else
true
end
else
false
end
end
# Internal: Parse the table contained in the provided Reader
#
# table_reader - a Reader containing the source lines of an AsciiDoc table
# parent - the parent Block of this Asciidoctor::Table
# attributes - attributes captured from above this Block
#
# returns an instance of Asciidoctor::Table parsed from the provided reader
def self.next_table(table_reader, parent, attributes)
table = Table.new(parent, attributes)
if (attributes.has_key? 'title')
table.title = attributes.delete 'title'
table.assign_caption attributes.delete('caption')
end
if attributes.has_key? 'cols'
table.create_columns(parse_col_specs(attributes['cols']))
explicit_col_specs = true
else
explicit_col_specs = false
end
skipped = table_reader.skip_blank_lines
parser_ctx = Table::ParserContext.new(table_reader, table, attributes)
loop_idx = -1
while table_reader.has_more_lines?
loop_idx += 1
line = table_reader.read_line
if skipped == 0 && loop_idx.zero? && !attributes.has_key?('options') &&
!(next_line = table_reader.peek_line).nil? && next_line.empty?
table.has_header_option = true
table.set_option 'header'
end
if parser_ctx.format == 'psv'
if parser_ctx.starts_with_delimiter? line
line = line[1..-1]
# push an empty cell spec if boundary at start of line
parser_ctx.close_open_cell
else
next_cell_spec, line = parse_cell_spec(line, :start, parser_ctx.delimiter)
# if the cell spec is not null, then we're at a cell boundary
if !next_cell_spec.nil?
parser_ctx.close_open_cell next_cell_spec
else
# QUESTION do we not advance to next line? if so, when will we if we came into this block?
end
end
end
seen = false
while !seen || !line.empty?
seen = true
if (m = parser_ctx.match_delimiter(line))
if parser_ctx.format == 'csv'
if parser_ctx.buffer_has_unclosed_quotes?(m.pre_match)
# throw it back, it's too small
line = parser_ctx.skip_matched_delimiter(m)
next
end
else
if m.pre_match.end_with? '\\'
line = parser_ctx.skip_matched_delimiter(m, true)
next
end
end
if parser_ctx.format == 'psv'
next_cell_spec, cell_text = parse_cell_spec(m.pre_match, :end)
parser_ctx.push_cell_spec next_cell_spec
parser_ctx.buffer = %(#{parser_ctx.buffer}#{cell_text})
else
parser_ctx.buffer = %(#{parser_ctx.buffer}#{m.pre_match})
end
line = m.post_match
parser_ctx.close_cell
else
# no other delimiters to see here
# suck up this line into the buffer and move on
parser_ctx.buffer = %(#{parser_ctx.buffer}#{line}#{EOL})
# QUESTION make stripping endlines in csv data an option? (unwrap-option?)
if parser_ctx.format == 'csv'
parser_ctx.buffer = %(#{parser_ctx.buffer.rstrip} )
end
line = ''
if parser_ctx.format == 'psv' || (parser_ctx.format == 'csv' &&
parser_ctx.buffer_has_unclosed_quotes?)
parser_ctx.keep_cell_open
else
parser_ctx.close_cell true
end
end
end
skipped = table_reader.skip_blank_lines unless parser_ctx.cell_open?
if !table_reader.has_more_lines?
parser_ctx.close_cell true
end
end
table.attributes['colcount'] ||= parser_ctx.col_count
if !explicit_col_specs
# TODO further encapsulate this logic (into table perhaps?)
even_width = (100.0 / parser_ctx.col_count).floor
table.columns.each {|c| c.assign_width(0, even_width) }
end
table.partition_header_footer attributes
table
end
# Internal: Parse the column specs for this table.
#
# The column specs dictate the number of columns, relative
# width of columns, default alignments for cells in each
# column, and/or default styles or filters applied to the cells in
# the column.
#
# Every column spec is guaranteed to have a width
#
# returns an Array of Hashes, one per column, that specify how to format
# and lay out the cells in the table.
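#
# Example (illustrative addition, using the deprecated single-number syntax)
#
#   parse_col_specs '3'
#   # => [{'width' => 1}, {'width' => 1}, {'width' => 1}]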
def self.parse_col_specs(records)
# check for deprecated syntax: single number, equal column spread
# REVIEW could use records == records.to_i.to_s instead of regexp
if DigitsRx =~ records
return ::Array.new(records.to_i) { { 'width' => 1 } }
end
specs = []
records.split(',').each {|record|
# TODO might want to use scan rather than this mega-regexp
if (m = ColumnSpecRx.match(record))
spec = {}
if m[2]
# make this an operation
colspec, rowspec = m[2].split '.'
if !colspec.nil_or_empty? && Table::ALIGNMENTS[:h].has_key?(colspec)
spec['halign'] = Table::ALIGNMENTS[:h][colspec]
end
if !rowspec.nil_or_empty? && Table::ALIGNMENTS[:v].has_key?(rowspec)
spec['valign'] = Table::ALIGNMENTS[:v][rowspec]
end
end
# to_i permits us to support percentage width by stripping the %
# NOTE this is slightly out of compliance w/ AsciiDoc, but makes way more sense
spec['width'] = !m[3].nil? ? m[3].to_i : 1
# make this an operation
if m[4] && Table::TEXT_STYLES.has_key?(m[4])
spec['style'] = Table::TEXT_STYLES[m[4]]
end
repeat = !m[1].nil? ? m[1].to_i : 1
1.upto(repeat) {
specs << spec.dup
}
end
}
specs
end
# Internal: Parse the cell specs for the current cell.
#
# The cell specs dictate the cell's alignments, styles or filters,
# colspan, rowspan and/or repeating content.
#
# The default spec when pos == :end is {} since we already know we're at a
# delimiter. When pos == :start, we *may* be at a delimiter, nil indicates
# we're not.
#
# returns a two-element Array containing the Hash of attributes that indicate
# how to lay out and style this cell and the remaining String of the line.
def self.parse_cell_spec(line, pos = :start, delimiter = nil)
m = nil
rest = ''
case pos
when :start
if line.include? delimiter
spec_part, rest = line.split delimiter, 2
if (m = CellSpecStartRx.match spec_part)
return [{}, rest] if m[0].empty?
else
return [nil, line]
end
else
return [nil, line]
end
when :end
if (m = CellSpecEndRx.match line)
# NOTE return the line stripped of trailing whitespace if no cellspec is found in this case
return [{}, line.rstrip] if m[0].lstrip.empty?
rest = m.pre_match
else
return [{}, line]
end
end
spec = {}
if m[1]
colspec, rowspec = m[1].split '.'
colspec = colspec.nil_or_empty? ? 1 : colspec.to_i
rowspec = rowspec.nil_or_empty? ? 1 : rowspec.to_i
if m[2] == '+'
spec['colspan'] = colspec unless colspec == 1
spec['rowspan'] = rowspec unless rowspec == 1
elsif m[2] == '*'
spec['repeatcol'] = colspec unless colspec == 1
end
end
if m[3]
colspec, rowspec = m[3].split '.'
if !colspec.nil_or_empty? && Table::ALIGNMENTS[:h].has_key?(colspec)
spec['halign'] = Table::ALIGNMENTS[:h][colspec]
end
if !rowspec.nil_or_empty? && Table::ALIGNMENTS[:v].has_key?(rowspec)
spec['valign'] = Table::ALIGNMENTS[:v][rowspec]
end
end
if m[4] && Table::TEXT_STYLES.has_key?(m[4])
spec['style'] = Table::TEXT_STYLES[m[4]]
end
[spec, rest]
end
# Public: Parse the first positional attribute and assign named attributes
#
# Parse the first positional attribute to extract the style, role and id
# parts, assign the values to their corresponding attribute keys and return
# both the original style attribute and the parsed value from the first
# positional attribute.
#
# attributes - The Hash of attributes to process and update
#
# Examples
#
# puts attributes
# => {1 => "abstract#intro.lead%fragment", "style" => "preamble"}
#
# parse_style_attribute(attributes)
# => ["abstract", "preamble"]
#
# puts attributes
# => {1 => "abstract#intro.lead%fragment", "style" => "abstract", "id" => "intro",
#    "role" => "lead", "options" => "fragment", "fragment-option" => ''}
#
# Returns a two-element Array of the parsed style from the
# first positional attribute and the original style that was
# replaced
def self.parse_style_attribute(attributes, reader = nil)
original_style = attributes['style']
raw_style = attributes[1]
# NOTE spaces are not allowed in shorthand, so if we find one, this ain't shorthand
if raw_style && !raw_style.include?(' ') && Compliance.shorthand_property_syntax
type = :style
collector = []
parsed = {}
# QUESTION should this be a private method? (though, it's never called if shorthand isn't used)
save_current = lambda {
if collector.empty?
if type != :style
warn %(asciidoctor: WARNING:#{reader.nil? ? nil : " #{reader.prev_line_info}:"} invalid empty #{type} detected in style attribute)
end
else
case type
when :role, :option
parsed[type] ||= []
parsed[type].push collector.join
when :id
if parsed.has_key? :id
warn %(asciidoctor: WARNING:#{reader.nil? ? nil : " #{reader.prev_line_info}:"} multiple ids detected in style attribute)
end
parsed[type] = collector.join
else
parsed[type] = collector.join
end
collector = []
end
}
raw_style.each_char do |c|
if c == '.' || c == '#' || c == '%'
save_current.call
case c
when '.'
type = :role
when '#'
type = :id
when '%'
type = :option
end
else
collector.push c
end
end
# small optimization if no shorthand is found
if type == :style
parsed_style = attributes['style'] = raw_style
else
save_current.call
if parsed.has_key? :style
parsed_style = attributes['style'] = parsed[:style]
else
parsed_style = nil
end
if parsed.has_key? :id
attributes['id'] = parsed[:id]
end
if parsed.has_key? :role
attributes['role'] = parsed[:role] * ' '
end
if parsed.has_key? :option
(options = parsed[:option]).each do |option|
attributes[%(#{option}-option)] = ''
end
if (existing_opts = attributes['options'])
attributes['options'] = (options + existing_opts.split(',')) * ','
else
attributes['options'] = options * ','
end
end
end
[parsed_style, original_style]
else
attributes['style'] = raw_style
[raw_style, original_style]
end
end
# Remove the indentation (block offset) shared by all the lines, then
# indent the lines by the specified amount if specified
#
# Trim the leading whitespace (indentation) equivalent to the length
# of the indent on the least indented line. If the indent argument
# is specified, indent the lines by this many spaces (columns).
#
# The purpose of this method is to shift a block of text to
# align to the left margin, while still preserving the relative
# indentation between lines
#
# lines - the Array of String lines to process
# indent - the integer number of spaces to add to the beginning
# of each line; if this value is nil, the existing
# space is preserved (optional, default: 0)
#
# Examples
#
# source = <<EOS
# def names
# @names.split ' '
# end
# EOS
#
# source.split("\n")
# # => [" def names", " @names.split ' '", " end"]
#
# Parser.reset_block_indent(source.split "\n")
# # => ["def names", " @names.split ' '", "end"]
#
# puts Parser.reset_block_indent(source.split "\n") * "\n"
# # => def names
# # => @names.split ' '
# # => end
#
# returns the Array of String lines with block offset removed
#--
# FIXME refactor gsub matchers into compiled regex
def self.reset_block_indent!(lines, indent = 0)
return if !indent || lines.empty?
tab_detected = false
# TODO make tab size configurable
tab_expansion = ' '
# strip leading block indent
offsets = lines.map do |line|
# break if the first char is non-whitespace
break [] unless line.chr.lstrip.empty?
if line.include? TAB
tab_detected = true
line = line.gsub(TAB_PATTERN, tab_expansion)
end
if (flush_line = line.lstrip).empty?
nil
elsif (offset = line.length - flush_line.length) == 0
break []
else
offset
end
end
unless offsets.empty? || (offsets = offsets.compact).empty?
if (offset = offsets.min) > 0
lines.map! {|line|
line = line.gsub(TAB_PATTERN, tab_expansion) if tab_detected
line[offset..-1].to_s
}
end
end
if indent > 0
padding = ' ' * indent
lines.map! {|line| %(#{padding}#{line}) }
end
nil
end
# Public: Convert a string to a legal attribute name.
#
# name - the String name of the attribute
#
# Returns a String with the legal AsciiDoc attribute name.
#
# Examples
#
# sanitize_attribute_name('Foo Bar')
# => 'foobar'
#
# sanitize_attribute_name('foo')
# => 'foo'
#
# sanitize_attribute_name('Foo 3 #-Billy')
# => 'foo3-billy'
def self.sanitize_attribute_name(name)
name.gsub(InvalidAttributeNameCharsRx, '').downcase
end
# Internal: Converts a Roman numeral to an integer value.
#
# value - The String Roman numeral to convert
#
# Returns the Integer for this Roman numeral
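#
# Example (illustrative addition)
#
#   roman_numeral_to_int 'xiv'
#   # => 14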
def self.roman_numeral_to_int(value)
value = value.downcase
digits = { 'i' => 1, 'v' => 5, 'x' => 10 }
result = 0
(0..value.length - 1).each {|i|
digit = digits[value[i..i]]
if i + 1 < value.length && digits[value[i+1..i+1]] > digit
result -= digit
else
result += digit
end
}
result
end
end
end
| 1 | 4,867 | I'd like to avoid a method call here atm since this is an extremely hot line in the code. | asciidoctor-asciidoctor | rb |
@@ -421,8 +421,7 @@ def parse_input(vds):
def parse_witness(vds):
n = vds.read_compact_size()
- for i in range(n):
- x = vds.read_bytes(vds.read_compact_size())
+ return list(vds.read_bytes(vds.read_compact_size()).encode('hex') for i in xrange(n))
def parse_output(vds, i):
d = {} | 1 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
from . import bitcoin
from .bitcoin import *
from .util import print_error, profiler, to_string
import time
import sys
import struct
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
import struct
from six import StringIO
import random
from .keystore import xpubkey_to_address, xpubkey_to_pubkey
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, _bytes): # Initialize with string of _bytes
if self.input is None:
self.input = bytearray(_bytes)
else:
self.input += bytearray(_bytes)
def read_string(self, encoding='ascii'):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
try:
length = self.read_compact_size()
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return self.read_bytes(length).decode(encoding)
def write_string(self, string, encoding='ascii'):
string = to_bytes(string, encoding)
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
def write_boolean(self, val): return self.write(chr(1) if val else chr(0))
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
size = self.input[self.read_cursor]
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(bytes([size]))
elif size < 2**16:
self.write(b'\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write(b'\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write(b'\xff')
self._write_num('<Q', size)
def _read_num(self, format):
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
class EnumException(Exception):
pass
class Enumeration:
def __init__(self, name, enumList):
self.__doc__ = name
lookup = { }
reverseLookup = { }
i = 0
uniqueNames = [ ]
uniqueValues = [ ]
for x in enumList:
if isinstance(x, tuple):
x, i = x
if not isinstance(x, str):
raise EnumException("enum name is not a string: " + x)
if not isinstance(i, int):
raise EnumException("enum value is not an integer: " + i)
if x in uniqueNames:
raise EnumException("enum name is not unique: " + x)
if i in uniqueValues:
raise EnumException("enum value is not unique for " + x)
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if attr not in self.lookup:
raise AttributeError
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
return bytes.encode('hex_codec')
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
t = bytes.encode('hex_codec')
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
"OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
"OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
"OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
"OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
"OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
"OP_CHECKMULTISIGVERIFY",
("OP_SINGLEBYTE_END", 0xF0),
("OP_DOUBLEBYTE_BEGIN", 0xF000),
"OP_PUBKEY", "OP_PUBKEYHASH",
("OP_INVALIDOPCODE", 0xFFFF),
])
def script_GetOp(_bytes):
i = 0
while i < len(_bytes):
vch = None
opcode = _bytes[i]
i += 1
if opcode >= opcodes.OP_SINGLEBYTE_END:
opcode <<= 8
opcode |= _bytes[i]
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
nSize = _bytes[i]
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
(nSize,) = struct.unpack_from('<H', _bytes, i)
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
(nSize,) = struct.unpack_from('<I', _bytes, i)
i += 4
vch = _bytes[i:i + nSize]
i += nSize
yield opcode, vch, i
def script_GetOpName(opcode):
return (opcodes.whatis(opcode)).replace("OP_", "")
def decode_script(bytes):
result = ''
for (opcode, vch, i) in script_GetOp(bytes):
if len(result) > 0: result += " "
if opcode <= opcodes.OP_PUSHDATA4:
result += "%d:"%(opcode,)
result += short_hex(vch)
else:
result += script_GetOpName(opcode)
return result
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
return False;
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
def parse_sig(x_sig):
return [None if x == NO_SIGNATURE else x for x in x_sig]
def safe_parse_pubkey(x):
try:
return xpubkey_to_pubkey(x)
except:
return x
def parse_scriptSig(d, _bytes):
try:
decoded = [ x for x in script_GetOp(_bytes) ]
except Exception as e:
# coinbase transactions raise an exception
print_error("cannot find address in input script", bh2u(_bytes))
return
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
item = decoded[0][1]
if item[0] == 0:
redeemScript = bh2u(item)
d['address'] = bitcoin.hash160_to_p2sh(bitcoin.hash_160(item))
d['type'] = 'p2wpkh-p2sh'
d['redeemScript'] = redeemScript
d['x_pubkeys'] = ["(witness)"]
d['pubkeys'] = ["(witness)"]
d['signatures'] = ['(witness)']
d['num_sig'] = 1
else:
# payto_pubkey
d['type'] = 'p2pk'
d['address'] = "(pubkey)"
d['signatures'] = [bh2u(item)]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = bh2u(decoded[0][1])
x_pubkey = bh2u(decoded[1][1])
try:
signatures = parse_sig([sig])
pubkey, address = xpubkey_to_address(x_pubkey)
except:
print_error("cannot find address in input script", bh2u(_bytes))
return
d['type'] = 'p2pkh'
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, m of n
match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
if not match_decoded(decoded, match):
print_error("cannot find address in input script", bh2u(_bytes))
return
x_sig = [bh2u(x[1]) for x in decoded[1:-1]]
dec2 = [ x for x in script_GetOp(decoded[-1][1]) ]
m = dec2[0][0] - opcodes.OP_1 + 1
n = dec2[-2][0] - opcodes.OP_1 + 1
op_m = opcodes.OP_1 + m - 1
op_n = opcodes.OP_1 + n - 1
match_multisig = [ op_m ] + [opcodes.OP_PUSHDATA4]*n + [ op_n, opcodes.OP_CHECKMULTISIG ]
if not match_decoded(dec2, match_multisig):
print_error("cannot find address in input script", bh2u(_bytes))
return
x_pubkeys = [bh2u(x[1]) for x in dec2[1:-2]]
pubkeys = [safe_parse_pubkey(x) for x in x_pubkeys]
redeemScript = multisig_script(pubkeys, m)
# write result in d
d['type'] = 'p2sh'
d['num_sig'] = m
d['signatures'] = parse_sig(x_sig)
d['x_pubkeys'] = x_pubkeys
d['pubkeys'] = pubkeys
d['redeemScript'] = redeemScript
d['address'] = hash160_to_p2sh(hash_160(bfh(redeemScript)))
def get_address_from_output_script(_bytes):
decoded = [x for x in script_GetOp(_bytes)]
# The Genesis Block, self-payments, and pay-by-IP-address payments look like:
# 65 BYTES:... CHECKSIG
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return TYPE_PUBKEY, bh2u(decoded[0][1])
# Pay-by-Bitcoin-address TxOuts look like:
# DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash160_to_p2pkh(decoded[2][1])
# p2sh
match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash160_to_p2sh(decoded[1][1])
return TYPE_SCRIPT, bh2u(_bytes)
def parse_input(vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
d['scriptSig'] = bh2u(scriptSig)
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
if prevout_hash == '00'*32:
d['type'] = 'coinbase'
else:
d['x_pubkeys'] = []
d['pubkeys'] = []
d['signatures'] = {}
d['address'] = None
d['type'] = 'unknown'
d['num_sig'] = 0
if scriptSig:
parse_scriptSig(d, scriptSig)
return d
def parse_witness(vds):
n = vds.read_compact_size()
for i in range(n):
x = vds.read_bytes(vds.read_compact_size())
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = bh2u(scriptPubKey)
d['prevout_n'] = i
return d
def deserialize(raw):
vds = BCDataStream()
vds.write(bfh(raw))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
is_segwit = (n_vin == 0)
if is_segwit:
marker = vds.read_bytes(1)
assert marker == 1
n_vin = vds.read_compact_size()
d['inputs'] = [parse_input(vds) for i in range(n_vin)]
n_vout = vds.read_compact_size()
d['outputs'] = [parse_output(vds,i) for i in range(n_vout)]
if is_segwit:
d['witness'] = [parse_witness(vds) for i in range(n_vin)]
d['lockTime'] = vds.read_uint32()
return d
# pay & redeem scripts
def push_script(x):
return op_push(len(x)//2) + x
def get_scriptPubKey(addr):
addrtype, hash_160 = bc_address_to_hash_160(addr)
if addrtype == bitcoin.ADDRTYPE_P2PKH:
script = '76a9' # op_dup, op_hash_160
script += push_script(bh2u(hash_160))
script += '88ac' # op_equalverify, op_checksig
elif addrtype == bitcoin.ADDRTYPE_P2SH:
script = 'a9' # op_hash_160
script += push_script(bh2u(hash_160))
script += '87' # op_equal
else:
raise BaseException('unknown address type')
return script
def segwit_script(pubkey):
pubkey = safe_parse_pubkey(pubkey)
pkh = bh2u(hash_160(bfh(pubkey)))
return '00' + push_script(pkh)
def multisig_script(public_keys, m):
n = len(public_keys)
assert n <= 15
assert m <= n
op_m = format(opcodes.OP_1 + m - 1, 'x')
op_n = format(opcodes.OP_1 + n - 1, 'x')
keylist = [op_push(len(k)//2) + k for k in public_keys]
return op_m + ''.join(keylist) + op_n + 'ae'
class Transaction:
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, raw):
if raw is None:
self.raw = None
elif isinstance(raw, str):
self.raw = raw.strip() if raw else None
elif isinstance(raw, dict):
self.raw = raw['hex']
else:
raise BaseException("cannot initialize transaction", raw)
self._inputs = None
self._outputs = None
self.locktime = 0
self.version = 1
def update(self, raw):
self.raw = raw
self._inputs = None
self.deserialize()
def inputs(self):
if self._inputs is None:
self.deserialize()
return self._inputs
def outputs(self):
if self._outputs is None:
self.deserialize()
return self._outputs
@classmethod
def get_sorted_pubkeys(self, txin):
# sort pubkeys and x_pubkeys, using the order of pubkeys
x_pubkeys = txin['x_pubkeys']
pubkeys = txin.get('pubkeys')
if pubkeys is None:
pubkeys = [xpubkey_to_pubkey(x) for x in x_pubkeys]
pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = pubkeys = list(pubkeys)
txin['x_pubkeys'] = x_pubkeys = list(x_pubkeys)
return pubkeys, x_pubkeys
def update_signatures(self, raw):
"""Add new signatures to a transaction"""
d = deserialize(raw)
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
sigs1 = txin.get('signatures')
sigs2 = d['inputs'][i].get('signatures')
for sig in sigs2:
if sig in sigs1:
continue
pre_hash = Hash(bfh(self.serialize_preimage(i)))
# der to string
order = ecdsa.ecdsa.generator_secp256k1.order()
r, s = ecdsa.util.sigdecode_der(bfh(sig[:-2]), order)
sig_string = ecdsa.util.sigencode_string(r, s, order)
compressed = True
for recid in range(4):
public_key = MyVerifyingKey.from_signature(sig_string, recid, pre_hash, curve = SECP256k1)
pubkey = bh2u(point_to_ser(public_key.pubkey.point, compressed))
if pubkey in pubkeys:
public_key.verify_digest(sig_string, pre_hash, sigdecode = ecdsa.util.sigdecode_string)
j = pubkeys.index(pubkey)
print_error("adding sig", i, j, pubkey, sig)
self._inputs[i]['signatures'][j] = sig
#self._inputs[i]['x_pubkeys'][j] = pubkey
break
# redo raw
self.raw = self.serialize()
def deserialize(self):
if self.raw is None:
return
#self.raw = self.serialize()
if self._inputs is not None:
return
d = deserialize(self.raw)
self._inputs = d['inputs']
self._outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
self.locktime = d['lockTime']
self.version = d['version']
return d
@classmethod
def from_io(klass, inputs, outputs, locktime=0):
self = klass(None)
self._inputs = inputs
self._outputs = outputs
self.locktime = locktime
return self
@classmethod
def pay_script(self, output_type, addr):
if output_type == TYPE_SCRIPT:
return bh2u(addr)
elif output_type == TYPE_ADDRESS:
return get_scriptPubKey(addr)
else:
raise TypeError('Unknown output type')
return script
@classmethod
def get_siglist(self, txin, estimate_size=False):
# if we have enough signatures, we use the actual pubkeys
# otherwise, use extended pubkeys (with bip32 derivation)
num_sig = txin.get('num_sig', 1)
if estimate_size:
# we assume that signature will be 0x48 bytes long
pk_list = [ "00" * 0x21 ] * num_sig
sig_list = [ "00" * 0x48 ] * num_sig
else:
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
is_complete = len(signatures) == num_sig
if is_complete:
pk_list = pubkeys
sig_list = signatures
else:
pk_list = x_pubkeys
sig_list = [sig if sig else NO_SIGNATURE for sig in x_signatures]
return pk_list, sig_list
@classmethod
def serialize_witness(self, txin):
pubkeys, sig_list = self.get_siglist(txin)
n = len(pubkeys) + len(sig_list)
return var_int(n) + ''.join(push_script(x) for x in sig_list) + ''.join(push_script(x) for x in pubkeys)
@classmethod
def is_segwit_input(self, txin):
return txin['type'] in ['p2wpkh-p2sh']
@classmethod
def input_script(self, txin, estimate_size=False):
_type = txin['type']
if _type == 'coinbase':
return txin['scriptSig']
pubkeys, sig_list = self.get_siglist(txin, estimate_size)
script = ''.join(push_script(x) for x in sig_list)
if _type == 'p2pk':
pass
elif _type == 'p2sh':
# put op_0 before script
script = '00' + script
redeem_script = multisig_script(pubkeys, txin['num_sig'])
script += push_script(redeem_script)
elif _type == 'p2pkh':
script += push_script(pubkeys[0])
elif _type == 'p2wpkh-p2sh':
redeem_script = txin.get('redeemScript') or segwit_script(pubkeys[0])
return push_script(redeem_script)
elif _type == 'address':
script += push_script(pubkeys[0])
elif _type == 'unknown':
return txin['scriptSig']
return script
@classmethod
def get_preimage_script(self, txin):
# only for non-segwit
if txin['type'] == 'p2pkh':
return get_scriptPubKey(txin['address'])
elif txin['type'] == 'p2sh':
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
return multisig_script(pubkeys, txin['num_sig'])
elif txin['type'] == 'p2wpkh-p2sh':
pubkey = txin['pubkeys'][0]
pkh = bh2u(bitcoin.hash_160(bfh(pubkey)))
return '76a9' + push_script(pkh) + '88ac'
else:
raise TypeError('Unknown txin type', _type)
@classmethod
def serialize_outpoint(self, txin):
return bh2u(bfh(txin['prevout_hash'])[::-1]) + int_to_hex(txin['prevout_n'], 4)
@classmethod
def serialize_input(self, txin, script):
# Prev hash and index
s = self.serialize_outpoint(txin)
# Script length, script, sequence
s += var_int(len(script)//2)
s += script
s += int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
return s
def set_rbf(self, rbf):
nSequence = 0xffffffff - (2 if rbf else 1)
for txin in self.inputs():
txin['sequence'] = nSequence
def BIP_LI01_sort(self):
# See https://github.com/kristovatlas/rfc/blob/master/bips/bip-li01.mediawiki
self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
self._outputs.sort(key = lambda o: (o[2], self.pay_script(o[0], o[1])))
def serialize_output(self, output):
output_type, addr, amount = output
s = int_to_hex(amount, 8)
script = self.pay_script(output_type, addr)
s += var_int(len(script)//2)
s += script
return s
def serialize_preimage(self, i):
nVersion = int_to_hex(self.version, 4)
nHashType = int_to_hex(1, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txin = inputs[i]
# TODO: py3 hex
if self.is_segwit_input(txin):
hashPrevouts = bh2u(Hash(bfh(''.join(self.serialize_outpoint(txin) for txin in inputs))))
hashSequence = bh2u(Hash(bfh(''.join(int_to_hex(txin.get('sequence', 0xffffffff - 1), 4) for txin in inputs))))
hashOutputs = bh2u(Hash(bfh(''.join(self.serialize_output(o) for o in outputs))))
outpoint = self.serialize_outpoint(txin)
preimage_script = self.get_preimage_script(txin)
scriptCode = var_int(len(preimage_script)/2) + preimage_script
amount = int_to_hex(txin['value'], 8)
nSequence = int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
preimage = nVersion + hashPrevouts + hashSequence + outpoint + scriptCode + amount + nSequence + hashOutputs + nLocktime + nHashType
else:
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.get_preimage_script(txin) if i==k else '') for k, txin in enumerate(inputs))
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
preimage = nVersion + txins + txouts + nLocktime + nHashType
return preimage
def is_segwit(self):
return any(self.is_segwit_input(x) for x in self.inputs())
def serialize(self, estimate_size=False, witness=True):
nVersion = int_to_hex(self.version, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.input_script(txin, estimate_size)) for txin in inputs)
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
if witness and self.is_segwit():
marker = '00'
flag = '01'
witness = ''.join(self.serialize_witness(x) for x in inputs)
return nVersion + marker + flag + txins + txouts + witness + nLocktime
else:
return nVersion + txins + txouts + nLocktime
def hash(self):
print("warning: deprecated tx.hash()")
return self.txid()
def txid(self):
all_segwit = all(self.is_segwit_input(x) for x in self.inputs())
if not all_segwit and not self.is_complete():
return None
ser = self.serialize(witness=False)
return bh2u(Hash(bfh(ser))[::-1])
def wtxid(self):
ser = self.serialize(witness=True)
return bh2u(Hash(bfh(ser))[::-1])
def add_inputs(self, inputs):
self._inputs.extend(inputs)
self.raw = None
def add_outputs(self, outputs):
self._outputs.extend(outputs)
self.raw = None
def input_value(self):
return sum(x['value'] for x in self.inputs())
def output_value(self):
return sum(val for tp, addr, val in self.outputs())
def get_fee(self):
return self.input_value() - self.output_value()
def is_final(self):
return not any([x.get('sequence', 0xffffffff - 1) < 0xffffffff - 1 for x in self.inputs()])
@profiler
def estimated_size(self):
'''Return an estimated tx size in bytes.'''
return len(self.serialize(True)) // 2 if not self.is_complete() or self.raw is None else len(self.raw) / 2 # ASCII hex string
@classmethod
def estimated_input_size(self, txin):
        '''Return an estimate of the serialized input size in bytes.'''
script = self.input_script(txin, True)
return len(self.serialize_input(txin, script)) // 2
def signature_count(self):
r = 0
s = 0
for txin in self.inputs():
if txin['type'] == 'coinbase':
continue
signatures = list(filter(None, txin.get('signatures',[])))
s += len(signatures)
r += txin.get('num_sig',-1)
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
def sign(self, keypairs):
for i, txin in enumerate(self.inputs()):
num = txin['num_sig']
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
for j, x_pubkey in enumerate(x_pubkeys):
signatures = list(filter(None, txin['signatures']))
if len(signatures) == num:
# txin is complete
break
if x_pubkey in keypairs.keys():
print_error("adding signature for", x_pubkey)
sec = keypairs.get(x_pubkey)
pubkey = public_key_from_private_key(sec)
# add signature
pre_hash = Hash(bfh(self.serialize_preimage(i)))
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = bitcoin.MySigningKey.from_secret_exponent(secexp, curve = SECP256k1)
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic(pre_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der)
assert public_key.verify_digest(sig, pre_hash, sigdecode = ecdsa.util.sigdecode_der)
txin['signatures'][j] = bh2u(sig) + '01'
#txin['x_pubkeys'][j] = pubkey
txin['pubkeys'][j] = pubkey # needed for fd keys
self._inputs[i] = txin
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def get_outputs(self):
"""convert pubkeys to addresses"""
o = []
for type, x, v in self.outputs():
if type == TYPE_ADDRESS:
addr = x
elif type == TYPE_PUBKEY:
addr = bitcoin.public_key_to_p2pkh(bfh(x))
else:
addr = 'SCRIPT ' + bh2u(x)
o.append((addr,v)) # consider using yield (addr, v)
return o
def get_output_addresses(self):
return [addr for addr, val in self.get_outputs()]
def has_address(self, addr):
return (addr in self.get_output_addresses()) or (addr in (tx.get("address") for tx in self.inputs()))
def as_dict(self):
if self.raw is None:
self.raw = self.serialize()
self.deserialize()
out = {
'hex': self.raw,
'complete': self.is_complete(),
'final': self.is_final(),
}
return out
def requires_fee(self, wallet):
# see https://en.bitcoin.it/wiki/Transaction_fees
#
# size must be smaller than 1 kbyte for free tx
size = len(self.serialize(-1))/2
if size >= 10000:
return True
# all outputs must be 0.01 BTC or larger for free tx
for addr, value in self.get_outputs():
if value < 1000000:
return True
# priority must be large enough for free tx
threshold = 57600000
weight = 0
for txin in self.inputs():
height, conf, timestamp = wallet.get_tx_height(txin["prevout_hash"])
weight += txin["value"] * conf
priority = weight / size
print_error(priority, threshold)
return priority < threshold
def tx_from_str(txt):
"json or raw hexadecimal"
import json
txt = txt.strip()
if not txt:
raise ValueError("empty string")
try:
bfh(txt)
is_hex = True
except:
is_hex = False
if is_hex:
return txt
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
return tx_dict["hex"]
| 1 | 11,769 | Is this a bugfix? | spesmilo-electrum | py |
@@ -35,7 +35,6 @@ namespace Microsoft.DotNet.Build.Tasks
GlobalPackagesFolder = PackagesDir,
Sources = new List<string>(),
- FallbackSources = new List<string>(),
CacheContext = new SourceCacheContext(),
RequestProviders = new List<IRestoreRequestProvider>
{ | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using Microsoft.Build.Framework;
using Microsoft.Build.Utilities;
using NuGet.Commands;
using NuGet.Protocol.Core.Types;
using System.Collections.Generic;
using System.Linq;
namespace Microsoft.DotNet.Build.Tasks
{
/// <summary>
/// Use NuGet.Commands manually to perform a NuGet restore. This allows us to restore on the
/// desktop framework on Windows, where encryption is available.
///
/// An alternative is to acquire nuget.exe, but it's heavier than NuGet.Commands and is harder
/// to use in the build.
/// </summary>
public partial class EncryptedConfigNuGetRestore : BuildTask
{
public ITaskItem[] Inputs { get; set; }
public string ConfigFile { get; set; }
public string PackagesDir { get; set; }
public override bool Execute()
{
var args = new RestoreArgs
{
Inputs = Inputs.Select(item => item.ItemSpec).ToList(),
ConfigFile = ConfigFile,
GlobalPackagesFolder = PackagesDir,
Sources = new List<string>(),
FallbackSources = new List<string>(),
CacheContext = new SourceCacheContext(),
RequestProviders = new List<IRestoreRequestProvider>
{
new ProjectJsonRestoreRequestProvider(new RestoreCommandProvidersCache())
},
Log = new NugetMsBuildLogger(new TaskLoggingHelper(this))
};
RestoreRunner.RunAsync(args).Wait();
return !Log.HasLoggedErrors;
}
}
}
| 1 | 13,781 | Is this a relevant change? | dotnet-buildtools | .cs |
@@ -62,9 +62,9 @@ public final class JUnit5RuleUsage extends BugChecker implements BugChecker.Clas
}
static Matcher<ClassTree> hasVariable(Matcher<VariableTree> matcher) {
- return (classTree, state) -> classTree.getMembers().stream()
- .filter(tree -> tree instanceof VariableTree)
- .anyMatch(tree -> matcher.matches((VariableTree) tree, state));
+ return (classTree, state) ->
+ classTree.getMembers().stream().filter(tree -> tree instanceof VariableTree).anyMatch(tree ->
+ matcher.matches((VariableTree) tree, state));
}
static Matcher<VariableTree> hasAnnotationOnVariable(String annotation) { | 1 | /*
* (c) Copyright 2019 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.baseline.errorprone;
import com.google.auto.service.AutoService;
import com.google.errorprone.BugPattern;
import com.google.errorprone.VisitorState;
import com.google.errorprone.bugpatterns.BugChecker;
import com.google.errorprone.matchers.Description;
import com.google.errorprone.matchers.Matcher;
import com.google.errorprone.matchers.Matchers;
import com.google.errorprone.util.ASTHelpers;
import com.sun.source.tree.ClassTree;
import com.sun.source.tree.VariableTree;
import com.sun.tools.javac.code.Symbol;
@AutoService(BugChecker.class)
@BugPattern(
name = "JUnit5RuleUsage",
link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks",
linkType = BugPattern.LinkType.CUSTOM,
severity = BugPattern.SeverityLevel.ERROR,
summary = "Using Rule/ClassRules in Junit5 tests results in the rules silently not executing")
public final class JUnit5RuleUsage extends BugChecker implements BugChecker.ClassTreeMatcher {
private static final String JUNIT4_RULE = "org.junit.Rule";
private static final String JUNIT4_CLASS_RULE = "org.junit.ClassRule";
private static final String JUNIT5_TEST_ANNOTATION = "org.junit.jupiter.api.Test";
private static final String RULE_MIGRATION_SUPPORT =
"org.junit.jupiter.migrationsupport.rules.EnableRuleMigrationSupport";
private static final Matcher<ClassTree> hasMigrationSupport = Matchers.hasAnnotation(RULE_MIGRATION_SUPPORT);
static final Matcher<ClassTree> hasJunit5TestCases =
Matchers.hasMethod(Matchers.hasAnnotationOnAnyOverriddenMethod(JUNIT5_TEST_ANNOTATION));
private static final Matcher<ClassTree> hasJunit4Rules = hasVariable(
Matchers.anyOf(hasAnnotationOnVariable(JUNIT4_CLASS_RULE), hasAnnotationOnVariable(JUNIT4_RULE)));
@Override
public Description matchClass(ClassTree tree, VisitorState state) {
if (!hasMigrationSupport.matches(tree, state)
&& hasJunit5TestCases.matches(tree, state)
&& hasJunit4Rules.matches(tree, state)) {
return buildDescription(tree)
.setMessage("Do not use Rule/ClassRule with junit-jupiter")
.build();
}
return Description.NO_MATCH;
}
static Matcher<ClassTree> hasVariable(Matcher<VariableTree> matcher) {
return (classTree, state) -> classTree.getMembers().stream()
.filter(tree -> tree instanceof VariableTree)
.anyMatch(tree -> matcher.matches((VariableTree) tree, state));
}
static Matcher<VariableTree> hasAnnotationOnVariable(String annotation) {
return (variableTree, state) -> {
Symbol.VarSymbol sym = ASTHelpers.getSymbol(variableTree);
return sym != null && ASTHelpers.hasAnnotation(sym, annotation, state);
};
}
}
| 1 | 7,910 | @dansanduleac what do you think of this one? The dots were aligned and no longer are. | palantir-gradle-baseline | java
@@ -738,10 +738,6 @@ function deleteAllAppData(appId, fromAppDelete, params, app) {
}, deleteEvents);
}
});
-
- if (fromAppDelete) {
- common.db.collection('graph_notes').remove({'_id': common.db.ObjectID(appId)}, function() {});
- }
}
/** | 1 | /**
* This module is meant for managing apps
* @module api/parts/mgmt/apps
*/
/** @lends module:api/parts/mgmt/apps */
var appsApi = {},
common = require('./../../utils/common.js'),
log = common.log('mgmt:apps'),
moment = require('moment-timezone'),
crypto = require('crypto'),
plugins = require('../../../plugins/pluginManager.js'),
jimp = require('jimp'),
fs = require('fs'),
countlyFs = require('./../../utils/countlyFs.js');
/**
* Get all apps and outputs to browser, requires global admin permission
* @param {params} params - params object
* @returns {boolean} true if got data from db, false if did not
**/
appsApi.getAllApps = function(params) {
if (!(params.member.global_admin)) {
common.returnMessage(params, 401, 'User is not a global administrator');
return false;
}
common.db.collection('apps').find({}).toArray(function(err, apps) {
if (!apps || err) {
common.returnOutput(params, {
admin_of: {},
user_of: {}
});
return false;
}
var appsObj = packApps(apps);
common.returnOutput(params, {
admin_of: appsObj,
user_of: appsObj
});
return true;
});
return true;
};
/**
* Get only apps that current user has access to and outputs to browser
* @param {params} params - params object
* @returns {boolean} true if got data from db, false if did not
**/
appsApi.getCurrentUserApps = function(params) {
if (params.member.global_admin) {
appsApi.getAllApps(params);
return true;
}
var adminOfAppIds = [],
userOfAppIds = [];
if (params.member.admin_of) {
for (let i = 0; i < params.member.admin_of.length ;i++) {
if (params.member.admin_of[i] === "") {
continue;
}
adminOfAppIds[adminOfAppIds.length] = common.db.ObjectID(params.member.admin_of[i]);
}
}
if (params.member.user_of) {
for (let i = 0; i < params.member.user_of.length ;i++) {
userOfAppIds[userOfAppIds.length] = common.db.ObjectID(params.member.user_of[i]);
}
}
common.db.collection('apps').find({ _id: { '$in': adminOfAppIds } }).toArray(function(err, admin_of) {
common.db.collection('apps').find({ _id: { '$in': userOfAppIds } }).toArray(function(err2, user_of) {
common.returnOutput(params, {
admin_of: packApps(admin_of),
user_of: packApps(user_of)
});
});
});
return true;
};
/**
* Gets app details for specific app and outputs to browser
* @param {params} params - params object
* @returns {boolean} true if got data from db, false if did not
**/
appsApi.getAppsDetails = function(params) {
if (params.app.owner) {
params.app.owner_id = params.app.owner;
params.app.owner = common.db.ObjectID(params.app.owner + "");
}
common.db.collection('app_users' + params.qstring.app_id).find({}, {
ls: 1,
_id: 0
}).sort({ls: -1}).limit(1).toArray(function(err, last) {
common.db.collection('members').findOne({ _id: params.app.owner }, {
full_name: 1,
username: 1
}, function(err2, owner) {
if (owner) {
if (owner.full_name && owner.full_name !== "") {
params.app.owner = owner.full_name;
}
else if (owner.username && owner.username !== "") {
params.app.owner = owner.username;
}
else {
params.app.owner = "";
}
}
else {
params.app.owner = "";
}
common.db.collection('members').find({ global_admin: true }, {
full_name: 1,
username: 1
}).toArray(function(err3, global_admins) {
common.db.collection('members').find({ admin_of: params.qstring.app_id }, {
full_name: 1,
username: 1
}).toArray(function(err4, admins) {
common.db.collection('members').find({ user_of: params.qstring.app_id }, {
full_name: 1,
username: 1
}).toArray(function(err5, users) {
common.returnOutput(params, {
app: {
owner: params.app.owner || "",
owner_id: params.app.owner_id || "",
created_at: params.app.created_at || 0,
edited_at: params.app.edited_at || 0,
last_data: (typeof last !== "undefined" && last.length) ? last[0].ls : 0,
},
global_admin: global_admins || [],
admin: admins || [],
user: users || []
});
});
});
});
});
});
return true;
};
/**
* upload app icon function
* @param {params} params - params object with args to create app
* @return {object} return promise object;
**/
const iconUpload = function(params) {
const appId = params.app_id || params.qstring.args.app_id;
if (params.files && params.files.app_image) {
const tmp_path = params.files.app_image.path,
target_path = __dirname + '/../../../frontend/express/public/appimages/' + appId + ".png",
type = params.files.app_image.type;
if (type !== "image/png" && type !== "image/gif" && type !== "image/jpeg") {
fs.unlink(tmp_path, function() {});
log.d("Invalid file type");
return Promise.reject();
}
try {
return jimp.read(tmp_path, function(err, icon) {
if (err) {
log.e(err, err.stack);
}
icon.cover(72, 72).getBuffer(jimp.MIME_PNG, function(err2, buffer) {
countlyFs.saveData("appimages", target_path, buffer, {id: appId + ".png", writeMode: "overwrite"}, function(err3) {
if (err3) {
log.e(err3, err3.stack);
}
fs.unlink(tmp_path, function() {});
});
});
});
}
catch (e) {
log.e(e.stack);
}
}
};
/**
* Creates new app, and outputs result to browser
* @param {params} params - params object with args to create app
* @returns {boolean} true if operation successful
**/
appsApi.createApp = function(params) {
var argProps = {
'name': {
'required': true,
'type': 'String'
},
'country': {
'required': false,
'type': 'String'
},
'type': {
'required': false,
'type': 'String'
},
'category': {
'required': false,
'type': 'String'
},
'timezone': {
'required': false,
'type': 'String'
},
'checksum_salt': {
'required': false,
'type': 'String'
}
},
newApp = {};
var createAppValidation = common.validateArgs(params.qstring.args, argProps, true);
if (!(newApp = createAppValidation.obj)) {
common.returnMessage(params, 400, 'Error: ' + createAppValidation.errors);
return false;
}
for (let i in params.qstring.args) {
if (typeof newApp[i] === "undefined") {
newApp[i] = params.qstring.args[i];
}
}
processAppProps(newApp);
newApp.created_at = Math.floor(((new Date()).getTime()) / 1000);
newApp.edited_at = newApp.created_at;
newApp.owner = params.member._id + "";
newApp.seq = 0;
common.db.collection('apps').insert(newApp, function(err, app) {
if (!err && app && app.ops && app.ops[0] && app.ops[0]._id) {
var appKey = common.sha1Hash(app.ops[0]._id, true);
common.db.collection('apps').update({'_id': app.ops[0]._id}, {$set: {key: appKey}}, function() {});
newApp._id = app.ops[0]._id;
newApp.key = appKey;
common.db.collection('app_users' + app.ops[0]._id).ensureIndex({ls: -1}, { background: true }, function() {});
common.db.collection('app_users' + app.ops[0]._id).ensureIndex({"uid": 1}, { background: true }, function() {});
common.db.collection('app_users' + app.ops[0]._id).ensureIndex({"sc": 1}, { background: true }, function() {});
common.db.collection('app_users' + app.ops[0]._id).ensureIndex({
"lac": 1,
"ls": 1
}, { background: true }, function() {});
common.db.collection('app_users' + app.ops[0]._id).ensureIndex({"tsd": 1}, { background: true }, function() {});
common.db.collection('app_users' + app.ops[0]._id).ensureIndex({"did": 1}, { background: true }, function() {});
common.db.collection('app_user_merges' + app.ops[0]._id).ensureIndex({cd: 1}, {
expireAfterSeconds: 60 * 60 * 3,
background: true
}, function() {});
common.db.collection('metric_changes' + app.ops[0]._id).ensureIndex({ts: -1}, { background: true }, function() {});
common.db.collection('metric_changes' + app.ops[0]._id).ensureIndex({ts: 1, "cc.o": 1}, { background: true }, function() {});
common.db.collection('metric_changes' + app.ops[0]._id).ensureIndex({uid: 1}, { background: true }, function() {});
plugins.dispatch("/i/apps/create", {
params: params,
appId: app.ops[0]._id,
data: newApp
});
iconUpload(Object.assign({}, params, {app_id: app.ops[0]._id}));
common.returnOutput(params, newApp);
}
else {
common.returnMessage(params, 500, "Error creating App: " + err);
}
});
};
/**
* Updates existing app, and outputs result to browser
* @param {params} params - params object with args to update app with
* @returns {boolean} true if operation successful
**/
appsApi.updateApp = function(params) {
var argProps = {
'app_id': {
'required': true,
'type': 'String',
'min-length': 24,
'max-length': 24,
'exclude-from-ret-obj': true
},
'name': {
'required': false,
'type': 'String'
},
'type': {
'required': false,
'type': 'String'
},
'category': {
'required': false,
'type': 'String'
},
'key': {
'required': false,
'type': 'String'
},
'timezone': {
'required': false,
'type': 'String'
},
'country': {
'required': false,
'type': 'String'
},
'checksum_salt': {
'required': false,
'type': 'String'
},
'locked': {
'required': false,
'type': 'Boolean'
}
},
updatedApp = {};
var updateAppValidation = common.validateArgs(params.qstring.args, argProps, true);
if (!(updatedApp = updateAppValidation.obj)) {
common.returnMessage(params, 400, 'Error: ' + updateAppValidation.errors);
return false;
}
var invalidProps = validateAppUpdateProps(updatedApp);
if (invalidProps.length > 0) {
common.returnMessage(params, 400, 'Invalid props: ' + invalidProps);
return false;
}
for (var i in params.qstring.args) {
if (typeof updatedApp[i] === "undefined" && i !== "app_id") {
updatedApp[i] = params.qstring.args[i];
}
}
if (Object.keys(updatedApp).length === 0) {
common.returnMessage(params, 200, 'Nothing changed');
return true;
}
updatedApp.edited_at = Math.floor(((new Date()).getTime()) / 1000);
common.db.collection('apps').findOne(common.db.ObjectID(params.qstring.args.app_id), function(err, appBefore) {
if (err || !appBefore) {
common.returnMessage(params, 404, 'App not found');
}
else {
if (params.member && params.member.global_admin) {
common.db.collection('apps').update({'_id': common.db.ObjectID(params.qstring.args.app_id)}, {$set: updatedApp}, function() {
plugins.dispatch("/i/apps/update", {
params: params,
appId: params.qstring.args.app_id,
data: {
app: appBefore,
update: updatedApp
}
});
iconUpload(params);
common.returnOutput(params, updatedApp);
});
}
else {
common.db.collection('members').findOne({'_id': params.member._id}, {admin_of: 1}, function(err2, member) {
if (member.admin_of && member.admin_of.indexOf(params.qstring.args.app_id) !== -1) {
common.db.collection('apps').update({'_id': common.db.ObjectID(params.qstring.args.app_id)}, {$set: updatedApp}, function() {
plugins.dispatch("/i/apps/update", {
params: params,
appId: params.qstring.args.app_id,
data: {
app: appBefore,
update: updatedApp
}
});
iconUpload(params);
common.returnOutput(params, updatedApp);
});
}
else {
common.returnMessage(params, 401, 'User does not have admin rights for this app');
}
});
}
}
});
return true;
};
/**
* Updates existing app's configurations and outputs result to browser
* @param {params} params - params object with args to update app with
* @returns {boolean} true if operation successful
**/
appsApi.updateAppPlugins = function(params) {
var props = {
'app_id': {
'required': true,
'type': 'String',
'min-length': 24,
'max-length': 24,
'exclude-from-ret-obj': true
},
};
log.d('Updating plugin config for app %s: %j', params.qstring.app_id, params.qstring.args);
var updateAppPluginsValidation = common.validateArgs(params.qstring, props, true);
if (!updateAppPluginsValidation.result) {
common.returnMessage(params, 400, 'Error: ' + updateAppPluginsValidation.errors);
return false;
}
common.db.collection('apps').findOne(common.db.ObjectID(params.qstring.app_id), (err1, app) => {
if (err1 || !app) {
log.w('App %s not found %j', params.qstring.app_id, err1 || '');
return common.returnMessage(params, 404, 'App not found');
}
let promises = [];
Object.keys(params.qstring.args).forEach(k => {
if (plugins.getPlugins().indexOf(k) !== -1) {
promises.push(new Promise((resolve, reject) => {
plugins.dispatch('/i/apps/update/plugins/' + k, {
params: params,
app: app,
config: params.qstring.args[k]
}, (err2, changes) => {
if (err2) {
reject(err2);
}
else if (changes) {
resolve({[k]: changes});
}
else {
log.d('Updating %s plugin config for app %s in db: %j', k, params.qstring.app_id, params.qstring.args[k]);
common.dbPromise('apps', 'updateOne', {_id: app._id}, {$set: {[`plugins.${k}`]: params.qstring.args[k]}}).then(() => {
plugins.dispatch('/systemlogs', {
params: params,
action: `plugin_${k}_config_updated`,
data: {
before: common.dot(app, `plugins.${k}` || {}),
after: params.qstring.args[k]
}
});
resolve({[k]: params.qstring.args[k]});
}, reject);
}
});
}));
}
//for plugins sections we might not have plugin
else {
promises.push(new Promise((resolve, reject) => {
log.d('Updating %s plugin config for app %s in db: %j', k, params.qstring.app_id, params.qstring.args[k]);
common.dbPromise('apps', 'updateOne', {_id: app._id}, {$set: {[`plugins.${k}`]: params.qstring.args[k]}}).then(() => {
plugins.dispatch('/systemlogs', {
params: params,
action: `plugin_${k}_config_updated`,
data: {
before: common.dot(app, `plugins.${k}` || {}),
after: params.qstring.args[k]
}
});
resolve({[k]: params.qstring.args[k]});
}, reject);
}));
}
});
if (promises.length) {
Promise.all(promises).then(results => {
log.d('Plugin config updates for app %s returned %j', params.qstring.app_id, results);
let ret = {}, errors = [];
results.forEach(r => {
let plugin = Object.keys(r)[0],
config = Array.isArray(r[plugin]) ? r[plugin][0] : r[plugin];
log.d('Result for %s is %j', plugin, config);
if (typeof config === 'object') {
Object.assign(ret, {[plugin]: config});
}
else {
errors.push(config);
}
});
ret = {
_id: app._id,
plugins: ret
};
if (errors.length) {
ret.result = errors.join('\n');
}
common.returnOutput(params, ret);
}, err => {
log.e('Error during plugin config updates for app %s: %j %s, %d', params.qstring.app_id, err, typeof err, err.length);
common.returnMessage(params, 400, 'Couldn\'t update plugin: ' + (typeof err === 'string' ? err : err.message || err.code || JSON.stringify(err)));
});
}
else {
common.returnMessage(params, 200, 'Nothing changed');
}
});
return true;
};
/**
* Deletes existing app's and outputs result to browser
* @param {params} params - params object with app_id to delete
* @returns {boolean} true if operation successful
**/
appsApi.deleteApp = function(params) {
var argProps = {
'app_id': {
'required': true,
'type': 'String',
'min-length': 24,
'max-length': 24
}
},
appId = '';
var deleteAppValidation = common.validateArgs(params.qstring.args, argProps, true);
if (!(deleteAppValidation.obj && (appId = deleteAppValidation.obj.app_id))) {
common.returnMessage(params, 400, 'Error: ' + deleteAppValidation.errors);
return false;
}
common.db.collection('apps').findOne({'_id': common.db.ObjectID(appId)}, function(err, app) {
if (!err && app) {
if (app.locked) {
common.returnMessage(params, 403, 'Application is locked');
}
else if (params.member && params.member.global_admin) {
removeApp(app);
}
else {
common.db.collection('members').findOne({'_id': params.member._id}, {admin_of: 1}, function(err2, member) {
if (member.admin_of && member.admin_of.indexOf(params.qstring.args.app_id) !== -1) {
removeApp(app);
}
else {
common.returnMessage(params, 401, 'User does not have admin rights for this app');
}
});
}
}
else {
common.returnMessage(params, 500, 'Error deleting app');
}
});
/**
* Removes the app after validation of params and calls deleteAppData
* @param {object} app - app document
**/
function removeApp(app) {
common.db.collection('apps').remove({'_id': common.db.ObjectID(appId)}, {safe: true}, function(err) {
if (err) {
common.returnMessage(params, 500, 'Error deleting app');
return false;
}
var iconPath = __dirname + '/../../../frontend/express/public/appimages/' + appId + '.png';
countlyFs.deleteFile("appimages", iconPath, {id: appId + ".png"}, function() {});
common.db.collection('members').update({}, {
$pull: {
'apps': appId,
'admin_of': appId,
'user_of': appId
}
}, {multi: true}, function() {});
deleteAppData(appId, true, params, app);
common.returnMessage(params, 200, 'Success');
return true;
});
}
return true;
};
/**
* Resets app to clean state
* @param {params} params - params object with app_id to reset
* @returns {boolean} true if operation successful
**/
appsApi.resetApp = function(params) {
var argProps = {
'app_id': {
'required': true,
'type': 'String',
'min-length': 24,
'max-length': 24
}
},
appId = '';
var resetAppValidation = common.validateArgs(params.qstring.args, argProps, true);
if (!(resetAppValidation.obj && (appId = resetAppValidation.obj.app_id))) {
common.returnMessage(params, 400, 'Error: ' + resetAppValidation.errors);
return false;
}
common.db.collection('apps').findOne({'_id': common.db.ObjectID(appId)}, function(err, app) {
if (!err && app) {
if (app.locked) {
common.returnMessage(params, 403, 'Application is locked');
}
else if (params.member.global_admin) {
deleteAppData(appId, false, params, app);
common.returnMessage(params, 200, 'Success');
}
else {
common.db.collection('members').findOne({
admin_of: appId,
api_key: params.member.api_key
}, function(err2, member) {
if (!err2 && member) {
deleteAppData(appId, false, params, app);
common.returnMessage(params, 200, 'Success');
}
else {
common.returnMessage(params, 401, 'User does not have admin rights for this app');
}
});
}
}
else {
common.returnMessage(params, 404, 'App not found');
}
});
return true;
};
/**
* Deletes app's data, either all or for specific period, as well as can reset data to clean state
* @param {string} appId - id of the app for which to delete data
* @param {boolean} fromAppDelete - true if all document will also be deleted
* @param {params} params - params object
* @param {object} app - app document
**/
function deleteAppData(appId, fromAppDelete, params, app) {
if (fromAppDelete || !params.qstring.args.period || params.qstring.args.period === "all" || params.qstring.args.period === "reset") {
deleteAllAppData(appId, fromAppDelete, params, app);
}
else {
deletePeriodAppData(appId, fromAppDelete, params, app);
}
}
/**
* Deletes all app's data or resets data to clean state
* @param {string} appId - id of the app for which to delete data
* @param {boolean} fromAppDelete - true if all document will also be deleted
* @param {params} params - params object
* @param {object} app - app document
**/
function deleteAllAppData(appId, fromAppDelete, params, app) {
if (!fromAppDelete) {
common.db.collection('apps').update({'_id': common.db.ObjectID(appId)}, {$set: {seq: 0}}, function() {});
}
common.db.collection('users').remove({'_id': {$regex: appId + ".*"}}, function() {});
common.db.collection('carriers').remove({'_id': {$regex: appId + ".*"}}, function() {});
common.db.collection('devices').remove({'_id': {$regex: appId + ".*"}}, function() {});
common.db.collection('device_details').remove({'_id': {$regex: appId + ".*"}}, function() {});
common.db.collection('cities').remove({'_id': {$regex: appId + ".*"}}, function() {});
/**
* Deletes all app's events
**/
function deleteEvents() {
common.db.collection('events').findOne({'_id': common.db.ObjectID(appId)}, function(err, events) {
if (!err && events && events.list) {
common.arrayAddUniq(events.list, plugins.internalEvents);
for (var i = 0; i < events.list.length; i++) {
var collectionNameWoPrefix = crypto.createHash('sha1').update(events.list[i] + appId).digest('hex');
common.db.collection("events" + collectionNameWoPrefix).drop(function() {});
}
if (fromAppDelete || params.qstring.args.period === "reset") {
common.db.collection('events').remove({'_id': common.db.ObjectID(appId)}, function() {});
}
}
});
}
common.db.collection('app_users' + appId).drop(function() {
if (!fromAppDelete) {
common.db.collection('metric_changes' + appId).drop(function() {
common.db.collection('metric_changes' + appId).ensureIndex({ts: -1}, { background: true }, function() {});
common.db.collection('metric_changes' + appId).ensureIndex({ts: 1, "cc.o": 1}, { background: true }, function() {});
common.db.collection('metric_changes' + appId).ensureIndex({uid: 1}, { background: true }, function() {});
});
common.db.collection('app_user_merges' + appId).drop(function() {
common.db.collection('app_user_merges' + appId).ensureIndex({cd: 1}, {
expireAfterSeconds: 60 * 60 * 3,
background: true
}, function() {});
});
if (params.qstring.args.period === "reset") {
plugins.dispatch("/i/apps/reset", {
params: params,
appId: appId,
data: app
}, deleteEvents);
}
else {
plugins.dispatch("/i/apps/clear_all", {
params: params,
appId: appId,
data: app
}, deleteEvents);
}
}
else {
common.db.collection('metric_changes' + appId).drop(function() {});
common.db.collection('app_user_merges' + appId).drop(function() {});
plugins.dispatch("/i/apps/delete", {
params: params,
appId: appId,
data: app
}, deleteEvents);
}
});
if (fromAppDelete) {
common.db.collection('graph_notes').remove({'_id': common.db.ObjectID(appId)}, function() {});
}
}
/**
* Deletes app's data for specific period
* @param {string} appId - id of the app for which to delete data
* @param {boolean} fromAppDelete - true if all document will also be deleted
* @param {params} params - params object
* @param {object} app - app document
**/
function deletePeriodAppData(appId, fromAppDelete, params, app) {
var periods = {
"1month": 1,
"3month": 3,
"6month": 6,
"1year": 12,
"2year": 24
};
var back = periods[params.qstring.args.period];
var skip = {};
var dates = {};
var now = moment();
skip[appId + "_" + now.format('YYYY:M')] = true;
skip[appId + "_" + now.format('YYYY') + ":0"] = true;
dates[now.format('YYYY:M')] = true;
dates[now.format('YYYY') + ":0"] = true;
for (let i = 0; i < common.base64.length; i++) {
skip[appId + "_" + now.format('YYYY:M') + "_" + common.base64[i]] = true;
skip[appId + "_" + now.format('YYYY') + ":0" + "_" + common.base64[i]] = true;
dates[now.format('YYYY:M') + "_" + common.base64[i]] = true;
dates[now.format('YYYY') + ":0" + "_" + common.base64[i]] = true;
}
for (let i = 0; i < back; i++) {
skip[appId + "_" + now.subtract(1, "months").format('YYYY:M')] = true;
skip[appId + "_" + now.format('YYYY') + ":0"] = true;
dates[now.format('YYYY:M')] = true;
dates[now.format('YYYY') + ":0"] = true;
for (let j = 0; j < common.base64.length; j++) {
skip[appId + "_" + now.format('YYYY:M') + "_" + common.base64[j]] = true;
skip[appId + "_" + now.format('YYYY') + ":0" + "_" + common.base64[j]] = true;
dates[now.format('YYYY:M') + "_" + common.base64[j]] = true;
dates[now.format('YYYY') + ":0" + "_" + common.base64[j]] = true;
}
}
/*
    This variable is set after the above loop because the loop already does the necessary subtraction
*/
var oldestTimestampWanted = Math.round(now.valueOf() / 1000);
skip = Object.keys(skip);
dates = Object.keys(dates);
common.db.collection('users').remove({$and: [{'_id': {$regex: appId + ".*"}}, {'_id': {$nin: skip}}]}, function() {});
common.db.collection('carriers').remove({$and: [{'_id': {$regex: appId + ".*"}}, {'_id': {$nin: skip}}]}, function() {});
common.db.collection('devices').remove({$and: [{'_id': {$regex: appId + ".*"}}, {'_id': {$nin: skip}}]}, function() {});
common.db.collection('device_details').remove({$and: [{'_id': {$regex: appId + ".*"}}, {'_id': {$nin: skip}}]}, function() {});
common.db.collection('cities').remove({$and: [{'_id': {$regex: appId + ".*"}}, {'_id': {$nin: skip}}]}, function() {});
common.db.collection('events').findOne({'_id': common.db.ObjectID(appId)}, function(err, events) {
if (!err && events && events.list) {
common.arrayAddUniq(events.list, plugins.internalEvents);
for (let i = 0; i < events.list.length; i++) {
var segments = [];
if (events.list[i] && events.segments && events.segments[events.list[i]]) {
segments = events.segments[events.list[i]];
}
segments.push("no-segment");
var docs = [];
for (let j = 0; j < segments.length; j++) {
for (let k = 0; k < dates.length; k++) {
docs.push(segments[j] + "_" + dates[k]);
}
}
var collectionNameWoPrefix = crypto.createHash('sha1').update(events.list[i] + appId).digest('hex');
common.db.collection("events" + collectionNameWoPrefix).remove({'_id': {$nin: docs}}, function() {});
}
}
});
/*
Set ls (last session) timestamp of users who had their last session before oldestTimestampWanted to 1
    This prevents these users from being included as "total users" in the reports
*/
common.db.collection('app_users' + appId).update({ls: {$lte: oldestTimestampWanted}}, {$set: {ls: 1}});
/*
Remove all metric changes that happened before oldestTimestampWanted since we no longer need
old metric changes
*/
common.db.collection('metric_changes' + appId).remove({ts: {$lte: oldestTimestampWanted}});
plugins.dispatch("/i/apps/clear", {
params: params,
appId: appId,
data: app,
moment: now,
dates: dates,
ids: skip
});
}
/**
* Converts apps array into object with app_id as key
* @param {array} apps - array of apps documents
* @returns {object} with app_id as key and app doc as value
**/
function packApps(apps) {
var appsObj = {};
for (let i = 0; i < apps.length ;i++) {
appsObj[apps[i]._id] = {
'_id': apps[i]._id,
'category': apps[i].category,
'country': apps[i].country,
'key': apps[i].key,
'name': apps[i].name,
'timezone': apps[i].timezone
};
}
return appsObj;
}
/**
* Validate and correct app's properties, by modifying original object
* @param {object} app - app document
**/
function processAppProps(app) {
if (!app.country || !isValidCountry(app.country)) {
app.country = plugins.getConfig("apps").country;
}
if (!app.timezone || !isValidTimezone(app.timezone)) {
app.timezone = plugins.getConfig("apps").timezone;
}
if (!app.category || !isValidCategory(app.category)) {
app.category = plugins.getConfig("apps").category;
}
if (!app.type || !isValidType(app.type)) {
app.type = "mobile";
}
}
/**
* Validate and correct an app update's properties, replacing invalid
* values with defaults
* @param {object} app - app update document
* @returns {array} invalidProps - keys of invalid properties
**/
function validateAppUpdateProps(app) {
const invalidProps = [];
if (app.country && !isValidCountry(app.country)) {
invalidProps.push("country");
}
if (app.timezone && !isValidTimezone(app.timezone)) {
invalidProps.push("timezone");
}
if (app.category && !isValidCategory(app.category)) {
invalidProps.push("category");
}
if (app.type && !isValidType(app.type)) {
invalidProps.push("type");
}
return invalidProps;
}
/**
* Validate timezone
* @param {string} timezone - timezone value
* @returns {boolean} if timezone was valid or not
**/
function isValidTimezone(timezone) {
var timezones = ["Africa/Abidjan", "Africa/Accra", "Africa/Addis_Ababa", "Africa/Algiers", "Africa/Asmera", "Africa/Bamako", "Africa/Bangui", "Africa/Banjul", "Africa/Bissau", "Africa/Blantyre", "Africa/Brazzaville", "Africa/Bujumbura", "Africa/Cairo", "Africa/Casablanca", "Africa/Ceuta", "Africa/Conakry", "Africa/Dakar", "Africa/Dar_es_Salaam", "Africa/Djibouti", "Africa/Douala", "Africa/El_Aaiun", "Africa/Freetown", "Africa/Gaborone", "Africa/Harare", "Africa/Johannesburg", "Africa/Kampala", "Africa/Khartoum", "Africa/Kigali", "Africa/Kinshasa", "Africa/Lagos", "Africa/Libreville", "Africa/Lome", "Africa/Luanda", "Africa/Lubumbashi", "Africa/Lusaka", "Africa/Malabo", "Africa/Maputo", "Africa/Maseru", "Africa/Mbabane", "Africa/Mogadishu", "Africa/Monrovia", "Africa/Nairobi", "Africa/Ndjamena", "Africa/Niamey", "Africa/Nouakchott", "Africa/Ouagadougou", "Africa/Porto-Novo", "Africa/Sao_Tome", "Africa/Tripoli", "Africa/Tunis", "Africa/Windhoek", "America/Anchorage", "America/Anguilla", "America/Antigua", "America/Araguaina", "America/Aruba", "America/Asuncion", "America/Bahia", "America/Barbados", "America/Belem", "America/Belize", "America/Boa_Vista", "America/Bogota", "America/Buenos_Aires", "America/Campo_Grande", "America/Caracas", "America/Cayenne", "America/Cayman", "America/Chicago", "America/Costa_Rica", "America/Cuiaba", "America/Curacao", "America/Danmarkshavn", "America/Dawson_Creek", "America/Denver", "America/Dominica", "America/Edmonton", "America/El_Salvador", "America/Fortaleza", "America/Godthab", "America/Grand_Turk", "America/Grenada", "America/Guadeloupe", "America/Guatemala", "America/Guayaquil", "America/Guyana", "America/Halifax", "America/Havana", "America/Hermosillo", "America/Iqaluit", "America/Jamaica", "America/La_Paz", "America/Lima", "America/Los_Angeles", "America/Maceio", "America/Managua", "America/Manaus", "America/Martinique", "America/Mazatlan", "America/Mexico_City", "America/Miquelon", "America/Montevideo", "America/Montreal", "America/Montserrat", "America/Nassau", "America/New_York", "America/Noronha", "America/Panama", "America/Paramaribo", "America/Phoenix", "America/Port-au-Prince", "America/Port_of_Spain", "America/Porto_Velho", "America/Puerto_Rico", "America/Recife", "America/Regina", "America/Rio_Branco", "America/Santiago", "America/Santo_Domingo", "America/Sao_Paulo", "America/Scoresbysund", "America/St_Johns", "America/St_Kitts", "America/St_Lucia", "America/St_Thomas", "America/St_Vincent", "America/Tegucigalpa", "America/Thule", "America/Tijuana", "America/Toronto", "America/Tortola", "America/Vancouver", "America/Whitehorse", "America/Winnipeg", "America/Yellowknife", "Antarctica/Casey", "Antarctica/Davis", "Antarctica/DumontDUrville", "Antarctica/Mawson", "Antarctica/Palmer", "Antarctica/Rothera", "Antarctica/Syowa", "Antarctica/Vostok", "Arctic/Longyearbyen", "Asia/Aden", "Asia/Almaty", "Asia/Amman", "Asia/Aqtau", "Asia/Aqtobe", "Asia/Ashgabat", "Asia/Baghdad", "Asia/Bahrain", "Asia/Baku", "Asia/Bangkok", "Asia/Beirut", "Asia/Bishkek", "Asia/Brunei", "Asia/Calcutta", "Asia/Choibalsan", "Asia/Colombo", "Asia/Damascus", "Asia/Dhaka", "Asia/Dili", "Asia/Dubai", "Asia/Dushanbe", "Asia/Gaza", "Asia/Hong_Kong", "Asia/Hovd", "Asia/Irkutsk", "Asia/Jakarta", "Asia/Jayapura", "Asia/Jerusalem", "Asia/Kabul", "Asia/Kamchatka", "Asia/Karachi", "Asia/Katmandu", "Asia/Krasnoyarsk", "Asia/Kuala_Lumpur", "Asia/Kuwait", "Asia/Macau", "Asia/Magadan", "Asia/Makassar", "Asia/Manila", "Asia/Muscat", "Asia/Nicosia", "Asia/Omsk", "Asia/Phnom_Penh", 
"Asia/Pyongyang", "Asia/Qatar", "Asia/Rangoon", "Asia/Riyadh", "Asia/Saigon", "Asia/Seoul", "Asia/Shanghai", "Asia/Singapore", "Asia/Taipei", "Asia/Tashkent", "Asia/Tbilisi", "Asia/Tehran", "Asia/Thimphu", "Asia/Tokyo", "Asia/Ulaanbaatar", "Asia/Vientiane", "Asia/Vladivostok", "Asia/Yakutsk", "Asia/Yekaterinburg", "Asia/Yerevan", "Atlantic/Azores", "Atlantic/Bermuda", "Atlantic/Canary", "Atlantic/Cape_Verde", "Atlantic/Faeroe", "Atlantic/Reykjavik", "Atlantic/South_Georgia", "Atlantic/St_Helena", "Atlantic/Stanley", "Australia/Adelaide", "Australia/Brisbane", "Australia/Darwin", "Australia/Hobart", "Australia/Perth", "Australia/Sydney", "Etc/GMT", "Europe/Amsterdam", "Europe/Andorra", "Europe/Athens", "Europe/Belgrade", "Europe/Berlin", "Europe/Bratislava", "Europe/Brussels", "Europe/Bucharest", "Europe/Budapest", "Europe/Chisinau", "Europe/Copenhagen", "Europe/Dublin", "Europe/Gibraltar", "Europe/Helsinki", "Europe/Istanbul", "Europe/Kaliningrad", "Europe/Kiev", "Europe/Lisbon", "Europe/Ljubljana", "Europe/London", "Europe/Luxembourg", "Europe/Madrid", "Europe/Malta", "Europe/Minsk", "Europe/Monaco", "Europe/Moscow", "Europe/Oslo", "Europe/Paris", "Europe/Prague", "Europe/Riga", "Europe/Rome", "Europe/Samara", "Europe/San_Marino", "Europe/Sarajevo", "Europe/Skopje", "Europe/Sofia", "Europe/Stockholm", "Europe/Tallinn", "Europe/Tirane", "Europe/Vaduz", "Europe/Vatican", "Europe/Vienna", "Europe/Vilnius", "Europe/Warsaw", "Europe/Zagreb", "Europe/Zurich", "Indian/Antananarivo", "Indian/Chagos", "Indian/Christmas", "Indian/Cocos", "Indian/Comoro", "Indian/Kerguelen", "Indian/Mahe", "Indian/Maldives", "Indian/Mauritius", "Indian/Mayotte", "Indian/Reunion", "Pacific/Apia", "Pacific/Auckland", "Pacific/Easter", "Pacific/Efate", "Pacific/Enderbury", "Pacific/Fakaofo", "Pacific/Fiji", "Pacific/Funafuti", "Pacific/Galapagos", "Pacific/Gambier", "Pacific/Guadalcanal", "Pacific/Guam", "Pacific/Honolulu", "Pacific/Johnston", "Pacific/Kiritimati", "Pacific/Kosrae", "Pacific/Kwajalein", "Pacific/Majuro", "Pacific/Marquesas", "Pacific/Midway", "Pacific/Nauru", "Pacific/Niue", "Pacific/Norfolk", "Pacific/Noumea", "Pacific/Pago_Pago", "Pacific/Palau", "Pacific/Pitcairn", "Pacific/Ponape", "Pacific/Port_Moresby", "Pacific/Rarotonga", "Pacific/Saipan", "Pacific/Tahiti", "Pacific/Tarawa", "Pacific/Tongatapu", "Pacific/Truk", "Pacific/Wake", "Pacific/Wallis"];
return timezones.indexOf(timezone) !== -1;
}
/**
* Validate category
* @param {string} category - category value
* @returns {boolean} if category was valid or not
**/
function isValidCategory(category) {
var categories = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20"];
return categories.indexOf(category) !== -1;
}
/**
* Validate app type
* @param {string} type - type value
* @returns {boolean} if type was valid or not
**/
function isValidType(type) {
//check if valid app type and it's plugin is enabled
return plugins.appTypes.indexOf(type) !== -1 && plugins.isPluginEnabled(type);
}
/**
* Validate country
* @param {string} country - country value
* @returns {boolean} if country was valid or not
**/
function isValidCountry(country) {
var countries = ["AF", "AX", "AL", "DZ", "AS", "AD", "AO", "AI", "AQ", "AG", "AR", "AM", "AW", "AU", "AT", "AZ", "BS", "BH", "BD", "BB", "BY", "BE", "BZ", "BJ", "BM", "BT", "BO", "BQ", "BA", "BW", "BV", "BR", "IO", "BN", "BG", "BF", "BI", "KH", "CM", "CA", "CV", "KY", "CF", "TD", "CL", "CN", "CX", "CC", "CO", "KM", "CG", "CD", "CK", "CR", "CI", "HR", "CU", "CW", "CY", "CZ", "DK", "DJ", "DM", "DO", "EC", "EG", "SV", "GQ", "ER", "EE", "ET", "FK", "FO", "FJ", "FI", "FR", "GF", "PF", "TF", "GA", "GM", "GE", "DE", "GH", "GI", "GR", "GL", "GD", "GP", "GU", "GT", "GG", "GN", "GW", "GY", "HT", "HM", "VA", "HN", "HK", "HU", "IS", "IN", "ID", "IR", "IQ", "IE", "IM", "IL", "IT", "JM", "JP", "JE", "JO", "KZ", "KE", "KI", "KP", "KR", "KW", "KG", "LA", "LV", "LB", "LS", "LR", "LY", "LI", "LT", "LU", "MO", "MK", "MG", "MW", "MY", "MV", "ML", "MT", "MH", "MQ", "MR", "MU", "YT", "MX", "FM", "MD", "MC", "MN", "ME", "MS", "MA", "MZ", "MM", "NA", "NR", "NP", "NL", "NC", "NZ", "NI", "NE", "NG", "NU", "NF", "MP", "NO", "OM", "PK", "PW", "PS", "PA", "PG", "PY", "PE", "PH", "PN", "PL", "PT", "PR", "QA", "RE", "RO", "RU", "RW", "BL", "SH", "KN", "LC", "MF", "PM", "VC", "WS", "SM", "ST", "SA", "SN", "RS", "SC", "SL", "SG", "SX", "SK", "SI", "SB", "SO", "ZA", "GS", "SS", "ES", "LK", "SD", "SR", "SJ", "SZ", "SE", "CH", "SY", "TW", "TJ", "TZ", "TH", "TL", "TG", "TK", "TO", "TT", "TN", "TR", "TM", "TC", "TV", "UG", "UA", "AE", "GB", "US", "UM", "UY", "UZ", "VU", "VE", "VN", "VG", "VI", "WF", "EH", "YE", "ZM", "ZW"];
return countries.indexOf(country) !== -1;
}
module.exports = appsApi; | 1 | 13,317 | Why we dont delete app specific notes when app is deleted? | Countly-countly-server | js |
@@ -113,9 +113,17 @@ public class ConfigBuilderPlugin extends PluginBase {
pluginList = MainApp.getPluginsList();
upgradeSettings();
loadSettings();
+ setAlwaysEnabledPluginsEnabled();
MainApp.bus().post(new EventAppInitialized());
}
+ private void setAlwaysEnabledPluginsEnabled() {
+ for (PluginBase plugin : pluginList) {
+ if (plugin.pluginDescription.alwaysEnabled) plugin.setPluginEnabled(plugin.getType(), true);
+ }
+ storeSettings("setAlwaysEnabledPluginsEnabled");
+ }
+
public void storeSettings(String from) {
if (pluginList != null) {
if (Config.logPrefsChange) | 1 | package info.nightscout.androidaps.plugins.ConfigBuilder;
import android.content.Intent;
import android.support.annotation.Nullable;
import com.crashlytics.android.answers.CustomEvent;
import com.squareup.otto.Subscribe;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import info.nightscout.androidaps.BuildConfig;
import info.nightscout.androidaps.Config;
import info.nightscout.androidaps.Constants;
import info.nightscout.androidaps.MainApp;
import info.nightscout.androidaps.R;
import info.nightscout.androidaps.data.DetailedBolusInfo;
import info.nightscout.androidaps.data.Profile;
import info.nightscout.androidaps.data.ProfileStore;
import info.nightscout.androidaps.data.PumpEnactResult;
import info.nightscout.androidaps.db.CareportalEvent;
import info.nightscout.androidaps.db.ProfileSwitch;
import info.nightscout.androidaps.db.Source;
import info.nightscout.androidaps.db.TemporaryBasal;
import info.nightscout.androidaps.events.EventAppInitialized;
import info.nightscout.androidaps.events.EventNewBasalProfile;
import info.nightscout.androidaps.events.EventProfileSwitchChange;
import info.nightscout.androidaps.interfaces.APSInterface;
import info.nightscout.androidaps.interfaces.BgSourceInterface;
import info.nightscout.androidaps.interfaces.Constraint;
import info.nightscout.androidaps.interfaces.InsulinInterface;
import info.nightscout.androidaps.interfaces.PluginBase;
import info.nightscout.androidaps.interfaces.PluginDescription;
import info.nightscout.androidaps.interfaces.PluginType;
import info.nightscout.androidaps.interfaces.ProfileInterface;
import info.nightscout.androidaps.interfaces.PumpInterface;
import info.nightscout.androidaps.interfaces.SensitivityInterface;
import info.nightscout.androidaps.interfaces.TreatmentsInterface;
import info.nightscout.androidaps.plugins.Insulin.InsulinOrefRapidActingPlugin;
import info.nightscout.androidaps.plugins.Loop.APSResult;
import info.nightscout.androidaps.plugins.Loop.LoopPlugin;
import info.nightscout.androidaps.plugins.Overview.Dialogs.ErrorHelperActivity;
import info.nightscout.androidaps.plugins.PumpVirtual.VirtualPumpPlugin;
import info.nightscout.androidaps.plugins.SensitivityOref0.SensitivityOref0Plugin;
import info.nightscout.androidaps.queue.Callback;
import info.nightscout.androidaps.queue.CommandQueue;
import info.nightscout.utils.FabricPrivacy;
import info.nightscout.utils.NSUpload;
import info.nightscout.utils.SP;
import info.nightscout.utils.ToastUtils;
/**
* Created by mike on 05.08.2016.
*/
public class ConfigBuilderPlugin extends PluginBase {
private static Logger log = LoggerFactory.getLogger(ConfigBuilderPlugin.class);
private static ConfigBuilderPlugin configBuilderPlugin;
static public ConfigBuilderPlugin getPlugin() {
if (configBuilderPlugin == null)
configBuilderPlugin = new ConfigBuilderPlugin();
return configBuilderPlugin;
}
private BgSourceInterface activeBgSource;
private static PumpInterface activePump;
private static ProfileInterface activeProfile;
private static TreatmentsInterface activeTreatments;
private static APSInterface activeAPS;
private static LoopPlugin activeLoop;
private static InsulinInterface activeInsulin;
private static SensitivityInterface activeSensitivity;
static public String nightscoutVersionName = "";
static public Integer nightscoutVersionCode = 0;
static public String nsClientVersionName = "";
static public Integer nsClientVersionCode = 0;
private static ArrayList<PluginBase> pluginList;
private static CommandQueue commandQueue = new CommandQueue();
public ConfigBuilderPlugin() {
super(new PluginDescription()
.mainType(PluginType.GENERAL)
.fragmentClass(ConfigBuilderFragment.class.getName())
.showInList(true)
.alwaysEnabled(true)
.alwayVisible(false)
.pluginName(R.string.configbuilder)
.shortName(R.string.configbuilder_shortname)
.description(R.string.description_config_builder)
);
}
@Override
protected void onStart() {
MainApp.bus().register(this);
super.onStart();
}
@Override
protected void onStop() {
super.onStop();
MainApp.bus().unregister(this);
}
public void initialize() {
pluginList = MainApp.getPluginsList();
upgradeSettings();
loadSettings();
MainApp.bus().post(new EventAppInitialized());
}
public void storeSettings(String from) {
if (pluginList != null) {
if (Config.logPrefsChange)
log.debug("Storing settings from: " + from);
for (PluginBase p : pluginList) {
PluginType type = p.getType();
if (p.pluginDescription.alwaysEnabled && p.pluginDescription.alwayVisible)
continue;
if (p.pluginDescription.alwaysEnabled && p.pluginDescription.neverVisible)
continue;
savePref(p, type, true);
if (type == PluginType.PUMP) {
if (p instanceof ProfileInterface) { // Store state of optional Profile interface
savePref(p, PluginType.PROFILE, false);
}
}
}
verifySelectionInCategories();
}
}
private void savePref(PluginBase p, PluginType type, boolean storeVisible) {
String settingEnabled = "ConfigBuilder_" + type.name() + "_" + p.getClass().getSimpleName() + "_Enabled";
SP.putBoolean(settingEnabled, p.isEnabled(type));
log.debug("Storing: " + settingEnabled + ":" + p.isEnabled(type));
if (storeVisible) {
String settingVisible = "ConfigBuilder_" + type.name() + "_" + p.getClass().getSimpleName() + "_Visible";
SP.putBoolean(settingVisible, p.isFragmentVisible());
log.debug("Storing: " + settingVisible + ":" + p.isFragmentVisible());
}
}
private void loadSettings() {
if (Config.logPrefsChange)
log.debug("Loading stored settings");
for (PluginBase p : pluginList) {
PluginType type = p.getType();
loadPref(p, type, true);
if (p.getType() == PluginType.PUMP) {
if (p instanceof ProfileInterface) {
loadPref(p, PluginType.PROFILE, false);
}
}
}
verifySelectionInCategories();
}
private void loadPref(PluginBase p, PluginType type, boolean loadVisible) {
String settingEnabled = "ConfigBuilder_" + type.name() + "_" + p.getClass().getSimpleName() + "_Enabled";
if (SP.contains(settingEnabled))
p.setPluginEnabled(type, SP.getBoolean(settingEnabled, false));
else if (p.getType() == type && (p.pluginDescription.enableByDefault || p.pluginDescription.alwaysEnabled)) {
p.setPluginEnabled(type, true);
}
log.debug("Loaded: " + settingEnabled + ":" + p.isEnabled(type));
if (loadVisible) {
String settingVisible = "ConfigBuilder_" + type.name() + "_" + p.getClass().getSimpleName() + "_Visible";
if (SP.contains(settingVisible))
p.setFragmentVisible(type, SP.getBoolean(settingVisible, false) && SP.getBoolean(settingEnabled, false));
else if (p.getType() == type && p.pluginDescription.visibleByDefault) {
p.setFragmentVisible(type, true);
}
log.debug("Loaded: " + settingVisible + ":" + p.isFragmentVisible());
}
}
// Detect settings prior 1.60
private void upgradeSettings() {
if (!SP.contains("ConfigBuilder_1_NSProfilePlugin_Enabled"))
return;
if (Config.logPrefsChange)
log.debug("Upgrading stored settings");
for (PluginBase p : pluginList) {
log.debug("Processing " + p.getName());
for (int type = 1; type < 11; type++) {
PluginType newType;
switch (type) {
case 1:
newType = PluginType.GENERAL;
break;
case 2:
newType = PluginType.TREATMENT;
break;
case 3:
newType = PluginType.SENSITIVITY;
break;
case 4:
newType = PluginType.PROFILE;
break;
case 5:
newType = PluginType.APS;
break;
case 6:
newType = PluginType.PUMP;
break;
case 7:
newType = PluginType.CONSTRAINTS;
break;
case 8:
newType = PluginType.LOOP;
break;
case 9:
newType = PluginType.BGSOURCE;
break;
case 10:
newType = PluginType.INSULIN;
break;
default:
newType = PluginType.GENERAL;
break;
}
String settingEnabled = "ConfigBuilder_" + type + "_" + p.getClass().getSimpleName() + "_Enabled";
String settingVisible = "ConfigBuilder_" + type + "_" + p.getClass().getSimpleName() + "_Visible";
if (SP.contains(settingEnabled))
p.setPluginEnabled(newType, SP.getBoolean(settingEnabled, false));
if (SP.contains(settingVisible))
p.setFragmentVisible(newType, SP.getBoolean(settingVisible, false) && SP.getBoolean(settingEnabled, false));
SP.remove(settingEnabled);
SP.remove(settingVisible);
if (newType == p.getType()) {
savePref(p, newType, true);
} else if (p.getType() == PluginType.PUMP && p instanceof ProfileInterface) {
savePref(p, PluginType.PROFILE, false);
}
}
}
}
public static CommandQueue getCommandQueue() {
return commandQueue;
}
public BgSourceInterface getActiveBgSource() {
return activeBgSource;
}
public ProfileInterface getActiveProfileInterface() {
return activeProfile;
}
public static InsulinInterface getActiveInsulin() {
return activeInsulin;
}
public static APSInterface getActiveAPS() {
return activeAPS;
}
public static PumpInterface getActivePump() {
return activePump;
}
public static SensitivityInterface getActiveSensitivity() {
return activeSensitivity;
}
void logPluginStatus() {
for (PluginBase p : pluginList) {
log.debug(p.getName() + ":" +
(p.isEnabled(PluginType.GENERAL) ? " GENERAL" : "") +
(p.isEnabled(PluginType.TREATMENT) ? " TREATMENT" : "") +
(p.isEnabled(PluginType.SENSITIVITY) ? " SENSITIVITY" : "") +
(p.isEnabled(PluginType.PROFILE) ? " PROFILE" : "") +
(p.isEnabled(PluginType.APS) ? " APS" : "") +
(p.isEnabled(PluginType.PUMP) ? " PUMP" : "") +
(p.isEnabled(PluginType.CONSTRAINTS) ? " CONSTRAINTS" : "") +
(p.isEnabled(PluginType.LOOP) ? " LOOP" : "") +
(p.isEnabled(PluginType.BGSOURCE) ? " BGSOURCE" : "") +
(p.isEnabled(PluginType.INSULIN) ? " INSULIN" : "")
);
}
}
private void verifySelectionInCategories() {
ArrayList<PluginBase> pluginsInCategory;
// PluginType.APS
activeAPS = this.determineActivePlugin(APSInterface.class, PluginType.APS);
// PluginType.INSULIN
pluginsInCategory = MainApp.getSpecificPluginsList(PluginType.INSULIN);
activeInsulin = (InsulinInterface) getTheOneEnabledInArray(pluginsInCategory, PluginType.INSULIN);
if (activeInsulin == null) {
activeInsulin = InsulinOrefRapidActingPlugin.getPlugin();
InsulinOrefRapidActingPlugin.getPlugin().setPluginEnabled(PluginType.INSULIN, true);
}
this.setFragmentVisiblities(((PluginBase) activeInsulin).getName(), pluginsInCategory, PluginType.INSULIN);
// PluginType.SENSITIVITY
pluginsInCategory = MainApp.getSpecificPluginsList(PluginType.SENSITIVITY);
activeSensitivity = (SensitivityInterface) getTheOneEnabledInArray(pluginsInCategory, PluginType.SENSITIVITY);
if (activeSensitivity == null) {
activeSensitivity = SensitivityOref0Plugin.getPlugin();
SensitivityOref0Plugin.getPlugin().setPluginEnabled(PluginType.SENSITIVITY, true);
}
this.setFragmentVisiblities(((PluginBase) activeSensitivity).getName(), pluginsInCategory, PluginType.SENSITIVITY);
// PluginType.PROFILE
activeProfile = this.determineActivePlugin(ProfileInterface.class, PluginType.PROFILE);
// PluginType.BGSOURCE
activeBgSource = this.determineActivePlugin(BgSourceInterface.class, PluginType.BGSOURCE);
// PluginType.PUMP
pluginsInCategory = MainApp.getSpecificPluginsList(PluginType.PUMP);
activePump = (PumpInterface) getTheOneEnabledInArray(pluginsInCategory, PluginType.PUMP);
if (activePump == null) {
activePump = VirtualPumpPlugin.getPlugin();
VirtualPumpPlugin.getPlugin().setPluginEnabled(PluginType.PUMP, true);
}
this.setFragmentVisiblities(((PluginBase) activePump).getName(), pluginsInCategory, PluginType.PUMP);
// PluginType.LOOP
activeLoop = this.determineActivePlugin(PluginType.LOOP);
// PluginType.TREATMENT
activeTreatments = this.determineActivePlugin(PluginType.TREATMENT);
}
/**
* disables the visibility for all fragments of Plugins with the given PluginType
* which are not equally named to the Plugin implementing the given Plugin Interface.
*
* @param pluginInterface
* @param pluginType
* @param <T>
* @return
*/
private <T> T determineActivePlugin(Class<T> pluginInterface, PluginType pluginType) {
ArrayList<PluginBase> pluginsInCategory;
pluginsInCategory = MainApp.getSpecificPluginsListByInterface(pluginInterface);
return this.determineActivePlugin(pluginsInCategory, pluginType);
}
private <T> T determineActivePlugin(PluginType pluginType) {
ArrayList<PluginBase> pluginsInCategory;
pluginsInCategory = MainApp.getSpecificPluginsList(pluginType);
return this.determineActivePlugin(pluginsInCategory, pluginType);
}
/**
     * Determines the one enabled plugin in the given pluginsInCategory and hides the
     * fragments of all other plugins of the given PluginType whose names differ from it.
     * <p>
     * TODO we are casting an interface to PluginBase, which seems to be rather odd, since
     * TODO the interface does not implement PluginBase (this only avoids errors by
     * TODO convention).
     *
     * @param pluginsInCategory the plugins belonging to the category being resolved
     * @param pluginType        the plugin category being resolved
     * @param <T>               the interface type
     * @return the active plugin of the category, or null if no plugin is enabled
*/
private <T> T determineActivePlugin(ArrayList<PluginBase> pluginsInCategory,
PluginType pluginType) {
T activePlugin = (T) getTheOneEnabledInArray(pluginsInCategory, pluginType);
if (activePlugin != null) {
this.setFragmentVisiblities(((PluginBase) activePlugin).getName(),
pluginsInCategory, pluginType);
}
return activePlugin;
}
private void setFragmentVisiblities(String activePluginName, ArrayList<PluginBase> pluginsInCategory,
PluginType pluginType) {
if (Config.logConfigBuilder)
log.debug("Selected interface: " + activePluginName);
for (PluginBase p : pluginsInCategory) {
if (!p.getName().equals(activePluginName)) {
p.setFragmentVisible(pluginType, false);
}
}
}
@Nullable
private PluginBase getTheOneEnabledInArray(ArrayList<PluginBase> pluginsInCategory, PluginType type) {
PluginBase found = null;
for (PluginBase p : pluginsInCategory) {
if (p.isEnabled(type) && found == null) {
found = p;
} else if (p.isEnabled(type)) {
// set others disabled
p.setPluginEnabled(type, false);
}
}
// If none enabled, enable first one
//if (found == null && pluginsInCategory.size() > 0)
// found = pluginsInCategory.get(0);
return found;
}
/**
* expect absolute request and allow both absolute and percent response based on pump capabilities
*/
public void applyTBRRequest(APSResult request, Profile profile, Callback callback) {
if (!request.tempBasalRequested) {
if (callback != null) {
callback.result(new PumpEnactResult().enacted(false).success(true).comment(MainApp.gs(R.string.nochangerequested))).run();
}
return;
}
PumpInterface pump = getActivePump();
request.rateConstraint = new Constraint<>(request.rate);
request.rate = MainApp.getConstraintChecker().applyBasalConstraints(request.rateConstraint, profile).value();
if (!pump.isInitialized()) {
log.debug("applyAPSRequest: " + MainApp.gs(R.string.pumpNotInitialized));
if (callback != null) {
callback.result(new PumpEnactResult().comment(MainApp.gs(R.string.pumpNotInitialized)).enacted(false).success(false)).run();
}
return;
}
if (pump.isSuspended()) {
log.debug("applyAPSRequest: " + MainApp.gs(R.string.pumpsuspended));
if (callback != null) {
callback.result(new PumpEnactResult().comment(MainApp.gs(R.string.pumpsuspended)).enacted(false).success(false)).run();
}
return;
}
if (Config.logCongigBuilderActions)
log.debug("applyAPSRequest: " + request.toString());
long now = System.currentTimeMillis();
TemporaryBasal activeTemp = activeTreatments.getTempBasalFromHistory(now);
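        // Three possible outcomes from here:
        //  1) the requested rate is effectively the current base basal (or a zero rate/duration
        //     request): cancel any running temp basal, otherwise report the basal is already set correctly;
        //  2) a running temp basal already matches the requested rate and has a comparable
        //     remaining duration: let it run;
        //  3) otherwise enact a new absolute temp basal through the command queue.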
if ((request.rate == 0 && request.duration == 0) || Math.abs(request.rate - pump.getBaseBasalRate()) < pump.getPumpDescription().basalStep) {
if (activeTemp != null) {
if (Config.logCongigBuilderActions)
log.debug("applyAPSRequest: cancelTempBasal()");
getCommandQueue().cancelTempBasal(false, callback);
} else {
if (Config.logCongigBuilderActions)
log.debug("applyAPSRequest: Basal set correctly");
if (callback != null) {
callback.result(new PumpEnactResult().absolute(request.rate).duration(0)
.enacted(false).success(true).comment(MainApp.gs(R.string.basal_set_correctly))).run();
}
}
} else if (activeTemp != null
&& activeTemp.getPlannedRemainingMinutes() > 5
&& request.duration - activeTemp.getPlannedRemainingMinutes() < 30
&& Math.abs(request.rate - activeTemp.tempBasalConvertedToAbsolute(now, profile)) < pump.getPumpDescription().basalStep) {
if (Config.logCongigBuilderActions)
log.debug("applyAPSRequest: Temp basal set correctly");
if (callback != null) {
callback.result(new PumpEnactResult().absolute(activeTemp.tempBasalConvertedToAbsolute(now, profile))
.enacted(false).success(true).duration(activeTemp.getPlannedRemainingMinutes())
.comment(MainApp.gs(R.string.let_temp_basal_run))).run();
}
} else {
if (Config.logCongigBuilderActions)
log.debug("applyAPSRequest: setTempBasalAbsolute()");
getCommandQueue().tempBasalAbsolute(request.rate, request.duration, false, profile, callback);
}
}
public void applySMBRequest(APSResult request, Callback callback) {
if (!request.bolusRequested) {
return;
}
long lastBolusTime = activeTreatments.getLastBolusTime();
if (lastBolusTime != 0 && lastBolusTime + 3 * 60 * 1000 > System.currentTimeMillis()) {
log.debug("SMB requested but still in 3 min interval");
if (callback != null) {
callback.result(new PumpEnactResult()
.comment(MainApp.gs(R.string.smb_frequency_exceeded))
.enacted(false).success(false)).run();
}
return;
}
PumpInterface pump = getActivePump();
if (!pump.isInitialized()) {
log.debug("applySMBRequest: " + MainApp.gs(R.string.pumpNotInitialized));
if (callback != null) {
callback.result(new PumpEnactResult().comment(MainApp.gs(R.string.pumpNotInitialized)).enacted(false).success(false)).run();
}
return;
}
if (pump.isSuspended()) {
log.debug("applySMBRequest: " + MainApp.gs(R.string.pumpsuspended));
if (callback != null) {
callback.result(new PumpEnactResult().comment(MainApp.gs(R.string.pumpsuspended)).enacted(false).success(false)).run();
}
return;
}
if (Config.logCongigBuilderActions)
log.debug("applySMBRequest: " + request.toString());
// deliver SMB
DetailedBolusInfo detailedBolusInfo = new DetailedBolusInfo();
detailedBolusInfo.eventType = CareportalEvent.CORRECTIONBOLUS;
detailedBolusInfo.insulin = request.smb;
detailedBolusInfo.isSMB = true;
detailedBolusInfo.source = Source.USER;
detailedBolusInfo.deliverAt = request.deliverAt;
if (Config.logCongigBuilderActions)
log.debug("applyAPSRequest: bolus()");
getCommandQueue().bolus(detailedBolusInfo, callback);
}
@Subscribe
public void onProfileSwitch(EventProfileSwitchChange ignored) {
getCommandQueue().setProfile(getProfile(), new Callback() {
@Override
public void run() {
if (!result.success) {
Intent i = new Intent(MainApp.instance(), ErrorHelperActivity.class);
i.putExtra("soundid", R.raw.boluserror);
i.putExtra("status", result.comment);
i.putExtra("title", MainApp.gs(R.string.failedupdatebasalprofile));
i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
MainApp.instance().startActivity(i);
}
MainApp.bus().post(new EventNewBasalProfile());
}
});
}
public String getProfileName() {
return getProfileName(System.currentTimeMillis());
}
public String getProfileName(boolean customized) {
return getProfileName(System.currentTimeMillis(), customized);
}
public String getProfileName(long time) {
return getProfileName(time, true);
}
public String getProfileName(long time, boolean customized) {
ProfileSwitch profileSwitch = activeTreatments.getProfileSwitchFromHistory(time);
if (profileSwitch != null) {
if (profileSwitch.profileJson != null) {
return customized ? profileSwitch.getCustomizedName() : profileSwitch.profileName;
} else {
ProfileStore profileStore = activeProfile.getProfile();
if (profileStore != null) {
Profile profile = profileStore.getSpecificProfile(profileSwitch.profileName);
if (profile != null)
return profileSwitch.profileName;
}
}
}
return MainApp.gs(R.string.noprofileselected);
}
public boolean isProfileValid(String from) {
return getProfile() != null && getProfile().isValid(from);
}
@Nullable
public Profile getProfile() {
return getProfile(System.currentTimeMillis());
}
public String getProfileUnits() {
Profile profile = getProfile();
return profile != null ? profile.getUnits() : Constants.MGDL;
}
@Nullable
public Profile getProfile(long time) {
if (activeTreatments == null) {
log.debug("getProfile activeTreatments == null: returning null");
return null; //app not initialized
}
//log.debug("Profile for: " + new Date(time).toLocaleString() + " : " + getProfileName(time));
ProfileSwitch profileSwitch = activeTreatments.getProfileSwitchFromHistory(time);
if (profileSwitch != null) {
if (profileSwitch.profileJson != null) {
return profileSwitch.getProfileObject();
} else if (activeProfile.getProfile() != null) {
Profile profile = activeProfile.getProfile().getSpecificProfile(profileSwitch.profileName);
if (profile != null)
return profile;
}
}
if (activeTreatments.getProfileSwitchesFromHistory().size() > 0) {
FabricPrivacy.getInstance().logCustom(new CustomEvent("CatchedError")
.putCustomAttribute("buildversion", BuildConfig.BUILDVERSION)
.putCustomAttribute("version", BuildConfig.VERSION)
.putCustomAttribute("time", time)
.putCustomAttribute("getProfileSwitchesFromHistory", activeTreatments.getProfileSwitchesFromHistory().toString())
);
}
log.debug("getProfile at the end: returning null");
return null;
}
public void disconnectPump(int durationInMinutes, Profile profile) {
LoopPlugin.getPlugin().disconnectTo(System.currentTimeMillis() + durationInMinutes * 60 * 1000L);
getCommandQueue().tempBasalPercent(0, durationInMinutes, true, profile, new Callback() {
@Override
public void run() {
if (!result.success) {
ToastUtils.showToastInUiThread(MainApp.instance().getApplicationContext(), MainApp.gs(R.string.tempbasaldeliveryerror));
}
}
});
if (getActivePump().getPumpDescription().isExtendedBolusCapable && activeTreatments.isInHistoryExtendedBoluslInProgress()) {
getCommandQueue().cancelExtended(new Callback() {
@Override
public void run() {
if (!result.success) {
ToastUtils.showToastInUiThread(MainApp.instance().getApplicationContext(), MainApp.gs(R.string.extendedbolusdeliveryerror));
}
}
});
}
NSUpload.uploadOpenAPSOffline(durationInMinutes);
}
public void suspendLoop(int durationInMinutes) {
LoopPlugin.getPlugin().suspendTo(System.currentTimeMillis() + durationInMinutes * 60 * 1000);
getCommandQueue().cancelTempBasal(true, new Callback() {
@Override
public void run() {
if (!result.success) {
ToastUtils.showToastInUiThread(MainApp.instance().getApplicationContext(), MainApp.gs(R.string.tempbasaldeliveryerror));
}
}
});
NSUpload.uploadOpenAPSOffline(durationInMinutes);
}
}
| 1 | 30,630 | The _OverviewPlugin_ is forcefully enabled without the option to disable it, using _PluginDescription.alwaysEnabled_. That should also work for this plugin? | MilosKozak-AndroidAPS | java |
@@ -220,6 +220,8 @@ public class SearchFields {
* i.e. "Unpublished", "Draft" (multivalued)
*/
public static final String PUBLICATION_STATUS = "publicationStatus";
+
+ public static final String EXTERNAL_STATUS = "externalStatus";
/**
* @todo reconcile different with Solr schema.xml where type is Long rather
* than String. | 1 | package edu.harvard.iq.dataverse.search;
/**
* We define Solr search fields here in one central place so they can be used
* throughout the code but renamed here if need be.
*
* Note that there are many fields in Solr that are *not* here because their
* values come from the database. For example "authorName" comes from the
* database. We update the Solr schema.xml file by merging the output of `curl
* http://localhost:8080/api/admin/index/solr/schema` into the file in the
* source tree when a metadata block update warrants it.
*
 * This process of updating schema.xml for new metadata block fields is documented
 * at doc/sphinx-guides/source/admin/metadatacustomization.rst
*
* Generally speaking, we want the search fields to be readable. This is a
* challenge for long field names but a power user should be able to type
* "authorAffiliation:Harvard" into the general search box. A regular user is
* much more likely to used Advanced Search to populate that field
* automatically.
*
* Originally, these fields were all snake_case but since the dynamic fields are
* camelCase we might want to standardize on that.
*
* You'll notice that dynamic fields like this are used...
*
* - _s (string)
*
* - _ss (multivalued string)
*
* - _l (long)
*
* - _dt (datetime)
*
* ... and these endings should not be changed unless you plan to convert them
* to non-dynamic (by removing the ending) and specify their "type" in the Solr
* schema.xml.
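 *
 * For example, "dataverseVersionIndexedBy_s" below is a single string value, while
 * "affiliation_ss" and "subject_ss" are multivalued strings that can be faceted on.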
*
* Most fields we want to be searchable but some are stored with indexed=false
* because we *don't* want them to be searchable and we're just using Solr as a
* convenient key/value store. Why go to the database if you don't have to? For
* a string here or there that needs to be available to both the GUI and the
* Search API, we can just store them in Solr.
*
* For faceting we use a "string" type. If you use something like "text_general"
* the field is tokenized ("Foo Bar" becomes "foo" "bar" which is not what we
* want). See also
* http://stackoverflow.com/questions/16559911/facet-query-will-give-wrong-output-on-dynamicfield-in-solr
*/
public class SearchFields {
/**
* @todo: consider making various dynamic fields (_s) static in schema.xml
* instead. Should they be stored in the database?
*/
// standard fields from example/solr/collection1/conf/schema.xml
// (but we are getting away from these...)
public static final String ID = "id";
/**
* Determine which DvObjects you might want to target for reindexing after
* an upgrade such as between Dataverse 4.2 and 4.3.
*/
public static final String DATAVERSE_VERSION_INDEXED_BY = "dataverseVersionIndexedBy_s";
public static final String NAME = "name";
/**
* @todo Do we want to support finding dataverses, datasets, and files with
* a query for description:foo? Maybe not, since people will probably just
* use basic search for this. They could also use "dvDescription:foo OR
* dsDescription:foo OR fileDescription:foo" if they *really* only want to
* target the description of all three types at once.
*
* See also https://redmine.hmdc.harvard.edu/issues/3745
*/
public static final String DESCRIPTION = "description";
/**
* Identifiers differ per DvObject: alias for dataverses, globalId for
* datasets, and database id for files.
*/
public static final String IDENTIFIER = "identifier";
/**
* Visible in the GUI as a facet to click: "Harvested" vs. "Root Dataverse".
*/
public static final String METADATA_SOURCE = "metadataSource";
/**
* Internal boolean used when creating OAI sets, for example.
*/
public static final String IS_HARVESTED = "isHarvested";
/**
* Such as https://doi.org/10.5072/FK2/HXI35W
*
* For files, the URL will be the parent dataset.
*/
public static final String PERSISTENT_URL = "persistentUrl";
public static final String UNF = "unf";
public static final String DATAVERSE_NAME = "dvName";
public static final String DATAVERSE_ALIAS = "dvAlias";
public static final String DATAVERSE_AFFILIATION = "dvAffiliation";
public static final String DATAVERSE_DESCRIPTION = "dvDescription";
public static final String DATAVERSE_CATEGORY = "dvCategory";
/**
* What is dvSubject_en for? How does it get populated into Solr? The
* behavior changed so that now the subjects of dataverses are based on
* their datasets. Should this be a string so we can facet on it more
* properly? Should all checkboxes on the advanced search page (controlled
* vocabularies) be backed by a string? When we rename this to "foobar" (a
* field Solr doesn't know about) why doesn't Solr complain when we "index
* all"? See also https://github.com/IQSS/dataverse/issues/1681
*/
public static final String DATAVERSE_SUBJECT = "dvSubject";
/**
* A "collapsed" facet (e.g. applies to both dataverses and datasets and is
* merged as a single facet in the GUI) like affiliation that needs to match
* the corresponding dynamic "facet" Solr field at the dataset level to work
* properly. Should we use/expose "_ss" when you click a facet? It needs to
* be different from "subject" which is used for general search but maybe we
* could have a convention like "subjectFacet" for the facets?
*/
public static final String SUBJECT = "subject_ss";
/*
* The category of the Dataverse (aka Dataverse Type). Named differently
* than DATAVERSE_CATEGORY so it can be searched but doesn't show up on the
* homepage facet
*/
public static final String CATEGORY_OF_DATAVERSE = "categoryOfDataverse";
/*
* The alias of the dataverse. This named differently because IDENTIFIER
* is used for dataset for its own identifier.
*/
public static final String IDENTIFIER_OF_DATAVERSE = "identifierOfDataverse";
/**
* @todo think about how to tie the fact that this needs to be multivalued
   * (_ss) because a multivalued facet (authorAffiliation_ss) will be collapsed
* into it at index time. The business logic to determine if a data-driven
* metadata field should be indexed into Solr as a single or multiple value
* lives in the getSolrField() method of DatasetField.java
*
* AFFILIATION is used for the "collapsed" "Affiliation" facet that means
* either "Author Affiliation" or dataverse affiliation. It needs to be a
* string so we can facet on it and it needs to be multivalued because
* "Author Affiliation" can be multivalued.
*/
public static final String AFFILIATION = "affiliation_ss";
public static final String FILE_NAME = "fileName";
public static final String FILE_DESCRIPTION = "fileDescription";
public static final String FILE_PERSISTENT_ID = "filePersistentId";
/**
* Can be multivalued and includes both "friendly" and "group" versions:
* "PNG Image", "image"
*/
public static final String FILE_TYPE_SEARCHABLE = "fileType";
/**
   * @todo This static variable is not named properly. We want to expose an
   * actual MIME Type in https://github.com/IQSS/dataverse/issues/1595 . See
* also cleanup ticket at https://github.com/IQSS/dataverse/issues/1314
*
* i.e. "PNG Image"
*/
public static final String FILE_TYPE_FRIENDLY = "fileTypeDisplay";
public static final String FILE_CONTENT_TYPE = "fileContentType";
/**
* Used as a facet for file groups like "image" or "document"
*/
public static final String FILE_TYPE = "fileTypeGroupFacet";
public static final String FILE_SIZE_IN_BYTES = "fileSizeInBytes";
public static final String FILE_MD5 = "fileMd5";
public static final String FILE_CHECKSUM_TYPE = "fileChecksumType";
public static final String FILE_CHECKSUM_VALUE = "fileChecksumValue";
public static final String FILENAME_WITHOUT_EXTENSION = "fileNameWithoutExtension";
/**
* Indexed as a string so we can facet on it.
*/
public static final String FILE_TAG = "fileTag";
/**
* Indexed as text_en so it's searchable by lower case etc.
*/
public static final String FILE_TAG_SEARCHABLE = "fileTags";
/**
* Internal boolean indicating that the file has been deleted in the draft version.
*/
public static final String FILE_DELETED = "fileDeleted";
/*
* (tabular) Data Tags are indexed as a string, since we are only planning to
* use these in facet-like, exact searches:
*/
public static final String TABDATA_TAG = "tabularDataTag";
public static final String ACCESS = "fileAccess";
public static final String SUBTREE = "subtreePaths";
// i.e. http://localhost:8080/search.xhtml?q=*&fq0=citationdate_dt:[2008-01-01T00%3A00%3A00Z+TO+2011-01-01T00%3A00%3A00Z%2B1YEAR}
// public static final String PRODUCTION_DATE_ORIGINAL = DatasetFieldConstant.productionDate + "_dt";
// public static final String PRODUCTION_DATE_YEAR_ONLY = DatasetFieldConstant.productionDate + "_i";
// public static final String DISTRIBUTION_DATE_ORIGINAL = DatasetFieldConstant.distributionDate + "_dt";
// public static final String DISTRIBUTION_DATE_YEAR_ONLY = DatasetFieldConstant.distributionDate + "_i";
/**
* Solr refers to "relevance" as "score"
*/
public static final String RELEVANCE = "score";
/**
* A dataverse, a dataset, or a file.
*/
public static final String TYPE = "dvObjectType";
public static final String NAME_SORT = "nameSort";
// PUBLICATION_YEAR used to be called PUBLICATION_DATE.
public static final String PUBLICATION_YEAR = "publicationDate";
public static final String RELEASE_OR_CREATE_DATE = "dateSort";
public static final String DEFINITION_POINT = "definitionPointDocId";
public static final String DEFINITION_POINT_DVOBJECT_ID = "definitionPointDvObjectId";
public static final String DISCOVERABLE_BY = "discoverableBy";
/**
* i.e. "Unpublished", "Draft" (multivalued)
*/
public static final String PUBLICATION_STATUS = "publicationStatus";
/**
* @todo reconcile different with Solr schema.xml where type is Long rather
* than String.
*/
public static final String ENTITY_ID = "entityId";
public static final String PARENT_NAME = "parentName";
public static final String PARENT_ID = "parentId";
public static final String PARENT_IDENTIFIER = "parentIdentifier";
/**
* @todo Should we add a "parentCitationHtml" field now or wait for demand
* for it?
*/
public static final String PARENT_CITATION = "parentCitation";
public static final String DATASET_DESCRIPTION = "dsDescriptionValue";
/**
   * In Dataverse 4.3 and earlier "citation" was indexed as the "online" or
* HTML version, with the DOI link wrapped in an href tag but now it's the
* plaintext version and anyone who was depending on the old version can
* switch to the new "citationHTML" field.
*/
public static final String DATASET_CITATION = "citation";
public static final String DATASET_CITATION_HTML = "citationHtml";
public static final String DATASET_DEACCESSION_REASON = "deaccessionReason";
/**
   * In contrast to PUBLICATION_YEAR, this field applies only to datasets, giving
   * more targeted results when searching just datasets. The format is YYYY (i.e.
   * "2015").
*/
public static final String DATASET_PUBLICATION_DATE = "dsPublicationDate";
public static final String DATASET_PERSISTENT_ID = "dsPersistentId";
public static final String DATASET_VERSION_ID = "datasetVersionId";
public static final String VARIABLE_NAME = "variableName";
public static final String VARIABLE_LABEL = "variableLabel";
public static final String LITERAL_QUESTION = "literalQuestion";
public static final String INTERVIEW_INSTRUCTIONS = "interviewInstructions";
public static final String POST_QUESTION = "postQuestion";
public static final String VARIABLE_UNIVERSE = "variableUniverse";
public static final String VARIABLE_NOTES = "variableNotes";
public static final String FULL_TEXT = "_text_";
}
| 1 | 44,876 | Can you facet on this new externalStatus Solr field? | IQSS-dataverse | java |
@@ -132,11 +132,14 @@ module Blacklight
end
def filters
- @filters ||= blacklight_config.facet_fields.each_value.map do |value|
- f = filter(value)
-
- f if f.any?
- end.compact
+ @filters ||= blacklight_config.facet_fields.each_value.reduce([]) do |acc, value|
+ if value.pivot
+ acc + value.pivot.map { |v| value.class.new(key: v) }
+ .map { |f| filter(f) }.select(&:any?)
+ else
+ acc + Array(filter(value)).select(&:any?)
+ end
+ end
end
def filter(field_key_or_field) | 1 | # frozen_string_literal: true
require 'blacklight/search_state/filter_field'
module Blacklight
# This class encapsulates the search state as represented by the query
# parameters namely: :f, :q, :page, :per_page and, :sort
class SearchState
extend Deprecation
attr_reader :blacklight_config # Must be called blacklight_config, because Blacklight::Facet calls blacklight_config.
attr_reader :params
# This method is never accessed in this class, but may be used by subclasses that need
# to access the url_helpers
attr_reader :controller
delegate :facet_configuration_for_field, to: :blacklight_config
# @param [ActionController::Parameters] params
# @param [Blacklight::Config] blacklight_config
# @param [ApplicationController] controller used for the routing helpers
def initialize(params, blacklight_config, controller = nil)
@params = self.class.normalize_params(params)
@blacklight_config = blacklight_config
@controller = controller
end
def self.normalize_params(untrusted_params = {})
params = untrusted_params
if params.respond_to?(:to_unsafe_h)
# This is the typical (not-ActionView::TestCase) code path.
params = params.to_unsafe_h
# In Rails 5 to_unsafe_h returns a HashWithIndifferentAccess, in Rails 4 it returns Hash
params = params.with_indifferent_access if params.instance_of? Hash
elsif params.is_a? Hash
# This is an ActionView::TestCase workaround for Rails 4.2.
params = params.dup.with_indifferent_access
else
params = params.dup.to_h.with_indifferent_access
end
# Normalize facet parameters mangled by facebook
if params[:f].is_a?(Hash) && params[:f].values.any? { |x| x.is_a?(Hash) }
params[:f] = params[:f].transform_values do |value|
value.is_a?(Hash) ? value.values : value
end
end
params
end
def to_hash
@params.deep_dup
end
alias to_h to_hash
def to_unsafe_h
Deprecation.warn(self.class, 'Use SearchState#to_h instead of SearchState#to_unsafe_h')
to_hash
end
def method_missing(method_name, *arguments, &block)
if @params.respond_to?(method_name)
Deprecation.warn(self.class, "Calling `#{method_name}` on Blacklight::SearchState " \
'is deprecated and will be removed in Blacklight 8. Call #to_h first if you ' \
' need to use hash methods (or, preferably, use your own SearchState implementation)')
@params.public_send(method_name, *arguments, &block)
else
super
end
end
def respond_to_missing?(method_name, include_private = false)
@params.respond_to?(method_name, include_private) || super
end
# Tiny shim to make it easier to migrate raw params access to using this class
delegate :[], to: :params
deprecation_deprecate :[]
def has_constraints?
Deprecation.silence(Blacklight::SearchState) do
!(query_param.blank? && filter_params.blank? && filters.blank? && clause_params.blank?)
end
end
def query_param
params[:q]
end
def clause_params
params[:clause] || {}
end
def filter_params
params[:f] || {}
end
deprecation_deprecate filter_params: 'Use #filters instead'
# @return [Blacklight::SearchState]
def reset(params = nil)
self.class.new(params || ActionController::Parameters.new, blacklight_config, controller)
end
# @return [Blacklight::SearchState]
def reset_search(additional_params = {})
reset(reset_search_params.merge(additional_params))
end
##
# Extension point for downstream applications
# to provide more interesting routing to
# documents
def url_for_document(doc, options = {})
if respond_to?(:blacklight_config) &&
blacklight_config.view_config(:show).route &&
(!doc.respond_to?(:to_model) || doc.to_model.is_a?(SolrDocument))
route = blacklight_config.view_config(:show).route.merge(action: :show, id: doc).merge(options)
route[:controller] = params[:controller] if route[:controller] == :current
route
else
doc
end
end
def remove_query_params
p = reset_search_params
p.delete(:q)
p
end
def filters
@filters ||= blacklight_config.facet_fields.each_value.map do |value|
f = filter(value)
f if f.any?
end.compact
end
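    # Accepts either a facet field configuration object or a field key. Unknown keys fall
    # back to a NullField, so callers always get a usable filter object back, e.g.
    # filter(field).add(item).params as used by add_facet_params below.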
def filter(field_key_or_field)
field = field_key_or_field if field_key_or_field.is_a? Blacklight::Configuration::Field
field ||= blacklight_config.facet_fields[field_key_or_field]
field ||= Blacklight::Configuration::NullField.new(key: field_key_or_field)
(field.filter_class || FilterField).new(field, self)
end
# adds the value and/or field to params[:f]
# Does NOT remove request keys and otherwise ensure that the hash
# is suitable for a redirect. See
# add_facet_params_and_redirect
def add_facet_params(field, item)
filter(field).add(item).params
end
deprecation_deprecate add_facet_params: 'Use filter(field).add(item) instead'
# Used in catalog/facet action, facets.rb view, for a click
# on a facet value. Add on the facet params to existing
# search constraints. Remove any paginator-specific request
# params, or other request params that should be removed
# for a 'fresh' display.
# Change the action to 'index' to send them back to
# catalog/index with their new facet choice.
def add_facet_params_and_redirect(field, item)
new_params = Deprecation.silence(self.class) do
add_facet_params(field, item)
end
# Delete any request params from facet-specific action, needed
# to redir to index action properly.
request_keys = blacklight_config.facet_paginator_class.request_keys
new_params.extract!(*request_keys.values)
new_params
end
# copies the current params (or whatever is passed in as the 3rd arg)
# removes the field value from params[:f]
# removes the field if there are no more values in params[:f][field]
# removes additional params (page, id, etc..)
# @param [String] field
# @param [String] item
def remove_facet_params(field, item)
filter(field).remove(item).params
end
deprecation_deprecate remove_facet_params: 'Use filter(field).remove(item) instead'
def has_facet?(config, value: nil)
if value
filter(config).include?(value)
else
filter(config).any?
end
end
deprecation_deprecate has_facet?: 'Use filter(field).include?(value) or .any? instead'
# Merge the source params with the params_to_merge hash
# @param [Hash] params_to_merge to merge into above
# @return [ActionController::Parameters] the current search parameters after being sanitized by Blacklight::Parameters.sanitize
# @yield [params] The merged parameters hash before being sanitized
def params_for_search(params_to_merge = {})
# params hash we'll return
my_params = params.dup.merge(self.class.new(params_to_merge, blacklight_config, controller))
if block_given?
yield my_params
end
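      # A change of per-page size or sort order invalidates the current page number,
      # so drop back to the first page in that case.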
if my_params[:page] && (my_params[:per_page] != params[:per_page] || my_params[:sort] != params[:sort])
my_params[:page] = 1
end
Parameters.sanitize(my_params)
end
def page
[params[:page].to_i, 1].max
end
def per_page
params[:rows].presence&.to_i ||
params[:per_page].presence&.to_i ||
blacklight_config.default_per_page
end
def sort_field
if sort_field_key.blank?
# no sort param provided, use default
blacklight_config.default_sort_field
else
# check for sort field key
blacklight_config.sort_fields[sort_field_key]
end
end
def search_field
blacklight_config.search_fields[search_field_key]
end
def facet_page
[params[facet_request_keys[:page]].to_i, 1].max
end
def facet_sort
params[facet_request_keys[:sort]]
end
def facet_prefix
params[facet_request_keys[:prefix]]
end
private
def search_field_key
params[:search_field]
end
def sort_field_key
params[:sort]
end
def facet_request_keys
blacklight_config.facet_paginator_class.request_keys
end
##
# Reset any search parameters that store search context
# and need to be reset when e.g. constraints change
# @return [ActionController::Parameters]
def reset_search_params
Parameters.sanitize(params).except(:page, :counter)
end
end
end
| 1 | 8,843 | I wonder if it would it make sense to push the pivot facet handling down into `FilterField` (or maybe a subclass, `PivotFilterField`?)? | projectblacklight-blacklight | rb |
@@ -47,7 +47,12 @@ Puppet::Functions.create_function(:run_plan, Puppet::Functions::InternalFunction
end
begin
- result = func.class.dispatcher.dispatchers[0].call_by_name_with_scope(scope, params, true)
+ func.class.dispatcher.dispatchers[0].call_by_name_with_scope(scope, params, true)
+ # If the plan returns using a return statement then we never get here.
+ # We only get here if the plan finishes without executing any return
+ # statement and since we don't want to leak any implicit return value
+ # from the plan we simply set the result to nil here.
+ result = nil
rescue Puppet::PreformattedError => err
if named_args['_catch_errors'] && err.cause.is_a?(Bolt::Error)
result = err.cause.to_puppet_error | 1 | # frozen_string_literal: true
require 'bolt/error'
# Runs the `plan` referenced by its name passing giving arguments to it given as a hash of name to value mappings.
# A plan is autoloaded from under <root>/plans if not already defined.
#
# @example defining and running a plan
# plan myplan($x) {
# # do things with tasks
# notice "plan done with param x = ${x}"
# }
# run_plan('myplan', { x => 'testing' })
#
Puppet::Functions.create_function(:run_plan, Puppet::Functions::InternalFunction) do
dispatch :run_plan do
scope_param
param 'String', :plan_name
optional_param 'Hash', :named_args
end
def run_plan(scope, plan_name, named_args = {})
unless Puppet[:tasks]
raise Puppet::ParseErrorWithIssue.from_issue_and_stack(
Puppet::Pops::Issues::TASK_OPERATION_NOT_SUPPORTED_WHEN_COMPILING, operation: 'run_plan'
)
end
executor = Puppet.lookup(:bolt_executor) { nil }
unless executor && Puppet.features.bolt?
raise Puppet::ParseErrorWithIssue.from_issue_and_stack(
Puppet::Pops::Issues::TASK_MISSING_BOLT, action: _('run a plan')
)
end
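    # Arguments whose names start with an underscore (e.g. '_run_as', '_catch_errors')
    # are metaparameters consumed by run_plan itself rather than plan parameters,
    # so they are stripped before the plan is invoked.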
params = named_args.reject { |k, _| k.start_with?('_') }
loaders = closure_scope.compiler.loaders
# The perspective of the environment is wanted here (for now) to not have to
# require modules to have dependencies defined in meta data.
loader = loaders.private_environment_loader
if loader && (func = loader.load(:plan, plan_name))
# TODO: Add profiling around this
if (run_as = named_args['_run_as'])
old_run_as = executor.run_as
executor.run_as = run_as
end
begin
result = func.class.dispatcher.dispatchers[0].call_by_name_with_scope(scope, params, true)
rescue Puppet::PreformattedError => err
if named_args['_catch_errors'] && err.cause.is_a?(Bolt::Error)
result = err.cause.to_puppet_error
else
raise err
end
ensure
if run_as
executor.run_as = old_run_as
end
end
return result
end
# Could not find plan
raise Puppet::ParseErrorWithIssue.from_issue_and_stack(
Puppet::Pops::Issues.issue(:UNKNOWN_PLAN) { Bolt::Error.unknown_plan(plan_name) }
)
end
end
| 1 | 8,450 | Ah I see what you were saying about making it harder to validate types. We'll have to catch the return for that. | puppetlabs-bolt | rb |
@@ -86,6 +86,7 @@ public abstract class BaseActivity extends AppCompatActivity implements BottomNa
.setTarget(nav_cam)
.setDismissText(getResources().getString(R.string.ok_button))
.setContentText(getResources().getString(R.string.camera_button))
+ .hideOnTouchOutside()
.build()
);
| 1 | package org.fossasia.phimpme.base;
import android.animation.Animator;
import android.animation.AnimatorListenerAdapter;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.res.ColorStateList;
import android.graphics.Color;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.design.internal.BottomNavigationItemView;
import android.support.design.widget.BottomNavigationView;
import android.support.v4.content.ContextCompat;
import android.support.v7.app.AppCompatActivity;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import org.fossasia.phimpme.R;
import org.fossasia.phimpme.accounts.AccountActivity;
import org.fossasia.phimpme.gallery.activities.LFMainActivity;
import org.fossasia.phimpme.opencamera.Camera.CameraActivity;
import uk.co.deanwild.materialshowcaseview.MaterialShowcaseSequence;
import uk.co.deanwild.materialshowcaseview.MaterialShowcaseView;
import uk.co.deanwild.materialshowcaseview.ShowcaseConfig;
public abstract class BaseActivity extends AppCompatActivity implements BottomNavigationView.OnNavigationItemSelectedListener {
protected BottomNavigationView navigationView;
private static final String SHOWCASE_ID = "1";
BottomNavigationItemView nav_home;
BottomNavigationItemView nav_cam;
BottomNavigationItemView nav_acc;
private int[][] states = new int[][] {
new int[] {android.R.attr.state_checked}, // checked
new int[] {-android.R.attr.state_checked}, // unchecked
};
private int[] colors = new int[] {
Color.WHITE, // checked
0 // unchecked set default in onCreate
};
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(getContentViewId());
colors[1] = ContextCompat.getColor(this, R.color.bottom_navigation_tabs);
ColorStateList myList = new ColorStateList(states, colors);
navigationView = (BottomNavigationView) findViewById(R.id.bottombar);
navigationView.setItemIconTintList(myList);
navigationView.setItemTextColor(myList);
navigationView.setOnNavigationItemSelectedListener(this);
nav_home = (BottomNavigationItemView) findViewById(R.id.navigation_home);
nav_cam = (BottomNavigationItemView) findViewById(R.id.navigation_camera);
nav_acc = (BottomNavigationItemView) findViewById(R.id.navigation_accounts);
int checkStoragePermission = ContextCompat.checkSelfPermission(this, android.Manifest.permission.WRITE_EXTERNAL_STORAGE);
if(checkStoragePermission == PackageManager.PERMISSION_GRANTED)
            presentShowcaseSequence(); // walk the user through the bottom navigation items
}
private void presentShowcaseSequence() {
ShowcaseConfig config = new ShowcaseConfig();
        config.setDelay(4000); // four second delay between each showcase view
MaterialShowcaseSequence sequence = new MaterialShowcaseSequence(this, SHOWCASE_ID);
sequence.setOnItemShownListener(new MaterialShowcaseSequence.OnSequenceItemShownListener() {
@Override
public void onShow(MaterialShowcaseView itemView, int position) {
//Toast.makeText(itemView.getContext(), "Item #" + position, Toast.LENGTH_SHORT).show();
}
});
sequence.setConfig(config);
sequence.addSequenceItem(nav_home, getResources().getString(R.string.home_button), getResources().getString(R.string.ok_button));
sequence.addSequenceItem(
new MaterialShowcaseView.Builder(this)
.setTarget(nav_cam)
.setDismissText(getResources().getString(R.string.ok_button))
.setContentText(getResources().getString(R.string.camera_button))
.build()
);
sequence.addSequenceItem(
new MaterialShowcaseView.Builder(this)
.setTarget(nav_acc)
.setDismissText(getResources().getString(R.string.ok_button))
.setContentText(getResources().getString(R.string.accounts_button))
.build()
);
sequence.start();
}
@Override
protected void onStart() {
super.onStart();
updateNavigationBarState();
}
// Remove inter-activity transition to avoid screen tossing on tapping bottom navigation items
@Override
public void onPause() {
super.onPause();
overridePendingTransition(0, 0);
}
@Override
public boolean onNavigationItemSelected(@NonNull final MenuItem item) {
if (item.getItemId() != getNavigationMenuItemId()) {
switch (item.getItemId()) {
case R.id.navigation_camera:
startActivity(new Intent(this, CameraActivity.class).addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP));
break;
case R.id.navigation_home:
startActivity(new Intent(this, LFMainActivity.class));
break;
case R.id.navigation_accounts:
startActivity(new Intent(this, AccountActivity.class).addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP));
break;
}
}
return true;
}
private void updateNavigationBarState() {
int actionId = getNavigationMenuItemId();
selectBottomNavigationBarItem(actionId);
}
void selectBottomNavigationBarItem(int itemId) {
Menu menu = navigationView.getMenu();
for (int i = 0, size = menu.size(); i < size; i++) {
MenuItem item = menu.getItem(i);
boolean shouldBeChecked = item.getItemId() == itemId;
if (shouldBeChecked) {
item.setChecked(true);
break;
}
}
}
void setIconColor(int color){
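        // Rough brightness check: when the RGB components of the bar color sum to less than
        // 300 the background is treated as dark and the selected item is drawn in white,
        // otherwise in black.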
if(Color.red(color) + Color.green(color)+ Color.blue(color) < 300)
colors[0] = Color.WHITE;
else
colors[0] = Color.BLACK;
}
public abstract int getContentViewId();
public abstract int getNavigationMenuItemId();
public void setNavigationBarColor(int color) {
navigationView.setBackgroundColor(color);
setIconColor(color);
}
/**
* Animate bottom navigation bar from GONE to VISIBLE
*/
public void showNavigationBar() {
navigationView.animate()
.translationY(0)
.alpha(1.0f)
.setDuration(400)
.setListener(new AnimatorListenerAdapter() {
@Override
public void onAnimationStart(Animator animation) {
super.onAnimationStart(animation);
navigationView.setVisibility(View.VISIBLE);
}
});
}
/**
* Animate bottom navigation bar from VISIBLE to GONE
*/
public void hideNavigationBar() {
navigationView.animate()
.alpha(0.0f)
.translationYBy(navigationView.getHeight())
.setDuration(400)
.setListener(new AnimatorListenerAdapter() {
@Override
public void onAnimationEnd(Animator animation) {
super.onAnimationEnd(animation);
navigationView.setVisibility(View.GONE);
}
});
}
}
| 1 | 11,789 | Try using using `.setDismissOnTouch(true)` instead | fossasia-phimpme-android | java |
@@ -122,6 +122,12 @@ namespace Datadog.Trace.ClrProfiler.Integrations
}
catch (Exception ex)
{
+ if (scope != null)
+ {
+ // some fields aren't set till after execution, so populate anything missing
+ UpdateSpan(controllerContext, scope.Span);
+ }
+
scope?.Span.SetException(ex);
throw;
} | 1 | #if !NETSTANDARD2_0
using System;
using System.Collections.Generic;
using System.Net.Http;
using System.Threading;
using System.Threading.Tasks;
using Datadog.Trace.ClrProfiler.Emit;
using Datadog.Trace.ExtensionMethods;
using Datadog.Trace.Logging;
using Datadog.Trace.Util;
namespace Datadog.Trace.ClrProfiler.Integrations
{
/// <summary>
/// Contains instrumentation wrappers for ASP.NET Web API 5.
/// </summary>
public static class AspNetWebApi2Integration
{
private const string IntegrationName = "AspNetWebApi2";
private const string OperationName = "aspnet-webapi.request";
private const string Major5Minor2 = "5.2";
private const string Major5 = "5";
private const string SystemWebHttpAssemblyName = "System.Web.Http";
private const string HttpControllerTypeName = "System.Web.Http.Controllers.IHttpController";
private const string HttpControllerContextTypeName = "System.Web.Http.Controllers.HttpControllerContext";
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.GetLogger(typeof(AspNetWebApi2Integration));
/// <summary>
/// Calls the underlying ExecuteAsync and traces the request.
/// </summary>
/// <param name="apiController">The Api Controller</param>
/// <param name="controllerContext">The controller context for the call</param>
/// <param name="cancellationTokenSource">The cancellation token source</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
/// <returns>A task with the result</returns>
[InterceptMethod(
TargetAssembly = SystemWebHttpAssemblyName,
TargetType = HttpControllerTypeName,
TargetSignatureTypes = new[] { ClrNames.HttpResponseMessageTask, HttpControllerContextTypeName, ClrNames.CancellationToken },
TargetMinimumVersion = Major5Minor2,
TargetMaximumVersion = Major5)]
public static object ExecuteAsync(
object apiController,
object controllerContext,
object cancellationTokenSource,
int opCode,
int mdToken,
long moduleVersionPtr)
{
if (apiController == null) { throw new ArgumentNullException(nameof(apiController)); }
var tokenSource = cancellationTokenSource as CancellationTokenSource;
var cancellationToken = tokenSource?.Token ?? CancellationToken.None;
return ExecuteAsyncInternal(apiController, controllerContext, cancellationToken, opCode, mdToken, moduleVersionPtr);
}
/// <summary>
/// Calls the underlying ExecuteAsync and traces the request.
/// </summary>
/// <param name="apiController">The Api Controller</param>
/// <param name="controllerContext">The controller context for the call</param>
/// <param name="cancellationToken">The cancellation token</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
/// <returns>A task with the result</returns>
private static async Task<HttpResponseMessage> ExecuteAsyncInternal(
object apiController,
object controllerContext,
CancellationToken cancellationToken,
int opCode,
int mdToken,
long moduleVersionPtr)
{
Func<object, object, CancellationToken, Task<HttpResponseMessage>> instrumentedMethod;
try
{
var httpControllerType = apiController.GetInstrumentedInterface(HttpControllerTypeName);
instrumentedMethod = MethodBuilder<Func<object, object, CancellationToken, Task<HttpResponseMessage>>>
.Start(moduleVersionPtr, mdToken, opCode, nameof(ExecuteAsync))
.WithConcreteType(httpControllerType)
.WithParameters(controllerContext, cancellationToken)
.WithNamespaceAndNameFilters(
ClrNames.HttpResponseMessageTask,
HttpControllerContextTypeName,
ClrNames.CancellationToken)
.Build();
}
catch (Exception ex)
{
Log.ErrorRetrievingMethod(
exception: ex,
moduleVersionPointer: moduleVersionPtr,
mdToken: mdToken,
opCode: opCode,
instrumentedType: HttpControllerTypeName,
methodName: nameof(ExecuteAsync),
instanceType: apiController.GetType().AssemblyQualifiedName);
throw;
}
using (Scope scope = CreateScope(controllerContext))
{
try
{
// call the original method, inspecting (but not catching) any unhandled exceptions
var responseMessage = await instrumentedMethod(apiController, controllerContext, cancellationToken).ConfigureAwait(false);
if (scope != null)
{
// some fields aren't set till after execution, so populate anything missing
UpdateSpan(controllerContext, scope.Span);
}
return responseMessage;
}
catch (Exception ex)
{
scope?.Span.SetException(ex);
throw;
}
}
}
private static Scope CreateScope(object controllerContext)
{
Scope scope = null;
try
{
if (!Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationName))
{
// integration disabled, don't create a scope, skip this trace
return null;
}
var tracer = Tracer.Instance;
var request = controllerContext.GetProperty<HttpRequestMessage>("Request").GetValueOrDefault();
SpanContext propagatedContext = null;
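                // Only look for propagated (distributed tracing) headers when no scope is
                // already active in this process; an existing active scope is used as the
                // parent instead.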
if (request != null && tracer.ActiveScope == null)
{
try
{
// extract propagated http headers
var headers = request.Headers.Wrap();
propagatedContext = SpanContextPropagator.Instance.Extract(headers);
}
catch (Exception ex)
{
Log.Error(ex, "Error extracting propagated HTTP headers.");
}
}
scope = tracer.StartActive(OperationName, propagatedContext);
UpdateSpan(controllerContext, scope.Span);
// set analytics sample rate if enabled
var analyticsSampleRate = tracer.Settings.GetIntegrationAnalyticsSampleRate(IntegrationName, enabledWithGlobalSetting: true);
scope.Span.SetMetric(Tags.Analytics, analyticsSampleRate);
}
catch (Exception ex)
{
Log.Error(ex, "Error creating scope.");
}
return scope;
}
private static void UpdateSpan(dynamic controllerContext, Span span)
{
try
{
var req = controllerContext?.Request as HttpRequestMessage;
string host = req?.Headers?.Host ?? string.Empty;
string rawUrl = req?.RequestUri?.ToString().ToLowerInvariant() ?? string.Empty;
string absoluteUri = req?.RequestUri?.AbsoluteUri?.ToLowerInvariant() ?? string.Empty;
string method = controllerContext?.Request?.Method?.Method?.ToUpperInvariant() ?? "GET";
string route = null;
try
{
route = controllerContext?.RouteData?.Route?.RouteTemplate;
}
catch
{
}
string resourceName = $"{method} {absoluteUri.ToLowerInvariant()}";
if (route != null)
{
resourceName = $"{method} {route.ToLowerInvariant()}";
}
else if (req?.RequestUri != null)
{
var cleanUri = UriHelpers.GetRelativeUrl(req?.RequestUri, tryRemoveIds: true);
resourceName = $"{method} {cleanUri.ToLowerInvariant()}";
}
string controller = string.Empty;
string action = string.Empty;
try
{
if (controllerContext?.RouteData?.Values is IDictionary<string, object> routeValues)
{
controller = (routeValues.GetValueOrDefault("controller") as string)?.ToLowerInvariant();
action = (routeValues.GetValueOrDefault("action") as string)?.ToLowerInvariant();
}
}
catch
{
}
// Fail safe to catch templates in routing values
resourceName =
resourceName
.Replace("{controller}", controller)
.Replace("{action}", action);
span.DecorateWebServerSpan(
resourceName: resourceName,
method: method,
host: host,
httpUrl: rawUrl);
span.SetTag(Tags.AspNetAction, action);
span.SetTag(Tags.AspNetController, controller);
span.SetTag(Tags.AspNetRoute, route);
}
catch (Exception ex)
{
Log.Error(ex, "Error populating scope data.");
}
}
}
}
#endif
| 1 | 16,376 | Zach, is `UpdateSpan` of concern too? I see that it is called from `ExecuteAsyncInternal`. | DataDog-dd-trace-dotnet | .cs |
@@ -26,7 +26,9 @@ import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
import org.apache.orc.TypeDescription;
-public class TypeConversion {
+public final class TypeConversion {
+
+ private TypeConversion() {}
/**
* Convert a given Iceberg schema to ORC. | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.orc;
import java.util.ArrayList;
import java.util.List;
import org.apache.iceberg.Schema;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
import org.apache.orc.TypeDescription;
public class TypeConversion {
/**
* Convert a given Iceberg schema to ORC.
* @param schema the Iceberg schema to convert
* @param columnIds an output with the column ids
* @return the ORC schema
*/
public static TypeDescription toOrc(Schema schema,
ColumnIdMap columnIds) {
return toOrc(null, schema.asStruct(), columnIds);
}
static TypeDescription toOrc(Integer fieldId,
Type type,
ColumnIdMap columnIds) {
TypeDescription result;
switch (type.typeId()) {
case BOOLEAN:
result = TypeDescription.createBoolean();
break;
case INTEGER:
result = TypeDescription.createInt();
break;
case LONG:
result = TypeDescription.createLong();
break;
case FLOAT:
result = TypeDescription.createFloat();
break;
case DOUBLE:
result = TypeDescription.createDouble();
break;
case DATE:
result = TypeDescription.createDate();
break;
case TIME:
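        // ORC has no native time type, so Iceberg's TIME is represented as an int column.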
result = TypeDescription.createInt();
break;
case TIMESTAMP:
result = TypeDescription.createTimestamp();
break;
case STRING:
result = TypeDescription.createString();
break;
case UUID:
result = TypeDescription.createBinary();
break;
case FIXED:
result = TypeDescription.createBinary();
break;
case BINARY:
result = TypeDescription.createBinary();
break;
case DECIMAL: {
Types.DecimalType decimal = (Types.DecimalType) type;
result = TypeDescription.createDecimal()
.withScale(decimal.scale())
.withPrecision(decimal.precision());
break;
}
case STRUCT: {
result = TypeDescription.createStruct();
for (Types.NestedField field : type.asStructType().fields()) {
result.addField(field.name(), toOrc(field.fieldId(), field.type(), columnIds));
}
break;
}
case LIST: {
Types.ListType list = (Types.ListType) type;
result = TypeDescription.createList(toOrc(list.elementId(), list.elementType(),
columnIds));
break;
}
case MAP: {
Types.MapType map = (Types.MapType) type;
TypeDescription key = toOrc(map.keyId(), map.keyType(), columnIds);
result = TypeDescription.createMap(key,
toOrc(map.valueId(), map.valueType(), columnIds));
break;
}
default:
throw new IllegalArgumentException("Unhandled type " + type.typeId());
}
if (fieldId != null) {
columnIds.put(result, fieldId);
}
return result;
}
/**
* Convert an ORC schema to an Iceberg schema.
* @param schema the ORC schema
* @param columnIds the column ids
* @return the Iceberg schema
*/
public Schema fromOrc(TypeDescription schema, ColumnIdMap columnIds) {
return new Schema(convertOrcToType(schema, columnIds).asStructType().fields());
}
Type convertOrcToType(TypeDescription schema, ColumnIdMap columnIds) {
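    // Note that some ORC categories are widened here (BYTE/SHORT become int, CHAR/VARCHAR
    // become string), so a round trip back to ORC will not reproduce the original ORC type.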
switch (schema.getCategory()) {
case BOOLEAN:
return Types.BooleanType.get();
case BYTE:
case SHORT:
case INT:
return Types.IntegerType.get();
case LONG:
return Types.LongType.get();
case FLOAT:
return Types.FloatType.get();
case DOUBLE:
return Types.DoubleType.get();
case STRING:
case CHAR:
case VARCHAR:
return Types.StringType.get();
case BINARY:
return Types.BinaryType.get();
case DATE:
return Types.DateType.get();
case TIMESTAMP:
return Types.TimestampType.withoutZone();
case DECIMAL:
return Types.DecimalType.of(schema.getPrecision(), schema.getScale());
case STRUCT: {
List<String> fieldNames = schema.getFieldNames();
List<TypeDescription> fieldTypes = schema.getChildren();
List<Types.NestedField> fields = new ArrayList<>(fieldNames.size());
for (int c = 0; c < fieldNames.size(); ++c) {
String name = fieldNames.get(c);
TypeDescription type = fieldTypes.get(c);
fields.add(Types.NestedField.optional(columnIds.get(type), name,
convertOrcToType(type, columnIds)));
}
return Types.StructType.of(fields);
}
case LIST: {
TypeDescription child = schema.getChildren().get(0);
return Types.ListType.ofOptional(columnIds.get(child),
convertOrcToType(child, columnIds));
}
case MAP: {
TypeDescription key = schema.getChildren().get(0);
TypeDescription value = schema.getChildren().get(1);
return Types.MapType.ofOptional(columnIds.get(key), columnIds.get(value),
convertOrcToType(key, columnIds), convertOrcToType(value, columnIds));
}
default:
// We don't have an answer for union types.
throw new IllegalArgumentException("Can't handle " + schema);
}
}
}
| 1 | 13,947 | Nit: its scope can be reduced to package default | apache-iceberg | java |
@@ -15,5 +15,7 @@ namespace Microsoft.AspNetCore.Protocols.Features
PipeScheduler InputWriterScheduler { get; }
PipeScheduler OutputReaderScheduler { get; }
+
+ PipeScheduler ApplicationScheduler { get; set; }
}
} | 1 | using System.Buffers;
using System.IO.Pipelines;
using System.Threading;
namespace Microsoft.AspNetCore.Protocols.Features
{
public interface IConnectionTransportFeature
{
MemoryPool MemoryPool { get; }
IDuplexPipe Transport { get; set; }
IDuplexPipe Application { get; set; }
PipeScheduler InputWriterScheduler { get; }
PipeScheduler OutputReaderScheduler { get; }
}
}
| 1 | 14,793 | Why is this get;set;? | aspnet-KestrelHttpServer | .cs |
@@ -77,6 +77,9 @@ bool StatefulReader::matched_writer_add(RemoteWriterAttributes& wdata)
getRTPSParticipant()->createSenderResources(wdata.endpoint.remoteLocatorList, false);
+
+ wdata.endpoint.unicastLocatorList =
+ mp_RTPSParticipant->network_factory().ShrinkLocatorLists({wdata.endpoint.unicastLocatorList});
WriterProxy* wp = new WriterProxy(wdata, this);
wp->mp_initialAcknack->restart_timer(); | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file StatefulReader.cpp
*
*/
#include <fastrtps/rtps/reader/StatefulReader.h>
#include <fastrtps/rtps/reader/WriterProxy.h>
#include <fastrtps/rtps/reader/ReaderListener.h>
#include <fastrtps/rtps/history/ReaderHistory.h>
#include <fastrtps/rtps/reader/timedevent/HeartbeatResponseDelay.h>
#include <fastrtps/rtps/reader/timedevent/InitialAckNack.h>
#include <fastrtps/log/Log.h>
#include <fastrtps/rtps/messages/RTPSMessageCreator.h>
#include "../participant/RTPSParticipantImpl.h"
#include "FragmentedChangePitStop.h"
#include <fastrtps/utils/TimeConversion.h>
#include <mutex>
#include <thread>
#include <cassert>
#define IDSTRING "(ID:"<< std::this_thread::get_id() <<") "<<
using namespace eprosima::fastrtps::rtps;
StatefulReader::~StatefulReader()
{
    logInfo(RTPS_READER,"StatefulReader destructor.");
for(std::vector<WriterProxy*>::iterator it = matched_writers.begin();
it!=matched_writers.end();++it)
{
delete(*it);
}
}
StatefulReader::StatefulReader(RTPSParticipantImpl* pimpl,GUID_t& guid,
ReaderAttributes& att,ReaderHistory* hist,ReaderListener* listen):
RTPSReader(pimpl,guid,att,hist, listen),
m_acknackCount(0),
m_nackfragCount(0),
m_times(att.times)
{
}
bool StatefulReader::matched_writer_add(RemoteWriterAttributes& wdata)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
for(std::vector<WriterProxy*>::iterator it=matched_writers.begin();
it!=matched_writers.end();++it)
{
if((*it)->m_att.guid == wdata.guid)
{
logInfo(RTPS_READER,"Attempting to add existing writer");
return false;
}
}
getRTPSParticipant()->createSenderResources(wdata.endpoint.remoteLocatorList, false);
WriterProxy* wp = new WriterProxy(wdata, this);
wp->mp_initialAcknack->restart_timer();
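    // Track this writer for persistence and start the proxy from the last sequence number
    // already notified to the user, so previously delivered changes are not notified again.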
add_persistence_guid(wdata);
wp->loaded_from_storage_nts(get_last_notified(wdata.guid));
matched_writers.push_back(wp);
logInfo(RTPS_READER,"Writer Proxy " <<wp->m_att.guid <<" added to " <<m_guid.entityId);
return true;
}
bool StatefulReader::matched_writer_remove(const RemoteWriterAttributes& wdata)
{
WriterProxy *wproxy = nullptr;
std::unique_lock<std::recursive_mutex> lock(*mp_mutex);
//Remove cachechanges belonging to the unmatched writer
mp_history->remove_changes_with_guid(wdata.guid);
for(std::vector<WriterProxy*>::iterator it=matched_writers.begin();it!=matched_writers.end();++it)
{
if((*it)->m_att.guid == wdata.guid)
{
logInfo(RTPS_READER,"Writer Proxy removed: " <<(*it)->m_att.guid);
wproxy = *it;
matched_writers.erase(it);
remove_persistence_guid(wdata);
break;
}
}
lock.unlock();
if(wproxy != nullptr)
{
delete wproxy;
return true;
}
logInfo(RTPS_READER,"Writer Proxy " << wdata.guid << " doesn't exist in reader "<<this->getGuid().entityId);
return false;
}
bool StatefulReader::matched_writer_remove(const RemoteWriterAttributes& wdata, bool deleteWP)
{
WriterProxy *wproxy = nullptr;
std::unique_lock<std::recursive_mutex> lock(*mp_mutex);
    //Remove cache changes belonging to the unmatched writer
mp_history->remove_changes_with_guid(wdata.guid);
for(std::vector<WriterProxy*>::iterator it=matched_writers.begin();it!=matched_writers.end();++it)
{
if((*it)->m_att.guid == wdata.guid)
{
logInfo(RTPS_READER,"Writer Proxy removed: " <<(*it)->m_att.guid);
wproxy = *it;
matched_writers.erase(it);
remove_persistence_guid(wdata);
break;
}
}
lock.unlock();
if(wproxy != nullptr && deleteWP)
{
delete(wproxy);
return true;
}
logInfo(RTPS_READER,"Writer Proxy " << wdata.guid << " doesn't exist in reader "<<this->getGuid().entityId);
return false;
}
bool StatefulReader::matched_writer_is_matched(const RemoteWriterAttributes& wdata)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
for(std::vector<WriterProxy*>::iterator it=matched_writers.begin();it!=matched_writers.end();++it)
{
if((*it)->m_att.guid == wdata.guid)
{
return true;
}
}
return false;
}
bool StatefulReader::matched_writer_lookup(const GUID_t& writerGUID, WriterProxy** WP)
{
assert(WP);
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
bool returnedValue = findWriterProxy(writerGUID, WP);
if(returnedValue)
{
logInfo(RTPS_READER,this->getGuid().entityId<<" FINDS writerProxy "<< writerGUID<<" from "<< matched_writers.size());
}
else
{
logInfo(RTPS_READER,this->getGuid().entityId<<" NOT FINDS writerProxy "<< writerGUID<<" from "<< matched_writers.size());
}
return returnedValue;
}
bool StatefulReader::findWriterProxy(const GUID_t& writerGUID, WriterProxy** WP)
{
assert(WP);
for(std::vector<WriterProxy*>::iterator it = matched_writers.begin(); it != matched_writers.end(); ++it)
{
if((*it)->m_att.guid == writerGUID)
{
*WP = *it;
return true;
}
}
return false;
}
bool StatefulReader::processDataMsg(CacheChange_t *change)
{
WriterProxy *pWP = nullptr;
assert(change);
std::unique_lock<std::recursive_mutex> lock(*mp_mutex);
if(acceptMsgFrom(change->writerGUID, &pWP))
{
// Check if CacheChange was received.
if(!pWP->change_was_received(change->sequenceNumber))
{
logInfo(RTPS_MSG_IN,IDSTRING"Trying to add change " << change->sequenceNumber <<" TO reader: "<< getGuid().entityId);
CacheChange_t* change_to_add;
if(reserveCache(&change_to_add, change->serializedPayload.length)) //Reserve a new cache from the corresponding cache pool
{
#if HAVE_SECURITY
if(getAttributes().security_attributes().is_payload_protected)
{
change_to_add->copy_not_memcpy(change);
if(!getRTPSParticipant()->security_manager().decode_serialized_payload(change->serializedPayload,
change_to_add->serializedPayload, m_guid, change->writerGUID))
{
releaseCache(change_to_add);
logWarning(RTPS_MSG_IN, "Cannont decode serialized payload");
return false;
}
}
else
{
#endif
if (!change_to_add->copy(change))
{
logWarning(RTPS_MSG_IN,IDSTRING"Problem copying CacheChange, received data is: " << change->serializedPayload.length
<< " bytes and max size in reader " << getGuid().entityId << " is " << change_to_add->serializedPayload.max_size);
releaseCache(change_to_add);
return false;
}
#if HAVE_SECURITY
}
#endif
}
else
{
logError(RTPS_MSG_IN,IDSTRING"Problem reserving CacheChange in reader: " << getGuid().entityId);
return false;
}
            // Assertion has to be done before calling change_received,
// because this function can unlock the StatefulReader mutex.
if(pWP != nullptr)
{
                pWP->assertLiveliness(); //Assert liveliness since a DATA message has been received.
}
if(!change_received(change_to_add, pWP))
{
logInfo(RTPS_MSG_IN,IDSTRING"MessageReceiver not add change "<<change_to_add->sequenceNumber);
releaseCache(change_to_add);
if(pWP == nullptr && getGuid().entityId == c_EntityId_SPDPReader)
{
mp_RTPSParticipant->assertRemoteRTPSParticipantLiveliness(change->writerGUID.guidPrefix);
}
}
}
}
return true;
}
bool StatefulReader::processDataFragMsg(CacheChange_t *incomingChange, uint32_t sampleSize, uint32_t fragmentStartingNum)
{
WriterProxy *pWP = nullptr;
assert(incomingChange);
std::unique_lock<std::recursive_mutex> lock(*mp_mutex);
if(acceptMsgFrom(incomingChange->writerGUID, &pWP))
{
// Check if CacheChange was received.
if(!pWP->change_was_received(incomingChange->sequenceNumber))
{
logInfo(RTPS_MSG_IN, IDSTRING"Trying to add fragment " << incomingChange->sequenceNumber.to64long() << " TO reader: " << getGuid().entityId);
CacheChange_t* change_to_add = incomingChange;
#if HAVE_SECURITY
if(getAttributes().security_attributes().is_payload_protected)
{
if(reserveCache(&change_to_add, incomingChange->serializedPayload.length)) //Reserve a new cache from the corresponding cache pool
{
change_to_add->copy_not_memcpy(incomingChange);
if(!getRTPSParticipant()->security_manager().decode_serialized_payload(incomingChange->serializedPayload,
change_to_add->serializedPayload, m_guid, incomingChange->writerGUID))
{
releaseCache(change_to_add);
logWarning(RTPS_MSG_IN, "Cannont decode serialized payload");
return false;
}
}
}
#endif
            // Fragments manager has to process incoming fragments.
            // If the CacheChange_t is completed, it will be returned.
CacheChange_t* change_completed = fragmentedChangePitStop_->process(change_to_add, sampleSize, fragmentStartingNum);
#if HAVE_SECURITY
if(getAttributes().security_attributes().is_payload_protected)
releaseCache(change_to_add);
#endif
            // Assertion has to be done before calling change_received,
// because this function can unlock the StatefulReader mutex.
if(pWP != nullptr)
{
                pWP->assertLiveliness(); //Assert liveliness since a DATA message has been received.
}
if(change_completed != nullptr)
{
if(!change_received(change_completed, pWP))
{
logInfo(RTPS_MSG_IN, IDSTRING"MessageReceiver not add change " << change_completed->sequenceNumber.to64long());
// Assert liveliness because it is a participant discovery info.
if(pWP == nullptr && getGuid().entityId == c_EntityId_SPDPReader)
{
mp_RTPSParticipant->assertRemoteRTPSParticipantLiveliness(incomingChange->writerGUID.guidPrefix);
}
releaseCache(change_completed);
}
}
}
}
return true;
}
bool StatefulReader::processHeartbeatMsg(GUID_t &writerGUID, uint32_t hbCount, SequenceNumber_t &firstSN,
SequenceNumber_t &lastSN, bool finalFlag, bool livelinessFlag)
{
WriterProxy *pWP = nullptr;
std::unique_lock<std::recursive_mutex> lock(*mp_mutex);
if(acceptMsgFrom(writerGUID, &pWP))
{
std::unique_lock<std::recursive_mutex> wpLock(*pWP->getMutex());
if(pWP->m_lastHeartbeatCount < hbCount)
{
pWP->m_lastHeartbeatCount = hbCount;
pWP->lost_changes_update(firstSN);
fragmentedChangePitStop_->try_to_remove_until(firstSN, pWP->m_att.guid);
pWP->missing_changes_update(lastSN);
pWP->m_heartbeatFinalFlag = finalFlag;
            //Analyze whether an acknack message is needed:
if(!finalFlag)
{
pWP->mp_heartbeatResponse->restart_timer();
}
else if(finalFlag && !livelinessFlag)
{
if(pWP->areThereMissing())
pWP->mp_heartbeatResponse->restart_timer();
}
//FIXME: livelinessFlag
if(livelinessFlag )//TODOG && WP->m_att->m_qos.m_liveliness.kind == MANUAL_BY_TOPIC_LIVELINESS_QOS)
{
pWP->assertLiveliness();
}
GUID_t proxGUID = pWP->m_att.guid;
wpLock.unlock();
            // Maybe now we have to notify the user of new CacheChanges.
SequenceNumber_t last_notified = update_last_notified(proxGUID, pWP->available_changes_max());
SequenceNumber_t nextChangeToNotify = pWP->nextCacheChangeToBeNotified();
while(nextChangeToNotify != SequenceNumber_t::unknown())
{
if( (getListener()!=nullptr) && (nextChangeToNotify > last_notified) )
{
mp_history->postSemaphore();
CacheChange_t* ch_to_give = nullptr;
if(mp_history->get_change(nextChangeToNotify, proxGUID, &ch_to_give))
{
if(!ch_to_give->isRead)
{
getListener()->onNewCacheChangeAdded((RTPSReader*)this,ch_to_give);
}
}
                    // Search for the WriterProxy again because it could have been removed after the unlock.
if(!findWriterProxy(proxGUID, &pWP))
break;
}
nextChangeToNotify = pWP->nextCacheChangeToBeNotified();
}
}
}
return true;
}
bool StatefulReader::processGapMsg(GUID_t &writerGUID, SequenceNumber_t &gapStart, SequenceNumberSet_t &gapList)
{
WriterProxy *pWP = nullptr;
std::unique_lock<std::recursive_mutex> lock(*mp_mutex);
if(acceptMsgFrom(writerGUID, &pWP))
{
std::lock_guard<std::recursive_mutex> guardWriterProxy(*pWP->getMutex());
SequenceNumber_t auxSN;
SequenceNumber_t finalSN = gapList.base -1;
for(auxSN = gapStart; auxSN<=finalSN;auxSN++)
{
if(pWP->irrelevant_change_set(auxSN))
fragmentedChangePitStop_->try_to_remove(auxSN, pWP->m_att.guid);
}
for(auto it = gapList.get_begin(); it != gapList.get_end();++it)
{
if(pWP->irrelevant_change_set((*it)))
fragmentedChangePitStop_->try_to_remove((*it), pWP->m_att.guid);
}
}
return true;
}
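// Descriptive note (added): checks whether writerId belongs to a matched writer; on success, *wp is set to its WriterProxy.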
bool StatefulReader::acceptMsgFrom(GUID_t &writerId, WriterProxy **wp)
{
assert(wp != nullptr);
for(std::vector<WriterProxy*>::iterator it = this->matched_writers.begin();
it!=matched_writers.end();++it)
{
if((*it)->m_att.guid == writerId)
{
*wp = *it;
return true;
}
}
return false;
}
bool StatefulReader::change_removed_by_history(CacheChange_t* a_change, WriterProxy* wp)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
if(wp != nullptr || matched_writer_lookup(a_change->writerGUID,&wp))
{
wp->setNotValid(a_change->sequenceNumber);
return true;
}
else
{
logError(RTPS_READER," You should always find the WP associated with a change, something is very wrong");
}
return false;
}
bool StatefulReader::change_received(CacheChange_t* a_change, WriterProxy* prox)
{
    //First look for the WriterProxy in case it is not provided
if(prox == nullptr)
{
if(!findWriterProxy(a_change->writerGUID, &prox))
{
logInfo(RTPS_READER, "Writer Proxy " << a_change->writerGUID <<" not matched to this Reader "<< m_guid.entityId);
return false;
}
}
std::unique_lock<std::recursive_mutex> writerProxyLock(*prox->getMutex());
size_t unknown_missing_changes_up_to = prox->unknown_missing_changes_up_to(a_change->sequenceNumber);
if(this->mp_history->received_change(a_change, unknown_missing_changes_up_to))
{
bool ret = prox->received_change_set(a_change->sequenceNumber);
GUID_t proxGUID = prox->m_att.guid;
        // If KEEP_LAST and the history is full, mark older changes as lost.
CacheChange_t* aux_change = nullptr;
if(this->mp_history->isFull() && mp_history->get_min_change_from(&aux_change, proxGUID))
{
prox->lost_changes_update(aux_change->sequenceNumber);
fragmentedChangePitStop_->try_to_remove_until(aux_change->sequenceNumber, proxGUID);
}
writerProxyLock.unlock();
SequenceNumber_t last_notified = update_last_notified(proxGUID, prox->available_changes_max());
SequenceNumber_t nextChangeToNotify = prox->nextCacheChangeToBeNotified();
while(nextChangeToNotify != SequenceNumber_t::unknown())
{
if (nextChangeToNotify > last_notified)
{
mp_history->postSemaphore();
if (getListener() != nullptr)
{
CacheChange_t* ch_to_give = nullptr;
if (mp_history->get_change(nextChangeToNotify, proxGUID, &ch_to_give))
{
if (!ch_to_give->isRead)
{
getListener()->onNewCacheChangeAdded((RTPSReader*)this, ch_to_give);
}
}
                    // Search for the WriterProxy again because it could have been removed after the unlock.
if (!findWriterProxy(proxGUID, &prox))
break;
}
}
nextChangeToNotify = prox->nextCacheChangeToBeNotified();
}
return ret;
}
return false;
}
bool StatefulReader::nextUntakenCache(CacheChange_t** change,WriterProxy** wpout)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
std::vector<CacheChange_t*> toremove;
bool takeok = false;
for(std::vector<CacheChange_t*>::iterator it = mp_history->changesBegin();
it!=mp_history->changesEnd();++it)
{
WriterProxy* wp;
if(this->matched_writer_lookup((*it)->writerGUID, &wp))
{
            // TODO Review this check
SequenceNumber_t seq = wp->available_changes_max();
if(seq >= (*it)->sequenceNumber)
{
*change = *it;
if(wpout !=nullptr)
*wpout = wp;
takeok = true;
break;
// if((*it)->kind == ALIVE)
// {
// this->mp_type->deserialize(&(*it)->serializedPayload,data);
// }
// (*it)->isRead = true;
// if(info!=NULL)
// {
// info->sampleKind = (*it)->kind;
// info->writerGUID = (*it)->writerGUID;
// info->sourceTimestamp = (*it)->sourceTimestamp;
// info->iHandle = (*it)->instanceHandle;
// if(this->m_qos.m_ownership.kind == EXCLUSIVE_OWNERSHIP_QOS)
// info->ownershipStrength = wp->m_data->m_qos.m_ownershipStrength.value;
// }
// m_reader_cache.decreaseUnreadCount();
// logInfo(RTPS_READER,this->getGuid().entityId<<": reading change "<< (*it)->sequenceNumber.to64long());
// readok = true;
// break;
}
}
else
{
toremove.push_back((*it));
}
}
for(std::vector<CacheChange_t*>::iterator it = toremove.begin();
it!=toremove.end();++it)
{
logWarning(RTPS_READER,"Removing change "<<(*it)->sequenceNumber << " from " << (*it)->writerGUID << " because is no longer paired");
mp_history->remove_change(*it);
}
return takeok;
}
// TODO Why is the removal done here instead of when unpairing happens?
bool StatefulReader::nextUnreadCache(CacheChange_t** change,WriterProxy** wpout)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
std::vector<CacheChange_t*> toremove;
bool readok = false;
for(std::vector<CacheChange_t*>::iterator it = mp_history->changesBegin();
it!=mp_history->changesEnd();++it)
{
if((*it)->isRead)
continue;
WriterProxy* wp;
if(this->matched_writer_lookup((*it)->writerGUID,&wp))
{
SequenceNumber_t seq;
seq = wp->available_changes_max();
if(seq >= (*it)->sequenceNumber)
{
*change = *it;
if(wpout !=nullptr)
*wpout = wp;
readok = true;
break;
// if((*it)->kind == ALIVE)
// {
// this->mp_type->deserialize(&(*it)->serializedPayload,data);
// }
// (*it)->isRead = true;
// if(info!=NULL)
// {
// info->sampleKind = (*it)->kind;
// info->writerGUID = (*it)->writerGUID;
// info->sourceTimestamp = (*it)->sourceTimestamp;
// info->iHandle = (*it)->instanceHandle;
// if(this->m_qos.m_ownership.kind == EXCLUSIVE_OWNERSHIP_QOS)
// info->ownershipStrength = wp->m_data->m_qos.m_ownershipStrength.value;
// }
// m_reader_cache.decreaseUnreadCount();
// logInfo(RTPS_READER,this->getGuid().entityId<<": reading change "<< (*it)->sequenceNumber.to64long());
// readok = true;
// break;
}
}
else
{
toremove.push_back((*it));
}
}
for(std::vector<CacheChange_t*>::iterator it = toremove.begin();
it!=toremove.end();++it)
{
logWarning(RTPS_READER,"Removing change "<<(*it)->sequenceNumber << " from " << (*it)->writerGUID << " because is no longer paired");
mp_history->remove_change(*it);
}
return readok;
}
bool StatefulReader::updateTimes(ReaderTimes& ti)
{
std::lock_guard<std::recursive_mutex> guard(*mp_mutex);
if(m_times.heartbeatResponseDelay != ti.heartbeatResponseDelay)
{
m_times = ti;
for(std::vector<WriterProxy*>::iterator wit = this->matched_writers.begin();
wit!=this->matched_writers.end();++wit)
{
(*wit)->mp_heartbeatResponse->update_interval(m_times.heartbeatResponseDelay);
}
}
return true;
}
bool StatefulReader::isInCleanState() const
{
bool cleanState = true;
std::unique_lock<std::recursive_mutex> lock(*mp_mutex);
for (WriterProxy* wp : matched_writers)
{
if (wp->numberOfChangeFromWriter() != 0)
{
cleanState = false;
break;
}
}
return cleanState;
}
| 1 | 13,055 | Should we also create sender resources for unicast locators? Should we make a similar change on StatelessReader? | eProsima-Fast-DDS | cpp |
@@ -130,7 +130,7 @@ namespace Datadog.Trace.RuntimeMetrics
// Catching error UnauthorizedAccessException: Access to the registry key 'Global' is denied.
// The 'Global' part seems consistent across localizations
- Log.Error(ex, "The process does not have sufficient permissions to read performance counters. Please refer to https://docs.datadoghq.com/tracing/runtime_metrics/dotnet/#additional-permissions-for-iis to learn how to grant those permissions.");
+ Log.Error(ex, "The process does not have sufficient permissions to read performance counters. Please refer to https://dtdg.co/net-runtime-metrics to learn how to grant those permissions.");
throw;
}
catch (Exception ex) | 1 | // <copyright file="PerformanceCountersListener.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
#if NETFRAMEWORK
using System;
using System.Diagnostics;
using System.Linq;
using System.Threading.Tasks;
using Datadog.Trace.Logging;
using Datadog.Trace.Util;
using Datadog.Trace.Vendors.StatsdClient;
namespace Datadog.Trace.RuntimeMetrics
{
internal class PerformanceCountersListener : IRuntimeMetricsListener
{
private const string MemoryCategoryName = ".NET CLR Memory";
private const string ThreadingCategoryName = ".NET CLR LocksAndThreads";
private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor<PerformanceCountersListener>();
private readonly IDogStatsd _statsd;
private readonly string _processName;
private readonly int _processId;
private string _instanceName;
private PerformanceCounterCategory _memoryCategory;
private bool _fullInstanceName;
private PerformanceCounterWrapper _gen0Size;
private PerformanceCounterWrapper _gen1Size;
private PerformanceCounterWrapper _gen2Size;
private PerformanceCounterWrapper _lohSize;
private PerformanceCounterWrapper _contentionCount;
private int? _previousGen0Count;
private int? _previousGen1Count;
private int? _previousGen2Count;
private double? _lastContentionCount;
private Task _initializationTask;
public PerformanceCountersListener(IDogStatsd statsd)
{
_statsd = statsd;
ProcessHelpers.GetCurrentProcessInformation(out _processName, out _, out _processId);
// To prevent a potential deadlock when hosted in a service, performance counter initialization must be asynchronous
// That's because performance counters may rely on wmiApSrv being started,
// and the windows service manager only allows one service at a time to be starting: https://docs.microsoft.com/en-us/windows/win32/services/service-startup
_initializationTask = Task.Run(InitializePerformanceCounters);
}
public Task WaitForInitialization() => _initializationTask;
public void Dispose()
{
_gen0Size?.Dispose();
_gen1Size?.Dispose();
_gen2Size?.Dispose();
_lohSize?.Dispose();
_contentionCount?.Dispose();
}
public void Refresh()
{
if (_initializationTask.Status != TaskStatus.RanToCompletion)
{
return;
}
if (!_fullInstanceName)
{
_instanceName = GetSimpleInstanceName();
}
TryUpdateGauge(MetricsNames.Gen0HeapSize, _gen0Size);
TryUpdateGauge(MetricsNames.Gen1HeapSize, _gen1Size);
TryUpdateGauge(MetricsNames.Gen2HeapSize, _gen2Size);
TryUpdateGauge(MetricsNames.LohSize, _lohSize);
TryUpdateCounter(MetricsNames.ContentionCount, _contentionCount, ref _lastContentionCount);
var gen0 = GC.CollectionCount(0);
var gen1 = GC.CollectionCount(1);
var gen2 = GC.CollectionCount(2);
if (_previousGen0Count != null)
{
_statsd.Increment(MetricsNames.Gen0CollectionsCount, gen0 - _previousGen0Count.Value);
}
if (_previousGen1Count != null)
{
_statsd.Increment(MetricsNames.Gen1CollectionsCount, gen1 - _previousGen1Count.Value);
}
if (_previousGen2Count != null)
{
_statsd.Increment(MetricsNames.Gen2CollectionsCount, gen2 - _previousGen2Count.Value);
}
_previousGen0Count = gen0;
_previousGen1Count = gen1;
_previousGen2Count = gen2;
}
protected virtual void InitializePerformanceCounters()
{
try
{
_memoryCategory = new PerformanceCounterCategory(MemoryCategoryName);
var instanceName = GetInstanceName();
_fullInstanceName = instanceName.Item2;
_instanceName = instanceName.Item1;
_gen0Size = new PerformanceCounterWrapper(MemoryCategoryName, "Gen 0 heap size", _instanceName);
_gen1Size = new PerformanceCounterWrapper(MemoryCategoryName, "Gen 1 heap size", _instanceName);
_gen2Size = new PerformanceCounterWrapper(MemoryCategoryName, "Gen 2 heap size", _instanceName);
_lohSize = new PerformanceCounterWrapper(MemoryCategoryName, "Large Object Heap size", _instanceName);
_contentionCount = new PerformanceCounterWrapper(ThreadingCategoryName, "Total # of Contentions", _instanceName);
}
catch (UnauthorizedAccessException ex) when (ex.Message.Contains("'Global'"))
{
// Catching error UnauthorizedAccessException: Access to the registry key 'Global' is denied.
// The 'Global' part seems consistent across localizations
Log.Error(ex, "The process does not have sufficient permissions to read performance counters. Please refer to https://docs.datadoghq.com/tracing/runtime_metrics/dotnet/#additional-permissions-for-iis to learn how to grant those permissions.");
throw;
}
catch (Exception ex)
{
Log.Error(ex, "An error occured while initializing the performance counters");
throw;
}
}
private void TryUpdateGauge(string path, PerformanceCounterWrapper counter)
{
var value = counter.GetValue(_instanceName);
if (value != null)
{
_statsd.Gauge(path, value.Value);
}
}
private void TryUpdateCounter(string path, PerformanceCounterWrapper counter, ref double? lastValue)
{
var value = counter.GetValue(_instanceName);
if (value == null)
{
return;
}
if (lastValue == null)
{
lastValue = value;
return;
}
_statsd.Counter(path, value.Value - lastValue.Value);
lastValue = value;
}
private Tuple<string, bool> GetInstanceName()
{
var instanceNames = _memoryCategory.GetInstanceNames().Where(n => n.StartsWith(_processName)).ToArray();
var fullName = instanceNames.FirstOrDefault(n => n.StartsWith($"{_processName}_p{_processId}_r"));
if (fullName != null)
{
return Tuple.Create(fullName, true);
}
if (instanceNames.Length == 1)
{
return Tuple.Create(instanceNames[0], false);
}
return Tuple.Create(GetSimpleInstanceName(), false);
}
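        // Descriptive note (added): resolves the counter instance name for the current process, reading the "Process ID" counter when several instances share the process name.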
private string GetSimpleInstanceName()
{
var instanceNames = _memoryCategory.GetInstanceNames().Where(n => n.StartsWith(_processName)).ToArray();
if (instanceNames.Length == 1)
{
return instanceNames[0];
}
foreach (var name in instanceNames)
{
int instancePid;
using (var counter = new PerformanceCounter(MemoryCategoryName, "Process ID", name, true))
{
instancePid = (int)counter.NextValue();
}
if (instancePid == _processId)
{
return name;
}
}
return null;
}
}
}
#endif
| 1 | 21,670 | Can we use `dotnet` instead of `net`? | DataDog-dd-trace-dotnet | .cs |
@@ -372,6 +372,13 @@ namespace Datadog.Trace
return StartSpan(operationName, tags: null, parent, serviceName, startTime, ignoreActiveScope, spanId: null);
}
+ /// <summary>
+ /// Forces the tracer to immediately flush pending traces and send them to the agent.
+ /// To be called when the appdomain or the process is about to be killed in a non-graceful way.
+ /// </summary>
+ /// <returns>Task used to track the async flush operation</returns>
+ public Task ForceFlush() => FlushAsync();
+
/// <summary>
/// Writes the specified <see cref="Span"/> collection to the agent writer.
/// </summary> | 1 | // <copyright file="Tracer.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System;
using System.Collections.Generic;
using System.IO;
using System.Reflection;
using System.Threading;
using System.Threading.Tasks;
using Datadog.Trace.Agent;
using Datadog.Trace.Configuration;
using Datadog.Trace.DiagnosticListeners;
using Datadog.Trace.DogStatsd;
using Datadog.Trace.Logging;
using Datadog.Trace.PlatformHelpers;
using Datadog.Trace.RuntimeMetrics;
using Datadog.Trace.Sampling;
using Datadog.Trace.Tagging;
using Datadog.Trace.Util;
using Datadog.Trace.Vendors.Newtonsoft.Json;
using Datadog.Trace.Vendors.StatsdClient;
namespace Datadog.Trace
{
/// <summary>
/// The tracer is responsible for creating spans and flushing them to the Datadog agent
/// </summary>
public class Tracer : IDatadogTracer
{
private const string UnknownServiceName = "UnknownService";
private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor<Tracer>();
private static string _runtimeId;
/// <summary>
/// The number of Tracer instances that have been created and not yet destroyed.
/// This is used in the heartbeat metrics to estimate the number of
/// "live" Tracers that could potentially be sending traces to the Agent.
/// </summary>
private static int _liveTracerCount;
/// <summary>
/// Indicates whether we're initializing a tracer for the first time
/// </summary>
private static int _firstInitialization = 1;
private static Tracer _instance;
private static bool _globalInstanceInitialized;
private static object _globalInstanceLock = new object();
private static RuntimeMetricsWriter _runtimeMetricsWriter;
private readonly IScopeManager _scopeManager;
private readonly Timer _heartbeatTimer;
private readonly IAgentWriter _agentWriter;
private string _agentVersion;
static Tracer()
{
TracingProcessManager.Initialize();
}
/// <summary>
/// Initializes a new instance of the <see cref="Tracer"/> class with default settings.
/// </summary>
public Tracer()
: this(settings: null, agentWriter: null, sampler: null, scopeManager: null, statsd: null)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="Tracer"/>
/// class using the specified <see cref="IConfigurationSource"/>.
/// </summary>
/// <param name="settings">
/// A <see cref="TracerSettings"/> instance with the desired settings,
/// or null to use the default configuration sources.
/// </param>
public Tracer(TracerSettings settings)
: this(settings, agentWriter: null, sampler: null, scopeManager: null, statsd: null)
{
}
internal Tracer(TracerSettings settings, IAgentWriter agentWriter, ISampler sampler, IScopeManager scopeManager, IDogStatsd statsd)
{
// update the count of Tracer instances
Interlocked.Increment(ref _liveTracerCount);
Settings = settings ?? TracerSettings.FromDefaultSources();
Settings.Freeze();
// if not configured, try to determine an appropriate service name
DefaultServiceName = Settings.ServiceName ??
GetApplicationName() ??
UnknownServiceName;
// only set DogStatsdClient if tracer metrics are enabled
if (Settings.TracerMetricsEnabled)
{
Statsd = statsd ?? CreateDogStatsdClient(Settings, DefaultServiceName, Settings.DogStatsdPort);
}
if (agentWriter == null)
{
_agentWriter = new AgentWriter(new Api(Settings.AgentUri, TransportStrategy.Get(Settings), Statsd), Statsd, maxBufferSize: Settings.TraceBufferSize);
}
else
{
_agentWriter = agentWriter;
}
_scopeManager = scopeManager ?? new AsyncLocalScopeManager();
Sampler = sampler ?? new RuleBasedSampler(new RateLimiter(Settings.MaxTracesSubmittedPerSecond));
if (!string.IsNullOrWhiteSpace(Settings.CustomSamplingRules))
{
foreach (var rule in CustomSamplingRule.BuildFromConfigurationString(Settings.CustomSamplingRules))
{
Sampler.RegisterRule(rule);
}
}
if (Settings.GlobalSamplingRate != null)
{
var globalRate = (float)Settings.GlobalSamplingRate;
if (globalRate < 0f || globalRate > 1f)
{
Log.Warning("{ConfigurationKey} configuration of {ConfigurationValue} is out of range", ConfigurationKeys.GlobalSamplingRate, Settings.GlobalSamplingRate);
}
else
{
Sampler.RegisterRule(new GlobalSamplingRule(globalRate));
}
}
// Register callbacks to make sure we flush the traces before exiting
AppDomain.CurrentDomain.ProcessExit += CurrentDomain_ProcessExit;
AppDomain.CurrentDomain.DomainUnload += CurrentDomain_DomainUnload;
try
{
// Registering for the AppDomain.UnhandledException event cannot be called by a security transparent method
// This will only happen if the Tracer is not run full-trust
AppDomain.CurrentDomain.UnhandledException += CurrentDomain_UnhandledException;
}
catch (Exception ex)
{
Log.Warning(ex, "Unable to register a callback to the AppDomain.UnhandledException event.");
}
try
{
// Registering for the cancel key press event requires the System.Security.Permissions.UIPermission
Console.CancelKeyPress += Console_CancelKeyPress;
}
catch (Exception ex)
{
Log.Warning(ex, "Unable to register a callback to the Console.CancelKeyPress event.");
}
// start the heartbeat loop
_heartbeatTimer = new Timer(HeartbeatCallback, state: null, dueTime: TimeSpan.Zero, period: TimeSpan.FromMinutes(1));
// If configured, add/remove the correlation identifiers into the
// LibLog logging context when a scope is activated/closed
if (Settings.LogsInjectionEnabled)
{
InitializeLibLogScopeEventSubscriber(_scopeManager, DefaultServiceName, Settings.ServiceVersion, Settings.Environment);
}
if (Interlocked.Exchange(ref _firstInitialization, 0) == 1)
{
if (Settings.StartupDiagnosticLogEnabled)
{
_ = Task.Run(WriteDiagnosticLog);
}
if (Settings.RuntimeMetricsEnabled)
{
_runtimeMetricsWriter = new RuntimeMetricsWriter(Statsd ?? CreateDogStatsdClient(Settings, DefaultServiceName, Settings.DogStatsdPort), TimeSpan.FromSeconds(10));
}
}
}
/// <summary>
/// Finalizes an instance of the <see cref="Tracer"/> class.
/// </summary>
~Tracer()
{
// update the count of Tracer instances
Interlocked.Decrement(ref _liveTracerCount);
}
/// <summary>
/// Gets or sets the global <see cref="Tracer"/> instance.
/// Used by all automatic instrumentation and recommended
/// as the entry point for manual instrumentation.
/// </summary>
public static Tracer Instance
{
get
{
return LazyInitializer.EnsureInitialized(ref _instance, ref _globalInstanceInitialized, ref _globalInstanceLock);
}
set
{
lock (_globalInstanceLock)
{
_instance = value;
_globalInstanceInitialized = true;
}
}
}
/// <summary>
/// Gets the active scope
/// </summary>
public Scope ActiveScope => _scopeManager.Active;
/// <summary>
/// Gets the default service name for traces where a service name is not specified.
/// </summary>
public string DefaultServiceName { get; }
/// <summary>
/// Gets this tracer's settings.
/// </summary>
public TracerSettings Settings { get; }
/// <summary>
/// Gets or sets the detected version of the agent
/// </summary>
string IDatadogTracer.AgentVersion
{
get
{
return _agentVersion;
}
set
{
if (ShouldLogPartialFlushWarning(value))
{
var detectedVersion = string.IsNullOrEmpty(value) ? "{detection failed}" : value;
Log.Warning("DATADOG TRACER DIAGNOSTICS - Partial flush should only be enabled with agent 7.26.0+ (detected version: {version})", detectedVersion);
}
}
}
/// <summary>
/// Gets the tracer's scope manager, which determines which span is currently active, if any.
/// </summary>
IScopeManager IDatadogTracer.ScopeManager => _scopeManager;
/// <summary>
/// Gets the <see cref="ISampler"/> instance used by this <see cref="IDatadogTracer"/> instance.
/// </summary>
ISampler IDatadogTracer.Sampler => Sampler;
internal static string RuntimeId => LazyInitializer.EnsureInitialized(ref _runtimeId, () => Guid.NewGuid().ToString());
internal IDiagnosticManager DiagnosticManager { get; set; }
internal ISampler Sampler { get; }
internal IDogStatsd Statsd { get; private set; }
/// <summary>
/// Create a new Tracer with the given parameters
/// </summary>
/// <param name="agentEndpoint">The agent endpoint where the traces will be sent (default is http://localhost:8126).</param>
/// <param name="defaultServiceName">Default name of the service (default is the name of the executing assembly).</param>
/// <param name="isDebugEnabled">Turns on all debug logging (this may have an impact on application performance).</param>
/// <returns>The newly created tracer</returns>
public static Tracer Create(Uri agentEndpoint = null, string defaultServiceName = null, bool isDebugEnabled = false)
{
// Keep supporting this older public method by creating a TracerConfiguration
// from default sources, overwriting the specified settings, and passing that to the constructor.
var configuration = TracerSettings.FromDefaultSources();
GlobalSettings.SetDebugEnabled(isDebugEnabled);
if (agentEndpoint != null)
{
configuration.AgentUri = agentEndpoint;
}
if (defaultServiceName != null)
{
configuration.ServiceName = defaultServiceName;
}
return new Tracer(configuration);
}
/// <summary>
/// Make a span the active span and return its new scope.
/// </summary>
/// <param name="span">The span to activate.</param>
/// <returns>A Scope object wrapping this span.</returns>
Scope IDatadogTracer.ActivateSpan(Span span)
{
return ActivateSpan(span);
}
/// <summary>
/// Make a span the active span and return its new scope.
/// </summary>
/// <param name="span">The span to activate.</param>
/// <param name="finishOnClose">Determines whether closing the returned scope will also finish the span.</param>
/// <returns>A Scope object wrapping this span.</returns>
public Scope ActivateSpan(Span span, bool finishOnClose = true)
{
return _scopeManager.Activate(span, finishOnClose);
}
/// <summary>
/// This is a shortcut for <see cref="StartSpan(string, ISpanContext, string, DateTimeOffset?, bool)"/>
/// and <see cref="ActivateSpan(Span, bool)"/>, it creates a new span with the given parameters and makes it active.
/// </summary>
/// <param name="operationName">The span's operation name</param>
/// <param name="parent">The span's parent</param>
/// <param name="serviceName">The span's service name</param>
/// <param name="startTime">An explicit start time for that span</param>
/// <param name="ignoreActiveScope">If set the span will not be a child of the currently active span</param>
/// <param name="finishOnClose">If set to false, closing the returned scope will not close the enclosed span </param>
/// <returns>A scope wrapping the newly created span</returns>
public Scope StartActive(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false, bool finishOnClose = true)
{
var span = StartSpan(operationName, parent, serviceName, startTime, ignoreActiveScope);
return _scopeManager.Activate(span, finishOnClose);
}
/// <summary>
/// Creates a new <see cref="Span"/> with the specified parameters.
/// </summary>
/// <param name="operationName">The span's operation name</param>
/// <returns>The newly created span</returns>
Span IDatadogTracer.StartSpan(string operationName)
{
return StartSpan(operationName);
}
/// <summary>
/// Creates a new <see cref="Span"/> with the specified parameters.
/// </summary>
/// <param name="operationName">The span's operation name</param>
/// <param name="parent">The span's parent</param>
/// <returns>The newly created span</returns>
Span IDatadogTracer.StartSpan(string operationName, ISpanContext parent)
{
return StartSpan(operationName, parent);
}
/// <summary>
/// Creates a new <see cref="Span"/> with the specified parameters.
/// </summary>
/// <param name="operationName">The span's operation name</param>
/// <param name="parent">The span's parent</param>
/// <param name="serviceName">The span's service name</param>
/// <param name="startTime">An explicit start time for that span</param>
/// <param name="ignoreActiveScope">If set the span will not be a child of the currently active span</param>
/// <returns>The newly created span</returns>
public Span StartSpan(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false)
{
return StartSpan(operationName, tags: null, parent, serviceName, startTime, ignoreActiveScope, spanId: null);
}
/// <summary>
/// Writes the specified <see cref="Span"/> collection to the agent writer.
/// </summary>
/// <param name="trace">The <see cref="Span"/> collection to write.</param>
void IDatadogTracer.Write(ArraySegment<Span> trace)
{
if (Settings.TraceEnabled)
{
_agentWriter.WriteTrace(trace);
}
}
internal SpanContext CreateSpanContext(ISpanContext parent = null, string serviceName = null, bool ignoreActiveScope = false, ulong? spanId = null)
{
if (parent == null && !ignoreActiveScope)
{
parent = _scopeManager.Active?.Span?.Context;
}
ITraceContext traceContext;
// try to get the trace context (from local spans) or
// sampling priority (from propagated spans),
// otherwise start a new trace context
if (parent is SpanContext parentSpanContext)
{
traceContext = parentSpanContext.TraceContext ??
new TraceContext(this) { SamplingPriority = parentSpanContext.SamplingPriority };
}
else
{
traceContext = new TraceContext(this);
}
var finalServiceName = serviceName ?? parent?.ServiceName ?? DefaultServiceName;
var spanContext = new SpanContext(parent, traceContext, finalServiceName, spanId);
return spanContext;
}
internal Scope StartActiveWithTags(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false, bool finishOnClose = true, ITags tags = null, ulong? spanId = null)
{
var span = StartSpan(operationName, tags, parent, serviceName, startTime, ignoreActiveScope, spanId);
return _scopeManager.Activate(span, finishOnClose);
}
internal Span StartSpan(string operationName, ITags tags, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false, ulong? spanId = null)
{
var spanContext = CreateSpanContext(parent, serviceName, ignoreActiveScope, spanId);
var span = new Span(spanContext, startTime, tags)
{
OperationName = operationName,
};
// Apply any global tags
if (Settings.GlobalTags.Count > 0)
{
foreach (var entry in Settings.GlobalTags)
{
span.SetTag(entry.Key, entry.Value);
}
}
// automatically add the "env" tag if defined, taking precedence over an "env" tag set from a global tag
var env = Settings.Environment;
if (!string.IsNullOrWhiteSpace(env))
{
span.SetTag(Tags.Env, env);
}
// automatically add the "version" tag if defined, taking precedence over an "version" tag set from a global tag
var version = Settings.ServiceVersion;
if (!string.IsNullOrWhiteSpace(version) && string.Equals(spanContext.ServiceName, DefaultServiceName))
{
span.SetTag(Tags.Version, version);
}
spanContext.TraceContext.AddSpan(span);
return span;
}
internal Task FlushAsync()
{
return _agentWriter.FlushTracesAsync();
}
internal async Task WriteDiagnosticLog()
{
string agentError = null;
// In AAS, the trace agent is deployed alongside the tracer and managed by the tracer
// Disable this check as it may hit the trace agent before it is ready to receive requests and give false negatives
if (!AzureAppServices.Metadata.IsRelevant)
{
try
{
var success = await _agentWriter.Ping().ConfigureAwait(false);
if (!success)
{
agentError = "An error occurred while sending traces to the agent";
}
}
catch (Exception ex)
{
agentError = ex.Message;
}
}
try
{
var stringWriter = new StringWriter();
using (var writer = new JsonTextWriter(stringWriter))
{
writer.WriteStartObject();
writer.WritePropertyName("date");
writer.WriteValue(DateTime.Now);
writer.WritePropertyName("os_name");
writer.WriteValue(FrameworkDescription.Instance.OSPlatform);
writer.WritePropertyName("os_version");
writer.WriteValue(Environment.OSVersion.ToString());
writer.WritePropertyName("version");
writer.WriteValue(TracerConstants.AssemblyVersion);
writer.WritePropertyName("platform");
writer.WriteValue(FrameworkDescription.Instance.ProcessArchitecture);
writer.WritePropertyName("lang");
writer.WriteValue(FrameworkDescription.Instance.Name);
writer.WritePropertyName("lang_version");
writer.WriteValue(FrameworkDescription.Instance.ProductVersion);
writer.WritePropertyName("env");
writer.WriteValue(Settings.Environment);
writer.WritePropertyName("enabled");
writer.WriteValue(Settings.TraceEnabled);
writer.WritePropertyName("service");
writer.WriteValue(DefaultServiceName);
writer.WritePropertyName("agent_url");
writer.WriteValue(Settings.AgentUri);
writer.WritePropertyName("debug");
writer.WriteValue(GlobalSettings.Source.DebugEnabled);
writer.WritePropertyName("analytics_enabled");
writer.WriteValue(Settings.AnalyticsEnabled);
writer.WritePropertyName("sample_rate");
writer.WriteValue(Settings.GlobalSamplingRate);
writer.WritePropertyName("sampling_rules");
writer.WriteValue(Settings.CustomSamplingRules);
writer.WritePropertyName("tags");
writer.WriteStartArray();
foreach (var entry in Settings.GlobalTags)
{
writer.WriteValue(string.Concat(entry.Key, ":", entry.Value));
}
writer.WriteEndArray();
writer.WritePropertyName("log_injection_enabled");
writer.WriteValue(Settings.LogsInjectionEnabled);
writer.WritePropertyName("runtime_metrics_enabled");
writer.WriteValue(Settings.RuntimeMetricsEnabled);
writer.WritePropertyName("disabled_integrations");
writer.WriteStartArray();
foreach (var integration in Settings.DisabledIntegrationNames)
{
writer.WriteValue(integration);
}
writer.WriteEndArray();
writer.WritePropertyName("netstandard_enabled");
writer.WriteValue(Settings.IsNetStandardFeatureFlagEnabled());
writer.WritePropertyName("routetemplate_resourcenames_enabled");
writer.WriteValue(Settings.RouteTemplateResourceNamesEnabled);
writer.WritePropertyName("partialflush_enabled");
writer.WriteValue(Settings.PartialFlushEnabled);
writer.WritePropertyName("partialflush_minspans");
writer.WriteValue(Settings.PartialFlushMinSpans);
writer.WritePropertyName("runtime_id");
writer.WriteValue(RuntimeId);
writer.WritePropertyName("agent_reachable");
writer.WriteValue(agentError == null);
writer.WritePropertyName("agent_error");
writer.WriteValue(agentError ?? string.Empty);
writer.WriteEndObject();
}
Log.Information("DATADOG TRACER CONFIGURATION - {Configuration}", stringWriter.ToString());
}
catch (Exception ex)
{
Log.Warning(ex, "DATADOG TRACER DIAGNOSTICS - Error fetching configuration");
}
}
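        // Descriptive note (added): returns true (at most once per detected agent version) when partial flush is enabled but the agent version is missing, unparsable, or older than 7.26.0.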
internal bool ShouldLogPartialFlushWarning(string agentVersion)
{
if (agentVersion != _agentVersion)
{
_agentVersion = agentVersion;
if (Settings.PartialFlushEnabled)
{
if (!Version.TryParse(agentVersion, out var parsedVersion) || parsedVersion < new Version(7, 26, 0))
{
return true;
}
}
}
return false;
}
/// <summary>
/// Gets an "application name" for the executing application by looking at
/// the hosted app name (.NET Framework on IIS only), assembly name, and process name.
/// </summary>
/// <returns>The default service name.</returns>
private static string GetApplicationName()
{
try
{
try
{
if (TryLoadAspNetSiteName(out var siteName))
{
return siteName;
}
}
catch (Exception ex)
{
// Unable to call into System.Web.dll
Log.Error(ex, "Unable to get application name through ASP.NET settings");
}
return Assembly.GetEntryAssembly()?.GetName().Name ??
ProcessHelpers.GetCurrentProcessName();
}
catch (Exception ex)
{
Log.Error(ex, "Error creating default service name.");
return null;
}
}
private static bool TryLoadAspNetSiteName(out string siteName)
{
#if NETFRAMEWORK
// System.Web.dll is only available on .NET Framework
if (System.Web.Hosting.HostingEnvironment.IsHosted)
{
// if this app is an ASP.NET application, return "SiteName/ApplicationVirtualPath".
// note that ApplicationVirtualPath includes a leading slash.
siteName = (System.Web.Hosting.HostingEnvironment.SiteName + System.Web.Hosting.HostingEnvironment.ApplicationVirtualPath).TrimEnd('/');
return true;
}
#endif
siteName = default;
return false;
}
private static IDogStatsd CreateDogStatsdClient(TracerSettings settings, string serviceName, int port)
{
try
{
var constantTags = new List<string>
{
"lang:.NET",
$"lang_interpreter:{FrameworkDescription.Instance.Name}",
$"lang_version:{FrameworkDescription.Instance.ProductVersion}",
$"tracer_version:{TracerConstants.AssemblyVersion}",
$"service:{serviceName}",
$"{Tags.RuntimeId}:{RuntimeId}"
};
if (settings.Environment != null)
{
constantTags.Add($"env:{settings.Environment}");
}
if (settings.ServiceVersion != null)
{
constantTags.Add($"version:{settings.ServiceVersion}");
}
var statsd = new DogStatsdService();
if (AzureAppServices.Metadata.IsRelevant)
{
// Environment variables set by the Azure App Service extension are used internally.
// Setting the server name will force UDP, when we need named pipes.
statsd.Configure(new StatsdConfig
{
ConstantTags = constantTags.ToArray()
});
}
else
{
statsd.Configure(new StatsdConfig
{
StatsdServerName = settings.AgentUri.DnsSafeHost,
StatsdPort = port,
ConstantTags = constantTags.ToArray()
});
}
return statsd;
}
catch (Exception ex)
{
Log.Error(ex, $"Unable to instantiate {nameof(Statsd)} client.");
return new NoOpStatsd();
}
}
private void InitializeLibLogScopeEventSubscriber(IScopeManager scopeManager, string defaultServiceName, string version, string env)
{
new LibLogScopeEventSubscriber(this, scopeManager, defaultServiceName, version ?? string.Empty, env ?? string.Empty);
}
private void CurrentDomain_ProcessExit(object sender, EventArgs e)
{
RunShutdownTasks();
}
private void CurrentDomain_UnhandledException(object sender, UnhandledExceptionEventArgs e)
{
Log.Warning("Application threw an unhandled exception: {Exception}", e.ExceptionObject);
RunShutdownTasks();
}
private void Console_CancelKeyPress(object sender, ConsoleCancelEventArgs e)
{
RunShutdownTasks();
}
private void CurrentDomain_DomainUnload(object sender, EventArgs e)
{
RunShutdownTasks();
}
private void RunShutdownTasks()
{
try
{
_agentWriter.FlushAndCloseAsync().Wait();
}
catch (Exception ex)
{
Log.Error(ex, "Error flushing traces on shutdown.");
}
}
private void HeartbeatCallback(object state)
{
// use the count of Tracer instances as the heartbeat value
// to estimate the number of "live" Tracers than can potentially
// send traces to the Agent
Statsd?.Gauge(TracerMetricNames.Health.Heartbeat, _liveTracerCount);
}
}
}
| 1 | 21,292 | Should this be `ForceFlushAsync`? Given we have the `async` suffix elsewhere. | DataDog-dd-trace-dotnet | .cs |
@@ -48,8 +48,8 @@ class TestJMeterExecutor(BZTestCase):
self.obj = get_jmeter()
def tearDown(self):
- if self.obj.modified_jmx and os.path.exists(self.obj.modified_jmx):
- os.remove(self.obj.modified_jmx)
+ # if self.obj.modified_jmx and os.path.exists(self.obj.modified_jmx):
+ # os.remove(self.obj.modified_jmx)
super(TestJMeterExecutor, self).tearDown()
def configure(self, config): | 1 | # coding=utf-8
import json
import logging
import os
import shutil
import sys
import time
from math import ceil
import yaml
from bzt import ToolError, TaurusConfigError, TaurusInternalException
from bzt.jmx import JMX
from bzt.modules.aggregator import ConsolidatingAggregator, DataPoint
from bzt.modules.blazemeter import CloudProvisioning
from bzt.modules.functional import FunctionalAggregator
from bzt.modules.jmeter import JMeterExecutor, JTLErrorsReader, JTLReader, FuncJTLReader
from bzt.modules.jmeter import JMeterScenarioBuilder
from bzt.modules.provisioning import Local
from bzt.six import etree, u
from bzt.utils import EXE_SUFFIX, get_full_path, BetterDict
from tests import BZTestCase, __dir__
from tests.mocks import EngineEmul
def get_jmeter():
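    """Build a JMeterExecutor attached to a mock engine and the test jmeter-loader stub."""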
dir_name = os.path.dirname(__file__)
path = dir_name + "/../resources/jmeter/jmeter-loader" + EXE_SUFFIX
obj = JMeterExecutor()
obj.engine = EngineEmul()
obj.settings.merge({'path': path})
return obj
def get_jmeter_executor_vars():
return (JMeterExecutor.JMETER_DOWNLOAD_LINK, JMeterExecutor.JMETER_VER,
JMeterExecutor.MIRRORS_SOURCE, JMeterExecutor.CMDRUNNER, JMeterExecutor.PLUGINS_MANAGER)
def set_jmeter_executor_vars(jmeter_vars):
(JMeterExecutor.JMETER_DOWNLOAD_LINK, JMeterExecutor.JMETER_VER,
JMeterExecutor.MIRRORS_SOURCE, JMeterExecutor.CMDRUNNER, JMeterExecutor.PLUGINS_MANAGER) = jmeter_vars
class TestJMeterExecutor(BZTestCase):
def setUp(self):
super(TestJMeterExecutor, self).setUp()
self.obj = get_jmeter()
def tearDown(self):
if self.obj.modified_jmx and os.path.exists(self.obj.modified_jmx):
os.remove(self.obj.modified_jmx)
super(TestJMeterExecutor, self).tearDown()
def configure(self, config):
"""
        Merge config into engine, set up provisioning,
        and set up execution and settings attributes for the executor.
:return:
"""
self.obj.engine.config.merge(config)
execution = self.obj.engine.config['execution']
if isinstance(execution, list):
self.obj.execution = execution[0]
else:
self.obj.execution = execution
self.obj.settings.merge(self.obj.engine.config.get('modules').get('jmeter'))
prov = self.obj.engine.config.get('provisioning', None)
if prov == 'local':
self.obj.engine.provisioning = Local()
elif prov == 'cloud':
self.obj.engine.provisioning = CloudProvisioning()
else:
            raise ValueError('Wrong provisioning value: %s' % prov)
def test_jmx(self):
self.obj.execution.merge({"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/dummy.jmx"}})
self.obj.engine.create_artifacts_dir()
self.obj.prepare()
def test_jmx_with_props(self):
self.obj.execution.merge({
"concurrency": 10,
"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/props_tg.jmx"}
})
self.obj.engine.create_artifacts_dir()
self.obj.prepare()
def test_jmx_2tg(self):
self.obj.execution.merge({
"concurrency": 1051,
"ramp-up": 15,
"iterations": 100,
"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/two_tg.jmx"}
})
self.obj.prepare()
jmx = JMX(self.obj.modified_jmx)
selector = 'jmeterTestPlan>hashTree>hashTree>ThreadGroup'
selector += '>stringProp[name=ThreadGroup\.num_threads]'
thr = jmx.get(selector)
self.assertEquals('420', thr[0].text)
self.assertEquals('631', thr[1].text)
def test_regexp_extractors(self):
self.obj.execution.merge(
{"scenario":
{"requests": [{
"url": "http://localhost",
"extract-regexp": {
"test_name": "???"}}]}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
self.assertEqual("body", xml_tree.findall(".//stringProp[@name='RegexExtractor.useHeaders']")[0].text)
self.assertEqual("???", xml_tree.findall(".//stringProp[@name='RegexExtractor.regex']")[0].text)
self.assertEqual("parent", xml_tree.findall(".//stringProp[@name='Sample.scope']")[0].text)
def test_not_jmx(self):
self.obj.execution = {"scenario": {"script": __file__}}
self.assertRaises(TaurusInternalException, self.obj.prepare)
def test_broken_xml(self):
self.obj.execution.merge({"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/broken.jmx"}})
self.assertRaises(TaurusInternalException, self.obj.prepare)
def test_not_jmx_xml(self):
self.obj.execution.merge({"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/not-jmx.xml"}})
self.assertRaises(TaurusInternalException, self.obj.prepare)
def test_requests(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.prepare()
self.obj.log.debug("%s: %s", self.obj.modified_jmx, open(self.obj.modified_jmx).read())
self.obj.log.debug("%s", json.dumps(self.obj.execution, indent=True))
try:
self.obj.startup()
while not self.obj.check():
self.obj.log.debug("Check...")
time.sleep(1)
self.obj.shutdown()
self.obj.post_process()
except:
pass
finally:
if self.obj.jmeter_log and os.path.exists(self.obj.jmeter_log):
self.obj.log.debug("%s", open(self.obj.jmeter_log).read())
def test_issue_no_iterations(self):
self.obj.execution.merge({
"concurrency": 10,
"ramp-up": 10,
"scenario": {
"script": __dir__() + "/../resources/jmeter/jmx/issue_no_iterations.jmx"
}
})
self.obj.prepare()
def test_body_file(self):
body_file1 = __dir__() + "/../resources/jmeter/body-file.dat"
body_file2 = __dir__() + "/../resources/jmeter/jmx/http.jmx"
self.configure({
'execution': [{
'iterations': 1,
'scenario': 'bf'}],
'scenarios': {
'bf': {
"requests": [
{
'url': 'http://first.com',
'body-file': body_file1
}, {
'url': 'http://second.com',
'body': 'body2',
'body-file': body_file2}]}}})
res_files = self.obj.get_resource_files()
scenario = self.obj.get_scenario()
body_files = [req.get('body-file') for req in scenario.get('requests')]
body_fields = [req.get('body') for req in scenario.get('requests')]
self.assertIn(body_file1, res_files)
self.assertIn(body_file2, res_files)
self.assertEqual(body_fields, [None, 'body2'])
self.assertEqual(body_files, [body_file1, body_file2])
def test_datasources_with_delimiter(self):
self.obj.execution.merge({"scenario":
{"requests": ["http://localhost"],
"data-sources": [
{"path": __dir__() + "/../resources/test2.csv",
"delimiter": ","}]}})
self.obj.prepare()
def test_datasources_jmeter_var(self):
self.obj.execution.merge({"scenario":
{"requests": ["http://localhost"],
"data-sources": [
{"path": "${some_jmeter_variable}"}]}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
elements = xml_tree.findall(".//CSVDataSet[@testclass='CSVDataSet']")
self.assertEqual(1, len(elements))
element = elements[0]
self.assertEqual("${some_jmeter_variable}", element.find(".//stringProp[@name='filename']").text)
self.assertEqual(",", element.find(".//stringProp[@name='delimiter']").text)
def test_datasources_wrong_path(self):
self.obj.execution.merge({"scenario":
{"requests": ["http://localhost"],
"data-sources": [
{"path": "really_wrong_path"}]}})
self.assertRaises(TaurusConfigError, self.obj.prepare)
def test_datasources_without_delimiter(self):
self.obj.execution.merge({"scenario":
{"requests": ["http://localhost"],
"data-sources": [
{"path": __dir__() + "/../resources/test2.csv"}]}})
self.obj.prepare()
def test_path_processing(self):
class FakeTool(object):
tool_path = ''
installed = None
def set(self, tool_path, installed):
self.tool_path = tool_path
self.installed = installed
def check_if_installed(self):
return self.installed
fake = FakeTool()
end_str = os.path.join('bin', 'jmeter' + EXE_SUFFIX)
fake.set(__file__, True) # real file, jmeter works: do nothing
self.assertEqual(JMeterExecutor._need_to_install(fake), False)
fake.set(__file__, False) # real file, jmeter doesn't work: raise
with self.assertRaises(TaurusConfigError):
JMeterExecutor._need_to_install(fake)
fake.set(os.path.curdir, True) # real dir, $dir/bin/jmeter.EXT works: fix path only
self.assertEqual(JMeterExecutor._need_to_install(fake), False)
self.assertEqual(fake.tool_path, os.path.join(os.path.curdir, end_str))
fake.set(os.path.curdir, False) # real dir, $dir/bin/jmeter.EXT doesn't work: install into $dir
self.assertEqual(JMeterExecutor._need_to_install(fake), True)
self.assertEqual(fake.tool_path, os.path.join(os.path.curdir, end_str))
# not real file/dir, looks like *bin/jmeter.EXT: make two steps up, use as dir, install jmeter into it
fake.set('*' + end_str, False)
self.assertEqual(JMeterExecutor._need_to_install(fake), True)
self.assertEqual(fake.tool_path, '*' + end_str)
# not real file/dir, doesn't look like *bin/jmeter.EXT: use as dir, install jmeter into it
fake.set('*', False)
self.assertEqual(JMeterExecutor._need_to_install(fake), True)
self.assertEqual(fake.tool_path, os.path.join('*', end_str))
def test_install_jmeter_2_13(self):
path = os.path.abspath(__dir__() + "/../../build/tmp/jmeter-taurus/bin/jmeter" + EXE_SUFFIX)
shutil.rmtree(os.path.dirname(os.path.dirname(path)), ignore_errors=True)
self.assertFalse(os.path.exists(path))
jmeter_vars = get_jmeter_executor_vars()
set_jmeter_executor_vars(jmeter_vars)
try:
jmeter_res_dir = "file:///" + __dir__() + "/../resources/jmeter/"
JMeterExecutor.MIRRORS_SOURCE = jmeter_res_dir + "unicode_file"
JMeterExecutor.JMETER_DOWNLOAD_LINK = jmeter_res_dir + "jmeter-dist-{version}.zip"
JMeterExecutor.PLUGINS_MANAGER = jmeter_res_dir + "jmeter-plugins-manager.jar"
JMeterExecutor.CMDRUNNER = jmeter_res_dir + "jmeter-plugins-manager.jar"
JMeterExecutor.PLUGINS = ['Alice', 'Bob']
JMeterExecutor.JMETER_VER = '2.13'
self.obj.settings.merge({"path": path})
self.configure({
"execution": [{"scenario": {"requests": ["http://localhost"]}}],
"settings": {
"proxy": {
"address": "http://myproxy.com:8080",
"username": "user",
"password": "pass"}}})
self.obj.prepare()
jars = os.listdir(os.path.abspath(os.path.join(path, '../../lib')))
old_jars = [
'httpcore-4.2.5.jar', 'httpmime-4.2.6.jar', 'xercesImpl-2.9.1.jar',
'commons-jexl-1.1.jar', 'httpclient-4.2.6.jar']
for old_jar in old_jars:
self.assertNotIn(old_jar, jars)
self.assertTrue(os.path.exists(path))
self.obj = get_jmeter()
self.obj.settings.merge({"path": path})
self.obj.execution.merge({"scenario": {"requests": ["http://localhost"]}})
self.obj.prepare()
finally:
set_jmeter_executor_vars(jmeter_vars)
def test_install_jmeter_3_0(self):
path = os.path.abspath(__dir__() + "/../../build/tmp/jmeter-taurus/bin/jmeter" + EXE_SUFFIX)
shutil.rmtree(os.path.dirname(os.path.dirname(path)), ignore_errors=True)
self.assertFalse(os.path.exists(path))
jmeter_vars = get_jmeter_executor_vars()
try:
jmeter_res_dir = "file:///" + __dir__() + "/../resources/jmeter/"
JMeterExecutor.MIRRORS_SOURCE = jmeter_res_dir + "unicode_file"
JMeterExecutor.JMETER_DOWNLOAD_LINK = jmeter_res_dir + "jmeter-dist-{version}.zip"
JMeterExecutor.PLUGINS_MANAGER = jmeter_res_dir + "jmeter-plugins-manager.jar"
JMeterExecutor.CMDRUNNER = jmeter_res_dir + "jmeter-plugins-manager.jar"
JMeterExecutor.PLUGINS = ['Alice', 'Bob']
JMeterExecutor.JMETER_VER = '3.0'
self.obj.settings.merge({"path": path})
self.configure({
"execution": [{"scenario": {"requests": ["http://localhost"]}}],
"settings": {
"proxy": {
"address": "http://myproxy.com:8080",
"username": "user",
"password": "pass"}}})
self.obj.prepare()
jars = os.listdir(os.path.abspath(os.path.join(path, '../../lib')))
self.assertNotIn('httpclient-4.5.jar', jars)
self.assertIn('httpclient-4.5.2.jar', jars)
self.assertTrue(os.path.exists(path))
self.obj = get_jmeter()
self.obj.settings.merge({"path": path})
self.obj.execution.merge({"scenario": {"requests": ["http://localhost"]}})
self.obj.prepare()
finally:
set_jmeter_executor_vars(jmeter_vars)
def test_think_time_bug(self):
self.configure({
'execution': {
'ramp-up': '1m',
'hold-for': '1m30s',
'concurrency': 10,
'scenario': {
'think-time': 0.75,
'requests': [
'http://blazedemo.com/',
'http://blazedemo.com/vacation.html']}}})
self.obj.prepare()
result = open(self.obj.modified_jmx).read()
self.assertIn('<stringProp name="ConstantTimer.delay">750</stringProp>', result)
def test_cookiemanager_3_2_bug_requests(self):
""" specify implementation of CookieManager for case of generation from requests """
self.configure({
'execution': {
'hold-for': '1s',
'concurrency': 10,
'scenario': {
'requests': [
'http://blazedemo.com/']}}})
self.obj.prepare()
jmx = JMX(self.obj.original_jmx)
resource_elements = jmx.tree.findall(".//stringProp[@name='CookieManager.implementation']")
self.assertEqual(1, len(resource_elements))
new_implementation = "org.apache.jmeter.protocol.http.control.HC4CookieHandler"
self.assertEqual(resource_elements[0].text, new_implementation)
def test_cookiemanager_3_2_bug_jmx(self):
""" specify implementation of CookieManager for existing jmx """
self.configure({
'execution': {
'hold-for': '1s',
'concurrency': 10,
'scenario': {
'script': __dir__() + '/../resources/jmeter/jmx/cookiemanagers.jmx'}}})
self.obj.prepare()
orig_jmx = JMX(self.obj.original_jmx)
mod_jmx = JMX(self.obj.modified_jmx)
orig_elements = orig_jmx.tree.findall(".//stringProp[@name='CookieManager.implementation']")
mod_elements = mod_jmx.tree.findall(".//stringProp[@name='CookieManager.implementation']")
self.assertEqual(0, len(orig_elements))
self.assertEqual(2, len(mod_elements))
new_implementation = "org.apache.jmeter.protocol.http.control.HC4CookieHandler"
self.assertTrue(all(re.text == new_implementation for re in mod_elements))
def test_body_parse(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
sampler_element = xml_tree.findall(".//HTTPSamplerProxy[@testname='With body params']")
arguments_element_prop = sampler_element[0][0]
self.assertEqual(11, len(sampler_element[0].getchildren()))
self.assertEqual(1, len(arguments_element_prop.getchildren()))
self.assertEqual(2, len(arguments_element_prop[0].getchildren()))
self.assertEqual(1, len(arguments_element_prop[0].findall(".//elementProp[@name='param1']")))
self.assertEqual(1, len(arguments_element_prop.findall(".//elementProp[@name='param2']")))
def test_resource_files_collection_remote_prov(self):
self.obj.execution.merge({"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/files.jmx"}})
self.assertNotIn('files', self.obj.execution)
res_files = self.obj.resource_files()
self.assertEqual(len(res_files), 1)
self.assertIn('files', self.obj.execution)
self.assertEqual(4, len(self.obj.execution['files']))
def test_resource_files_paths(self):
"""
Check whether JMeter.resource_files() modifies filenames in JMX carefully
:return:
"""
self.obj.execution.merge({"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/files_paths.jmx"}})
file_in_home = get_full_path('~/file-in-home.csv')
file_was_created = False
if not os.path.exists(file_in_home):
file_was_created = True
with open(file_in_home, 'w') as _file: # real file is required by Engine.find_file()
_file.write('')
self.obj.engine.file_search_paths = ['tests'] # config not in cwd
self.obj.resource_files()
if file_was_created:
os.remove(file_in_home)
resource_files = []
jmx = JMX(self.obj.original_jmx)
resource_elements = jmx.tree.findall(".//stringProp[@name='filename']")
for resource_element in resource_elements:
if resource_element.text:
resource_files.append(resource_element.text)
self.assertEqual(2, len(resource_files))
for res_file in resource_files:
self.assertEqual(res_file, os.path.basename(res_file))
def test_resource_files_from_requests_remote_prov(self):
config = json.loads(open(__dir__() + "/../resources/json/get-post.json").read())
config['provisioning'] = 'cloud'
self.configure(config)
res_files = self.obj.resource_files()
self.assertEqual(len(res_files), 3)
self.assertEqual(len(set(res_files)), 2)
def test_resource_files_from_requests_local_prov(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.prepare()
files = ['jmeter-bzt.properties', 'modified_requests.jmx']
files += ['requests.jmx', 'system.properties']
artifacts = os.listdir(self.obj.engine.artifacts_dir)
self.assertTrue(all([_file in artifacts for _file in files]))
def test_resource_files_data_sources_shorthand(self):
csv_file = __dir__() + '/../resources/test1.csv'
csv_file_uni = u(__dir__() + '/../resources/test2.csv')
self.configure({
'execution': {
'scenario': {
'data-sources': [csv_file, csv_file_uni]}}})
resource_files = self.obj.resource_files()
self.assertIn(csv_file, resource_files)
self.assertIn(csv_file_uni, resource_files)
def test_resource_files_data_sources_full_form(self):
csv_file = __dir__() + '/../resources/test1.csv'
csv_file_uni = u(__dir__() + '/../resources/test2.csv')
self.configure({
'execution': {
'scenario': {
'data-sources': [{
'path': csv_file,
'loop': False,
'quoted': True,
}, {
'path': csv_file_uni,
'loop': False,
'quoted': True}]}}})
resource_files = self.obj.resource_files()
self.assertIn(csv_file, resource_files)
self.assertIn(csv_file_uni, resource_files)
def test_resource_files_jsr223(self):
js_file = __dir__() + '/../resources/data.js'
self.configure({
'execution': {
'scenario': {
'requests': [{
'url': 'http://blazedemo.com/',
'jsr223': {
'language': 'javascript',
'script-file': js_file,
}}]}}})
resource_files = self.obj.resource_files()
self.assertIn(js_file, resource_files)
def test_resource_files_jsr223s(self):
js_file = __dir__() + '/../resources/data.js'
js_file2 = __dir__() + '/../resources/data2.js'
self.configure({
'execution': {
'scenario': {
'requests': [{
'url': 'http://blazedemo.com/',
'jsr223': [{
'language': 'javascript',
'script-file': js_file,
}, {
'language': 'javascript',
'script-file': js_file2,
}]}]}}})
resource_files = self.obj.resource_files()
self.assertEqual(2, len(resource_files))
self.assertIn(js_file, resource_files)
self.assertIn(js_file2, resource_files)
def test_http_request_defaults(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
default_elements = xml_tree.findall(".//ConfigTestElement[@testclass='ConfigTestElement']")
self.assertEqual(1, len(default_elements))
default_element = default_elements[0]
self.assertEqual("www.somehost.com", default_element.find(".//stringProp[@name='HTTPSampler.domain']").text)
self.assertEqual("884", default_element.find(".//stringProp[@name='HTTPSampler.port']").text)
self.assertEqual("https", default_element.find(".//stringProp[@name='HTTPSampler.protocol']").text)
self.assertEqual("true", default_element.find(".//boolProp[@name='HTTPSampler.image_parser']").text)
self.assertEqual("true", default_element.find(".//boolProp[@name='HTTPSampler.concurrentDwn']").text)
self.assertEqual("10", default_element.find(".//stringProp[@name='HTTPSampler.concurrentPool']").text)
# all keepalives in requests are disabled
requests = xml_tree.findall(".//HTTPSamplerProxy[@testclass='HTTPSamplerProxy']")
for request in requests:
self.assertEqual("false", request.find(".//boolProp[@name='HTTPSampler.use_keepalive']").text)
def test_http_request_defaults_property(self):
self.obj.engine.config.merge(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
addr = 'https://${__P(hostname)}:${__P(port)}'
self.obj.engine.config['scenarios']['get-post']['default-address'] = addr
self.obj.execution = self.obj.engine.config['execution']
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
default_elements = xml_tree.findall(".//ConfigTestElement[@testclass='ConfigTestElement']")
self.assertEqual(1, len(default_elements))
default_element = default_elements[0]
self.assertEqual("${__P(hostname)}", default_element.find(".//stringProp[@name='HTTPSampler.domain']").text)
self.assertEqual("${__P(port)}", default_element.find(".//stringProp[@name='HTTPSampler.port']").text)
self.assertEqual("https", default_element.find(".//stringProp[@name='HTTPSampler.protocol']").text)
def test_add_shaper_constant(self):
self.configure({'execution': {'concurrency': 200, 'throughput': 100, 'hold-for': '1m',
'scenario': {'script': __dir__() + '/../resources/jmeter/jmx/http.jmx'}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
timer_ = ".//kg.apc.jmeter.timers.VariableThroughputTimer"
timer_ += "[@testclass='kg.apc.jmeter.timers.VariableThroughputTimer']"
shaper_elements = xml_tree.findall(timer_)
self.assertEqual(1, len(shaper_elements))
shaper_coll_element = shaper_elements[0].find(".//collectionProp[@name='load_profile']")
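        # shaper schedule rows store start RPS, end RPS and duration under stringProp names '49', '1567' and '53'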
self.assertEqual("100", shaper_coll_element.find(".//stringProp[@name='49']").text)
self.assertEqual("100", shaper_coll_element.find(".//stringProp[@name='1567']").text)
self.assertEqual("60", shaper_coll_element.find(".//stringProp[@name='53']").text)
def test_add_shaper_ramp_up(self):
self.configure(
{'execution': {'ramp-up': '1m', 'throughput': 10, 'hold-for': '2m', 'concurrency': 20,
'scenario': {'script': __dir__() + '/../resources/jmeter/jmx/http.jmx'}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
timer_ = ".//kg.apc.jmeter.timers.VariableThroughputTimer"
timer_ += "[@testclass='kg.apc.jmeter.timers.VariableThroughputTimer']"
shaper_elements = xml_tree.findall(timer_)
self.assertEqual(1, len(shaper_elements))
shaper_coll_element = shaper_elements[0].find(".//collectionProp[@name='load_profile']")
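        # expected schedule: ramp 1 -> 10 RPS over the 60s ramp-up, then hold 10 RPS for the 120s hold-for period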
self.assertEqual("1", shaper_coll_element.findall(".//stringProp[@name='49']")[0].text)
self.assertEqual("10", shaper_coll_element.findall(".//stringProp[@name='1567']")[0].text)
self.assertEqual("60", shaper_coll_element.findall(".//stringProp[@name='53']")[0].text)
self.assertEqual("10", shaper_coll_element.findall(".//stringProp[@name='49']")[1].text)
self.assertEqual("10", shaper_coll_element.findall(".//stringProp[@name='1567']")[1].text)
self.assertEqual("120", shaper_coll_element.findall(".//stringProp[@name='53']")[1].text)
def test_user_def_vars_from_requests(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
udv_elements = xml_tree.findall(".//Arguments[@testclass='Arguments']")
self.assertEqual(1, len(udv_elements))
def test_user_def_vars_override(self):
self.configure(
{'execution': {'concurrency': 200, 'throughput': 100, 'hold-for': '1m', 'scenario': {
'variables': {'my_var': 'http://demo.blazemeter.com/api/user', 'myvar2': 'val2'},
'properties': {'log_level.jmeter': 'DEBUG'},
'script': __dir__() + '/../resources/jmeter/jmx/http.jmx'}}})
self.obj.prepare()
# no new properties in scenario properties list
self.assertEqual(1, len(self.obj.engine.config['scenarios']['http.jmx']['properties']))
# no properties in module properties list
self.assertEqual(0, len(self.obj.settings.get('properties')))
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
udv_elements = xml_tree.findall(".//Arguments[@testclass='Arguments']")
self.assertEqual(1, len(udv_elements))
def test_nonstandard_errors_format(self):
obj = JTLErrorsReader(__dir__() + "/../resources/jmeter/jtl/nonstandard-errors.jtl", logging.getLogger(''))
obj.read_file()
values = obj.get_data(sys.maxsize)
        self.assertNotEqual(values[''][0]['msg'].find('Cannot find function error in object FirefoxDriver'), -1)
def test_standard_errors_format(self):
obj = JTLErrorsReader(__dir__() + "/../resources/jmeter/jtl/standard-errors.jtl", logging.getLogger(''))
obj.read_file()
values = obj.get_data(sys.maxsize)
        self.assertEqual(3, len(values))
def test_tranctl_jtl(self):
obj = JTLReader(__dir__() + "/../resources/jmeter/jtl/tranctl.jtl", logging.getLogger(''), None)
values = [x for x in obj.datapoints(True)]
        self.assertEqual(1, len(values))
def test_tabs_jtl(self):
obj = JTLReader(__dir__() + "/../resources/jmeter/jtl/tabs.jtl", logging.getLogger(''), None)
values = [x for x in obj.datapoints(True)]
        self.assertEqual(4, len(values))
def test_distributed_th_hostnames(self):
self.obj.execution.merge({"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/http.jmx"}})
self.obj.distributed_servers = ["127.0.0.1", "127.0.0.1"]
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
writers = xml_tree.findall(".//ResultCollector[@testname='KPI Writer']")
for writer in writers:
self.assertEqual('true', writer.find('objProp/value/hostname').text)
def test_distributed_props(self):
self.sniff_log(self.obj.log)
self.obj.execution.merge({"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/http.jmx"}})
self.obj.distributed_servers = ["127.0.0.1", "127.0.0.1"]
self.obj.settings['properties'] = BetterDict()
self.obj.settings['properties'].merge({"a": 1})
self.obj.prepare()
self.obj.startup()
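        # properties for distributed runs must be passed to the remote servers via the '-G' command-line flag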
self.assertIn("', '-G', '", self.log_recorder.debug_buff.getvalue())
def test_distributed_th_hostnames_complex(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.distributed_servers = ["127.0.0.1", "127.0.0.1"]
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
writers = xml_tree.findall(".//ResultCollector[@testname='KPI Writer']")
for writer in writers:
self.assertEqual('true', writer.find('objProp/value/hostname').text)
def test_dns_cache_mgr_scenario(self):
self.obj.execution.merge({"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/http.jmx"}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
dns_element = xml_tree.findall(".//DNSCacheManager")
# no dns manager when using jmx, no system.properties file
self.assertEqual(len(dns_element), 0)
arts = os.listdir(self.obj.engine.artifacts_dir)
self.assertNotIn("system.properties", arts)
def test_dns_cache_mgr_requests(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
dns_managers = xml_tree.findall(".//DNSCacheManager")
# 1 dns_manager
self.assertEqual(len(dns_managers), 1)
        # check system.properties file contents
sys_prop = open(os.path.join(self.obj.engine.artifacts_dir, "system.properties")).read()
self.assertTrue("any_prop=true" in sys_prop)
self.assertTrue("sun.net.inetaddr.ttl=0" in sys_prop)
def test_dns_cache_mgr_script(self):
self.configure({
'execution': {
'ramp-up': 10,
'throughput': 2,
'hold-for': 20,
'concurrency': 5,
'scenario': {
'think-time': '0.75s',
'script': __dir__() + '/../resources/jmeter/jmx/http.jmx'}},
'modules': {
'jmeter': {
'system-properties': {'any_prop': 'true'},
'properties': {
'log_level.jmeter': 'WARN',
'log_level.jmeter.threads': 'DEBUG',
'my-hostname': 'www.pre-test.com'}}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
dns_managers = xml_tree.findall(".//DNSCacheManager")
# 0 dns_managers
self.assertEqual(len(dns_managers), 0)
sys_prop = open(os.path.join(self.obj.engine.artifacts_dir, "system.properties")).read()
self.assertTrue("any_prop=true" in sys_prop)
self.assertFalse("sun.net.inetaddr.ttl=0" in sys_prop)
def test_stepping_tg_ramp_no_proportion(self):
self.configure({
'execution': {
'steps': 5,
'concurrency': 170,
'scenario': {
'script': __dir__() + '/../resources/jmeter/jmx/stepping_ramp_up.jmx'},
'ramp-up': '1m',
'distributed': ['127.0.0.1'],
'hold-for': '2m'}})
self.obj.prepare()
load = self.obj.get_load()
orig_xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
modified_xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
mod_stepping_tgs = modified_xml_tree.findall(".//kg.apc.jmeter.threads.SteppingThreadGroup")
orig_tgs = orig_xml_tree.findall(".//ThreadGroup")
self.assertEqual(len(mod_stepping_tgs), len(orig_tgs))
for orig_th, step_th in zip(orig_tgs, mod_stepping_tgs):
orig_num_threads = int(orig_th.find(".//stringProp[@name='ThreadGroup.num_threads']").text)
mod_num_threads = int(step_th.find(".//stringProp[@name='ThreadGroup.num_threads']").text)
self.assertEqual(orig_num_threads, mod_num_threads)
self.assertEqual(step_th.find(".//stringProp[@name='Start users period']").text,
str(int(load.ramp_up / load.steps)))
self.assertEqual(step_th.find(".//stringProp[@name='Start users count']").text,
str(int(orig_num_threads / load.steps)))
def test_stepping_tg_ramp_proportion(self):
"""
Tested with concurrency proportions
:return:
"""
self.configure({
'execution': {
'steps': 4, # from 5 to 4
'concurrency': 100, # from 170 to 100
'scenario': {
'script': __dir__() + '/../resources/jmeter/jmx/stepping_ramp_up.jmx'},
'ramp-up': '1m',
'distributed': ['127.0.0.1'],
'hold-for': '2m'}})
self.obj.prepare()
load = self.obj.get_load()
orig_xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
modified_xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
mod_stepping_tgs = modified_xml_tree.findall(".//kg.apc.jmeter.threads.SteppingThreadGroup")
orig_tgs = orig_xml_tree.findall(".//ThreadGroup")
self.assertEqual(len(mod_stepping_tgs), len(orig_tgs))
orig_summ_cnc = sum([int(x.find(".//stringProp[@name='ThreadGroup.num_threads']").text) for x in orig_tgs])
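        # each thread group's concurrency is scaled down proportionally to the requested total of 100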
for orig_th, step_th in zip(orig_tgs, mod_stepping_tgs):
orig_num_threads = int(orig_th.find(".//stringProp[@name='ThreadGroup.num_threads']").text)
mod_num_threads = int(step_th.find(".//stringProp[@name='ThreadGroup.num_threads']").text)
self.assertEqual(round(orig_num_threads * (float(load.concurrency) / orig_summ_cnc)), mod_num_threads)
self.assertEqual(step_th.find(".//stringProp[@name='Start users period']").text,
str(int(load.ramp_up / load.steps)))
self.assertEqual(step_th.find(".//stringProp[@name='Start users count']").text,
str(int(ceil(float(load.concurrency) / orig_summ_cnc * orig_num_threads / load.steps))))
def test_step_shaper(self):
self.configure({
'execution': {
'steps': 5,
'throughput': 100,
'concurrency': 170,
'scenario': {
'script': __dir__() + '/../resources/jmeter/jmx/stepping_ramp_up.jmx'},
'ramp-up': '1m',
'distributed': ['127.0.0.1'],
'hold-for': '2m'}})
self.obj.prepare()
load = self.obj.get_load()
modified_xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
timer = modified_xml_tree.findall(".//kg.apc.jmeter.timers.VariableThroughputTimer")
self.assertEqual(len(timer), 1)
for num, step_collection in enumerate(timer[0].findall(".//load_profile")):
            step_start_rps = step_collection.find(".//stringProp[@name='49']")
            step_stop_rps = step_collection.find(".//stringProp[@name='1567']")
            self.assertTrue(step_start_rps.text == step_stop_rps.text
                            == str(int(round(float(load.throughput) / load.steps))))
            if num + 1 == load.steps:
                self.assertEqual(step_collection.find(".//stringProp[@name='53']").text,
                                 str(int(load.hold + load.ramp_up / load.steps)))
            else:
                self.assertEqual(step_collection.find(".//stringProp[@name='53']").text,
                                 str(int(load.ramp_up / load.steps)))
def test_duration_loops_bug(self):
self.obj.execution.merge({
"concurrency": 10,
"ramp-up": 15,
"hold-for": "2m",
"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/http.jmx"}})
self.obj.prepare()
modified_xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
tg = modified_xml_tree.find(".//ThreadGroup")
loop_ctrl = tg.find(".//elementProp[@name='ThreadGroup.main_controller']")
tg_loops = loop_ctrl.find(".//intProp[@name='LoopController.loops']")
tg_forever = loop_ctrl.find(".//boolProp[@name='LoopController.continue_forever']")
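        # duration-limited load: loops are forced to -1 so the run is bounded by ramp-up/hold-for, not a loop count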
self.assertEqual(tg_loops.text, "-1")
self.assertEqual(tg_forever.text, "false")
def test_force_delimiters(self):
self.obj.execution.merge({
"iterations": 10,
"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/delimiters.jmx"}})
self.obj.prepare()
jmx = JMX(self.obj.modified_jmx)
delimiters = [delimiter.text for delimiter in jmx.get("CSVDataSet>stringProp[name='delimiter']")]
self.assertEqual(['1', '2', ','], delimiters)
def test_iterations_loop_bug(self):
self.obj.execution.merge({
"iterations": 10,
"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/http.jmx"}})
self.obj.prepare()
modified_xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
tg = modified_xml_tree.find(".//ThreadGroup")
loop_ctrl = tg.find(".//elementProp[@name='ThreadGroup.main_controller']")
tg_loops = loop_ctrl.find(".//stringProp[@name='LoopController.loops']")
tg_forever = loop_ctrl.find(".//boolProp[@name='LoopController.continue_forever']")
self.assertEqual(tg_loops.text, "10")
self.assertEqual(tg_forever.text, "false")
self.obj1 = get_jmeter()
script_path = __dir__() + "/../resources/jmeter/jmx/http.jmx"
self.obj1.execution.merge({"scenario": {"script": script_path}})
try:
self.obj1.prepare()
modified_xml_tree = etree.fromstring(open(self.obj1.modified_jmx, "rb").read())
tg = modified_xml_tree.find(".//ThreadGroup")
loop_ctrl = tg.find(".//elementProp[@name='ThreadGroup.main_controller']")
tg_loops = loop_ctrl.find("*[@name='LoopController.loops']")
tg_forever = loop_ctrl.find(".//boolProp[@name='LoopController.continue_forever']")
self.assertEqual(tg_loops.text, "1") # default value, not disabled
self.assertEqual(tg_forever.text, "false")
finally:
self.obj1.post_process()
def test_distributed_gui(self):
        self.configure(yaml.safe_load(open(__dir__() + "/../resources/yaml/distributed_gui.yml").read()))
self.obj.prepare()
prop_file_path = os.path.join(self.obj.engine.artifacts_dir, "jmeter-bzt.properties")
self.assertTrue(os.path.exists(prop_file_path))
with open(prop_file_path) as prop_file:
contents = prop_file.read()
self.assertIn("remote_hosts=127.0.0.1,127.0.0.2", contents)
def test_empty_requests(self):
# https://groups.google.com/forum/#!topic/codename-taurus/iaT6O2UhfBE
self.configure({
'execution': {
'ramp-up': '10s',
'requests': ['http://blazedemo.com/',
'http://blazedemo.com/vacation.html'],
'hold-for': '30s',
'concurrency': 5,
'scenario': {'think-time': 0.75}}})
self.assertRaises(TaurusConfigError, self.obj.prepare)
def test_variable_csv_file(self):
self.obj.execution.merge({
"scenario": {
"script": __dir__() + "/../resources/jmeter/jmx/variable_csv.jmx"}})
self.obj.prepare()
artifacts = os.listdir(self.obj.engine.artifacts_dir)
self.assertEqual(len(artifacts), 5) # 2*effective, .properties, .out, .err
with open(self.obj.modified_jmx) as fds:
jmx = fds.read()
self.assertIn('<stringProp name="filename">${root}/csvfile.csv</stringProp>', jmx)
def test_css_jquery_extractor(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.prepare()
target_jmx = os.path.join(self.obj.engine.artifacts_dir, "requests.jmx")
modified_xml_tree = etree.fromstring(open(target_jmx, "rb").read())
jq_css_extractors = modified_xml_tree.findall(".//HtmlExtractor")
self.assertEqual(2, len(jq_css_extractors))
simplified_extractor = modified_xml_tree.find(".//HtmlExtractor[@testname='Get name1']")
self.assertEqual(simplified_extractor.find(".//stringProp[@name='HtmlExtractor.refname']").text, "name1")
self.assertEqual(simplified_extractor.find(".//stringProp[@name='HtmlExtractor.expr']").text,
"input[name~=my_input]")
self.assertEqual(simplified_extractor.find(".//stringProp[@name='HtmlExtractor.attribute']").text, None)
self.assertEqual(simplified_extractor.find(".//stringProp[@name='HtmlExtractor.match_number']").text, "0")
self.assertEqual(simplified_extractor.find(".//stringProp[@name='HtmlExtractor.default']").text, "NOT_FOUND")
full_form_extractor = modified_xml_tree.find(".//HtmlExtractor[@testname='Get name2']")
self.assertEqual(full_form_extractor.find(".//stringProp[@name='HtmlExtractor.refname']").text, "name2")
self.assertEqual(full_form_extractor.find(".//stringProp[@name='HtmlExtractor.expr']").text,
"input[name=JMeter]")
self.assertEqual(full_form_extractor.find(".//stringProp[@name='HtmlExtractor.attribute']").text, "value")
self.assertEqual(full_form_extractor.find(".//stringProp[@name='HtmlExtractor.match_number']").text, "1")
self.assertEqual(full_form_extractor.find(".//stringProp[@name='HtmlExtractor.default']").text, "NV_JMETER")
def test_xpath_extractor(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.prepare()
target_jmx = os.path.join(self.obj.engine.artifacts_dir, "requests.jmx")
modified_xml_tree = etree.fromstring(open(target_jmx, "rb").read())
xpath_extractors = modified_xml_tree.findall(".//XPathExtractor")
self.assertEqual(2, len(xpath_extractors))
simplified = modified_xml_tree.find(".//XPathExtractor[@testname='Get xpath1']")
self.assertEqual(simplified.find(".//stringProp[@name='XPathExtractor.refname']").text, "xpath1")
self.assertEqual(simplified.find(".//stringProp[@name='XPathExtractor.xpathQuery']").text,
"/html/head/title")
self.assertEqual(simplified.find(".//stringProp[@name='XPathExtractor.default']").text, "NOT_FOUND")
self.assertEqual(simplified.find(".//boolProp[@name='XPathExtractor.validate']").text, "false")
self.assertEqual(simplified.find(".//boolProp[@name='XPathExtractor.whitespace']").text, "true")
self.assertEqual(simplified.find(".//boolProp[@name='XPathExtractor.tolerant']").text, "false")
full_form = modified_xml_tree.find(".//XPathExtractor[@testname='Get xpath2']")
self.assertEqual(full_form.find(".//stringProp[@name='XPathExtractor.refname']").text, "xpath2")
self.assertEqual(full_form.find(".//stringProp[@name='XPathExtractor.xpathQuery']").text,
"/html/head/base")
self.assertEqual(full_form.find(".//stringProp[@name='XPathExtractor.default']").text, "<no base>")
self.assertEqual(full_form.find(".//boolProp[@name='XPathExtractor.validate']").text, "true")
self.assertEqual(full_form.find(".//boolProp[@name='XPathExtractor.whitespace']").text, "true")
self.assertEqual(full_form.find(".//boolProp[@name='XPathExtractor.tolerant']").text, "true")
def test_xpath_assertion(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.prepare()
target_jmx = os.path.join(self.obj.engine.artifacts_dir, "requests.jmx")
modified_xml_tree = etree.fromstring(open(target_jmx, "rb").read())
assertions = modified_xml_tree.findall(".//XPathAssertion")
self.assertEqual(2, len(assertions))
simplified = assertions[0]
self.assertEqual(simplified.find(".//stringProp[@name='XPath.xpath']").text, "/note/to")
self.assertEqual(simplified.find(".//boolProp[@name='XPath.validate']").text, "false")
self.assertEqual(simplified.find(".//boolProp[@name='XPath.whitespace']").text, "true")
self.assertEqual(simplified.find(".//boolProp[@name='XPath.tolerant']").text, "false")
self.assertEqual(simplified.find(".//boolProp[@name='XPath.negate']").text, "false")
full_form = assertions[1]
self.assertEqual(full_form.find(".//stringProp[@name='XPath.xpath']").text, "/note/from")
self.assertEqual(full_form.find(".//boolProp[@name='XPath.validate']").text, "true")
self.assertEqual(full_form.find(".//boolProp[@name='XPath.whitespace']").text, "true")
self.assertEqual(full_form.find(".//boolProp[@name='XPath.tolerant']").text, "true")
self.assertEqual(full_form.find(".//boolProp[@name='XPath.negate']").text, "true")
def test_jsonpath_assertion(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.prepare()
target_jmx = os.path.join(self.obj.engine.artifacts_dir, "requests.jmx")
modified_xml_tree = etree.fromstring(open(target_jmx, "rb").read())
path = ".//com.atlantbh.jmeter.plugins.jsonutils.jsonpathassertion.JSONPathAssertion"
assertions = modified_xml_tree.findall(path)
self.assertEqual(4, len(assertions))
vals = [
{'path': '$.', 'exp_val': None, 'valid': 'false',
'null': 'false', 'invert': 'false', 'regexp': 'true'},
{'path': '$.res[0].type', 'exp_val': 'some_value.1', 'valid': 'true',
'null': 'false', 'invert': 'false', 'regexp': 'true'},
{'path': '$.res[1].ip', 'exp_val': 'some_value.2', 'valid': 'true',
'null': 'false', 'invert': 'true', 'regexp': 'false'},
{'path': '$.res[2].default', 'exp_val': None, 'valid': 'false',
'null': 'true', 'invert': 'false', 'regexp': 'true'}]
for num in range(len(assertions)):
assertion = assertions[num]
val = vals[num]
self.assertEqual(val['path'], assertion.find(".//stringProp[@name='JSON_PATH']").text)
self.assertEqual(val['exp_val'], assertion.find(".//stringProp[@name='EXPECTED_VALUE']").text)
self.assertEqual(val['valid'], assertion.find(".//boolProp[@name='JSONVALIDATION']").text)
self.assertEqual(val['null'], assertion.find(".//boolProp[@name='EXPECT_NULL']").text)
self.assertEqual(val['invert'], assertion.find(".//boolProp[@name='INVERT']").text)
self.assertEqual(val['regexp'], assertion.find(".//boolProp[@name='ISREGEX']").text)
def test_shutdown_soft(self):
self.sniff_log(self.obj.log)
self.obj.execution.merge({"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/dummy.jmx"}})
self.obj.prepare()
self.obj._env['TEST_MODE'] = 'server'
self.obj.startup()
time.sleep(1)
self.obj.management_port = 8089
self.obj.shutdown()
self.assertIn("JMeter stopped on Shutdown command", self.log_recorder.debug_buff.getvalue())
def test_embedded_resources_main_sample_fail_assert(self):
obj = JTLErrorsReader(__dir__() + "/../resources/jmeter/jtl/resource-errors-main-assert.jtl",
logging.getLogger(''))
obj.read_file()
values = obj.get_data(sys.maxsize)
self.assertEqual(values.get('')[0].get("msg"), "Test failed")
self.assertEqual(values.get('HTTP Request')[0].get("msg"), "Test failed")
def test_embedded_resources_fail_child_no_assert(self):
obj = JTLErrorsReader(__dir__() + "/../resources/jmeter/jtl/resource-errors-child-no-assert.jtl",
logging.getLogger(''))
obj.read_file()
values = obj.get_data(sys.maxsize)
self.assertEqual(values.get('')[0].get("msg"), "NOT FOUND")
self.assertEqual(values.get('HTTP Request')[0].get("msg"), "NOT FOUND")
def test_embedded_resources_fail_child_assert(self):
obj = JTLErrorsReader(__dir__() + "/../resources/jmeter/jtl/resource-errors-child-assert.jtl",
logging.getLogger(''))
obj.read_file()
values = obj.get_data(sys.maxsize)
self.assertEqual(values.get('')[0].get("msg"), "subsample assertion error")
self.assertEqual(values.get('')[1].get("msg"), "NOT FOUND")
self.assertEqual(values.get('HTTP Request')[0].get("msg"), "subsample assertion error")
self.assertEqual(values.get('HTTP Request')[1].get("msg"), "NOT FOUND")
def test_resource_tc(self):
obj = JTLErrorsReader(__dir__() + "/../resources/jmeter/jtl/resource_tc.jtl", logging.getLogger(''))
obj.read_file()
values = obj.get_data(sys.maxsize)
self.assertEqual(values.get('')[0].get("msg"), "message")
self.assertEqual(values.get('')[1].get("msg"), "FOUND")
self.assertEqual(values.get('')[2].get("msg"), "second message")
self.assertEqual(values.get('')[3].get("msg"), "NOT FOUND")
self.assertEqual(values.get('')[4].get("msg"), "Failed")
self.assertEqual(values.get('tc1')[0].get("msg"), "FOUND")
self.assertEqual(values.get('tc3')[0].get("msg"), "message")
self.assertEqual(values.get('tc3')[1].get("msg"), "second message")
self.assertEqual(values.get('tc4')[0].get("msg"), "NOT FOUND")
self.assertEqual(values.get('tc5')[0].get("msg"), "Failed")
def test_embedded_resources_no_fail(self):
obj = JTLErrorsReader(__dir__() + "/../resources/jmeter/jtl/resource-errors-no-fail.jtl", logging.getLogger(''))
obj.read_file()
values = obj.get_data(sys.maxsize)
self.assertEqual(len(values.get('HTTP Request')), 1)
self.assertEqual(values.get('HTTP Request')[0].get("msg"), "failed_resource_message")
def test_fail_on_zero_results(self):
self.obj.engine.aggregator = ConsolidatingAggregator()
self.obj.execution.merge({"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/dummy.jmx"}})
self.obj.prepare()
self.obj.startup()
self.obj.shutdown()
self.obj.engine.prepared = [self.obj]
self.obj.engine.started = [self.obj]
prov = Local()
prov.engine = self.obj.engine
prov.executors = [self.obj]
self.obj.engine.provisioning = prov
self.assertRaises(ToolError, self.obj.engine.provisioning.post_process)
def test_ok_with_results(self):
self.obj.execution.merge({"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/dummy.jmx"}})
self.obj.engine.aggregator = ConsolidatingAggregator()
self.obj.prepare()
self.obj.engine.prepared = [self.obj]
self.obj.engine.started = [self.obj]
prov = Local()
prov.engine = self.obj.engine
prov.executors = [self.obj]
self.obj.engine.provisioning = prov
self.obj.reader.read_records = 13
self.obj.engine.provisioning.post_process()
def test_convert_tgroups_no_load(self):
self.obj.execution.merge({
"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/SteppingThreadGroup.jmx"}})
self.obj.prepare()
modified_xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
st_tg = modified_xml_tree.find(".//kg.apc.jmeter.threads.SteppingThreadGroup")
self.assertNotEqual(st_tg, None)
ul_tg = modified_xml_tree.find(".//kg.apc.jmeter.threads.UltimateThreadGroup")
self.assertNotEqual(ul_tg, None)
def test_convert_tgroups_load_modifications(self):
self.obj.execution.merge({
"iterations": 20,
"ramp-up": 10,
"hold-for": "2m",
"scenario": {"script": __dir__() + "/../resources/jmeter/jmx/SteppingThreadGroup.jmx"}})
self.obj.prepare()
modified_xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
st_tg = modified_xml_tree.find(".//kg.apc.jmeter.threads.SteppingThreadGroup")
self.assertEqual(st_tg, None)
ul_tg = modified_xml_tree.find(".//kg.apc.jmeter.threads.UltimateThreadGroup")
self.assertEqual(ul_tg, None)
converted_st_tg = modified_xml_tree.find(".//ThreadGroup[@testname='stepping tg']")
loop_ctrl = converted_st_tg.find(".//elementProp[@name='ThreadGroup.main_controller']")
tg_loops = loop_ctrl.find(".//*[@name='LoopController.loops']")
tg_forever = loop_ctrl.find(".//boolProp[@name='LoopController.continue_forever']")
self.assertEqual(tg_loops.text, "20")
self.assertEqual(tg_forever.text, "false")
st_tg_concurrency = converted_st_tg.find(".//stringProp[@name='ThreadGroup.num_threads']")
self.assertEqual(st_tg_concurrency.text, "123")
def test_smart_time(self):
s_t = JMeterScenarioBuilder.smart_time
self.assertEqual(s_t('1m'), 60 * 1000.0)
self.assertEqual(s_t('${VAR}'), '${VAR}')
def test_json_body_app_str(self):
self.obj.execution.merge({
"scenario": {
"requests": [{
"url": "http://blazedemo.com",
"headers": {"Content-Type": "application/json"},
"body": "{\"store_id\": \"${store_id}\", \"display_name\": \"${display_name}\"}"}]}})
self.obj.prepare()
jmx = JMX(self.obj.original_jmx)
selector = 'elementProp[name="HTTPsampler.Arguments"]>collectionProp'
selector += '>elementProp>stringProp[name="Argument.value"]'
res = jmx.get(selector)[0].text
self.assertNotEqual(res.find('store_id'), -1)
def test_json_body_app_dic(self):
self.obj.execution.merge({
"scenario": {
"requests": [{
"url": "http://blazedemo.com",
"headers": {"Content-Type": "application/json"},
"body": {
"store_id": "${store_id}",
"display_name": "${display_name}"}}]}})
self.obj.prepare()
jmx = JMX(self.obj.original_jmx)
selector = 'elementProp[name="HTTPsampler.Arguments"]>collectionProp'
selector += '>elementProp>stringProp[name="Argument.value"]'
res = jmx.get(selector)[0].text
self.assertNotEqual(res.find('store_id'), -1)
self.assertTrue(isinstance(json.loads(res), dict))
def test_json_body_app_list(self):
self.obj.execution.merge({
"scenario": {
"requests": [{
"url": "http://blazedemo.com",
"headers": {"Content-Type": "application/json"},
"body": [
{"store_id": "${store_id}"},
{"display_name": "${display_name}"}]}]}})
self.obj.prepare()
jmx = JMX(self.obj.original_jmx)
selector = 'elementProp[name="HTTPsampler.Arguments"]>collectionProp'
selector += '>elementProp>stringProp[name="Argument.value"]'
res = jmx.get(selector)[0].text
self.assertNotEqual(res.find('store_id'), -1)
self.assertTrue(isinstance(json.loads(res), list))
def test_json_body_requires_header(self):
self.obj.execution.merge({
"scenario": {
"requests": [{
"url": "http://blazedemo.com",
"body": {
"structure": {
"one": 2,
"two": "1"}}}]}})
self.assertRaises(TaurusInternalException, self.obj.prepare)
jmx = JMX(self.obj.original_jmx)
selector = 'stringProp[name="Argument.value"]'
self.assertTrue(all(not jprop.text.startswith('defaultdict') for jprop in jmx.get(selector)))
def test_json_body_no_app(self):
self.obj.execution.merge({
"scenario": {
"requests": [{
"url": "http://blazedemo.com",
"headers": {"Content-Type": "application/exi"},
"body": {
"store_id": "${store_id}",
"display_name": "${display_name}"
}}]}})
self.obj.prepare()
jmx = JMX(self.obj.original_jmx)
selector = 'elementProp[name="HTTPsampler.Arguments"]>collectionProp'
selector += '>elementProp>stringProp[name="Argument.value"]'
self.assertEqual(jmx.get(selector)[0].text.find('"store_id": "${store_id}"'), -1)
def test_jtl_verbose(self):
self.obj.execution.merge({
"write-xml-jtl": "full",
"scenario": {
"requests": [{
"url": "http://blazedemo.com"}]}})
self.obj.prepare()
jmx = JMX(self.obj.modified_jmx)
self.assertNotEqual(jmx.get('ResultCollector[testname="Trace Writer"]'), [])
self.assertEqual(jmx.get('ResultCollector[testname="Errors Writer"]'), [])
def test_jtl_errors(self):
self.obj.execution.merge({
"write-xml-jtl": "error",
"scenario": {
"requests": [{
"url": "http://blazedemo.com"}]}})
self.obj.prepare()
jmx = JMX(self.obj.modified_jmx)
self.assertNotEqual(jmx.get('ResultCollector[testname="Errors Writer"]'), [])
self.assertEqual(jmx.get('ResultCollector[testname="Trace Writer"]'), [])
def test_jtl_none(self):
self.obj.execution.merge({
"write-xml-jtl": "bla-bla-bla",
"scenario": {
"requests": [{
"url": "http://blazedemo.com"}]}})
self.obj.prepare()
jmx = JMX(self.obj.modified_jmx)
self.assertEqual(jmx.get('ResultCollector[testname="Trace Writer"]'), [])
self.assertEqual(jmx.get('ResultCollector[testname="Errors Writer"]'), [])
def test_jtl_flags(self):
self.obj.execution.merge({
"write-xml-jtl": "error",
"scenario": {
"requests": [{
"url": "http://blazedemo.com"}]}})
self.obj.settings.merge({'xml-jtl-flags': {
'responseData': True,
'message': False}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
writers = xml_tree.findall(".//ResultCollector[@testname='Errors Writer']")
self.assertEqual(1, len(writers))
self.assertEqual('false', writers[0].find('objProp/value/samplerData').text)
self.assertEqual('false', writers[0].find('objProp/value/message').text)
self.assertEqual('true', writers[0].find('objProp/value/responseData').text)
self.assertEqual('true', writers[0].find('objProp/value/bytes').text)
def test_jmx_modification_unicode(self):
cfg_selector = ('Home Page>HTTPsampler.Arguments>Arguments.arguments'
'>param>Argument.value')
self.obj.execution.merge({
"scenario": {
"script": __dir__() + "/../resources/jmeter/jmx/dummy_plan.jmx",
"modifications": {
"set-prop": {
cfg_selector: u"✓"}}}})
selector = ("[testname='Home Page']>[name='HTTPsampler.Arguments']"
">[name='Arguments.arguments']>[name='param']>[name='Argument.value']")
self.obj.prepare()
jmx = JMX(self.obj.modified_jmx)
self.assertEqual(jmx.get(selector)[0].text, u"✓")
def test_jmx_modification_add_stringprop(self):
cfg_selector = ('Home Page>HTTPsampler.Arguments>Arguments.arguments>param>new_str')
self.obj.execution.merge({
"scenario": {
"script": __dir__() + "/../resources/jmeter/jmx/dummy_plan.jmx",
"modifications": {
"set-prop": {
cfg_selector: 'new_value'}}}})
selector = ("[testname='Home Page']>[name='HTTPsampler.Arguments']"
">[name='Arguments.arguments']>[name='param']>[name='new_str']")
self.obj.prepare()
jmx = JMX(self.obj.modified_jmx)
self.assertEqual(jmx.get(selector)[0].text, 'new_value')
def test_resources_regex(self):
self.obj.execution.merge({
"scenario": {
"retrieve-resources": True,
"retrieve-resources-regex": "myregex",
"requests": [{"url": "http://blazedemo.com/"}]}})
self.obj.prepare()
jmx = JMX(self.obj.modified_jmx)
self.assertEqual(jmx.get('boolProp[name="HTTPSampler.image_parser"]')[0].text, "true")
self.assertEqual(jmx.get('stringProp[name="HTTPSampler.embedded_url_re"]')[0].text, "myregex")
def test_data_source_list(self):
self.obj.execution.merge({
"scenario": {
"requests": ["http://blazedemo.com/"],
# note that data-sources should be a list of strings/objects
"data-sources": {
"path": __dir__() + "/../resources/test1.csv"}}})
self.assertRaises(TaurusConfigError, self.obj.prepare)
def test_force_parent_sample(self):
self.configure({
'execution': {
'scenario': {
# 'force-parent-sample' is True by default
'script': __dir__() + '/../resources/jmeter/jmx/transactions.jmx'}}})
self.obj.prepare()
jmx = JMX(self.obj.modified_jmx)
selector = 'TransactionController > boolProp[name="TransactionController.parent"]'
props = jmx.get(selector)
self.assertEqual(len(props), 2)
self.assertTrue(all(prop.text == 'true' for prop in props))
def test_disable_force_parent_sample(self):
self.configure({
'execution': {
'scenario': {
'script': __dir__() + '/../resources/jmeter/jmx/transactions.jmx',
'force-parent-sample': False}}})
self.obj.prepare()
jmx = JMX(self.obj.modified_jmx)
selector = 'TransactionController > boolProp[name="TransactionController.parent"]'
props = jmx.get(selector)
self.assertEqual(len(props), 2)
non_parent = props[1]
self.assertEqual(non_parent.text, 'false')
def test_transaction_and_requests1(self):
self.configure({
'execution': {
'scenario': {
'force-parent-sample': False,
'requests': [{
'transaction': 'MY_TRANSACTION',
'do': [{
'url': 'http://blazedemo.com'}]}]}}})
self.obj.prepare()
jmx = JMX(self.obj.modified_jmx)
selector = 'TransactionController > boolProp[name="TransactionController.parent"]'
props = jmx.get(selector)
self.assertEqual(len(props), 1)
non_parent = props[0]
self.assertEqual(non_parent.text, 'false')
def test_transaction_and_requests2(self):
self.configure({
'execution': {
'scenario': {
'requests': [{
'transaction': 'MY_TRANSACTION',
'force-parent-sample': False,
'do': [{
'url': 'http://blazedemo.com'}]}]}}})
self.obj.prepare()
jmx = JMX(self.obj.modified_jmx)
selector = 'TransactionController > boolProp[name="TransactionController.parent"]'
props = jmx.get(selector)
self.assertEqual(len(props), 1)
non_parent = props[0]
self.assertEqual(non_parent.text, 'false')
def test_jvm_heap_settings(self):
self.configure({
'execution': {
'iterations': 1,
'scenario': {
'script': __dir__() + '/../resources/jmeter/jmx/http.jmx'}},
'modules': {
'jmeter': {
'memory-xmx': '2G'}}})
self.obj.prepare()
self.obj._env['TEST_MODE'] = 'heap'
self.obj.startup()
self.obj.shutdown()
self.obj.post_process()
with open(os.path.join(self.obj.engine.artifacts_dir, "jmeter.out")) as fds:
stdout = fds.read()
self.assertIn("-Xmx2G", stdout)
def test_data_sources_in_artifacts(self):
self.configure({
'execution': {
'iterations': 1,
'scenario': {
'data-sources': ['test1.csv'],
'requests': ['http://blazedemo.com/${url}']}}})
csv_source = __dir__() + '/../resources/test1.csv'
self.obj.engine.file_search_paths.append(self.obj.engine.artifacts_dir)
shutil.copy2(csv_source, self.obj.engine.artifacts_dir)
self.obj.prepare()
def test_body_file_in_artifacts(self):
self.configure({
'execution': {
'iterations': 1,
'scenario': {
'requests': [{
"method": "PUT",
"url": "http://blazedemo.com/",
"body-file": "http.jmx"}]}}})
jmx_source = __dir__() + '/../resources/jmeter/jmx/http.jmx'
self.obj.engine.file_search_paths.append(self.obj.engine.artifacts_dir)
shutil.copy2(jmx_source, self.obj.engine.artifacts_dir)
self.obj.prepare()
def test_jmx_paths_local_prov(self):
"Ensures that file paths in JMX are not changed during local prov"
script = __dir__() + "/../resources/jmeter/jmx/csvs.jmx"
self.configure({
'execution': {
'iterations': 1,
'scenario': {
"script": script}}})
self.obj.prepare()
original = JMX(script)
prepared = JMX(self.obj.modified_jmx)
query = '//CSVDataSet/stringProp[@name="filename"]/text()'
original_paths = original.tree.xpath(query)
prepared_paths = prepared.tree.xpath(query)
self.assertEqual(original_paths, prepared_paths)
def test_jmx_paths_remote_prov(self):
"Ensures that file paths in JMX are modified during remote prov"
script = __dir__() + "/../resources/jmeter/jmx/csvs.jmx"
self.configure({
'execution': {
'iterations': 1,
'scenario': {
"script": script}},
'provisioning': 'cloud'})
self.obj.resource_files()
original = JMX(script)
prepared = JMX(self.obj.original_jmx)
query = '//CSVDataSet/stringProp[@name="filename"]/text()'
original_paths = original.tree.xpath(query)
prepared_paths = prepared.tree.xpath(query)
self.assertEqual(len(original_paths), len(prepared_paths))
for orig, modified in zip(original_paths, prepared_paths):
self.assertNotEqual(orig, modified)
self.assertEqual(os.path.basename(orig), os.path.basename(modified))
def test_intprop_modification(self):
script = __dir__() + "/../resources/jmeter/jmx/int_threads.jmx"
self.configure({
'execution': {
'iterations': 1,
'concurrency': 3,
'scenario': {
"script": script}}})
self.obj.prepare()
prepared = JMX(self.obj.modified_jmx)
tnum_sel = ".//*[@name='ThreadGroup.num_threads']"
prepared_threads = prepared.tree.xpath(tnum_sel)
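        # requested concurrency of 3 is split across the two original thread groups as 1 + 2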
self.assertEqual(1, int(prepared_threads[0].text))
self.assertEqual(2, int(prepared_threads[1].text))
def test_request_logic_if(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"if": "<cond>",
"then": [
"http://blazedemo.com/"]}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
if_controller = xml_tree.find(".//IfController")
self.assertIsNotNone(if_controller)
condition = xml_tree.find(".//IfController/stringProp[@name='IfController.condition']")
self.assertIsNotNone(condition)
self.assertEqual(condition.text, "<cond>")
def test_request_logic_if_else(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"if": "<cond>",
"then": [
"http://blazedemo.com/"],
"else": [
"http://demo.blazemeter.com/"]}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
ifs = xml_tree.findall(".//IfController")
self.assertEqual(2, len(ifs))
conditions = xml_tree.findall(".//IfController/stringProp[@name='IfController.condition']")
self.assertEqual(2, len(conditions))
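        # the 'else' branch becomes a second IfController guarded by the negated condition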
self.assertEqual(conditions[0].text, "<cond>")
self.assertEqual(conditions[1].text, "!(<cond>)")
def test_request_logic_nested_if(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"if": "<cond1>",
"then": [
"http://blazedemo.com/", {
"if": "<cond2>",
"then": [
"http://demo.blazemeter.com/"]}]}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
ifs = xml_tree.findall(".//IfController")
self.assertEqual(2, len(ifs))
conditions = xml_tree.findall(".//IfController/stringProp[@name='IfController.condition']")
self.assertEqual(2, len(conditions))
self.assertEqual(conditions[0].text, "<cond1>")
self.assertEqual(conditions[1].text, "<cond2>")
def test_resource_files_nested_requests(self):
self.configure({
'execution': {
'scenario': {
"data-sources": [__dir__() + "/../resources/test1.csv"],
"requests": [{
"if": "<cond1>",
"then": [{
"if": "<cond2>",
"then": [{
"url": "http://demo.blazemeter.com/",
"method": "POST",
"body-file": __dir__() + "/../resources/jmeter/jmx/dummy.jmx"
}],
"else": [
{"action": "continue"},
]
}]}]}},
'provisioning': 'cloud'})
res_files = self.obj.resource_files()
self.assertEqual(len(res_files), 2)
def test_request_logic_loop(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"loop": 10,
"do": [
"http://blazedemo.com/"]}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
controller = xml_tree.find(".//LoopController")
self.assertIsNotNone(controller)
loops = xml_tree.find(".//LoopController/stringProp[@name='LoopController.loops']")
self.assertEqual(loops.text, "10")
forever = xml_tree.find(".//LoopController/boolProp[@name='LoopController.continue_forever']")
self.assertEqual(forever.text, "true")
def test_request_logic_loop_forever(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"loop": "forever",
"do": [
"http://blazedemo.com/"]}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
controller = xml_tree.find(".//LoopController")
self.assertIsNotNone(controller)
forever = xml_tree.find(".//LoopController/boolProp[@name='LoopController.continue_forever']")
self.assertEqual(forever.text, "true")
loops = xml_tree.find(".//LoopController/stringProp[@name='LoopController.loops']")
self.assertEqual(loops.text, "-1")
def test_request_logic_loop_invalid(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"loop": 100}]}}})
self.assertRaises(TaurusConfigError, self.obj.prepare)
def test_resource_files_loops(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"loop": 100,
"do": [{
"url": "http://demo.blazemeter.com/",
"method": "POST",
"body-file": __dir__() + "/../resources/jmeter/jmx/dummy.jmx"}]}]}},
'provisioning': 'cloud'})
res_files = self.obj.resource_files()
self.assertEqual(len(res_files), 1)
def test_request_logic_while(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"while": "<cond>",
"do": [
"http://blazedemo.com/"]}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
if_controller = xml_tree.find(".//WhileController")
self.assertIsNotNone(if_controller)
condition = xml_tree.find(".//WhileController/stringProp[@name='WhileController.condition']")
self.assertIsNotNone(condition)
self.assertEqual(condition.text, "<cond>")
def test_request_logic_while_invalid(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"while": "<cond>"}]}}})
self.assertRaises(TaurusConfigError, self.obj.prepare)
def test_request_logic_while_resources(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"while": "<cond>",
"do": [{
"url": "http://demo.blazemeter.com/",
"method": "POST",
"body-file": __dir__() + "/../resources/jmeter/jmx/dummy.jmx"}]}]}},
'provisioning': 'cloud'})
res_files = self.obj.resource_files()
self.assertEqual(len(res_files), 1)
def test_request_logic_foreach(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"foreach": "name in usernames",
"do": [
"http://site.com/users/${name}"]}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
self.assertIsNotNone(xml_tree.find(".//ForeachController"))
        input_element = xml_tree.find(".//ForeachController/stringProp[@name='ForeachController.inputVal']")
        self.assertEqual(input_element.text, "usernames")
loop_var = xml_tree.find(".//ForeachController/stringProp[@name='ForeachController.returnVal']")
self.assertEqual(loop_var.text, "name")
def test_request_logic_foreach_resources(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"foreach": "item in coll",
"do": [{
"url": "http://${item}.blazemeter.com/",
"method": "POST",
"body-file": __dir__() + "/../resources/jmeter/jmx/dummy.jmx"}]}]}},
'provisioning': 'cloud'})
res_files = self.obj.resource_files()
self.assertEqual(len(res_files), 1)
def test_request_logic_transaction(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"transaction": "API",
"do": [
"http://blazedemo.com/",
"http://blazedemo.com/reserve.php"]}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
controller = xml_tree.find(".//TransactionController")
self.assertIsNotNone(controller)
self.assertEqual(controller.get('testname'), "API")
def test_request_logic_transaction_resources(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"transaction": "API",
"do": [{
"url": "http://demo.blazemeter.com/",
"method": "POST",
"body-file": __dir__() + "/../resources/jmeter/jmx/dummy.jmx"}]}]}},
'provisioning': 'cloud'})
res_files = self.obj.resource_files()
self.assertEqual(len(res_files), 1)
def test_request_logic_include(self):
self.configure({
'scenarios': {
'login': {
'requests': ['http://example.com/login']}},
'execution': {
'scenario': {
"requests": [{
"include-scenario": "login"}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
controller = xml_tree.find(".//GenericController")
self.assertIsNotNone(controller)
self.assertEqual(controller.get('testname'), "login")
ht = controller.getnext()
sampler = ht.find('HTTPSamplerProxy')
self.assertIsNotNone(sampler)
domain = sampler.find('stringProp[@name="HTTPSampler.domain"]')
self.assertEqual(domain.text, "example.com")
path = sampler.find('stringProp[@name="HTTPSampler.path"]')
self.assertEqual(path.text, "/login")
def test_request_logic_include_resources(self):
self.configure({
'scenarios': {
'login': {
'data-sources': [__dir__() + "/../resources/test1.csv"],
'requests': [{
"url": "http://demo.blazemeter.com/",
"method": "POST",
"body-file": __dir__() + "/../resources/jmeter/jmx/dummy.jmx"}]}},
'execution': {
'scenario': {
'data-sources': [__dir__() + "/../resources/test2.csv"],
"requests": [{
"include-scenario": "login"}]}},
'provisioning': 'cloud'})
res_files = self.obj.resource_files()
self.assertEqual(len(res_files), 3)
def test_logic_include_data_sources(self):
self.configure({
'scenarios': {
'login': {
'data-sources': [__dir__() + "/../resources/test1.csv"],
'requests': ['http://blazedemo.com/auth/${test1}']}},
'execution': {
'scenario': {
"data-sources": [__dir__() + "/../resources/test2.csv"],
"requests": [
{"include-scenario": "login"},
"http://example.com/${test2}"]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
thread_group = xml_tree.find('.//hashTree[@type="tg"]')
scenario_dataset = xml_tree.find('.//hashTree[@type="tg"]/CSVDataSet')
self.assertIsNotNone(scenario_dataset)
filename = scenario_dataset.find('stringProp[@name="filename"]')
self.assertEqual(filename.text, get_full_path(__dir__() + "/../resources/test2.csv"))
        login_controller = thread_group.find('GenericController')
        self.assertIsNotNone(login_controller)
        login_ht = login_controller.getnext()
login_dataset = login_ht.find('CSVDataSet')
self.assertIsNotNone(login_dataset)
        filename = login_dataset.find('stringProp[@name="filename"]')
        self.assertEqual(filename.text, get_full_path(__dir__() + "/../resources/test1.csv"))
def test_include_recursion(self):
self.configure({
'scenarios': {
'a': {
'requests': [{
"include-scenario": "b"}]},
'b': {
'requests': [{
"include-scenario": "a"}]}},
'execution': {
'scenario': 'a'}})
self.assertRaises(TaurusConfigError, self.obj.prepare)
def test_include_sources_recursion(self):
self.configure({
'scenarios': {
'a': {
'requests': [{
"include-scenario": "b"}]},
'b': {
'requests': [{
"include-scenario": "a"}]}},
'execution': {
'scenario': 'a'}})
self.assertRaises(TaurusConfigError, self.obj.resource_files)
def test_logic_test_action(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"action": "pause",
"pause-duration": "1s",
}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
block = xml_tree.find(".//TestAction")
self.assertIsNotNone(block)
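        # TestAction codes: action 1 == pause, target 0 == current thread, duration given in milliseconds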
action = block.find('intProp[@name="ActionProcessor.action"]')
self.assertEqual(action.text, "1")
target = block.find('intProp[@name="ActionProcessor.target"]')
self.assertEqual(target.text, "0")
        duration = block.find('stringProp[@name="ActionProcessor.duration"]')
        self.assertEqual(duration.text, "1000")
def test_logic_test_action_target(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"action": "stop",
"target": "all-threads",
}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
block = xml_tree.find(".//TestAction")
self.assertIsNotNone(block)
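        # TestAction codes: action 0 == stop, target 2 == all threads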
action = block.find('intProp[@name="ActionProcessor.action"]')
self.assertEqual(action.text, "0")
target = block.find('intProp[@name="ActionProcessor.target"]')
self.assertEqual(target.text, "2")
def test_logic_test_action_unknown(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"action": "unknown",
}]}}})
self.assertRaises(TaurusConfigError, self.obj.prepare)
def test_request_logic_set_vars(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"set-variables": {"foo": "bar"}}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
self.assertIsNotNone(xml_tree.find(".//JSR223PreProcessor"))
        script_prop = xml_tree.find(".//JSR223PreProcessor/stringProp[@name='script']")
        self.assertEqual(script_prop.text, "vars.put('foo', 'bar');")
def test_request_null_headers(self):
self.configure({
'execution': {
'scenario': {
"headers": None,
"requests": [
"http://blazedemo.com/"]}}})
self.obj.prepare()
def test_multipart_file_upload(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"url": "http://blazedemo.com/",
"method": "POST",
"multipart-form": True,
"upload-files": [{
"path": "stats.csv",
"param": "stats",
"mime-type": "text/csv"
}, {
"path": "report.pdf",
"param": "report",
"mime-type": "application/pdf"}]}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
request = xml_tree.find('.//HTTPSamplerProxy')
self.assertIsNotNone(request)
self.assertEqual(request.find('boolProp[@name="HTTPSampler.DO_MULTIPART_POST"]').text, 'true')
self.assertEqual(request.find('boolProp[@name="HTTPSampler.BROWSER_COMPATIBLE_MULTIPART"]').text, 'true')
file_query = 'elementProp[@name="HTTPsampler.Files"]/collectionProp[@name="HTTPFileArgs.files"]/elementProp'
files = request.findall(file_query)
self.assertEqual(len(files), 2)
self.assertEqual(files[0].get('name'), "stats.csv")
self.assertEqual(files[0].find('stringProp[@name="File.path"]').text, "stats.csv")
self.assertEqual(files[0].find('stringProp[@name="File.paramname"]').text, "stats")
self.assertEqual(files[0].find('stringProp[@name="File.mimetype"]').text, "text/csv")
self.assertEqual(files[1].get('name'), "report.pdf")
self.assertEqual(files[1].find('stringProp[@name="File.path"]').text, "report.pdf")
self.assertEqual(files[1].find('stringProp[@name="File.paramname"]').text, "report")
self.assertEqual(files[1].find('stringProp[@name="File.mimetype"]').text, "application/pdf")
def test_upload_files_mime_autodetect(self):
self.configure({
'execution': {
'scenario': {
"requests": [{
"url": "http://blazedemo.com/",
"method": "POST",
"upload-files": [{
"path": "sound.mp3",
"param": "stats",
}, {
"path": "report.pdf",
"param": "report",
}, {
"path": "unknown.file",
"param": "stuff"}]}]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
request = xml_tree.find('.//HTTPSamplerProxy')
self.assertIsNotNone(request)
file_query = 'elementProp[@name="HTTPsampler.Files"]/collectionProp[@name="HTTPFileArgs.files"]/elementProp'
files = request.findall(file_query)
self.assertEqual(len(files), 3)
self.assertEqual(files[0].find('stringProp[@name="File.mimetype"]').text, "audio/mpeg")
self.assertEqual(files[1].find('stringProp[@name="File.mimetype"]').text, "application/pdf")
self.assertEqual(files[2].find('stringProp[@name="File.mimetype"]').text, "application/octet-stream")
def test_data_sources_jmx_gen_loop(self):
self.configure({
'execution': {
'scenario': {
"data-sources": [{
"path": __dir__() + "/../resources/test1.csv",
"loop": True}],
"requests": [
"http://example.com/${test1}"]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
dataset = xml_tree.find('.//hashTree[@type="tg"]/CSVDataSet')
self.assertIsNotNone(dataset)
filename = dataset.find('stringProp[@name="filename"]')
self.assertEqual(filename.text, get_full_path(__dir__() + "/../resources/test1.csv"))
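        # loop=True should translate into recycle=true and stopThread=false in the generated CSVDataSet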
loop = dataset.find('boolProp[@name="recycle"]')
self.assertEqual(loop.text, "true")
stop = dataset.find('boolProp[@name="stopThread"]')
self.assertEqual(stop.text, "false")
def test_data_sources_jmx_gen_stop(self):
self.configure({
'execution': {
'scenario': {
"data-sources": [{
"path": __dir__() + "/../resources/test1.csv",
"loop": False}],
"requests": [
"http://example.com/${test1}"]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
dataset = xml_tree.find('.//hashTree[@type="tg"]/CSVDataSet')
self.assertIsNotNone(dataset)
filename = dataset.find('stringProp[@name="filename"]')
self.assertEqual(filename.text, get_full_path(__dir__() + "/../resources/test1.csv"))
loop = dataset.find('boolProp[@name="recycle"]')
self.assertEqual(loop.text, "false")
stop = dataset.find('boolProp[@name="stopThread"]')
self.assertEqual(stop.text, "true")
def test_data_sources_varnames(self):
self.configure({
'execution': {
'scenario': {
"data-sources": [{
"path": __dir__() + "/../resources/test1.csv",
"variable-names": "a,b,c"}],
"requests": [
"http://example.com/${test1}"]}}})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
dataset = xml_tree.find('.//hashTree[@type="tg"]/CSVDataSet')
self.assertIsNotNone(dataset)
varnames = dataset.find('stringProp[@name="variableNames"]')
self.assertEqual(varnames.text, "a,b,c")
def test_functional_mode_flag(self):
self.obj.engine.aggregator.is_functional = True
self.obj.execution.merge({
'scenario': {
"requests": [
"http://example.com/",
],
}
})
self.obj.execution.merge(self.obj.engine.config)
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
functional_switch = xml_tree.find('.//TestPlan/boolProp[@name="TestPlan.functional_mode"]')
self.assertIsNotNone(functional_switch)
self.assertEqual(functional_switch.text, "true")
def test_functional_reader_pass(self):
engine_obj = EngineEmul()
obj = FuncJTLReader(__dir__() + "/../resources/jmeter/jtl/resource-errors-no-fail.jtl",
engine_obj,
logging.getLogger(''))
samples = list(obj.read(last_pass=True))
self.assertEqual(2, len(samples))
first = samples[0]
self.assertEqual(first.test_case, "HTTP Request")
self.assertEqual(first.test_suite, "JMeter")
self.assertEqual(first.status, "PASSED")
self.assertEqual(first.start_time, 1440764640)
self.assertEqual(first.duration, 0.419)
self.assertEqual(first.error_msg, "")
self.assertEqual(first.error_trace, "")
def test_functional_reader_failed(self):
engine_obj = EngineEmul()
obj = FuncJTLReader(__dir__() + "/../resources/jmeter/jtl/standard-errors.jtl",
engine_obj,
logging.getLogger(''))
samples = list(obj.read(last_pass=True))
self.assertEqual(185, len(samples))
first = samples[0]
self.assertEqual(first.test_case, "http://blazedemo.com/some-more-or-less-long-label")
self.assertEqual(first.test_suite, "JMeter")
self.assertEqual(first.status, "FAILED")
self.assertEqual(first.start_time, 1430825787)
self.assertEqual(first.duration, 0.011)
self.assertEqual(first.error_msg, "The operation lasted too long")
def test_functional_reader_broken(self):
engine_obj = EngineEmul()
obj = FuncJTLReader(__dir__() + "/../resources/jmeter/jtl/standard-errors.jtl",
engine_obj,
logging.getLogger(''))
samples = list(obj.read(last_pass=True))
self.assertEqual(185, len(samples))
sample = samples[8]
self.assertEqual(sample.test_case, "http://blazedemo.com/some-more-or-less-long-label")
self.assertEqual(sample.test_suite, "JMeter")
self.assertEqual(sample.status, "BROKEN")
self.assertEqual(sample.start_time, 1430825788)
self.assertEqual(sample.duration, 0.01)
self.assertEqual(sample.error_msg, "Non HTTP response message: Read timed out")
self.assertTrue(sample.error_trace.startswith("java.net.SocketTimeoutException: Read timed out"))
def test_functional_reader_extras(self):
engine_obj = EngineEmul()
obj = FuncJTLReader(__dir__() + "/../resources/jmeter/jtl/trace.jtl",
engine_obj,
logging.getLogger(''))
samples = list(obj.read(last_pass=True))
self.assertEqual(1, len(samples))
sample = samples[0]
self.assertIsNotNone(sample.extras)
fields = [
'assertions', 'connectTime', 'latency', 'responseTime',
'requestBody', 'requestBodySize', 'requestCookies', 'requestCookiesRaw', 'requestCookiesSize',
'requestHeaders', 'requestMethod', 'requestSize', 'requestURI',
'responseBody', 'responseBodySize', 'responseCode', 'responseHeaders',
'responseMessage', 'responseSize',
"threadId", "threadGroup",
]
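        # check every expected field except the ones FuncJTLReader extracts into separate artifact files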
for field in set(fields) - set(FuncJTLReader.FILE_EXTRACTED_FIELDS):
self.assertIn(field, sample.extras)
self.assertEqual(sample.extras["requestURI"], "http://blazedemo.com/")
self.assertEqual(sample.extras["requestMethod"], "GET")
def test_functional_reader_artifact_files(self):
engine_obj = EngineEmul()
obj = FuncJTLReader(__dir__() + "/../resources/jmeter/jtl/trace.jtl",
engine_obj,
logging.getLogger(''))
samples = list(obj.read(last_pass=True))
self.assertEqual(1, len(samples))
self.assertTrue(os.path.exists(os.path.join(engine_obj.artifacts_dir, "sample-responseBody.bin")))
def test_functional_reader_extras_assertions(self):
engine_obj = EngineEmul()
obj = FuncJTLReader(__dir__() + "/../resources/jmeter/jtl/trace.jtl",
engine_obj,
logging.getLogger(''))
samples = list(obj.read(last_pass=True))
self.assertEqual(1, len(samples))
sample = samples[0]
self.assertIsNotNone(sample.extras)
self.assertEqual(len(sample.extras["assertions"]), 2)
first, second = sample.extras["assertions"]
self.assertEqual(first, {"name": 'Passing Assertion',
"isFailed": False,
"errorMessage": ""})
self.assertEqual(second, {"name": 'Failing Assertion',
"isFailed": True,
"errorMessage": "Test failed: text expected to contain /something/"})
def test_functional_reader_extras_empty_body(self):
engine_obj = EngineEmul()
obj = FuncJTLReader(__dir__() + "/../resources/jmeter/jtl/cookies.jtl",
engine_obj,
logging.getLogger(''))
samples = list(obj.read(last_pass=True))
self.assertEqual(2, len(samples))
sample = samples[1]
self.assertIsNotNone(sample.extras)
self.assertEqual(sample.extras["requestCookies"], {'hello': 'world', 'visited': 'yes'})
def test_jsr223_block(self):
script = __dir__() + "/../resources/jmeter/jsr223_script.js"
self.configure({
"execution": {
"scenario": {
"requests": [{
"url": "http://blazedemo.com/",
"jsr223": {
"language": "javascript",
"script-file": script,
"parameters": "first second"
}
}]
}
}
})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
post_procs = xml_tree.findall(".//JSR223PostProcessor[@testclass='JSR223PostProcessor']")
self.assertEqual(1, len(post_procs))
jsr = post_procs[0]
self.assertEqual(script, jsr.find(".//stringProp[@name='filename']").text)
self.assertEqual("javascript", jsr.find(".//stringProp[@name='scriptLanguage']").text)
self.assertEqual("first second", jsr.find(".//stringProp[@name='parameters']").text)
def test_jsr223_exceptions_2(self):
self.configure({
"execution": {
"scenario": {
"requests": [{
"url": "http://blazedemo.com/",
"jsr223": {
"language": "javascript"
}
}]
}
}
})
self.assertRaises(TaurusConfigError, self.obj.prepare)
def test_jsr223_multiple(self):
pre_script = __dir__() + "/../resources/jmeter/jsr223_script.js"
post_script = __dir__() + "/../resources/jmeter/bean_script.bhs"
self.configure({
"execution": {
"scenario": {
"requests": [{
"url": "http://blazedemo.com/",
"jsr223": [{
"language": "javascript",
"script-file": pre_script,
"execute": "before",
}, {
"language": "beanshell",
"script-file": post_script,
"execute": "after",
},
'vars.put("a", 1)']
}]
}
}
})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
pre_procs = xml_tree.findall(".//JSR223PreProcessor[@testclass='JSR223PreProcessor']")
post_procs = xml_tree.findall(".//JSR223PostProcessor[@testclass='JSR223PostProcessor']")
self.assertEqual(1, len(pre_procs))
self.assertEqual(2, len(post_procs))
pre = pre_procs[0]
self.assertEqual(pre_script, pre.find(".//stringProp[@name='filename']").text)
self.assertEqual("javascript", pre.find(".//stringProp[@name='scriptLanguage']").text)
self.assertEqual(None, pre.find(".//stringProp[@name='parameters']").text)
pre = post_procs[0]
self.assertEqual(post_script, pre.find(".//stringProp[@name='filename']").text)
self.assertEqual("beanshell", pre.find(".//stringProp[@name='scriptLanguage']").text)
self.assertEqual(None, pre.find(".//stringProp[@name='parameters']").text)
pre = post_procs[1]
self.assertEqual(None, pre.find(".//stringProp[@name='filename']").text)
self.assertEqual("groovy", pre.find(".//stringProp[@name='scriptLanguage']").text)
self.assertEqual(None, pre.find(".//stringProp[@name='parameters']").text)
self.assertEqual('vars.put("a", 1)', pre.find(".//stringProp[@name='script']").text)
def test_request_content_encoding(self):
self.configure({
"execution": {
"scenario": {
'content-encoding': 'cp1251',
"requests": [{
"url": "http://blazedemo.com/",
"body": "S'il vous plaît",
"content-encoding": "utf-8",
}]
}
}
})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.modified_jmx, "rb").read())
defaults_encoding_prop = xml_tree.find(".//ConfigTestElement/stringProp[@name='HTTPSampler.contentEncoding']")
self.assertIsNotNone(defaults_encoding_prop)
self.assertEqual(defaults_encoding_prop.text, 'cp1251')
sampler_encoding_prop = xml_tree.find(".//HTTPSamplerProxy/stringProp[@name='HTTPSampler.contentEncoding']")
self.assertIsNotNone(sampler_encoding_prop)
self.assertEqual(sampler_encoding_prop.text, 'utf-8')
def test_redirect_empty(self):
self.configure({
"execution": {
"scenario": {
"requests": [{
"url": "http://example.com/",
}]
}
}
})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
follow_redirects = xml_tree.find(".//HTTPSamplerProxy/boolProp[@name='HTTPSampler.follow_redirects']")
self.assertIsNotNone(follow_redirects)
self.assertEqual(follow_redirects.text, 'true')
auto_redirects = xml_tree.find(".//HTTPSamplerProxy/boolProp[@name='HTTPSampler.auto_redirects']")
self.assertIsNotNone(auto_redirects)
self.assertEqual(auto_redirects.text, 'false')
def test_redirect_follow(self):
self.configure({
"execution": {
"scenario": {
"requests": [{
"url": "http://example.com/",
"follow-redirects": True,
}]
}
}
})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
follow_redirects = xml_tree.find(".//HTTPSamplerProxy/boolProp[@name='HTTPSampler.follow_redirects']")
self.assertIsNotNone(follow_redirects)
self.assertEqual(follow_redirects.text, 'true')
auto_redirects = xml_tree.find(".//HTTPSamplerProxy/boolProp[@name='HTTPSampler.auto_redirects']")
self.assertIsNotNone(auto_redirects)
self.assertEqual(auto_redirects.text, 'false')
def test_disable_redirect(self):
self.configure({
"execution": {
"scenario": {
"requests": [{
"url": "http://example.com/",
"follow-redirects": False,
}]
}
}
})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
follow_redirects = xml_tree.find(".//HTTPSamplerProxy/boolProp[@name='HTTPSampler.follow_redirects']")
self.assertIsNotNone(follow_redirects)
self.assertEqual(follow_redirects.text, 'false')
auto_redirects = xml_tree.find(".//HTTPSamplerProxy/boolProp[@name='HTTPSampler.auto_redirects']")
self.assertIsNotNone(auto_redirects)
self.assertEqual(auto_redirects.text, 'false')
def test_redirect_scenario_level(self):
self.configure({
"execution": {
"scenario": {
"follow-redirects": False,
"requests": [{
"url": "http://example.com/",
}]
}
}
})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
follow_redirects = xml_tree.find(".//HTTPSamplerProxy/boolProp[@name='HTTPSampler.follow_redirects']")
self.assertIsNotNone(follow_redirects)
self.assertEqual(follow_redirects.text, 'false')
auto_redirects = xml_tree.find(".//HTTPSamplerProxy/boolProp[@name='HTTPSampler.auto_redirects']")
self.assertIsNotNone(auto_redirects)
self.assertEqual(auto_redirects.text, 'false')
def test_reader_unicode(self):
reader = JTLReader(__dir__() + "/../resources/jmeter/jtl/unicode.jtl", logging.getLogger(''), None)
reader.ignored_labels = [u("Тест.Эхо")]
for point in reader.datapoints():
cumulative = point[DataPoint.CUMULATIVE]
self.assertNotIn("Тест.Эхо", cumulative)
def test_soapui_script(self):
self.configure({
"execution": {
"scenario": {
"script": __dir__() + "/../resources/soapui/project.xml",
"test-case": "index",
}
}
})
self.obj.prepare()
self.assertIn("TestSuite 1-index", self.obj.engine.config["scenarios"])
def test_soapui_renaming(self):
self.configure({
"execution": {
"scenario": {
"script": __dir__() + "/../resources/soapui/project.xml",
"test-case": "index",
},
},
"scenarios": {
"TestSuite 1-index": {
"hello": "world",
},
"TestSuite 1-index-1": {
"hello": "world",
},
},
})
self.obj.prepare()
self.assertIn("TestSuite 1-index", self.obj.engine.config["scenarios"])
self.assertIn("TestSuite 1-index-1", self.obj.engine.config["scenarios"])
self.assertIn("TestSuite 1-index-2", self.obj.engine.config["scenarios"])
def test_include_scenario_mutual_recursion(self):
self.configure({
"execution": {
"scenario": "scen",
},
"scenarios": {
"scen": {
"requests": [{"include-scenario": "subroutine"},
{"include-scenario": "subroutine"}]
},
"subroutine": {"requests": ["http://blazedemo.com"]},
},
})
self.obj.prepare()
def test_include_scenario_mutual_recursion_resources(self):
self.configure({
"execution": {
"scenario": "scen",
},
"scenarios": {
"scen": {
"requests": [{"include-scenario": "subroutine"},
{"include-scenario": "subroutine"}]
},
"subroutine": {"requests": ["http://blazedemo.com"]},
},
})
self.obj.resource_files()
def test_resource_files_relpath(self):
self.configure({
"execution": {
"scenario": {
"script": __dir__() + "/../resources/jmeter/jmx/nested/directory/csv.jmx"
}
}
})
resources = self.obj.get_resource_files()
self.assertNotIn("a.csv", resources)
self.assertTrue(any(res.endswith(os.path.join("nested", "directory", "a.csv")) for res in resources))
def test_stdout_stderr_capture(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
self.obj.log.debug("Check...")
time.sleep(1)
self.obj.shutdown()
self.obj.post_process()
except:
pass
self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "jmeter.out")))
self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "jmeter.err")))
def test_func_aggregator_chosen(self):
self.configure(json.loads(open(__dir__() + "/../resources/json/get-post.json").read()))
self.obj.engine.aggregator = FunctionalAggregator()
self.obj.prepare()
self.assertEquals('get-post', self.obj.reader.executor_label)
def test_source_ips(self):
self.configure({
"execution": {
"scenario": {
"random-source-ip": True,
"requests": [{
"url": "http://example.com/",
}]
}
}
})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
ip_source = xml_tree.find(".//HTTPSamplerProxy/stringProp[@name='HTTPSampler.ipSource']")
self.assertIsNotNone(ip_source)
self.assertIsNotNone(ip_source.text)
def test_source_ips_request_level(self):
self.configure({
"execution": {
"scenario": {
"requests": [{
"url": "http://example.com/",
"random-source-ip": True,
}]
}
}
})
self.obj.prepare()
xml_tree = etree.fromstring(open(self.obj.original_jmx, "rb").read())
ip_source = xml_tree.find(".//HTTPSamplerProxy/stringProp[@name='HTTPSampler.ipSource']")
self.assertIsNotNone(ip_source)
self.assertIsNotNone(ip_source.text)
class TestJMX(BZTestCase):
def test_jmx_unicode_checkmark(self):
obj = JMX()
res = obj._get_http_request("url", "label", "method", 0, {"param": u"✓"}, True)
prop = res.find(".//stringProp[@name='Argument.value']")
self.assertNotEqual("BINARY", prop.text)
self.assertEqual(u"✓", prop.text)
def test_variable_hostname(self):
obj = JMX()
res = obj._get_http_request("http://${hostName}:${Port}/${Path}", "label", "method", 0, {}, True)
self.assertEqual("/${Path}", res.find(".//stringProp[@name='HTTPSampler.path']").text)
self.assertEqual("${hostName}", res.find(".//stringProp[@name='HTTPSampler.domain']").text)
self.assertEqual("${Port}", res.find(".//stringProp[@name='HTTPSampler.port']").text)
def test_no_port(self):
obj = JMX()
res = obj._get_http_request("http://hostname", "label", "method", 0, {}, True)
self.assertEqual("", res.find(".//stringProp[@name='HTTPSampler.path']").text)
self.assertEqual("hostname", res.find(".//stringProp[@name='HTTPSampler.domain']").text)
self.assertEqual("", res.find(".//stringProp[@name='HTTPSampler.port']").text)
def test_regexp_subject(self):
res = JMX._get_extractor('test_name', 'baddy', 'regexp', 1, 1, 'error')
self.assertEqual("body", res.find(".//stringProp[@name='RegexExtractor.useHeaders']").text)
res = JMX._get_extractor('test_name', 'headers', 'regexp', 1, 1, 'error')
self.assertEqual("true", res.find(".//stringProp[@name='RegexExtractor.useHeaders']").text)
res = JMX._get_extractor('test_name', 'http-code', 'regexp', 1, 1, 'error')
self.assertEqual("code", res.find(".//stringProp[@name='RegexExtractor.useHeaders']").text)
self.assertEqual("parent", res.find(".//stringProp[@name='Sample.scope']").text)
def test_int_udv(self):
res = JMX()
data = {"varname2": "1", "varname": 1, 2: 3}
res.add_user_def_vars_elements(data)
def test_source_ips_single(self):
obj = JMX()
res = obj._get_http_request("/", "label", "method", 0, {}, True,
use_random_host_ip=True, host_ips=["192.168.1.1"])
self.assertEqual("192.168.1.1", res.find(".//stringProp[@name='HTTPSampler.ipSource']").text)
def test_source_ips_multiple(self):
obj = JMX()
res = obj._get_http_request("/", "label", "method", 0, {}, True,
use_random_host_ip=True, host_ips=["192.168.1.1", "192.168.1.2"])
self.assertEqual("${__chooseRandom(192.168.1.1,192.168.1.2,randomAddr)}",
res.find(".//stringProp[@name='HTTPSampler.ipSource']").text)
| 1 | 14,442 | Is this change intentional? | Blazemeter-taurus | py |
@@ -55,10 +55,7 @@ namespace Microsoft.CodeAnalysis.Sarif
if (nativeOffset != -1)
{
- stackFrame.Properties = new Dictionary<string, string>
- {
- { "NativeOffset", nativeOffset.ToString(CultureInfo.InvariantCulture) }
- };
+ stackFrame.SetProperty("NativeOffset", nativeOffset.ToString(CultureInfo.InvariantCulture));
}
            return stackFrame;
| 1 | // Copyright (c) Microsoft. All Rights Reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Globalization;
using System.Reflection;
using System.Text;
namespace Microsoft.CodeAnalysis.Sarif
{
/// <summary>
/// A stack frame of a SARIF stack.
/// </summary>
public partial class StackFrame
{
internal const string IN = " in ";
internal const string AT = " at ";
internal const string LINE = ":line";
/// <summary>
/// Creates a SARIF StackFrame instance from a .NET StackFrame instance
/// </summary>
/// <param name="stackTrace"></param>
/// <returns></returns>
public static StackFrame Create(System.Diagnostics.StackFrame dotNetStackFrame)
{
// This value is -1 if not present
int ilOffset = dotNetStackFrame.GetILOffset();
string fileName = dotNetStackFrame.GetFileName();
int nativeOffset = dotNetStackFrame.GetNativeOffset();
MethodBase methodBase = dotNetStackFrame.GetMethod();
Assembly assembly = methodBase?.DeclaringType.Assembly;
string fullyQualifiedName = CreateFullyQualifiedName(methodBase);
StackFrame stackFrame = new StackFrame
{
Module = assembly?.GetName().Name,
FullyQualifiedLogicalName = fullyQualifiedName
};
if (fileName != null)
{
stackFrame.Uri = new Uri(fileName);
stackFrame.Line = dotNetStackFrame.GetFileLineNumber();
stackFrame.Column = dotNetStackFrame.GetFileColumnNumber();
}
if (ilOffset != -1)
{
stackFrame.Offset = ilOffset;
}
if (nativeOffset != -1)
{
stackFrame.Properties = new Dictionary<string, string>
{
{ "NativeOffset", nativeOffset.ToString(CultureInfo.InvariantCulture) }
};
}
return stackFrame;
}
public override string ToString()
{
string result = AT + this.FullyQualifiedLogicalName;
if (this.Uri != null)
{
string lineNumber = this.Line.ToString(CultureInfo.InvariantCulture);
string fileName = this.Uri.LocalPath;
result += IN + fileName + LINE + " " + lineNumber;
}
return result;
}
private static string CreateFullyQualifiedName(MethodBase methodBase)
{
if (methodBase == null) { return null; }
var sb = new StringBuilder();
Type type = methodBase.DeclaringType;
// if there is a type (non global method) print it
if (type != null)
{
sb.Append(type.FullName.Replace('+', '.'));
sb.Append(".");
}
sb.Append(methodBase.Name);
// deal with the generic portion of the method
if (methodBase is MethodInfo && ((MethodInfo)methodBase).IsGenericMethod)
{
Type[] typeArguments = ((MethodInfo)methodBase).GetGenericArguments();
sb.Append("[");
int k = 0;
bool firstTypeParameter = true;
while (k < typeArguments.Length)
{
if (firstTypeParameter == false)
{
sb.Append(",");
}
else
{
firstTypeParameter = false;
}
sb.Append(typeArguments[k].Name);
k++;
}
sb.Append("]");
}
// arguments printing
sb.Append("(");
ParameterInfo[] parameterInfos = methodBase.GetParameters();
bool firstParameterInfo = true;
for (int j = 0; j < parameterInfos.Length; j++)
{
if (firstParameterInfo == false)
{
sb.Append(", ");
}
else
{
firstParameterInfo = false;
}
String typeName = "<UnknownType>";
if (parameterInfos[j].ParameterType != null)
{
typeName = parameterInfos[j].ParameterType.Name;
}
sb.Append(typeName + " " + parameterInfos[j].Name);
}
sb.Append(")");
return sb.ToString();
}
}
}
| 1 | 10,754 | This could be an integer property if we supported explicitly setting non-string-valued properties. | microsoft-sarif-sdk | .cs |
@@ -14,8 +14,7 @@ use Symfony\Component\Security\Csrf\CsrfTokenManagerInterface;
class RouteCsrfProtector implements EventSubscriberInterface
{
public const CSRF_TOKEN_REQUEST_PARAMETER = 'routeCsrfToken';
- /** @access protected */
- const CSRF_TOKEN_ID_PREFIX = 'route_';
+ public const CSRF_TOKEN_ID_PREFIX = 'route_';
/**
     * @var \Doctrine\Common\Annotations\Reader
| 1 | <?php
namespace Shopsys\FrameworkBundle\Component\Router\Security;
use Doctrine\Common\Annotations\Reader;
use ReflectionMethod;
use Shopsys\FrameworkBundle\Component\Router\Security\Annotation\CsrfProtection;
use Symfony\Component\EventDispatcher\EventSubscriberInterface;
use Symfony\Component\HttpKernel\Event\FilterControllerEvent;
use Symfony\Component\HttpKernel\KernelEvents;
use Symfony\Component\Security\Csrf\CsrfToken;
use Symfony\Component\Security\Csrf\CsrfTokenManagerInterface;
class RouteCsrfProtector implements EventSubscriberInterface
{
public const CSRF_TOKEN_REQUEST_PARAMETER = 'routeCsrfToken';
/** @access protected */
const CSRF_TOKEN_ID_PREFIX = 'route_';
/**
* @var \Doctrine\Common\Annotations\Reader
*/
protected $annotationReader;
/**
* @var \Symfony\Component\Security\Csrf\CsrfTokenManagerInterface
*/
protected $tokenManager;
/**
* @param \Doctrine\Common\Annotations\Reader $annotationReader
* @param \Symfony\Component\Security\Csrf\CsrfTokenManagerInterface $tokenManager
*/
public function __construct(Reader $annotationReader, CsrfTokenManagerInterface $tokenManager)
{
$this->annotationReader = $annotationReader;
$this->tokenManager = $tokenManager;
}
/**
* @return string[]
*/
public static function getSubscribedEvents()
{
return [
KernelEvents::CONTROLLER => 'onKernelController',
];
}
/**
* @param \Symfony\Component\HttpKernel\Event\FilterControllerEvent $event
*/
public function onKernelController(FilterControllerEvent $event)
{
if ($this->isProtected($event)) {
$request = $event->getRequest();
$csrfToken = $request->get(self::CSRF_TOKEN_REQUEST_PARAMETER);
$routeName = $request->get('_route');
if ($csrfToken === null || !$this->isCsrfTokenValid($routeName, $csrfToken)) {
throw new \Symfony\Component\HttpKernel\Exception\BadRequestHttpException('Csrf token is invalid');
}
}
}
/**
* @param string $routeName
* @return string
*/
public function getCsrfTokenId($routeName)
{
return static::CSRF_TOKEN_ID_PREFIX . $routeName;
}
/**
* @param string $routeName
* @return string
*/
public function getCsrfTokenByRoute($routeName)
{
return $this->tokenManager->getToken($this->getCsrfTokenId($routeName))->getValue();
}
/**
* @param string $routeName
* @param string $csrfToken
* @return bool
*/
protected function isCsrfTokenValid($routeName, $csrfToken)
{
$token = new CsrfToken($this->getCsrfTokenId($routeName), $csrfToken);
return $this->tokenManager->isTokenValid($token);
}
/**
* @param \Symfony\Component\HttpKernel\Event\FilterControllerEvent $event
* @return bool
*/
protected function isProtected(FilterControllerEvent $event)
{
if (!$event->isMasterRequest()) {
return false;
}
list($controller, $action) = $event->getController();
$method = new ReflectionMethod($controller, $action);
$annotation = $this->annotationReader->getMethodAnnotation($method, CsrfProtection::class);
return $annotation !== null;
}
}
| 1 | 17,612 | this is not consistent with the previous annotation - is it because of the usage in `Admin\Content\Category\list.html.twig` twig template? Should not it be at least mentioned in the commit message? :slightly_smiling_face: | shopsys-shopsys | php |
@@ -149,6 +149,7 @@ func ReadSecretFromStdin() (string, error) {
case <-routineTerminate:
return
default:
+ return
}
}
	}()
| 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package util
import (
"bytes"
"crypto/tls"
"fmt"
"io/ioutil"
"math/big"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"google.golang.org/grpc/metadata"
"github.com/ethereum/go-ethereum/common"
"github.com/spf13/cobra"
"go.uber.org/zap"
"golang.org/x/crypto/ssh/terminal"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/ioctl/config"
"github.com/iotexproject/iotex-core/ioctl/output"
"github.com/iotexproject/iotex-core/ioctl/validator"
"github.com/iotexproject/iotex-core/pkg/log"
)
const (
// IotxDecimalNum defines the number of decimal digits for IoTeX
IotxDecimalNum = 18
// GasPriceDecimalNum defines the number of decimal digits for gas price
GasPriceDecimalNum = 12
)
// ExecuteCmd executes cmd with args, and return system output, e.g., help info, and error
func ExecuteCmd(cmd *cobra.Command, args ...string) (string, error) {
buf := new(bytes.Buffer)
cmd.SetOutput(buf)
cmd.SetArgs(args)
_, err := cmd.ExecuteC()
return buf.String(), err
}
// ConnectToEndpoint starts a new connection
func ConnectToEndpoint(secure bool) (*grpc.ClientConn, error) {
endpoint := config.ReadConfig.Endpoint
if endpoint == "" {
return nil, output.NewError(output.ConfigError, `use "ioctl config set endpoint" to config endpoint first`, nil)
}
if !secure {
return grpc.Dial(endpoint, grpc.WithInsecure())
}
return grpc.Dial(endpoint, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})))
}
// StringToRau converts different unit string into Rau big int
func StringToRau(amount string, numDecimals int) (*big.Int, error) {
amountStrings := strings.Split(amount, ".")
if len(amountStrings) != 1 {
if len(amountStrings) > 2 || len(amountStrings[1]) > numDecimals {
return nil, output.NewError(output.ConvertError, "failed to convert string into big int", nil)
}
amountStrings[0] += amountStrings[1]
numDecimals -= len(amountStrings[1])
}
if len(amountStrings[0]) == 0 {
return nil, output.NewError(output.ConvertError, "failed to convert string into big int", nil)
}
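	// pad with zeros so the remaining decimal places are shifted into the integer (Rau) representation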
zeroString := strings.Repeat("0", numDecimals)
amountStrings[0] += zeroString
amountRau, ok := big.NewInt(0).SetString(amountStrings[0], 10)
if !ok {
return nil, output.NewError(output.ConvertError, "failed to convert string into big int", nil)
}
if amountRau.Sign() < 0 {
return nil, output.NewError(output.ConvertError, "invalid number that is minus", nil)
}
return amountRau, nil
}
// RauToString converts Rau big int into Iotx string
func RauToString(amount *big.Int, numDecimals int) string {
if numDecimals == 0 {
return amount.String()
}
targetUnit := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(numDecimals)), nil)
amountInt, amountDec := big.NewInt(0), big.NewInt(0)
amountInt.DivMod(amount, targetUnit, amountDec)
if amountDec.Sign() != 0 {
decString := strings.TrimRight(amountDec.String(), "0")
zeroString := strings.Repeat("0", numDecimals-len(amountDec.String()))
decString = zeroString + decString
return amountInt.String() + "." + decString
}
return amountInt.String()
}
// IoAddrToEvmAddr converts IoTeX address into evm address
func IoAddrToEvmAddr(ioAddr string) (common.Address, error) {
if err := validator.ValidateAddress(ioAddr); err != nil {
return common.Address{}, output.NewError(output.ValidationError, "", err)
}
address, err := address.FromString(ioAddr)
if err != nil {
return common.Address{}, output.NewError(output.ConvertError, "", err)
}
return common.BytesToAddress(address.Bytes()), nil
}
// StringToIOTX converts Rau string to Iotx string
func StringToIOTX(amount string) (string, error) {
amountInt, err := StringToRau(amount, 0)
if err != nil {
return "", output.NewError(output.ConvertError, "", err)
}
return RauToString(amountInt, IotxDecimalNum), nil
}
// ReadSecretFromStdin used to safely get password input
func ReadSecretFromStdin() (string, error) {
signalListener := make(chan os.Signal, 1)
signal.Notify(signalListener, os.Interrupt)
routineTerminate := make(chan struct{})
sta, err := terminal.GetState(int(syscall.Stdin))
if err != nil {
return "", output.NewError(output.RuntimeError, "", err)
}
go func() {
for {
select {
case <-signalListener:
err = terminal.Restore(int(syscall.Stdin), sta)
if err != nil {
log.L().Error("failed restore terminal", zap.Error(err))
return
}
os.Exit(130)
case <-routineTerminate:
return
default:
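				// the empty default keeps this loop spinning until a signal arrives or routineTerminate is closed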
}
}
}()
bytePass, err := terminal.ReadPassword(int(syscall.Stdin))
close(routineTerminate)
if err != nil {
return "", output.NewError(output.RuntimeError, "failed to read password", nil)
}
return string(bytePass), nil
}
// GetAddress get address from address or alias or context
func GetAddress(in string) (string, error) {
addr, err := config.GetAddressOrAlias(in)
if err != nil {
return "", output.NewError(output.AddressError, "", err)
}
return Address(addr)
}
// Address returns the address corresponding to alias. if 'in' is an IoTeX address, returns 'in'
func Address(in string) (string, error) {
if len(in) >= validator.IoAddrLen {
if err := validator.ValidateAddress(in); err != nil {
return "", output.NewError(output.ValidationError, "", err)
}
return in, nil
}
addr, ok := config.ReadConfig.Aliases[in]
if ok {
return addr, nil
}
return "", output.NewError(output.ConfigError, "cannot find address from "+in, nil)
}
// JwtAuth reads the locally stored JWT and returns the authorization metadata that is sent with every grpc request
func JwtAuth() (jwt metadata.MD, err error) {
jwtFile := os.Getenv("HOME") + "/.config/ioctl/default/auth.jwt"
jwtString, err := ioutil.ReadFile(jwtFile)
if err != nil {
return nil, err
}
return metadata.Pairs("authorization", "bearer "+string(jwtString)), nil
}
// CheckArgs validates the number of arguments passed to an ioctl command
func CheckArgs(validNum ...int) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
for _, n := range validNum {
if len(args) == n {
return nil
}
}
nums := strings.Replace(strings.Trim(fmt.Sprint(validNum), "[]"), " ", " or ", -1)
return fmt.Errorf("accepts "+nums+" arg(s), received %d", len(args))
}
}
// TrimHexPrefix removes 0x prefix from a string if it has
func TrimHexPrefix(s string) string {
return strings.TrimPrefix(s, "0x")
}
// ParseHdwPath parse hdwallet path
func ParseHdwPath(addressOrAlias string) (uint32, uint32, uint32, error) {
// parse derive path
// for hdw::1/1/2, return 1, 1, 2
// for hdw::1/2, treat as default account = 0, return 0, 1, 2
args := strings.Split(addressOrAlias[5:], "/")
if len(args) < 2 || len(args) > 3 {
return 0, 0, 0, output.NewError(output.ValidationError, "derivation path error", nil)
}
arg := make([]uint32, 3)
j := 0
for i := 3 - len(args); i < 3; i++ {
u64, err := strconv.ParseUint(args[j], 10, 32)
if err != nil {
return 0, 0, 0, output.NewError(output.InputError, fmt.Sprintf("%v must be integer value", args[j]), err)
}
arg[i] = uint32(u64)
j++
}
return arg[0], arg[1], arg[2], nil
}
// AliasIsHdwalletKey check whether to use hdwallet key
func AliasIsHdwalletKey(addressOrAlias string) bool {
if strings.HasPrefix(strings.ToLower(addressOrAlias), "hdw::") {
return true
}
return false
}
| 1 | 23,330 | why add `return` here? | iotexproject-iotex-core | go |
@@ -82,7 +82,9 @@ namespace Nethermind.Network.Test.Discovery
Assert.AreEqual(message.DestinationAddress, deserializedMessage.DestinationAddress);
Assert.AreEqual(message.SourceAddress, deserializedMessage.SourceAddress);
Assert.AreEqual(message.Version, deserializedMessage.Version);
- Assert.IsNotNull(deserializedMessage.Mdc);
+
+ // check MDC correctness
+ Assert.IsNotNull(Bytes.FromHexString("0xf8c61953f3b94a91aefe611e61dd74fe26aa5c969d9f29b7e063e6169171a772"));
}
        [Test]
| 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System.Net;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
using Nethermind.Core.Test.Builders;
using Nethermind.Crypto;
using Nethermind.Network.Config;
using Nethermind.Network.Discovery.Messages;
using Nethermind.Network.Discovery.Serializers;
using Nethermind.Network.Test.Builders;
using Nethermind.Stats;
using Nethermind.Stats.Model;
using NUnit.Framework;
namespace Nethermind.Network.Test.Discovery
{
[Parallelizable(ParallelScope.Self)]
[TestFixture]
public class DiscoveryMessageSerializerTests
{
private readonly PrivateKey _privateKey =
new PrivateKey("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee");
//private readonly PrivateKey _farPrivateKey = new PrivateKey("3a1076bf45ab87712ad64ccb3b10217737f7faacbf2872e88fdd9a537d8fe266");
private IPEndPoint _farAddress;
private IPEndPoint _nearAddress;
private IDiscoveryConfig _config;
private INetworkConfig _networkConfig;
private IMessageSerializationService _messageSerializationService;
private ITimestamper _timestamper;
[SetUp]
public void Initialize()
{
_config = new DiscoveryConfig();
_networkConfig = new NetworkConfig();
_networkConfig.ExternalIp = "99.10.10.66";
_networkConfig.LocalIp = "10.0.0.5";
_farAddress = new IPEndPoint(IPAddress.Parse("192.168.1.2"), 1);
_nearAddress = new IPEndPoint(IPAddress.Parse(_networkConfig.LocalIp), _networkConfig.DiscoveryPort);
_messageSerializationService = Build.A.SerializationService().WithDiscovery(_privateKey).TestObject;
_timestamper = Timestamper.Default;
}
[Test]
public void PingMessageTest()
{
var message = new PingMessage
{
FarAddress = _farAddress,
DestinationAddress = _nearAddress,
SourceAddress = _farAddress,
FarPublicKey = _privateKey.PublicKey,
ExpirationTime = 60 + (long) _timestamper.EpochMilliseconds
};
var data = _messageSerializationService.Serialize(message);
var deserializedMessage = _messageSerializationService.Deserialize<PingMessage>(data);
Assert.AreEqual(message.MessageType, deserializedMessage.MessageType);
Assert.AreEqual(message.FarPublicKey, deserializedMessage.FarPublicKey);
Assert.AreEqual(message.ExpirationTime, deserializedMessage.ExpirationTime);
Assert.AreEqual(message.FarAddress, deserializedMessage.SourceAddress);
Assert.AreEqual(message.DestinationAddress, deserializedMessage.DestinationAddress);
Assert.AreEqual(message.SourceAddress, deserializedMessage.SourceAddress);
Assert.AreEqual(message.Version, deserializedMessage.Version);
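            // the Mdc comes from the serialized packet itself, so the test only asserts that it was populated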
Assert.IsNotNull(deserializedMessage.Mdc);
}
[Test]
public void PongMessageTest()
{
var message = new PongMessage
{
FarAddress = _farAddress,
PingMdc = new byte[] {1, 2, 3},
FarPublicKey = _privateKey.PublicKey,
ExpirationTime = 60 + (long) _timestamper.EpochMilliseconds
};
var data = _messageSerializationService.Serialize(message);
var deserializedMessage = _messageSerializationService.Deserialize<PongMessage>(data);
Assert.AreEqual(message.MessageType, deserializedMessage.MessageType);
Assert.AreEqual(message.FarPublicKey, deserializedMessage.FarPublicKey);
Assert.AreEqual(message.ExpirationTime, deserializedMessage.ExpirationTime);
Assert.AreEqual(message.PingMdc, deserializedMessage.PingMdc);
}
[Test]
public void Ping_with_node_id_address()
{
string message =
"24917ba09abd910901145714c396ade5679735cf9f7796f7576439a13e6e5fc4466988ce6936ac208c4e513d1d0caa0e93160bd5ebdb10ec09df80c95e8d6c0c32d0f154d5bed121c028596f02cf974d50454e3b0ff2d0973deeb742e14e087a0004f9058df90585f8578e3138352e3136392e3233312e343982c35082c350b84036ae45a29ae5d99c0cdb78794fa439f180b13f595a2acd82bf7c0541c0238ea33f5fec5c16bfd7b851449ae0c1e8cbf1502342425fdb65face5eac705d6416a2f8568d3138352e37382e36362e31323282765f82765fb84034dedd0befcd6beb1acc3b7138246b08bd6056ec608b84983d5ce202e1af83c8cf8121063df26d7135536c1636aaa782b63e9f889f4c97172c3a4e5b09a4d721f8558c3138352e31372e34332e343382c35082c350b84035fb64bf23d73efa210bd9299e39d1b33bc189389a98c2d9998394df8d3b6f2e94cad1c36e8a00e3050d60394a8bd0febdfcd22b8127edc71ee7fd28bd2a8f8df8578e3136372e39392e3133382e32313482520b82520bb8403d9ca5956b38557aba991e31cf510d4df641dce9cc26bfeb7de082f0c07abb6ede3a58410c8f249dabeecee4ad3979929ac4c7c496ad20b8cfdd061b7401b4f5f8578e31332e3132352e3131302e323336820bd9820bd9b8402b40b3299cc4680a191c67564ab877face479b8d0c06e17946c68832dd2f17d814fda0258b941f0bd54358d2fc7b1bb5018197114ee0054e3dce576ce6567174f8568d36392e3136342e3231352e313482765f82765fb8402d11cfe93f8caf5aa9c4a90128ddc61350f585d5b0a14c137c18b12f21c4c5d0d28e440601ace627498e8d19903f0676b18ea210c80b528b14afb57edcbcee12f8578e3138352e3136392e3233302e363082c35082c350b840209dc79ec6937114afcefe9ca604a2b62a5313181cfa517298c386030cc421b23feb84b82ab024e983b902c410f936bacc55d88aee3d819b0e7bfcf7d285d28cf8548b31332e3232392e312e3339827597827597b84023c049cfc57345656e1fc9924a121859723a6cc3adea62e6ddd5c15f4b04b8ed044a29cd188d7c26d798da93aa828b911d65e37914935c34f92c9d6f671b3e7bf8588f3232302e3131372e3135342e313431820400820400b8401eecac5177f517a00373f5918f373fb3aa347c87dba678b58a09c0fe73bf578c2447e8f1d6e8f92c3248514d55157398e4909d36d42840f2c70f98120fd2da92f8558c3132322e31312e34372e393582c4a782c4a7b84011e4bc809f78687ac4cceff4ac574cda15010ef20d657d296fc0daf696dd8e80178c3aa64a02db51eecd7c6e05513d49dbbc0824df0fbb53fbbef07e81335926f8588f3138352e3135332e3139382e32303382c35082c350b84014ce698fb9ebd75a7ee6ab123b87f10e041e8bad7b290e5caddd7b75e3f477661923d7ad303a9a97042eb9b1657dc0848411d7b58287d8655881971ab25fd965f8588f3230372e3135342e3231382e313139825209825209b8400ba6b9f606a43a95edc6247cdb1c1e105145817be7bcafd6b2c0ba15d58145f0dc1a194f70ba73cd6f4cdd6864edc7687f311254c7555cc32e4d45aeb1b80416f8558c3133372e37342e3134342e3482765f82765fb8401083237e8c12e17153970639079096ad87bf0f534c84c131e7da339d70282e81919e1dbe02415453464849c72e9deb6c784997de2c4aa175282f84ffcd4b79f3f8568d35312e3134302e3132372e393582765f82765fb8400efa939a67ba0d177143c26cad8bc86a29cf7456af8132ddcfb956ab470173981fcf1d08fdbaa14ec4aa9e240880115406f533911f833545809704f5fff6b89ef8568d3230372e3134382e32372e3834827661827661b84003944d60046265f36aa333373e36604570029dc0dc9518d4226ba2037ae33cc2c5dd6940ee22c3ce85ad8a3c5791f81b73530dbe77aacd22d9e25593c4a354c8f8568d36342e33342e3233312e31343082765f82765fb8401feb66dd6b901ba73614a5bb7946426e1d9f0bf3df8368c3d80b47c6983b0f82d0fc360d422e79d67e81faaa0b37ec39c84f962179805dc85357fdb27e282c47845b867da0";
var deserializedMessage =
_messageSerializationService.Deserialize<NeighborsMessage>(Bytes.FromHexString(message));
Assert.IsNotNull(deserializedMessage);
}
[Test]
[Ignore("Is it some v5 message?")]
public void Can_deserialize_the_strange_message()
{
string message = "46261b14e3783640a24a652205a6fb7afdb94855c07bb9559777d98e54e51562442219fd8673b1a6aef0f4eaa3b1ed39695839775ed634e9b58d56bde116cd1c63e88d9e953bf05b24e9871de8ea630d98f812bdf176b712b7f9ba2c4db242170102f6c3808080cb845adc681b827668827668a070dfc96ee3da9864524f1f0214a35d46b56093f020ee588a05fafe1323335ce7845cc60fd7";
var deserializedMessage =
_messageSerializationService.Deserialize<PongMessage>(Bytes.FromHexString(message));
Assert.IsNotNull(deserializedMessage);
}
[Test]
public void FindNodeMessageTest()
{
var message = new FindNodeMessage
{
FarAddress = _farAddress,
SearchedNodeId = new byte[] {1, 2, 3},
FarPublicKey = _privateKey.PublicKey,
ExpirationTime = 60 + (long) _timestamper.EpochMilliseconds
};
var data = _messageSerializationService.Serialize(message);
var deserializedMessage = _messageSerializationService.Deserialize<FindNodeMessage>(data);
Assert.AreEqual(message.MessageType, deserializedMessage.MessageType);
Assert.AreEqual(message.FarPublicKey, deserializedMessage.FarPublicKey);
Assert.AreEqual(message.ExpirationTime, deserializedMessage.ExpirationTime);
Assert.AreEqual(message.SearchedNodeId, deserializedMessage.SearchedNodeId);
}
[Test]
public void NeighborsMessageTest()
{
var message = new NeighborsMessage
{
FarAddress = _farAddress,
Nodes = new[]
{
new Node("192.168.1.2", 1),
new Node("192.168.1.3", 2),
new Node("192.168.1.4", 3)
},
FarPublicKey = _privateKey.PublicKey,
ExpirationTime = 60 + (long) _timestamper.EpochMilliseconds
};
var data = _messageSerializationService.Serialize(message);
var deserializedMessage = _messageSerializationService.Deserialize<NeighborsMessage>(data);
Assert.AreEqual(message.MessageType, deserializedMessage.MessageType);
Assert.AreEqual(message.FarPublicKey, deserializedMessage.FarPublicKey);
Assert.AreEqual(message.ExpirationTime, deserializedMessage.ExpirationTime);
for (var i = 0; i < message.Nodes.Length; i++)
{
Assert.AreEqual(message.Nodes[i].Host, deserializedMessage.Nodes[i].Host);
Assert.AreEqual(message.Nodes[i].Port, deserializedMessage.Nodes[i].Port);
Assert.AreEqual(message.Nodes[i].IdHash, deserializedMessage.Nodes[i].IdHash);
Assert.AreEqual(message.Nodes[i], deserializedMessage.Nodes[i]);
}
}
}
} | 1 | 24,145 | put this hash in variable | NethermindEth-nethermind | .cs |
@@ -5,11 +5,8 @@
package libkbfs
import (
- "bytes"
- "errors"
"testing"
- "github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-framed-msgpack-rpc/rpc" | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"bytes"
"errors"
"testing"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
)
type FakeBServerClient struct {
bserverMem *BlockServerMemory
readyChan chan<- struct{}
goChan <-chan struct{}
finishChan chan<- struct{}
}
func NewFakeBServerClient(
crypto cryptoPure, log logger.Logger,
readyChan chan<- struct{},
goChan <-chan struct{},
finishChan chan<- struct{}) *FakeBServerClient {
return &FakeBServerClient{
bserverMem: NewBlockServerMemory(log),
readyChan: readyChan,
goChan: goChan,
finishChan: finishChan,
}
}
func (fc *FakeBServerClient) maybeWaitOnChannel(ctx context.Context) error {
if fc.readyChan == nil {
return nil
}
// say we're ready, and wait for a signal to proceed or a
// cancellation.
select {
case fc.readyChan <- struct{}{}:
case <-ctx.Done():
return ctx.Err()
}
select {
case <-fc.goChan:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func (fc *FakeBServerClient) maybeFinishOnChannel(ctx context.Context) error {
if fc.finishChan != nil {
select {
case fc.finishChan <- struct{}{}:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
return nil
}
func (fc *FakeBServerClient) GetSessionChallenge(context.Context) (keybase1.ChallengeInfo, error) {
return keybase1.ChallengeInfo{}, errors.New("GetSessionChallenge not implemented")
}
func (fc *FakeBServerClient) AuthenticateSession(context.Context, string) error {
return errors.New("AuthenticateSession not implemented")
}
func (fc *FakeBServerClient) PutBlock(ctx context.Context, arg keybase1.PutBlockArg) error {
err := fc.maybeWaitOnChannel(ctx)
if err != nil {
return err
}
defer func() {
finishErr := fc.maybeFinishOnChannel(ctx)
if err == nil {
err = finishErr
}
}()
id, err := kbfsblock.IDFromString(arg.Bid.BlockHash)
if err != nil {
return err
}
tlfID, err := tlf.ParseID(arg.Folder)
if err != nil {
return err
}
serverHalf, err := kbfscrypto.ParseBlockCryptKeyServerHalf(arg.BlockKey)
if err != nil {
return err
}
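	// the initial put always uses the zero ref nonce; only the creator comes from the RPC argument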
bCtx := kbfsblock.Context{
RefNonce: kbfsblock.ZeroRefNonce,
Creator: arg.Bid.ChargedTo,
}
return fc.bserverMem.Put(ctx, tlfID, id, bCtx, arg.Buf, serverHalf)
}
func (fc *FakeBServerClient) GetBlock(ctx context.Context, arg keybase1.GetBlockArg) (keybase1.GetBlockRes, error) {
err := fc.maybeWaitOnChannel(ctx)
if err != nil {
return keybase1.GetBlockRes{}, err
}
defer func() {
finishErr := fc.maybeFinishOnChannel(ctx)
if err == nil {
err = finishErr
}
}()
id, err := kbfsblock.IDFromString(arg.Bid.BlockHash)
if err != nil {
return keybase1.GetBlockRes{}, err
}
tlfID, err := tlf.ParseID(arg.Folder)
if err != nil {
return keybase1.GetBlockRes{}, err
}
// Always use this block context (the one the block was
// originally put with) since the RPC API doesn't pass along
// all the info from the block context passed into
// BlockServer.Get().
bCtx := kbfsblock.Context{
RefNonce: kbfsblock.ZeroRefNonce,
Creator: arg.Bid.ChargedTo,
}
data, serverHalf, err := fc.bserverMem.Get(ctx, tlfID, id, bCtx)
if err != nil {
return keybase1.GetBlockRes{}, err
}
return keybase1.GetBlockRes{
BlockKey: serverHalf.String(),
Buf: data,
}, nil
}
func (fc *FakeBServerClient) AddReference(ctx context.Context, arg keybase1.AddReferenceArg) error {
id, err := kbfsblock.IDFromString(arg.Ref.Bid.BlockHash)
if err != nil {
return err
}
tlfID, err := tlf.ParseID(arg.Folder)
if err != nil {
return err
}
bCtx := kbfsblock.Context{
RefNonce: kbfsblock.RefNonce(arg.Ref.Nonce),
Creator: arg.Ref.ChargedTo,
}
return fc.bserverMem.AddBlockReference(ctx, tlfID, id, bCtx)
}
func (fc *FakeBServerClient) DelReference(context.Context, keybase1.DelReferenceArg) error {
return errors.New("DelReference not implemented")
}
func (fc *FakeBServerClient) DelReferenceWithCount(context.Context, keybase1.DelReferenceWithCountArg) (
res keybase1.DowngradeReferenceRes, err error) {
return res, errors.New("DelReferenceWithCount not implemented")
}
func (fc *FakeBServerClient) ArchiveReference(context.Context, keybase1.ArchiveReferenceArg) ([]keybase1.BlockReference, error) {
return nil, errors.New("ArchiveReference not implemented")
}
func (fc *FakeBServerClient) ArchiveReferenceWithCount(context.Context, keybase1.ArchiveReferenceWithCountArg) (
res keybase1.DowngradeReferenceRes, err error) {
return res, errors.New("ArchiveReference not implemented")
}
func (fc *FakeBServerClient) GetUserQuotaInfo(context.Context) ([]byte, error) {
return nil, errors.New("GetUserQuotaInfo not implemented")
}
func (fc *FakeBServerClient) numBlocks() int {
return fc.bserverMem.numBlocks()
}
// Test that putting a block, and getting it back, works
func TestBServerRemotePutAndGet(t *testing.T) {
codec := kbfscodec.NewMsgpack()
localUsers := MakeLocalUsers([]libkb.NormalizedUsername{"user1", "user2"})
currentUID := localUsers[0].UID
log := logger.NewTestLogger(t)
crypto := MakeCryptoCommon(codec)
fc := NewFakeBServerClient(crypto, log, nil, nil, nil)
b := newBlockServerRemoteWithClient(codec, nil, log, fc)
tlfID := tlf.FakeID(2, false)
bCtx := kbfsblock.MakeFirstContext(currentUID)
data := []byte{1, 2, 3, 4}
bID, err := kbfsblock.MakePermanentID(data)
if err != nil {
t.Fatal(err)
}
serverHalf, err := kbfscrypto.MakeRandomBlockCryptKeyServerHalf()
if err != nil {
t.Errorf("Couldn't make block server key half: %v", err)
}
ctx := context.Background()
err = b.Put(ctx, tlfID, bID, bCtx, data, serverHalf)
if err != nil {
t.Fatalf("Put got error: %v", err)
}
// make sure it actually got to the db
nb := fc.numBlocks()
if nb != 1 {
t.Errorf("There are %d blocks in the db, not 1 as expected", nb)
}
// Now get the same block back
buf, key, err := b.Get(ctx, tlfID, bID, bCtx)
if err != nil {
t.Fatalf("Get returned an error: %v", err)
}
if !bytes.Equal(buf, data) {
t.Errorf("Got bad data -- got %v, expected %v", buf, data)
}
if key != serverHalf {
t.Errorf("Got bad key -- got %v, expected %v", key, serverHalf)
}
// Add a reference.
nonce, err := kbfsblock.MakeRefNonce()
if err != nil {
t.Fatal(err)
}
bCtx2 := kbfsblock.MakeContext(currentUID, localUsers[1].UID, nonce)
err = b.AddBlockReference(ctx, tlfID, bID, bCtx2)
if err != nil {
t.Fatal(err)
}
// Now get the same block back
buf, key, err = b.Get(ctx, tlfID, bID, bCtx2)
if err != nil {
t.Fatalf("Get returned an error: %v", err)
}
if !bytes.Equal(buf, data) {
t.Errorf("Got bad data -- got %v, expected %v", buf, data)
}
if key != serverHalf {
t.Errorf("Got bad key -- got %v, expected %v", key, serverHalf)
}
}
// If we cancel the RPC before the RPC returns, the call should error quickly.
func TestBServerRemotePutCanceled(t *testing.T) {
codec := kbfscodec.NewMsgpack()
localUsers := MakeLocalUsers([]libkb.NormalizedUsername{"testuser"})
currentUID := localUsers[0].UID
serverConn, conn := rpc.MakeConnectionForTest(t)
log := logger.NewTestLogger(t)
b := newBlockServerRemoteWithClient(codec, nil, log,
keybase1.BlockClient{Cli: conn.GetClient()})
f := func(ctx context.Context) error {
bID := kbfsblock.FakeID(1)
tlfID := tlf.FakeID(2, false)
bCtx := kbfsblock.MakeFirstContext(currentUID)
data := []byte{1, 2, 3, 4}
serverHalf, err := kbfscrypto.MakeRandomBlockCryptKeyServerHalf()
if err != nil {
t.Errorf("Couldn't make block server key half: %v", err)
}
err = b.Put(ctx, tlfID, bID, bCtx, data, serverHalf)
return err
}
testRPCWithCanceledContext(t, serverConn, f)
}
| 1 | 15,290 | Why not use `kbfsblock.ZeroRefNonce`? | keybase-kbfs | go |
@@ -26,7 +26,7 @@ class EarlyStopException(Exception):
# Callback environment used by callbacks
CallbackEnv = collections.namedtuple(
- "LightGBMCallbackEnv",
+ "CallbackEnv",
["model",
"params",
"iteration", | 1 | # coding: utf-8
"""Callbacks library."""
import collections
from operator import gt, lt
from .basic import _ConfigAliases, _log_info, _log_warning
class EarlyStopException(Exception):
"""Exception of early stopping."""
def __init__(self, best_iteration, best_score):
"""Create early stopping exception.
Parameters
----------
best_iteration : int
The best iteration stopped.
best_score : float
The score of the best iteration.
"""
super().__init__()
self.best_iteration = best_iteration
self.best_score = best_score
# Callback environment used by callbacks
CallbackEnv = collections.namedtuple(
"LightGBMCallbackEnv",
["model",
"params",
"iteration",
"begin_iteration",
"end_iteration",
"evaluation_result_list"])
def _format_eval_result(value, show_stdv=True):
"""Format metric string."""
if len(value) == 4:
return '%s\'s %s: %g' % (value[0], value[1], value[2])
elif len(value) == 5:
if show_stdv:
return '%s\'s %s: %g + %g' % (value[0], value[1], value[2], value[4])
else:
return '%s\'s %s: %g' % (value[0], value[1], value[2])
else:
raise ValueError("Wrong metric value")
def print_evaluation(period=1, show_stdv=True):
"""Create a callback that prints the evaluation results.
Parameters
----------
period : int, optional (default=1)
The period to print the evaluation results.
show_stdv : bool, optional (default=True)
Whether to show stdv (if provided).
Returns
-------
callback : function
The callback that prints the evaluation results every ``period`` iteration(s).
"""
def _callback(env):
if period > 0 and env.evaluation_result_list and (env.iteration + 1) % period == 0:
result = '\t'.join([_format_eval_result(x, show_stdv) for x in env.evaluation_result_list])
_log_info('[%d]\t%s' % (env.iteration + 1, result))
_callback.order = 10
return _callback
def record_evaluation(eval_result):
"""Create a callback that records the evaluation history into ``eval_result``.
Parameters
----------
eval_result : dict
A dictionary to store the evaluation results.
Returns
-------
callback : function
The callback that records the evaluation history into the passed dictionary.
"""
if not isinstance(eval_result, dict):
raise TypeError('eval_result should be a dictionary')
eval_result.clear()
def _init(env):
for data_name, eval_name, _, _ in env.evaluation_result_list:
eval_result.setdefault(data_name, collections.OrderedDict())
eval_result[data_name].setdefault(eval_name, [])
def _callback(env):
if not eval_result:
_init(env)
for data_name, eval_name, result, _ in env.evaluation_result_list:
eval_result[data_name][eval_name].append(result)
_callback.order = 20
return _callback
def reset_parameter(**kwargs):
"""Create a callback that resets the parameter after the first iteration.
.. note::
The initial parameter will still take in-effect on first iteration.
Parameters
----------
**kwargs : value should be list or function
List of parameters for each boosting round
or a customized function that calculates the parameter in terms of
current number of round (e.g. yields learning rate decay).
If list lst, parameter = lst[current_round].
If function func, parameter = func(current_round).
Returns
-------
callback : function
The callback that resets the parameter after the first iteration.
"""
def _callback(env):
new_parameters = {}
for key, value in kwargs.items():
if isinstance(value, list):
if len(value) != env.end_iteration - env.begin_iteration:
                    raise ValueError("Length of list {} has to equal 'num_boost_round'."
.format(repr(key)))
new_param = value[env.iteration - env.begin_iteration]
else:
new_param = value(env.iteration - env.begin_iteration)
if new_param != env.params.get(key, None):
new_parameters[key] = new_param
if new_parameters:
env.model.reset_parameter(new_parameters)
env.params.update(new_parameters)
_callback.before_iteration = True
_callback.order = 10
return _callback
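# A minimal usage sketch (hypothetical values): decay the learning rate either
# with an explicit list (one value per boosting round) or with a function of
# the current round index:
#
#   bst = lgb.train(params, train_set, num_boost_round=100,
#                   callbacks=[reset_parameter(
#                       learning_rate=lambda round_num: 0.1 * (0.99 ** round_num))])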
def early_stopping(stopping_rounds, first_metric_only=False, verbose=True):
"""Create a callback that activates early stopping.
Activates early stopping.
The model will train until the validation score stops improving.
    Validation score needs to improve at least once every ``stopping_rounds`` round(s)
    to continue training.
    Requires at least one validation dataset and one metric.
    If there is more than one, all of them will be checked, but the training data is ignored.
    To check only the first metric, set ``first_metric_only`` to True.
Parameters
----------
stopping_rounds : int
        The number of rounds without any improvement in the monitored metric
        after which training will be stopped.
first_metric_only : bool, optional (default=False)
Whether to use only the first metric for early stopping.
verbose : bool, optional (default=True)
Whether to print message with early stopping information.
Returns
-------
callback : function
The callback that activates early stopping.
"""
best_score = []
best_iter = []
best_score_list = []
cmp_op = []
enabled = [True]
first_metric = ['']
def _init(env):
enabled[0] = not any(env.params.get(boost_alias, "") == 'dart' for boost_alias
in _ConfigAliases.get("boosting"))
if not enabled[0]:
_log_warning('Early stopping is not available in dart mode')
return
if not env.evaluation_result_list:
raise ValueError('For early stopping, '
'at least one dataset and eval metric is required for evaluation')
if verbose:
_log_info("Training until validation scores don't improve for {} rounds".format(stopping_rounds))
# split is needed for "<dataset type> <metric>" case (e.g. "train l1")
first_metric[0] = env.evaluation_result_list[0][1].split(" ")[-1]
for eval_ret in env.evaluation_result_list:
best_iter.append(0)
best_score_list.append(None)
if eval_ret[3]:
best_score.append(float('-inf'))
cmp_op.append(gt)
else:
best_score.append(float('inf'))
cmp_op.append(lt)
def _final_iteration_check(env, eval_name_splitted, i):
if env.iteration == env.end_iteration - 1:
if verbose:
_log_info('Did not meet early stopping. Best iteration is:\n[%d]\t%s' % (
best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]])))
if first_metric_only:
_log_info("Evaluated only: {}".format(eval_name_splitted[-1]))
raise EarlyStopException(best_iter[i], best_score_list[i])
def _callback(env):
if not cmp_op:
_init(env)
if not enabled[0]:
return
for i in range(len(env.evaluation_result_list)):
score = env.evaluation_result_list[i][2]
if best_score_list[i] is None or cmp_op[i](score, best_score[i]):
best_score[i] = score
best_iter[i] = env.iteration
best_score_list[i] = env.evaluation_result_list
# split is needed for "<dataset type> <metric>" case (e.g. "train l1")
eval_name_splitted = env.evaluation_result_list[i][1].split(" ")
if first_metric_only and first_metric[0] != eval_name_splitted[-1]:
continue # use only the first metric for early stopping
if ((env.evaluation_result_list[i][0] == "cv_agg" and eval_name_splitted[0] == "train"
or env.evaluation_result_list[i][0] == env.model._train_data_name)):
_final_iteration_check(env, eval_name_splitted, i)
continue # train data for lgb.cv or sklearn wrapper (underlying lgb.train)
elif env.iteration - best_iter[i] >= stopping_rounds:
if verbose:
_log_info('Early stopping, best iteration is:\n[%d]\t%s' % (
best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]])))
if first_metric_only:
_log_info("Evaluated only: {}".format(eval_name_splitted[-1]))
raise EarlyStopException(best_iter[i], best_score_list[i])
_final_iteration_check(env, eval_name_splitted, i)
_callback.order = 30
return _callback
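# A minimal usage sketch (hypothetical values): stop once the validation metric
# has not improved for 50 consecutive rounds; the best iteration is then
# available as `bst.best_iteration`:
#
#   bst = lgb.train(params, train_set, num_boost_round=1000,
#                   valid_sets=[valid_set],
#                   callbacks=[early_stopping(stopping_rounds=50)])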
| 1 | 28,470 | what mypy warning does this address? I'm confused by this change. | microsoft-LightGBM | cpp |
@@ -3751,6 +3751,18 @@ void Player::addAttacked(const Player* attacked)
attackedSet.insert(attacked->guid);
}
+void Player::removeAttacked(const Player* attacked)
+{
+ if (!attacked || attacked == this) {
+ return;
+ }
+
+ auto it = attackedSet.find(attacked->guid);
+ if (it != attackedSet.end()) {
+ attackedSet.erase(it);
+ }
+}
+
void Player::clearAttacked()
{
	attackedSet.clear();
| 1 |
/**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2017 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include <bitset>
#include "bed.h"
#include "chat.h"
#include "combat.h"
#include "configmanager.h"
#include "creatureevent.h"
#include "events.h"
#include "game.h"
#include "iologindata.h"
#include "monster.h"
#include "movement.h"
#include "scheduler.h"
#include "weapons.h"
extern ConfigManager g_config;
extern Game g_game;
extern Chat* g_chat;
extern Vocations g_vocations;
extern MoveEvents* g_moveEvents;
extern Weapons* g_weapons;
extern CreatureEvents* g_creatureEvents;
extern Events* g_events;
MuteCountMap Player::muteCountMap;
uint32_t Player::playerAutoID = 0x10000000;
Player::Player(ProtocolGame_ptr p) :
Creature(), lastPing(OTSYS_TIME()), lastPong(lastPing), inbox(new Inbox(ITEM_INBOX)), client(std::move(p))
{
inbox->incrementReferenceCounter();
}
Player::~Player()
{
for (Item* item : inventory) {
if (item) {
item->setParent(nullptr);
item->decrementReferenceCounter();
}
}
for (const auto& it : depotLockerMap) {
it.second->removeInbox(inbox);
it.second->decrementReferenceCounter();
}
inbox->decrementReferenceCounter();
setWriteItem(nullptr);
setEditHouse(nullptr);
}
bool Player::setVocation(uint16_t vocId)
{
Vocation* voc = g_vocations.getVocation(vocId);
if (!voc) {
return false;
}
vocation = voc;
Condition* condition = getCondition(CONDITION_REGENERATION, CONDITIONID_DEFAULT);
if (condition) {
condition->setParam(CONDITION_PARAM_HEALTHGAIN, vocation->getHealthGainAmount());
condition->setParam(CONDITION_PARAM_HEALTHTICKS, vocation->getHealthGainTicks() * 1000);
condition->setParam(CONDITION_PARAM_MANAGAIN, vocation->getManaGainAmount());
condition->setParam(CONDITION_PARAM_MANATICKS, vocation->getManaGainTicks() * 1000);
}
return true;
}
bool Player::isPushable() const
{
if (hasFlag(PlayerFlag_CannotBePushed)) {
return false;
}
return Creature::isPushable();
}
std::string Player::getDescription(int32_t lookDistance) const
{
std::ostringstream s;
if (lookDistance == -1) {
s << "yourself.";
if (group->access) {
s << " You are " << group->name << '.';
} else if (vocation->getId() != VOCATION_NONE) {
s << " You are " << vocation->getVocDescription() << '.';
} else {
s << " You have no vocation.";
}
} else {
s << name;
if (!group->access) {
s << " (Level " << level << ')';
}
s << '.';
if (sex == PLAYERSEX_FEMALE) {
s << " She";
} else {
s << " He";
}
if (group->access) {
s << " is " << group->name << '.';
} else if (vocation->getId() != VOCATION_NONE) {
s << " is " << vocation->getVocDescription() << '.';
} else {
s << " has no vocation.";
}
}
if (party) {
if (lookDistance == -1) {
s << " Your party has ";
} else if (sex == PLAYERSEX_FEMALE) {
s << " She is in a party with ";
} else {
s << " He is in a party with ";
}
size_t memberCount = party->getMemberCount() + 1;
if (memberCount == 1) {
s << "1 member and ";
} else {
s << memberCount << " members and ";
}
size_t invitationCount = party->getInvitationCount();
if (invitationCount == 1) {
s << "1 pending invitation.";
} else {
s << invitationCount << " pending invitations.";
}
}
if (guild && guildRank) {
if (lookDistance == -1) {
s << " You are ";
} else if (sex == PLAYERSEX_FEMALE) {
s << " She is ";
} else {
s << " He is ";
}
s << guildRank->name << " of the " << guild->getName();
if (!guildNick.empty()) {
s << " (" << guildNick << ')';
}
size_t memberCount = guild->getMemberCount();
if (memberCount == 1) {
s << ", which has 1 member, " << guild->getMembersOnline().size() << " of them online.";
} else {
s << ", which has " << memberCount << " members, " << guild->getMembersOnline().size() << " of them online.";
}
}
return s.str();
}
Item* Player::getInventoryItem(slots_t slot) const
{
if (slot < CONST_SLOT_FIRST || slot > CONST_SLOT_LAST) {
return nullptr;
}
return inventory[slot];
}
void Player::addConditionSuppressions(uint32_t conditions)
{
conditionSuppressions |= conditions;
}
void Player::removeConditionSuppressions(uint32_t conditions)
{
conditionSuppressions &= ~conditions;
}
Item* Player::getWeapon(slots_t slot, bool ignoreAmmo) const
{
Item* item = inventory[slot];
if (!item) {
return nullptr;
}
WeaponType_t weaponType = item->getWeaponType();
if (weaponType == WEAPON_NONE || weaponType == WEAPON_SHIELD || weaponType == WEAPON_AMMO) {
return nullptr;
}
if (!ignoreAmmo && weaponType == WEAPON_DISTANCE) {
const ItemType& it = Item::items[item->getID()];
if (it.ammoType != AMMO_NONE) {
Item* ammoItem = inventory[CONST_SLOT_AMMO];
if (!ammoItem || ammoItem->getAmmoType() != it.ammoType) {
return nullptr;
}
item = ammoItem;
}
}
return item;
}
Item* Player::getWeapon(bool ignoreAmmo/* = false*/) const
{
Item* item = getWeapon(CONST_SLOT_LEFT, ignoreAmmo);
if (item) {
return item;
}
item = getWeapon(CONST_SLOT_RIGHT, ignoreAmmo);
if (item) {
return item;
}
return nullptr;
}
WeaponType_t Player::getWeaponType() const
{
Item* item = getWeapon();
if (!item) {
return WEAPON_NONE;
}
return item->getWeaponType();
}
int32_t Player::getWeaponSkill(const Item* item) const
{
if (!item) {
return getSkillLevel(SKILL_FIST);
}
int32_t attackSkill;
WeaponType_t weaponType = item->getWeaponType();
switch (weaponType) {
case WEAPON_SWORD: {
attackSkill = getSkillLevel(SKILL_SWORD);
break;
}
case WEAPON_CLUB: {
attackSkill = getSkillLevel(SKILL_CLUB);
break;
}
case WEAPON_AXE: {
attackSkill = getSkillLevel(SKILL_AXE);
break;
}
case WEAPON_DISTANCE: {
attackSkill = getSkillLevel(SKILL_DISTANCE);
break;
}
default: {
attackSkill = 0;
break;
}
}
return attackSkill;
}
int32_t Player::getArmor() const
{
int32_t armor = 0;
static const slots_t armorSlots[] = {CONST_SLOT_HEAD, CONST_SLOT_NECKLACE, CONST_SLOT_ARMOR, CONST_SLOT_LEGS, CONST_SLOT_FEET, CONST_SLOT_RING};
for (slots_t slot : armorSlots) {
Item* inventoryItem = inventory[slot];
if (inventoryItem) {
armor += inventoryItem->getArmor();
}
}
return static_cast<int32_t>(armor * vocation->armorMultiplier);
}
void Player::getShieldAndWeapon(const Item*& shield, const Item*& weapon) const
{
shield = nullptr;
weapon = nullptr;
for (uint32_t slot = CONST_SLOT_RIGHT; slot <= CONST_SLOT_LEFT; slot++) {
Item* item = inventory[slot];
if (!item) {
continue;
}
switch (item->getWeaponType()) {
case WEAPON_NONE:
break;
case WEAPON_SHIELD: {
if (!shield || item->getDefense() > shield->getDefense()) {
shield = item;
}
break;
}
default: { // weapons that are not shields
weapon = item;
break;
}
}
}
}
int32_t Player::getDefense() const
{
int32_t baseDefense = 5;
int32_t defenseValue = 0;
int32_t defenseSkill = 0;
int32_t extraDefense = 0;
float defenseFactor = getDefenseFactor();
const Item* weapon;
const Item* shield;
getShieldAndWeapon(shield, weapon);
if (weapon) {
defenseValue = baseDefense + weapon->getDefense();
extraDefense = weapon->getExtraDefense();
defenseSkill = getWeaponSkill(weapon);
}
if (shield && shield->getDefense() >= defenseValue) {
defenseValue = baseDefense + shield->getDefense() + extraDefense;
defenseSkill = getSkillLevel(SKILL_SHIELD);
}
if (defenseSkill == 0) {
return 0;
}
defenseValue = static_cast<int32_t>(defenseValue * vocation->defenseMultiplier);
return static_cast<int32_t>(std::ceil((static_cast<float>(defenseSkill * (defenseValue * 0.015)) + (defenseValue * 0.1)) * defenseFactor));
}
float Player::getAttackFactor() const
{
switch (fightMode) {
case FIGHTMODE_ATTACK: return 1.0f;
case FIGHTMODE_BALANCED: return 1.2f;
case FIGHTMODE_DEFENSE: return 2.0f;
default: return 1.0f;
}
}
float Player::getDefenseFactor() const
{
switch (fightMode) {
case FIGHTMODE_ATTACK: return 1.0f;
case FIGHTMODE_BALANCED: return 1.2f;
case FIGHTMODE_DEFENSE: {
if ((OTSYS_TIME() - lastAttack) < getAttackSpeed()) {
return 1.0f;
}
return 2.0f;
}
default: return 1.0f;
}
}
uint16_t Player::getClientIcons() const
{
uint16_t icons = 0;
for (Condition* condition : conditions) {
if (!isSuppress(condition->getType())) {
icons |= condition->getIcons();
}
}
if (pzLocked) {
icons |= ICON_REDSWORDS;
}
if (tile->hasFlag(TILESTATE_PROTECTIONZONE)) {
icons |= ICON_PIGEON;
// Don't show ICON_SWORDS if player is in protection zone.
if (hasBitSet(ICON_SWORDS, icons)) {
icons &= ~ICON_SWORDS;
}
}
	// The game client errors out ("debugs") when asked to display 10 or more icons,
	// so clear the lowest set bits until fewer than 10 remain.
std::bitset<20> icon_bitset(static_cast<uint64_t>(icons));
for (size_t pos = 0, bits_set = icon_bitset.count(); bits_set >= 10; ++pos) {
if (icon_bitset[pos]) {
icon_bitset.reset(pos);
--bits_set;
}
}
return icon_bitset.to_ulong();
}
void Player::updateInventoryWeight()
{
if (hasFlag(PlayerFlag_HasInfiniteCapacity)) {
return;
}
inventoryWeight = 0;
for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
const Item* item = inventory[i];
if (item) {
inventoryWeight += item->getWeight();
}
}
}
void Player::addSkillAdvance(skills_t skill, uint64_t count)
{
uint64_t currReqTries = vocation->getReqSkillTries(skill, skills[skill].level);
uint64_t nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1);
if (currReqTries >= nextReqTries) {
//player has reached max skill
return;
}
g_events->eventPlayerOnGainSkillTries(this, skill, count);
if (count == 0) {
return;
}
bool sendUpdateSkills = false;
while ((skills[skill].tries + count) >= nextReqTries) {
count -= nextReqTries - skills[skill].tries;
skills[skill].level++;
skills[skill].tries = 0;
skills[skill].percent = 0;
std::ostringstream ss;
ss << "You advanced to " << getSkillName(skill) << " level " << skills[skill].level << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
g_creatureEvents->playerAdvance(this, skill, (skills[skill].level - 1), skills[skill].level);
sendUpdateSkills = true;
currReqTries = nextReqTries;
nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1);
if (currReqTries >= nextReqTries) {
count = 0;
break;
}
}
skills[skill].tries += count;
uint32_t newPercent;
if (nextReqTries > currReqTries) {
newPercent = Player::getPercentLevel(skills[skill].tries, nextReqTries);
} else {
newPercent = 0;
}
if (skills[skill].percent != newPercent) {
skills[skill].percent = newPercent;
sendUpdateSkills = true;
}
if (sendUpdateSkills) {
sendSkills();
}
}
void Player::setVarStats(stats_t stat, int32_t modifier)
{
varStats[stat] += modifier;
switch (stat) {
case STAT_MAXHITPOINTS: {
if (getHealth() > getMaxHealth()) {
Creature::changeHealth(getMaxHealth() - getHealth());
} else {
g_game.addCreatureHealth(this);
}
break;
}
case STAT_MAXMANAPOINTS: {
if (getMana() > getMaxMana()) {
Creature::changeMana(getMaxMana() - getMana());
}
break;
}
default: {
break;
}
}
}
int32_t Player::getDefaultStats(stats_t stat) const
{
switch (stat) {
case STAT_MAXHITPOINTS: return healthMax;
case STAT_MAXMANAPOINTS: return manaMax;
case STAT_MAGICPOINTS: return getBaseMagicLevel();
default: return 0;
}
}
void Player::addContainer(uint8_t cid, Container* container)
{
if (cid > 0xF) {
return;
}
if (container->getID() == ITEM_BROWSEFIELD) {
container->incrementReferenceCounter();
}
auto it = openContainers.find(cid);
if (it != openContainers.end()) {
OpenContainer& openContainer = it->second;
Container* oldContainer = openContainer.container;
if (oldContainer->getID() == ITEM_BROWSEFIELD) {
oldContainer->decrementReferenceCounter();
}
openContainer.container = container;
openContainer.index = 0;
} else {
OpenContainer openContainer;
openContainer.container = container;
openContainer.index = 0;
openContainers[cid] = openContainer;
}
}
void Player::closeContainer(uint8_t cid)
{
auto it = openContainers.find(cid);
if (it == openContainers.end()) {
return;
}
OpenContainer openContainer = it->second;
Container* container = openContainer.container;
openContainers.erase(it);
if (container && container->getID() == ITEM_BROWSEFIELD) {
container->decrementReferenceCounter();
}
}
void Player::setContainerIndex(uint8_t cid, uint16_t index)
{
auto it = openContainers.find(cid);
if (it == openContainers.end()) {
return;
}
it->second.index = index;
}
Container* Player::getContainerByID(uint8_t cid)
{
auto it = openContainers.find(cid);
if (it == openContainers.end()) {
return nullptr;
}
return it->second.container;
}
int8_t Player::getContainerID(const Container* container) const
{
for (const auto& it : openContainers) {
if (it.second.container == container) {
return it.first;
}
}
return -1;
}
uint16_t Player::getContainerIndex(uint8_t cid) const
{
auto it = openContainers.find(cid);
if (it == openContainers.end()) {
return 0;
}
return it->second.index;
}
bool Player::canOpenCorpse(uint32_t ownerId) const
{
return getID() == ownerId || (party && party->canOpenCorpse(ownerId));
}
uint16_t Player::getLookCorpse() const
{
if (sex == PLAYERSEX_FEMALE) {
return ITEM_FEMALE_CORPSE;
} else {
return ITEM_MALE_CORPSE;
}
}
void Player::addStorageValue(const uint32_t key, const int32_t value, const bool isLogin/* = false*/)
{
if (IS_IN_KEYRANGE(key, RESERVED_RANGE)) {
if (IS_IN_KEYRANGE(key, OUTFITS_RANGE)) {
outfits.emplace_back(
value >> 16,
value & 0xFF
);
return;
} else if (IS_IN_KEYRANGE(key, MOUNTS_RANGE)) {
// do nothing
} else {
std::cout << "Warning: unknown reserved key: " << key << " player: " << getName() << std::endl;
return;
}
}
if (value != -1) {
int32_t oldValue;
getStorageValue(key, oldValue);
storageMap[key] = value;
if (!isLogin) {
auto currentFrameTime = g_dispatcher.getDispatcherCycle();
if (lastQuestlogUpdate != currentFrameTime && g_game.quests.isQuestStorage(key, value, oldValue)) {
lastQuestlogUpdate = currentFrameTime;
sendTextMessage(MESSAGE_EVENT_ADVANCE, "Your questlog has been updated.");
}
}
} else {
storageMap.erase(key);
}
}
bool Player::getStorageValue(const uint32_t key, int32_t& value) const
{
auto it = storageMap.find(key);
if (it == storageMap.end()) {
value = -1;
return false;
}
value = it->second;
return true;
}
bool Player::canSee(const Position& pos) const
{
if (!client) {
return false;
}
return client->canSee(pos);
}
bool Player::canSeeCreature(const Creature* creature) const
{
if (creature == this) {
return true;
}
if (creature->isInGhostMode() && !group->access) {
return false;
}
if (!creature->getPlayer() && !canSeeInvisibility() && creature->isInvisible()) {
return false;
}
return true;
}
bool Player::canWalkthrough(const Creature* creature) const
{
if (group->access || creature->isInGhostMode()) {
return true;
}
const Player* player = creature->getPlayer();
if (!player) {
return false;
}
const Tile* playerTile = player->getTile();
if (!playerTile || !playerTile->hasFlag(TILESTATE_PROTECTIONZONE)) {
return false;
}
const Item* playerTileGround = playerTile->getGround();
if (!playerTileGround || !playerTileGround->hasWalkStack()) {
return false;
}
Player* thisPlayer = const_cast<Player*>(this);
if ((OTSYS_TIME() - lastWalkthroughAttempt) > 2000) {
thisPlayer->setLastWalkthroughAttempt(OTSYS_TIME());
return false;
}
if (creature->getPosition() != lastWalkthroughPosition) {
thisPlayer->setLastWalkthroughPosition(creature->getPosition());
return false;
}
thisPlayer->setLastWalkthroughPosition(creature->getPosition());
return true;
}
bool Player::canWalkthroughEx(const Creature* creature) const
{
if (group->access) {
return true;
}
const Player* player = creature->getPlayer();
if (!player) {
return false;
}
const Tile* playerTile = player->getTile();
return playerTile && playerTile->hasFlag(TILESTATE_PROTECTIONZONE);
}
void Player::onReceiveMail() const
{
if (isNearDepotBox()) {
sendTextMessage(MESSAGE_EVENT_ADVANCE, "New mail has arrived.");
}
}
bool Player::isNearDepotBox() const
{
const Position& pos = getPosition();
for (int32_t cx = -1; cx <= 1; ++cx) {
for (int32_t cy = -1; cy <= 1; ++cy) {
Tile* tile = g_game.map.getTile(pos.x + cx, pos.y + cy, pos.z);
if (!tile) {
continue;
}
if (tile->hasFlag(TILESTATE_DEPOT)) {
return true;
}
}
}
return false;
}
DepotChest* Player::getDepotChest(uint32_t depotId, bool autoCreate)
{
auto it = depotChests.find(depotId);
if (it != depotChests.end()) {
return it->second;
}
if (!autoCreate) {
return nullptr;
}
DepotChest* depotChest = new DepotChest(ITEM_DEPOT);
depotChest->incrementReferenceCounter();
depotChest->setMaxDepotItems(getMaxDepotItems());
depotChests[depotId] = depotChest;
return depotChest;
}
DepotLocker* Player::getDepotLocker(uint32_t depotId)
{
auto it = depotLockerMap.find(depotId);
if (it != depotLockerMap.end()) {
inbox->setParent(it->second);
return it->second;
}
DepotLocker* depotLocker = new DepotLocker(ITEM_LOCKER1);
depotLocker->setDepotId(depotId);
depotLocker->internalAddThing(Item::CreateItem(ITEM_MARKET));
depotLocker->internalAddThing(inbox);
depotLocker->internalAddThing(getDepotChest(depotId, true));
depotLockerMap[depotId] = depotLocker;
return depotLocker;
}
void Player::sendCancelMessage(ReturnValue message) const
{
sendCancelMessage(getReturnMessage(message));
}
void Player::sendStats()
{
if (client) {
client->sendStats();
lastStatsTrainingTime = getOfflineTrainingTime() / 60 / 1000;
}
}
void Player::sendPing()
{
int64_t timeNow = OTSYS_TIME();
bool hasLostConnection = false;
if ((timeNow - lastPing) >= 5000) {
lastPing = timeNow;
if (client) {
client->sendPing();
} else {
hasLostConnection = true;
}
}
int64_t noPongTime = timeNow - lastPong;
if ((hasLostConnection || noPongTime >= 7000) && attackedCreature && attackedCreature->getPlayer()) {
setAttackedCreature(nullptr);
}
if (noPongTime >= 60000 && canLogout()) {
if (g_creatureEvents->playerLogout(this)) {
if (client) {
client->logout(true, true);
} else {
g_game.removeCreature(this, true);
}
}
}
}
Item* Player::getWriteItem(uint32_t& windowTextId, uint16_t& maxWriteLen)
{
windowTextId = this->windowTextId;
maxWriteLen = this->maxWriteLen;
return writeItem;
}
void Player::setWriteItem(Item* item, uint16_t maxWriteLen /*= 0*/)
{
windowTextId++;
if (writeItem) {
writeItem->decrementReferenceCounter();
}
if (item) {
writeItem = item;
this->maxWriteLen = maxWriteLen;
writeItem->incrementReferenceCounter();
} else {
writeItem = nullptr;
this->maxWriteLen = 0;
}
}
House* Player::getEditHouse(uint32_t& windowTextId, uint32_t& listId)
{
windowTextId = this->windowTextId;
listId = this->editListId;
return editHouse;
}
void Player::setEditHouse(House* house, uint32_t listId /*= 0*/)
{
windowTextId++;
editHouse = house;
editListId = listId;
}
void Player::sendHouseWindow(House* house, uint32_t listId) const
{
if (!client) {
return;
}
std::string text;
if (house->getAccessList(listId, text)) {
client->sendHouseWindow(windowTextId, text);
}
}
//container
void Player::sendAddContainerItem(const Container* container, const Item* item)
{
if (!client) {
return;
}
for (const auto& it : openContainers) {
const OpenContainer& openContainer = it.second;
if (openContainer.container != container) {
continue;
}
uint16_t slot = openContainer.index;
if (container->getID() == ITEM_BROWSEFIELD) {
uint16_t containerSize = container->size() - 1;
uint16_t pageEnd = openContainer.index + container->capacity() - 1;
if (containerSize > pageEnd) {
slot = pageEnd;
item = container->getItemByIndex(pageEnd);
} else {
slot = containerSize;
}
} else if (openContainer.index >= container->capacity()) {
item = container->getItemByIndex(openContainer.index - 1);
}
client->sendAddContainerItem(it.first, slot, item);
}
}
void Player::sendUpdateContainerItem(const Container* container, uint16_t slot, const Item* newItem)
{
if (!client) {
return;
}
for (const auto& it : openContainers) {
const OpenContainer& openContainer = it.second;
if (openContainer.container != container) {
continue;
}
if (slot < openContainer.index) {
continue;
}
uint16_t pageEnd = openContainer.index + container->capacity();
if (slot >= pageEnd) {
continue;
}
client->sendUpdateContainerItem(it.first, slot, newItem);
}
}
void Player::sendRemoveContainerItem(const Container* container, uint16_t slot)
{
if (!client) {
return;
}
for (auto& it : openContainers) {
OpenContainer& openContainer = it.second;
if (openContainer.container != container) {
continue;
}
uint16_t& firstIndex = openContainer.index;
if (firstIndex > 0 && firstIndex >= container->size() - 1) {
firstIndex -= container->capacity();
sendContainer(it.first, container, false, firstIndex);
}
client->sendRemoveContainerItem(it.first, std::max<uint16_t>(slot, firstIndex), container->getItemByIndex(container->capacity() + firstIndex));
}
}
void Player::onUpdateTileItem(const Tile* tile, const Position& pos, const Item* oldItem,
const ItemType& oldType, const Item* newItem, const ItemType& newType)
{
Creature::onUpdateTileItem(tile, pos, oldItem, oldType, newItem, newType);
if (oldItem != newItem) {
onRemoveTileItem(tile, pos, oldType, oldItem);
}
if (tradeState != TRADE_TRANSFER) {
if (tradeItem && oldItem == tradeItem) {
g_game.internalCloseTrade(this);
}
}
}
void Player::onRemoveTileItem(const Tile* tile, const Position& pos, const ItemType& iType,
const Item* item)
{
Creature::onRemoveTileItem(tile, pos, iType, item);
if (tradeState != TRADE_TRANSFER) {
checkTradeState(item);
if (tradeItem) {
const Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
g_game.internalCloseTrade(this);
}
}
}
}
void Player::onCreatureAppear(Creature* creature, bool isLogin)
{
Creature::onCreatureAppear(creature, isLogin);
if (isLogin && creature == this) {
for (int32_t slot = CONST_SLOT_FIRST; slot <= CONST_SLOT_LAST; ++slot) {
Item* item = inventory[slot];
if (item) {
item->startDecaying();
g_moveEvents->onPlayerEquip(this, item, static_cast<slots_t>(slot), false);
}
}
for (Condition* condition : storedConditionList) {
addCondition(condition);
}
storedConditionList.clear();
BedItem* bed = g_game.getBedBySleeper(guid);
if (bed) {
bed->wakeUp(this);
}
std::cout << name << " has logged in." << std::endl;
if (guild) {
guild->addMember(this);
}
int32_t offlineTime;
if (getLastLogout() != 0) {
			// Not counting more than 21 days to prevent overflow when multiplying by 1000 (for milliseconds).
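			// Worked example: 86400 * 21 * 1000 = 1'814'400'000 still fits into a signed
			// 32-bit integer (max 2'147'483'647), whereas roughly 25 days would overflow it.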
offlineTime = std::min<int32_t>(time(nullptr) - getLastLogout(), 86400 * 21);
} else {
offlineTime = 0;
}
for (Condition* condition : getMuteConditions()) {
condition->setTicks(condition->getTicks() - (offlineTime * 1000));
if (condition->getTicks() <= 0) {
removeCondition(condition);
}
}
g_game.checkPlayersRecord();
IOLoginData::updateOnlineStatus(guid, true);
}
}
void Player::onAttackedCreatureDisappear(bool isLogout)
{
sendCancelTarget();
if (!isLogout) {
sendTextMessage(MESSAGE_STATUS_SMALL, "Target lost.");
}
}
void Player::onFollowCreatureDisappear(bool isLogout)
{
sendCancelTarget();
if (!isLogout) {
sendTextMessage(MESSAGE_STATUS_SMALL, "Target lost.");
}
}
void Player::onChangeZone(ZoneType_t zone)
{
if (zone == ZONE_PROTECTION) {
if (attackedCreature && !hasFlag(PlayerFlag_IgnoreProtectionZone)) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(false);
}
if (!group->access && isMounted()) {
dismount();
g_game.internalCreatureChangeOutfit(this, defaultOutfit);
wasMounted = true;
}
} else {
if (wasMounted) {
toggleMount(true);
wasMounted = false;
}
}
g_game.updateCreatureWalkthrough(this);
sendIcons();
}
void Player::onAttackedCreatureChangeZone(ZoneType_t zone)
{
if (zone == ZONE_PROTECTION) {
if (!hasFlag(PlayerFlag_IgnoreProtectionZone)) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(false);
}
} else if (zone == ZONE_NOPVP) {
if (attackedCreature->getPlayer()) {
if (!hasFlag(PlayerFlag_IgnoreProtectionZone)) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(false);
}
}
} else if (zone == ZONE_NORMAL) {
//attackedCreature can leave a pvp zone if not pzlocked
if (g_game.getWorldType() == WORLD_TYPE_NO_PVP) {
if (attackedCreature->getPlayer()) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(false);
}
}
}
}
void Player::onRemoveCreature(Creature* creature, bool isLogout)
{
Creature::onRemoveCreature(creature, isLogout);
if (creature == this) {
if (isLogout) {
loginPosition = getPosition();
}
lastLogout = time(nullptr);
if (eventWalk != 0) {
setFollowCreature(nullptr);
}
if (tradePartner) {
g_game.internalCloseTrade(this);
}
closeShopWindow();
clearPartyInvitations();
if (party) {
party->leaveParty(this);
}
g_chat->removeUserFromAllChannels(*this);
std::cout << getName() << " has logged out." << std::endl;
if (guild) {
guild->removeMember(this);
}
IOLoginData::updateOnlineStatus(guid, false);
bool saved = false;
for (uint32_t tries = 0; tries < 3; ++tries) {
if (IOLoginData::savePlayer(this)) {
saved = true;
break;
}
}
if (!saved) {
std::cout << "Error while saving player: " << getName() << std::endl;
}
}
}
void Player::openShopWindow(Npc* npc, const std::list<ShopInfo>& shop)
{
shopItemList = shop;
sendShop(npc);
sendSaleItemList();
}
bool Player::closeShopWindow(bool sendCloseShopWindow /*= true*/)
{
//unreference callbacks
int32_t onBuy;
int32_t onSell;
Npc* npc = getShopOwner(onBuy, onSell);
if (!npc) {
shopItemList.clear();
return false;
}
setShopOwner(nullptr, -1, -1);
npc->onPlayerEndTrade(this, onBuy, onSell);
if (sendCloseShopWindow) {
sendCloseShop();
}
shopItemList.clear();
return true;
}
void Player::onWalk(Direction& dir)
{
Creature::onWalk(dir);
setNextActionTask(nullptr);
setNextAction(OTSYS_TIME() + getStepDuration(dir));
}
void Player::onCreatureMove(Creature* creature, const Tile* newTile, const Position& newPos,
const Tile* oldTile, const Position& oldPos, bool teleport)
{
Creature::onCreatureMove(creature, newTile, newPos, oldTile, oldPos, teleport);
if (hasFollowPath && (creature == followCreature || (creature == this && followCreature))) {
isUpdatingPath = false;
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, &g_game, getID())));
}
if (creature != this) {
return;
}
if (tradeState != TRADE_TRANSFER) {
//check if we should close trade
if (tradeItem && !Position::areInRange<1, 1, 0>(tradeItem->getPosition(), getPosition())) {
g_game.internalCloseTrade(this);
}
if (tradePartner && !Position::areInRange<2, 2, 0>(tradePartner->getPosition(), getPosition())) {
g_game.internalCloseTrade(this);
}
}
// close modal windows
if (!modalWindows.empty()) {
// TODO: This shouldn't be hardcoded
for (uint32_t modalWindowId : modalWindows) {
if (modalWindowId == std::numeric_limits<uint32_t>::max()) {
sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted.");
break;
}
}
modalWindows.clear();
}
// leave market
if (inMarket) {
inMarket = false;
}
if (party) {
party->updateSharedExperience();
}
if (teleport || oldPos.z != newPos.z) {
int32_t ticks = g_config.getNumber(ConfigManager::STAIRHOP_DELAY);
if (ticks > 0) {
if (Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_PACIFIED, ticks, 0)) {
addCondition(condition);
}
}
}
}
//container
void Player::onAddContainerItem(const Item* item)
{
checkTradeState(item);
}
void Player::onUpdateContainerItem(const Container* container, const Item* oldItem, const Item* newItem)
{
if (oldItem != newItem) {
onRemoveContainerItem(container, oldItem);
}
if (tradeState != TRADE_TRANSFER) {
checkTradeState(oldItem);
}
}
void Player::onRemoveContainerItem(const Container* container, const Item* item)
{
if (tradeState != TRADE_TRANSFER) {
checkTradeState(item);
if (tradeItem) {
if (tradeItem->getParent() != container && container->isHoldingItem(tradeItem)) {
g_game.internalCloseTrade(this);
}
}
}
}
void Player::onCloseContainer(const Container* container)
{
if (!client) {
return;
}
for (const auto& it : openContainers) {
if (it.second.container == container) {
client->sendCloseContainer(it.first);
}
}
}
void Player::onSendContainer(const Container* container)
{
if (!client) {
return;
}
bool hasParent = container->hasParent();
for (const auto& it : openContainers) {
const OpenContainer& openContainer = it.second;
if (openContainer.container == container) {
client->sendContainer(it.first, container, hasParent, openContainer.index);
}
}
}
//inventory
void Player::onUpdateInventoryItem(Item* oldItem, Item* newItem)
{
if (oldItem != newItem) {
onRemoveInventoryItem(oldItem);
}
if (tradeState != TRADE_TRANSFER) {
checkTradeState(oldItem);
}
}
void Player::onRemoveInventoryItem(Item* item)
{
if (tradeState != TRADE_TRANSFER) {
checkTradeState(item);
if (tradeItem) {
const Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
g_game.internalCloseTrade(this);
}
}
}
}
void Player::checkTradeState(const Item* item)
{
if (!tradeItem || tradeState == TRADE_TRANSFER) {
return;
}
if (tradeItem == item) {
g_game.internalCloseTrade(this);
} else {
const Container* container = dynamic_cast<const Container*>(item->getParent());
while (container) {
if (container == tradeItem) {
g_game.internalCloseTrade(this);
break;
}
container = dynamic_cast<const Container*>(container->getParent());
}
}
}
void Player::setNextWalkActionTask(SchedulerTask* task)
{
if (walkTaskEvent != 0) {
g_scheduler.stopEvent(walkTaskEvent);
walkTaskEvent = 0;
}
delete walkTask;
walkTask = task;
}
void Player::setNextWalkTask(SchedulerTask* task)
{
if (nextStepEvent != 0) {
g_scheduler.stopEvent(nextStepEvent);
nextStepEvent = 0;
}
if (task) {
nextStepEvent = g_scheduler.addEvent(task);
resetIdleTime();
}
}
void Player::setNextActionTask(SchedulerTask* task)
{
if (actionTaskEvent != 0) {
g_scheduler.stopEvent(actionTaskEvent);
actionTaskEvent = 0;
}
if (task) {
actionTaskEvent = g_scheduler.addEvent(task);
resetIdleTime();
}
}
uint32_t Player::getNextActionTime() const
{
return std::max<int64_t>(SCHEDULER_MINTICKS, nextAction - OTSYS_TIME());
}
void Player::onThink(uint32_t interval)
{
Creature::onThink(interval);
sendPing();
MessageBufferTicks += interval;
if (MessageBufferTicks >= 1500) {
MessageBufferTicks = 0;
addMessageBuffer();
}
if (!getTile()->hasFlag(TILESTATE_NOLOGOUT) && !isAccessPlayer()) {
idleTime += interval;
const int32_t kickAfterMinutes = g_config.getNumber(ConfigManager::KICK_AFTER_MINUTES);
if (idleTime > (kickAfterMinutes * 60000) + 60000) {
kickPlayer(true);
} else if (client && idleTime == 60000 * kickAfterMinutes) {
std::ostringstream ss;
ss << "You have been idle for " << kickAfterMinutes << " minutes. You will be disconnected in one minute if you are still idle then.";
client->sendTextMessage(TextMessage(MESSAGE_STATUS_WARNING, ss.str()));
}
}
if (g_game.getWorldType() != WORLD_TYPE_PVP_ENFORCED) {
checkSkullTicks(interval);
}
addOfflineTrainingTime(interval);
if (lastStatsTrainingTime != getOfflineTrainingTime() / 60 / 1000) {
sendStats();
}
}
uint32_t Player::isMuted() const
{
if (hasFlag(PlayerFlag_CannotBeMuted)) {
return 0;
}
int32_t muteTicks = 0;
for (Condition* condition : conditions) {
if (condition->getType() == CONDITION_MUTED && condition->getTicks() > muteTicks) {
muteTicks = condition->getTicks();
}
}
return static_cast<uint32_t>(muteTicks) / 1000;
}
void Player::addMessageBuffer()
{
if (MessageBufferCount > 0 && g_config.getNumber(ConfigManager::MAX_MESSAGEBUFFER) != 0 && !hasFlag(PlayerFlag_CannotBeMuted)) {
--MessageBufferCount;
}
}
void Player::removeMessageBuffer()
{
if (hasFlag(PlayerFlag_CannotBeMuted)) {
return;
}
const int32_t maxMessageBuffer = g_config.getNumber(ConfigManager::MAX_MESSAGEBUFFER);
if (maxMessageBuffer != 0 && MessageBufferCount <= maxMessageBuffer + 1) {
if (++MessageBufferCount > maxMessageBuffer) {
uint32_t muteCount = 1;
auto it = muteCountMap.find(guid);
if (it != muteCountMap.end()) {
muteCount = it->second;
}
uint32_t muteTime = 5 * muteCount * muteCount;
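				// The mute duration grows quadratically with repeated offenses:
				// 1st mute = 5s, 2nd = 20s, 3rd = 45s, 4th = 80s, and so on.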
muteCountMap[guid] = muteCount + 1;
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_MUTED, muteTime * 1000, 0);
addCondition(condition);
std::ostringstream ss;
ss << "You are muted for " << muteTime << " seconds.";
sendTextMessage(MESSAGE_STATUS_SMALL, ss.str());
}
}
}
void Player::drainHealth(Creature* attacker, int32_t damage)
{
Creature::drainHealth(attacker, damage);
sendStats();
}
void Player::drainMana(Creature* attacker, int32_t manaLoss)
{
Creature::drainMana(attacker, manaLoss);
sendStats();
}
void Player::addManaSpent(uint64_t amount)
{
if (hasFlag(PlayerFlag_NotGainMana)) {
return;
}
uint64_t currReqMana = vocation->getReqMana(magLevel);
uint64_t nextReqMana = vocation->getReqMana(magLevel + 1);
if (currReqMana >= nextReqMana) {
//player has reached max magic level
return;
}
g_events->eventPlayerOnGainSkillTries(this, SKILL_MAGLEVEL, amount);
if (amount == 0) {
return;
}
bool sendUpdateStats = false;
while ((manaSpent + amount) >= nextReqMana) {
amount -= nextReqMana - manaSpent;
magLevel++;
manaSpent = 0;
std::ostringstream ss;
ss << "You advanced to magic level " << magLevel << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
g_creatureEvents->playerAdvance(this, SKILL_MAGLEVEL, magLevel - 1, magLevel);
sendUpdateStats = true;
currReqMana = nextReqMana;
nextReqMana = vocation->getReqMana(magLevel + 1);
if (currReqMana >= nextReqMana) {
return;
}
}
manaSpent += amount;
uint8_t oldPercent = magLevelPercent;
if (nextReqMana > currReqMana) {
magLevelPercent = Player::getPercentLevel(manaSpent, nextReqMana);
} else {
magLevelPercent = 0;
}
if (oldPercent != magLevelPercent) {
sendUpdateStats = true;
}
if (sendUpdateStats) {
sendStats();
}
}
void Player::addExperience(Creature* source, uint64_t exp, bool sendText/* = false*/)
{
uint64_t currLevelExp = Player::getExpForLevel(level);
uint64_t nextLevelExp = Player::getExpForLevel(level + 1);
uint64_t rawExp = exp;
if (currLevelExp >= nextLevelExp) {
//player has reached max level
levelPercent = 0;
sendStats();
return;
}
g_events->eventPlayerOnGainExperience(this, source, exp, rawExp);
if (exp == 0) {
return;
}
experience += exp;
if (sendText) {
std::string expString = std::to_string(exp) + (exp != 1 ? " experience points." : " experience point.");
TextMessage message(MESSAGE_EXPERIENCE, "You gained " + expString);
message.position = position;
message.primary.value = exp;
message.primary.color = TEXTCOLOR_WHITE_EXP;
sendTextMessage(message);
SpectatorHashSet spectators;
g_game.map.getSpectators(spectators, position, false, true);
spectators.erase(this);
if (!spectators.empty()) {
message.type = MESSAGE_EXPERIENCE_OTHERS;
message.text = getName() + " gained " + expString;
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendTextMessage(message);
}
}
}
uint32_t prevLevel = level;
while (experience >= nextLevelExp) {
++level;
healthMax += vocation->getHPGain();
health += vocation->getHPGain();
manaMax += vocation->getManaGain();
mana += vocation->getManaGain();
capacity += vocation->getCapGain();
currLevelExp = nextLevelExp;
nextLevelExp = Player::getExpForLevel(level + 1);
if (currLevelExp >= nextLevelExp) {
//player has reached max level
break;
}
}
if (prevLevel != level) {
health = healthMax;
mana = manaMax;
updateBaseSpeed();
setBaseSpeed(getBaseSpeed());
g_game.changeSpeed(this, 0);
g_game.addCreatureHealth(this);
if (party) {
party->updateSharedExperience();
}
g_creatureEvents->playerAdvance(this, SKILL_LEVEL, prevLevel, level);
std::ostringstream ss;
ss << "You advanced from Level " << prevLevel << " to Level " << level << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
}
if (nextLevelExp > currLevelExp) {
levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp);
} else {
levelPercent = 0;
}
sendStats();
}
void Player::removeExperience(uint64_t exp, bool sendText/* = false*/)
{
if (experience == 0 || exp == 0) {
return;
}
g_events->eventPlayerOnLoseExperience(this, exp);
if (exp == 0) {
return;
}
uint64_t lostExp = experience;
experience = std::max<int64_t>(0, experience - exp);
if (sendText) {
lostExp -= experience;
std::string expString = std::to_string(lostExp) + (lostExp != 1 ? " experience points." : " experience point.");
TextMessage message(MESSAGE_EXPERIENCE, "You lost " + expString);
message.position = position;
message.primary.value = lostExp;
message.primary.color = TEXTCOLOR_RED;
sendTextMessage(message);
SpectatorHashSet spectators;
g_game.map.getSpectators(spectators, position, false, true);
spectators.erase(this);
if (!spectators.empty()) {
message.type = MESSAGE_EXPERIENCE_OTHERS;
message.text = getName() + " lost " + expString;
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendTextMessage(message);
}
}
}
uint32_t oldLevel = level;
uint64_t currLevelExp = Player::getExpForLevel(level);
while (level > 1 && experience < currLevelExp) {
--level;
healthMax = std::max<int32_t>(0, healthMax - vocation->getHPGain());
manaMax = std::max<int32_t>(0, manaMax - vocation->getManaGain());
capacity = std::max<int32_t>(0, capacity - vocation->getCapGain());
currLevelExp = Player::getExpForLevel(level);
}
if (oldLevel != level) {
health = healthMax;
mana = manaMax;
updateBaseSpeed();
setBaseSpeed(getBaseSpeed());
g_game.changeSpeed(this, 0);
g_game.addCreatureHealth(this);
if (party) {
party->updateSharedExperience();
}
std::ostringstream ss;
ss << "You were downgraded from Level " << oldLevel << " to Level " << level << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
}
uint64_t nextLevelExp = Player::getExpForLevel(level + 1);
if (nextLevelExp > currLevelExp) {
levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp);
} else {
levelPercent = 0;
}
sendStats();
}
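// Brief worked example for the helper below (illustrative values): with
// count = 150 tries towards nextLevelCount = 600 required tries, it returns
// (150 * 100) / 600 = 25 percent.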
uint8_t Player::getPercentLevel(uint64_t count, uint64_t nextLevelCount)
{
if (nextLevelCount == 0) {
return 0;
}
uint8_t result = (count * 100) / nextLevelCount;
if (result > 100) {
return 0;
}
return result;
}
void Player::onBlockHit()
{
if (shieldBlockCount > 0) {
--shieldBlockCount;
if (hasShield()) {
addSkillAdvance(SKILL_SHIELD, 1);
}
}
}
void Player::onAttackedCreatureBlockHit(BlockType_t blockType)
{
lastAttackBlockType = blockType;
switch (blockType) {
case BLOCK_NONE: {
addAttackSkillPoint = true;
bloodHitCount = 30;
shieldBlockCount = 30;
break;
}
case BLOCK_DEFENSE:
case BLOCK_ARMOR: {
//need to draw blood every 30 hits
if (bloodHitCount > 0) {
addAttackSkillPoint = true;
--bloodHitCount;
} else {
addAttackSkillPoint = false;
}
break;
}
default: {
addAttackSkillPoint = false;
break;
}
}
}
bool Player::hasShield() const
{
Item* item = inventory[CONST_SLOT_LEFT];
if (item && item->getWeaponType() == WEAPON_SHIELD) {
return true;
}
item = inventory[CONST_SLOT_RIGHT];
if (item && item->getWeaponType() == WEAPON_SHIELD) {
return true;
}
return false;
}
BlockType_t Player::blockHit(Creature* attacker, CombatType_t combatType, int32_t& damage,
bool checkDefense /* = false*/, bool checkArmor /* = false*/, bool field /* = false*/)
{
BlockType_t blockType = Creature::blockHit(attacker, combatType, damage, checkDefense, checkArmor, field);
if (attacker) {
sendCreatureSquare(attacker, SQ_COLOR_BLACK);
}
if (blockType != BLOCK_NONE) {
return blockType;
}
if (damage > 0) {
for (int32_t slot = CONST_SLOT_FIRST; slot <= CONST_SLOT_LAST; ++slot) {
if (!isItemAbilityEnabled(static_cast<slots_t>(slot))) {
continue;
}
Item* item = inventory[slot];
if (!item) {
continue;
}
const ItemType& it = Item::items[item->getID()];
if (it.abilities) {
const int16_t& absorbPercent = it.abilities->absorbPercent[combatTypeToIndex(combatType)];
if (absorbPercent != 0) {
damage -= std::round(damage * (absorbPercent / 100.));
uint16_t charges = item->getCharges();
if (charges != 0) {
g_game.transformItem(item, item->getID(), charges - 1);
}
}
if (field) {
const int16_t& fieldAbsorbPercent = it.abilities->fieldAbsorbPercent[combatTypeToIndex(combatType)];
if (fieldAbsorbPercent != 0) {
damage -= std::round(damage * (fieldAbsorbPercent / 100.));
uint16_t charges = item->getCharges();
if (charges != 0) {
g_game.transformItem(item, item->getID(), charges - 1);
}
}
}
}
}
if (damage <= 0) {
damage = 0;
blockType = BLOCK_ARMOR;
}
}
return blockType;
}
uint32_t Player::getIP() const
{
if (client) {
return client->getIP();
}
return 0;
}
void Player::death(Creature* lastHitCreature)
{
loginPosition = town->getTemplePosition();
if (skillLoss) {
uint8_t unfairFightReduction = 100;
bool lastHitPlayer = Player::lastHitIsPlayer(lastHitCreature);
if (lastHitPlayer) {
uint32_t sumLevels = 0;
uint32_t inFightTicks = g_config.getNumber(ConfigManager::PZ_LOCKED);
for (const auto& it : damageMap) {
CountBlock_t cb = it.second;
if ((OTSYS_TIME() - cb.ticks) <= inFightTicks) {
Player* damageDealer = g_game.getPlayerByID(it.first);
if (damageDealer) {
sumLevels += damageDealer->getLevel();
}
}
}
if (sumLevels > level) {
double reduce = level / static_cast<double>(sumLevels);
unfairFightReduction = std::max<uint8_t>(20, std::floor((reduce * 100) + 0.5));
}
}
//Magic level loss
uint64_t sumMana = 0;
uint64_t lostMana = 0;
//sum up all the mana
for (uint32_t i = 1; i <= magLevel; ++i) {
sumMana += vocation->getReqMana(i);
}
sumMana += manaSpent;
double deathLossPercent = getLostPercent() * (unfairFightReduction / 100.);
lostMana = static_cast<uint64_t>(sumMana * deathLossPercent);
while (lostMana > manaSpent && magLevel > 0) {
lostMana -= manaSpent;
manaSpent = vocation->getReqMana(magLevel);
magLevel--;
}
manaSpent -= lostMana;
uint64_t nextReqMana = vocation->getReqMana(magLevel + 1);
if (nextReqMana > vocation->getReqMana(magLevel)) {
magLevelPercent = Player::getPercentLevel(manaSpent, nextReqMana);
} else {
magLevelPercent = 0;
}
//Skill loss
for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; ++i) { //for each skill
uint64_t sumSkillTries = 0;
for (uint16_t c = 11; c <= skills[i].level; ++c) { //sum up all required tries for all skill levels
sumSkillTries += vocation->getReqSkillTries(i, c);
}
sumSkillTries += skills[i].tries;
uint32_t lostSkillTries = static_cast<uint32_t>(sumSkillTries * deathLossPercent);
while (lostSkillTries > skills[i].tries) {
lostSkillTries -= skills[i].tries;
if (skills[i].level <= 10) {
skills[i].level = 10;
skills[i].tries = 0;
lostSkillTries = 0;
break;
}
skills[i].tries = vocation->getReqSkillTries(i, skills[i].level);
skills[i].level--;
}
skills[i].tries = std::max<int32_t>(0, skills[i].tries - lostSkillTries);
skills[i].percent = Player::getPercentLevel(skills[i].tries, vocation->getReqSkillTries(i, skills[i].level));
}
//Level loss
uint64_t expLoss = static_cast<uint64_t>(experience * deathLossPercent);
g_events->eventPlayerOnLoseExperience(this, expLoss);
if (expLoss != 0) {
uint32_t oldLevel = level;
if (vocation->getId() == VOCATION_NONE || level > 7) {
experience -= expLoss;
}
while (level > 1 && experience < Player::getExpForLevel(level)) {
--level;
healthMax = std::max<int32_t>(0, healthMax - vocation->getHPGain());
manaMax = std::max<int32_t>(0, manaMax - vocation->getManaGain());
capacity = std::max<int32_t>(0, capacity - vocation->getCapGain());
}
if (oldLevel != level) {
std::ostringstream ss;
ss << "You were downgraded from Level " << oldLevel << " to Level " << level << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
}
uint64_t currLevelExp = Player::getExpForLevel(level);
uint64_t nextLevelExp = Player::getExpForLevel(level + 1);
if (nextLevelExp > currLevelExp) {
levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp);
} else {
levelPercent = 0;
}
}
std::bitset<6> bitset(blessings);
if (bitset[5]) {
if (lastHitPlayer) {
bitset.reset(5);
blessings = bitset.to_ulong();
} else {
blessings = 32;
}
} else {
blessings = 0;
}
sendStats();
sendSkills();
sendReLoginWindow(unfairFightReduction);
if (getSkull() == SKULL_BLACK) {
health = 40;
mana = 0;
} else {
health = healthMax;
mana = manaMax;
}
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->isPersistent()) {
it = conditions.erase(it);
condition->endCondition(this);
onEndCondition(condition->getType());
delete condition;
} else {
++it;
}
}
} else {
setLossSkill(true);
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->isPersistent()) {
it = conditions.erase(it);
condition->endCondition(this);
onEndCondition(condition->getType());
delete condition;
} else {
++it;
}
}
health = healthMax;
g_game.internalTeleport(this, getTemplePosition(), true);
g_game.addCreatureHealth(this);
onThink(EVENT_CREATURE_THINK_INTERVAL);
onIdleStatus();
sendStats();
}
}
bool Player::dropCorpse(Creature* lastHitCreature, Creature* mostDamageCreature, bool lastHitUnjustified, bool mostDamageUnjustified)
{
if (getZone() != ZONE_PVP || !Player::lastHitIsPlayer(lastHitCreature)) {
return Creature::dropCorpse(lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
}
setDropLoot(true);
return false;
}
Item* Player::getCorpse(Creature* lastHitCreature, Creature* mostDamageCreature)
{
Item* corpse = Creature::getCorpse(lastHitCreature, mostDamageCreature);
if (corpse && corpse->getContainer()) {
std::ostringstream ss;
if (lastHitCreature) {
ss << "You recognize " << getNameDescription() << ". " << (getSex() == PLAYERSEX_FEMALE ? "She" : "He") << " was killed by " << lastHitCreature->getNameDescription() << '.';
} else {
ss << "You recognize " << getNameDescription() << '.';
}
corpse->setSpecialDescription(ss.str());
}
return corpse;
}
void Player::addInFightTicks(bool pzlock /*= false*/)
{
if (hasFlag(PlayerFlag_NotGainInFight)) {
return;
}
if (pzlock) {
pzLocked = true;
}
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_INFIGHT, g_config.getNumber(ConfigManager::PZ_LOCKED), 0);
addCondition(condition);
}
void Player::removeList()
{
g_game.removePlayer(this);
for (const auto& it : g_game.getPlayers()) {
it.second->notifyStatusChange(this, VIPSTATUS_OFFLINE);
}
}
void Player::addList()
{
for (const auto& it : g_game.getPlayers()) {
it.second->notifyStatusChange(this, VIPSTATUS_ONLINE);
}
g_game.addPlayer(this);
}
void Player::kickPlayer(bool displayEffect)
{
g_creatureEvents->playerLogout(this);
if (client) {
client->logout(displayEffect, true);
} else {
g_game.removeCreature(this);
}
}
void Player::notifyStatusChange(Player* loginPlayer, VipStatus_t status)
{
if (!client) {
return;
}
auto it = VIPList.find(loginPlayer->guid);
if (it == VIPList.end()) {
return;
}
client->sendUpdatedVIPStatus(loginPlayer->guid, status);
if (status == VIPSTATUS_ONLINE) {
client->sendTextMessage(TextMessage(MESSAGE_STATUS_SMALL, loginPlayer->getName() + " has logged in."));
} else if (status == VIPSTATUS_OFFLINE) {
client->sendTextMessage(TextMessage(MESSAGE_STATUS_SMALL, loginPlayer->getName() + " has logged out."));
}
}
bool Player::removeVIP(uint32_t vipGuid)
{
if (VIPList.erase(vipGuid) == 0) {
return false;
}
IOLoginData::removeVIPEntry(accountNumber, vipGuid);
return true;
}
bool Player::addVIP(uint32_t vipGuid, const std::string& vipName, VipStatus_t status)
{
if (VIPList.size() >= getMaxVIPEntries() || VIPList.size() == 200) { // max number of buddies is 200 in 9.53
sendTextMessage(MESSAGE_STATUS_SMALL, "You cannot add more buddies.");
return false;
}
auto result = VIPList.insert(vipGuid);
if (!result.second) {
sendTextMessage(MESSAGE_STATUS_SMALL, "This player is already in your list.");
return false;
}
IOLoginData::addVIPEntry(accountNumber, vipGuid, "", 0, false);
if (client) {
client->sendVIP(vipGuid, vipName, "", 0, false, status);
}
return true;
}
bool Player::addVIPInternal(uint32_t vipGuid)
{
if (VIPList.size() >= getMaxVIPEntries() || VIPList.size() == 200) { // max number of buddies is 200 in 9.53
return false;
}
return VIPList.insert(vipGuid).second;
}
bool Player::editVIP(uint32_t vipGuid, const std::string& description, uint32_t icon, bool notify)
{
auto it = VIPList.find(vipGuid);
if (it == VIPList.end()) {
return false; // player is not in VIP
}
IOLoginData::editVIPEntry(accountNumber, vipGuid, description, icon, notify);
return true;
}
//close container and its child containers
void Player::autoCloseContainers(const Container* container)
{
std::vector<uint32_t> closeList;
for (const auto& it : openContainers) {
Container* tmpContainer = it.second.container;
while (tmpContainer) {
if (tmpContainer->isRemoved() || tmpContainer == container) {
closeList.push_back(it.first);
break;
}
tmpContainer = dynamic_cast<Container*>(tmpContainer->getParent());
}
}
for (uint32_t containerId : closeList) {
closeContainer(containerId);
if (client) {
client->sendCloseContainer(containerId);
}
}
}
bool Player::hasCapacity(const Item* item, uint32_t count) const
{
if (hasFlag(PlayerFlag_CannotPickupItem)) {
return false;
}
if (hasFlag(PlayerFlag_HasInfiniteCapacity) || item->getTopParent() == this) {
return true;
}
uint32_t itemWeight = item->getContainer() != nullptr ? item->getWeight() : item->getBaseWeight();
if (item->isStackable()) {
itemWeight *= count;
}
return itemWeight <= getFreeCapacity();
}
ReturnValue Player::queryAdd(int32_t index, const Thing& thing, uint32_t count, uint32_t flags, Creature*) const
{
const Item* item = thing.getItem();
if (item == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
bool childIsOwner = hasBitSet(FLAG_CHILDISOWNER, flags);
if (childIsOwner) {
//a child container is querying the player, just check if enough capacity
bool skipLimit = hasBitSet(FLAG_NOLIMIT, flags);
if (skipLimit || hasCapacity(item, count)) {
return RETURNVALUE_NOERROR;
}
return RETURNVALUE_NOTENOUGHCAPACITY;
}
if (!item->isPickupable()) {
return RETURNVALUE_CANNOTPICKUP;
}
ReturnValue ret = RETURNVALUE_NOERROR;
const int32_t& slotPosition = item->getSlotPosition();
if ((slotPosition & SLOTP_HEAD) || (slotPosition & SLOTP_NECKLACE) ||
(slotPosition & SLOTP_BACKPACK) || (slotPosition & SLOTP_ARMOR) ||
(slotPosition & SLOTP_LEGS) || (slotPosition & SLOTP_FEET) ||
(slotPosition & SLOTP_RING)) {
ret = RETURNVALUE_CANNOTBEDRESSED;
} else if (slotPosition & SLOTP_TWO_HAND) {
ret = RETURNVALUE_PUTTHISOBJECTINBOTHHANDS;
} else if ((slotPosition & SLOTP_RIGHT) || (slotPosition & SLOTP_LEFT)) {
if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) {
ret = RETURNVALUE_CANNOTBEDRESSED;
} else {
ret = RETURNVALUE_PUTTHISOBJECTINYOURHAND;
}
}
switch (index) {
case CONST_SLOT_HEAD: {
if (slotPosition & SLOTP_HEAD) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_NECKLACE: {
if (slotPosition & SLOTP_NECKLACE) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_BACKPACK: {
if (slotPosition & SLOTP_BACKPACK) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_ARMOR: {
if (slotPosition & SLOTP_ARMOR) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_RIGHT: {
if (slotPosition & SLOTP_RIGHT) {
if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) {
if (item->getWeaponType() != WEAPON_SHIELD) {
ret = RETURNVALUE_CANNOTBEDRESSED;
} else {
const Item* leftItem = inventory[CONST_SLOT_LEFT];
if (leftItem) {
if ((leftItem->getSlotPosition() | slotPosition) & SLOTP_TWO_HAND) {
ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE;
} else {
ret = RETURNVALUE_NOERROR;
}
} else {
ret = RETURNVALUE_NOERROR;
}
}
} else if (slotPosition & SLOTP_TWO_HAND) {
if (inventory[CONST_SLOT_LEFT] && inventory[CONST_SLOT_LEFT] != item) {
ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE;
} else {
ret = RETURNVALUE_NOERROR;
}
} else if (inventory[CONST_SLOT_LEFT]) {
const Item* leftItem = inventory[CONST_SLOT_LEFT];
WeaponType_t type = item->getWeaponType(), leftType = leftItem->getWeaponType();
if (leftItem->getSlotPosition() & SLOTP_TWO_HAND) {
ret = RETURNVALUE_DROPTWOHANDEDITEM;
} else if (item == leftItem && count == item->getItemCount()) {
ret = RETURNVALUE_NOERROR;
} else if (leftType == WEAPON_SHIELD && type == WEAPON_SHIELD) {
ret = RETURNVALUE_CANONLYUSEONESHIELD;
} else if (leftType == WEAPON_NONE || type == WEAPON_NONE ||
leftType == WEAPON_SHIELD || leftType == WEAPON_AMMO
|| type == WEAPON_SHIELD || type == WEAPON_AMMO) {
ret = RETURNVALUE_NOERROR;
} else {
ret = RETURNVALUE_CANONLYUSEONEWEAPON;
}
} else {
ret = RETURNVALUE_NOERROR;
}
}
break;
}
case CONST_SLOT_LEFT: {
if (slotPosition & SLOTP_LEFT) {
if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) {
WeaponType_t type = item->getWeaponType();
if (type == WEAPON_NONE || type == WEAPON_SHIELD) {
ret = RETURNVALUE_CANNOTBEDRESSED;
} else if (inventory[CONST_SLOT_RIGHT] && (slotPosition & SLOTP_TWO_HAND)) {
ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE;
} else {
ret = RETURNVALUE_NOERROR;
}
} else if (slotPosition & SLOTP_TWO_HAND) {
if (inventory[CONST_SLOT_RIGHT] && inventory[CONST_SLOT_RIGHT] != item) {
ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE;
} else {
ret = RETURNVALUE_NOERROR;
}
} else if (inventory[CONST_SLOT_RIGHT]) {
const Item* rightItem = inventory[CONST_SLOT_RIGHT];
WeaponType_t type = item->getWeaponType(), rightType = rightItem->getWeaponType();
if (rightItem->getSlotPosition() & SLOTP_TWO_HAND) {
ret = RETURNVALUE_DROPTWOHANDEDITEM;
} else if (item == rightItem && count == item->getItemCount()) {
ret = RETURNVALUE_NOERROR;
} else if (rightType == WEAPON_SHIELD && type == WEAPON_SHIELD) {
ret = RETURNVALUE_CANONLYUSEONESHIELD;
} else if (rightType == WEAPON_NONE || type == WEAPON_NONE ||
rightType == WEAPON_SHIELD || rightType == WEAPON_AMMO
|| type == WEAPON_SHIELD || type == WEAPON_AMMO) {
ret = RETURNVALUE_NOERROR;
} else {
ret = RETURNVALUE_CANONLYUSEONEWEAPON;
}
} else {
ret = RETURNVALUE_NOERROR;
}
}
break;
}
case CONST_SLOT_LEGS: {
if (slotPosition & SLOTP_LEGS) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_FEET: {
if (slotPosition & SLOTP_FEET) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_RING: {
if (slotPosition & SLOTP_RING) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_AMMO: {
if ((slotPosition & SLOTP_AMMO) || g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_WHEREEVER:
case -1:
ret = RETURNVALUE_NOTENOUGHROOM;
break;
default:
ret = RETURNVALUE_NOTPOSSIBLE;
break;
}
if (ret == RETURNVALUE_NOERROR || ret == RETURNVALUE_NOTENOUGHROOM) {
//need an exchange with source?
const Item* inventoryItem = getInventoryItem(static_cast<slots_t>(index));
if (inventoryItem && (!inventoryItem->isStackable() || inventoryItem->getID() != item->getID())) {
return RETURNVALUE_NEEDEXCHANGE;
}
//check if enough capacity
if (!hasCapacity(item, count)) {
return RETURNVALUE_NOTENOUGHCAPACITY;
}
if (!g_moveEvents->onPlayerEquip(const_cast<Player*>(this), const_cast<Item*>(item), static_cast<slots_t>(index), true)) {
return RETURNVALUE_CANNOTBEDRESSED;
}
}
return ret;
}
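// Calculates how many units of the item could still be added; for INDEX_WHEREEVER every slot and
// nested container is considered, otherwise only the requested slot.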
ReturnValue Player::queryMaxCount(int32_t index, const Thing& thing, uint32_t count, uint32_t& maxQueryCount,
uint32_t flags) const
{
const Item* item = thing.getItem();
if (item == nullptr) {
maxQueryCount = 0;
return RETURNVALUE_NOTPOSSIBLE;
}
if (index == INDEX_WHEREEVER) {
uint32_t n = 0;
for (int32_t slotIndex = CONST_SLOT_FIRST; slotIndex <= CONST_SLOT_LAST; ++slotIndex) {
Item* inventoryItem = inventory[slotIndex];
if (inventoryItem) {
if (Container* subContainer = inventoryItem->getContainer()) {
uint32_t queryCount = 0;
subContainer->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), queryCount, flags);
n += queryCount;
//iterate through all items, including sub-containers (deep search)
for (ContainerIterator it = subContainer->iterator(); it.hasNext(); it.advance()) {
if (Container* tmpContainer = (*it)->getContainer()) {
queryCount = 0;
tmpContainer->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), queryCount, flags);
n += queryCount;
}
}
} else if (inventoryItem->isStackable() && item->equals(inventoryItem) && inventoryItem->getItemCount() < 100) {
uint32_t remainder = (100 - inventoryItem->getItemCount());
if (queryAdd(slotIndex, *item, remainder, flags) == RETURNVALUE_NOERROR) {
n += remainder;
}
}
} else if (queryAdd(slotIndex, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { //empty slot
if (item->isStackable()) {
n += 100;
} else {
++n;
}
}
}
maxQueryCount = n;
} else {
const Item* destItem = nullptr;
const Thing* destThing = getThing(index);
if (destThing) {
destItem = destThing->getItem();
}
if (destItem) {
if (destItem->isStackable() && item->equals(destItem) && destItem->getItemCount() < 100) {
maxQueryCount = 100 - destItem->getItemCount();
} else {
maxQueryCount = 0;
}
} else if (queryAdd(index, *item, count, flags) == RETURNVALUE_NOERROR) { //empty slot
if (item->isStackable()) {
maxQueryCount = 100;
} else {
maxQueryCount = 1;
}
return RETURNVALUE_NOERROR;
}
}
if (maxQueryCount < count) {
return RETURNVALUE_NOTENOUGHROOM;
} else {
return RETURNVALUE_NOERROR;
}
}
ReturnValue Player::queryRemove(const Thing& thing, uint32_t count, uint32_t flags) const
{
int32_t index = getThingIndex(&thing);
if (index == -1) {
return RETURNVALUE_NOTPOSSIBLE;
}
const Item* item = thing.getItem();
if (item == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (count == 0 || (item->isStackable() && count > item->getItemCount())) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (!item->isMoveable() && !hasBitSet(FLAG_IGNORENOTMOVEABLE, flags)) {
return RETURNVALUE_NOTMOVEABLE;
}
return RETURNVALUE_NOERROR;
}
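// For drops to the capacity window (or INDEX_WHEREEVER) this picks the best destination: stack with a
// matching item, use an empty slot, or search carried containers breadth-first for a free position;
// otherwise the requested slot (or its sub-cylinder) is used.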
Cylinder* Player::queryDestination(int32_t& index, const Thing& thing, Item** destItem,
uint32_t& flags)
{
if (index == 0 /*drop to capacity window*/ || index == INDEX_WHEREEVER) {
*destItem = nullptr;
const Item* item = thing.getItem();
if (item == nullptr) {
return this;
}
bool autoStack = !((flags & FLAG_IGNOREAUTOSTACK) == FLAG_IGNOREAUTOSTACK);
bool isStackable = item->isStackable();
std::vector<Container*> containers;
for (uint32_t slotIndex = CONST_SLOT_FIRST; slotIndex <= CONST_SLOT_LAST; ++slotIndex) {
Item* inventoryItem = inventory[slotIndex];
if (inventoryItem) {
if (inventoryItem == tradeItem) {
continue;
}
if (inventoryItem == item) {
continue;
}
if (autoStack && isStackable) {
//try to find an already existing item to stack with
if (queryAdd(slotIndex, *item, item->getItemCount(), 0) == RETURNVALUE_NOERROR) {
if (inventoryItem->equals(item) && inventoryItem->getItemCount() < 100) {
index = slotIndex;
*destItem = inventoryItem;
return this;
}
}
if (Container* subContainer = inventoryItem->getContainer()) {
containers.push_back(subContainer);
}
} else if (Container* subContainer = inventoryItem->getContainer()) {
containers.push_back(subContainer);
}
} else if (queryAdd(slotIndex, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { //empty slot
index = slotIndex;
*destItem = nullptr;
return this;
}
}
size_t i = 0;
while (i < containers.size()) {
Container* tmpContainer = containers[i++];
if (!autoStack || !isStackable) {
//we need to find the first free container slot as fast as we can for non-stackable items
uint32_t n = tmpContainer->capacity() - tmpContainer->size();
while (n) {
if (tmpContainer->queryAdd(tmpContainer->capacity() - n, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) {
index = tmpContainer->capacity() - n;
*destItem = nullptr;
return tmpContainer;
}
n--;
}
for (Item* tmpContainerItem : tmpContainer->getItemList()) {
if (Container* subContainer = tmpContainerItem->getContainer()) {
containers.push_back(subContainer);
}
}
continue;
}
uint32_t n = 0;
for (Item* tmpItem : tmpContainer->getItemList()) {
if (tmpItem == tradeItem) {
continue;
}
if (tmpItem == item) {
continue;
}
//try to find an already existing item to stack with
if (tmpItem->equals(item) && tmpItem->getItemCount() < 100) {
index = n;
*destItem = tmpItem;
return tmpContainer;
}
if (Container* subContainer = tmpItem->getContainer()) {
containers.push_back(subContainer);
}
n++;
}
if (n < tmpContainer->capacity() && tmpContainer->queryAdd(n, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) {
index = n;
*destItem = nullptr;
return tmpContainer;
}
}
return this;
}
Thing* destThing = getThing(index);
if (destThing) {
*destItem = destThing->getItem();
}
Cylinder* subCylinder = dynamic_cast<Cylinder*>(destThing);
if (subCylinder) {
index = INDEX_WHEREEVER;
*destItem = nullptr;
return subCylinder;
} else {
return this;
}
}
void Player::addThing(int32_t index, Thing* thing)
{
if (index < CONST_SLOT_FIRST || index > CONST_SLOT_LAST) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
item->setParent(this);
inventory[index] = item;
//send to client
sendInventoryItem(static_cast<slots_t>(index), item);
}
void Player::updateThing(Thing* thing, uint16_t itemId, uint32_t count)
{
int32_t index = getThingIndex(thing);
if (index == -1) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
item->setID(itemId);
item->setSubType(count);
//send to client
sendInventoryItem(static_cast<slots_t>(index), item);
//event methods
onUpdateInventoryItem(item, item);
}
void Player::replaceThing(uint32_t index, Thing* thing)
{
if (index > CONST_SLOT_LAST) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* oldItem = getInventoryItem(static_cast<slots_t>(index));
if (!oldItem) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
//send to client
sendInventoryItem(static_cast<slots_t>(index), item);
//event methods
onUpdateInventoryItem(oldItem, item);
item->setParent(this);
inventory[index] = item;
}
void Player::removeThing(Thing* thing, uint32_t count)
{
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
int32_t index = getThingIndex(thing);
if (index == -1) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
if (item->isStackable()) {
if (count == item->getItemCount()) {
//send change to client
sendInventoryItem(static_cast<slots_t>(index), nullptr);
//event methods
onRemoveInventoryItem(item);
item->setParent(nullptr);
inventory[index] = nullptr;
} else {
uint8_t newCount = static_cast<uint8_t>(std::max<int32_t>(0, item->getItemCount() - count));
item->setItemCount(newCount);
//send change to client
sendInventoryItem(static_cast<slots_t>(index), item);
//event methods
onUpdateInventoryItem(item, item);
}
} else {
//send change to client
sendInventoryItem(static_cast<slots_t>(index), nullptr);
//event methods
onRemoveInventoryItem(item);
item->setParent(nullptr);
inventory[index] = nullptr;
}
}
int32_t Player::getThingIndex(const Thing* thing) const
{
for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
if (inventory[i] == thing) {
return i;
}
}
return -1;
}
size_t Player::getFirstIndex() const
{
return CONST_SLOT_FIRST;
}
size_t Player::getLastIndex() const
{
return CONST_SLOT_LAST + 1;
}
uint32_t Player::getItemTypeCount(uint16_t itemId, int32_t subType /*= -1*/) const
{
uint32_t count = 0;
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) {
Item* item = inventory[i];
if (!item) {
continue;
}
if (item->getID() == itemId) {
count += Item::countByType(item, subType);
}
if (Container* container = item->getContainer()) {
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
if ((*it)->getID() == itemId) {
count += Item::countByType(*it, subType);
}
}
}
}
return count;
}
bool Player::removeItemOfType(uint16_t itemId, uint32_t amount, int32_t subType, bool ignoreEquipped/* = false*/) const
{
if (amount == 0) {
return true;
}
std::vector<Item*> itemList;
uint32_t count = 0;
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) {
Item* item = inventory[i];
if (!item) {
continue;
}
if (!ignoreEquipped && item->getID() == itemId) {
uint32_t itemCount = Item::countByType(item, subType);
if (itemCount == 0) {
continue;
}
itemList.push_back(item);
count += itemCount;
if (count >= amount) {
g_game.internalRemoveItems(std::move(itemList), amount, Item::items[itemId].stackable);
return true;
}
} else if (Container* container = item->getContainer()) {
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
Item* containerItem = *it;
if (containerItem->getID() == itemId) {
uint32_t itemCount = Item::countByType(containerItem, subType);
if (itemCount == 0) {
continue;
}
itemList.push_back(containerItem);
count += itemCount;
if (count >= amount) {
g_game.internalRemoveItems(std::move(itemList), amount, Item::items[itemId].stackable);
return true;
}
}
}
}
}
return false;
}
std::map<uint32_t, uint32_t>& Player::getAllItemTypeCount(std::map<uint32_t, uint32_t>& countMap) const
{
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) {
Item* item = inventory[i];
if (!item) {
continue;
}
countMap[item->getID()] += Item::countByType(item, -1);
if (Container* container = item->getContainer()) {
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
countMap[(*it)->getID()] += Item::countByType(*it, -1);
}
}
}
return countMap;
}
Thing* Player::getThing(size_t index) const
{
if (index >= CONST_SLOT_FIRST && index <= CONST_SLOT_LAST) {
return inventory[index];
}
return nullptr;
}
void Player::postAddNotification(Thing* thing, const Cylinder* oldParent, int32_t index, cylinderlink_t link /*= LINK_OWNER*/)
{
if (link == LINK_OWNER) {
//calling movement scripts
g_moveEvents->onPlayerEquip(this, thing->getItem(), static_cast<slots_t>(index), false);
}
bool requireListUpdate = true;
if (link == LINK_OWNER || link == LINK_TOPPARENT) {
const Item* i = (oldParent ? oldParent->getItem() : nullptr);
// Check if we owned the old container too, so we don't need to do anything,
// as the list was updated in postRemoveNotification
assert(i ? i->getContainer() != nullptr : true);
if (i) {
requireListUpdate = i->getContainer()->getHoldingPlayer() != this;
} else {
requireListUpdate = oldParent != this;
}
updateInventoryWeight();
updateItemsLight();
sendStats();
}
if (const Item* item = thing->getItem()) {
if (const Container* container = item->getContainer()) {
onSendContainer(container);
}
if (shopOwner && requireListUpdate) {
updateSaleShopList(item);
}
} else if (const Creature* creature = thing->getCreature()) {
if (creature == this) {
//check containers
std::vector<Container*> containers;
for (const auto& it : openContainers) {
Container* container = it.second.container;
if (!Position::areInRange<1, 1, 0>(container->getPosition(), getPosition())) {
containers.push_back(container);
}
}
for (const Container* container : containers) {
autoCloseContainers(container);
}
}
}
}
void Player::postRemoveNotification(Thing* thing, const Cylinder* newParent, int32_t index, cylinderlink_t link /*= LINK_OWNER*/)
{
if (link == LINK_OWNER) {
//calling movement scripts
g_moveEvents->onPlayerDeEquip(this, thing->getItem(), static_cast<slots_t>(index));
}
bool requireListUpdate = true;
if (link == LINK_OWNER || link == LINK_TOPPARENT) {
const Item* i = (newParent ? newParent->getItem() : nullptr);
// Check if the new parent is also owned by us, so we don't need to do anything,
// as the list will be updated in postAddNotification
assert(i ? i->getContainer() != nullptr : true);
if (i) {
requireListUpdate = i->getContainer()->getHoldingPlayer() != this;
} else {
requireListUpdate = newParent != this;
}
updateInventoryWeight();
updateItemsLight();
sendStats();
}
if (const Item* item = thing->getItem()) {
if (const Container* container = item->getContainer()) {
if (container->isRemoved() || !Position::areInRange<1, 1, 0>(getPosition(), container->getPosition())) {
autoCloseContainers(container);
} else if (container->getTopParent() == this) {
onSendContainer(container);
} else if (const Container* topContainer = dynamic_cast<const Container*>(container->getTopParent())) {
if (const DepotChest* depotChest = dynamic_cast<const DepotChest*>(topContainer)) {
bool isOwner = false;
for (const auto& it : depotChests) {
if (it.second == depotChest) {
isOwner = true;
onSendContainer(container);
}
}
if (!isOwner) {
autoCloseContainers(container);
}
} else {
onSendContainer(container);
}
} else {
autoCloseContainers(container);
}
}
if (shopOwner && requireListUpdate) {
updateSaleShopList(item);
}
}
}
bool Player::updateSaleShopList(const Item* item)
{
uint16_t itemId = item->getID();
if (itemId != ITEM_GOLD_COIN && itemId != ITEM_PLATINUM_COIN && itemId != ITEM_CRYSTAL_COIN) {
auto it = std::find_if(shopItemList.begin(), shopItemList.end(), [itemId](const ShopInfo& shopInfo) { return shopInfo.itemId == itemId && shopInfo.sellPrice != 0; });
if (it == shopItemList.end()) {
const Container* container = item->getContainer();
if (!container) {
return false;
}
const auto& items = container->getItemList();
return std::any_of(items.begin(), items.end(), [this](const Item* containerItem) {
return updateSaleShopList(containerItem);
});
}
}
if (client) {
client->sendSaleItemList(shopItemList);
}
return true;
}
bool Player::hasShopItemForSale(uint32_t itemId, uint8_t subType) const
{
const ItemType& itemType = Item::items[itemId];
return std::any_of(shopItemList.begin(), shopItemList.end(), [&](const ShopInfo& shopInfo) {
return shopInfo.itemId == itemId && shopInfo.buyPrice != 0 && (!itemType.isFluidContainer() || shopInfo.subType == subType);
});
}
void Player::internalAddThing(Thing* thing)
{
internalAddThing(0, thing);
}
void Player::internalAddThing(uint32_t index, Thing* thing)
{
Item* item = thing->getItem();
if (!item) {
return;
}
//index == 0 means we should equip this item at the most appropiate slot (no action required here)
if (index > 0 && index < 11) {
if (inventory[index]) {
return;
}
inventory[index] = item;
item->setParent(this);
}
}
bool Player::setFollowCreature(Creature* creature)
{
if (!Creature::setFollowCreature(creature)) {
setFollowCreature(nullptr);
setAttackedCreature(nullptr);
sendCancelMessage(RETURNVALUE_THEREISNOWAY);
sendCancelTarget();
stopWalk();
return false;
}
return true;
}
bool Player::setAttackedCreature(Creature* creature)
{
if (!Creature::setAttackedCreature(creature)) {
sendCancelTarget();
return false;
}
if (chaseMode && creature) {
if (followCreature != creature) {
//chase opponent
setFollowCreature(creature);
}
} else if (followCreature) {
setFollowCreature(nullptr);
}
if (creature) {
g_dispatcher.addTask(createTask(std::bind(&Game::checkCreatureAttack, &g_game, getID())));
}
return true;
}
void Player::goToFollowCreature()
{
if (!walkTask) {
if ((OTSYS_TIME() - lastFailedFollow) < 2000) {
return;
}
Creature::goToFollowCreature();
if (followCreature && !hasFollowPath) {
lastFailedFollow = OTSYS_TIME();
}
}
}
void Player::getPathSearchParams(const Creature* creature, FindPathParams& fpp) const
{
Creature::getPathSearchParams(creature, fpp);
fpp.fullPathSearch = true;
}
void Player::doAttacking(uint32_t)
{
if (lastAttack == 0) {
lastAttack = OTSYS_TIME() - getAttackSpeed() - 1;
}
if (hasCondition(CONDITION_PACIFIED)) {
return;
}
if ((OTSYS_TIME() - lastAttack) >= getAttackSpeed()) {
bool result = false;
Item* tool = getWeapon();
const Weapon* weapon = g_weapons->getWeapon(tool);
if (weapon) {
if (!weapon->interruptSwing()) {
result = weapon->useWeapon(this, tool, attackedCreature);
} else if (!canDoAction()) {
uint32_t delay = getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::checkCreatureAttack,
&g_game, getID()));
setNextActionTask(task);
} else {
result = weapon->useWeapon(this, tool, attackedCreature);
}
} else {
result = Weapon::useFist(this, attackedCreature);
}
if (result) {
lastAttack = OTSYS_TIME();
}
}
}
uint64_t Player::getGainedExperience(Creature* attacker) const
{
if (g_config.getBoolean(ConfigManager::EXPERIENCE_FROM_PLAYERS)) {
Player* attackerPlayer = attacker->getPlayer();
if (attackerPlayer && attackerPlayer != this && skillLoss && std::abs(static_cast<int32_t>(attackerPlayer->getLevel() - level)) <= g_config.getNumber(ConfigManager::EXP_FROM_PLAYERS_LEVEL_RANGE)) {
return std::max<uint64_t>(0, std::floor(getLostExperience() * getDamageRatio(attacker) * 0.75));
}
}
return 0;
}
void Player::onFollowCreature(const Creature* creature)
{
if (!creature) {
stopWalk();
}
}
void Player::setChaseMode(bool mode)
{
bool prevChaseMode = chaseMode;
chaseMode = mode;
if (prevChaseMode != chaseMode) {
if (chaseMode) {
if (!followCreature && attackedCreature) {
//chase opponent
setFollowCreature(attackedCreature);
}
} else if (attackedCreature) {
setFollowCreature(nullptr);
cancelNextWalk = true;
}
}
}
void Player::onWalkAborted()
{
setNextWalkActionTask(nullptr);
sendCancelWalk();
}
void Player::onWalkComplete()
{
if (walkTask) {
walkTaskEvent = g_scheduler.addEvent(walkTask);
walkTask = nullptr;
}
}
void Player::stopWalk()
{
cancelNextWalk = true;
}
void Player::getCreatureLight(LightInfo& light) const
{
if (internalLight.level > itemsLight.level) {
light = internalLight;
} else {
light = itemsLight;
}
}
void Player::updateItemsLight(bool internal /*=false*/)
{
LightInfo maxLight;
LightInfo curLight;
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
Item* item = inventory[i];
if (item) {
item->getLight(curLight);
if (curLight.level > maxLight.level) {
maxLight = curLight;
}
}
}
if (itemsLight.level != maxLight.level || itemsLight.color != maxLight.color) {
itemsLight = maxLight;
if (!internal) {
g_game.changeLight(this);
}
}
}
void Player::onAddCondition(ConditionType_t type)
{
Creature::onAddCondition(type);
if (type == CONDITION_OUTFIT && isMounted()) {
dismount();
}
sendIcons();
}
void Player::onAddCombatCondition(ConditionType_t type)
{
switch (type) {
case CONDITION_POISON:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are poisoned.");
break;
case CONDITION_DROWN:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are drowning.");
break;
case CONDITION_PARALYZE:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are paralyzed.");
break;
case CONDITION_DRUNK:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are drunk.");
break;
case CONDITION_CURSED:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are cursed.");
break;
case CONDITION_FREEZING:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are freezing.");
break;
case CONDITION_DAZZLED:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are dazzled.");
break;
case CONDITION_BLEEDING:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are bleeding.");
break;
default:
break;
}
}
void Player::onEndCondition(ConditionType_t type)
{
Creature::onEndCondition(type);
if (type == CONDITION_INFIGHT) {
onIdleStatus();
pzLocked = false;
clearAttacked();
if (getSkull() != SKULL_RED && getSkull() != SKULL_BLACK) {
setSkull(SKULL_NONE);
}
}
sendIcons();
}
void Player::onCombatRemoveCondition(Condition* condition)
{
//Creature::onCombatRemoveCondition(condition);
if (condition->getId() > 0) {
//Means the condition is from an item, id == slot
if (g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) {
Item* item = getInventoryItem(static_cast<slots_t>(condition->getId()));
if (item) {
//25% chance to destroy the item
if (25 >= uniform_random(1, 100)) {
g_game.internalRemoveItem(item);
}
}
}
} else {
if (!canDoAction()) {
const uint32_t delay = getNextActionTime();
const int32_t ticks = delay - (delay % EVENT_CREATURE_THINK_INTERVAL);
if (ticks < 0) {
removeCondition(condition);
} else {
condition->setTicks(ticks);
}
} else {
removeCondition(condition);
}
}
}
void Player::onAttackedCreature(Creature* target)
{
Creature::onAttackedCreature(target);
if (target->getZone() == ZONE_PVP) {
return;
}
if (target == this) {
addInFightTicks();
return;
}
if (hasFlag(PlayerFlag_NotGainInFight)) {
return;
}
Player* targetPlayer = target->getPlayer();
if (targetPlayer && !isPartner(targetPlayer) && !isGuildMate(targetPlayer)) {
if (!pzLocked && g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) {
pzLocked = true;
sendIcons();
}
if (getSkull() == SKULL_NONE && getSkullClient(targetPlayer) == SKULL_YELLOW) {
addAttacked(targetPlayer);
targetPlayer->sendCreatureSkull(this);
} else if (!targetPlayer->hasAttacked(this)) {
if (!pzLocked) {
pzLocked = true;
sendIcons();
}
if (!Combat::isInPvpZone(this, targetPlayer) && !isInWar(targetPlayer)) {
addAttacked(targetPlayer);
if (targetPlayer->getSkull() == SKULL_NONE && getSkull() == SKULL_NONE) {
setSkull(SKULL_WHITE);
}
if (getSkull() == SKULL_NONE) {
targetPlayer->sendCreatureSkull(this);
}
}
}
}
addInFightTicks();
}
void Player::onAttacked()
{
Creature::onAttacked();
addInFightTicks();
}
void Player::onIdleStatus()
{
Creature::onIdleStatus();
if (party) {
party->clearPlayerPoints(this);
}
}
void Player::onPlacedCreature()
{
//scripting event - onLogin
if (!g_creatureEvents->playerLogin(this)) {
kickPlayer(true);
}
}
void Player::onAttackedCreatureDrainHealth(Creature* target, int32_t points)
{
Creature::onAttackedCreatureDrainHealth(target, points);
if (target) {
if (party && !Combat::isPlayerCombat(target)) {
Monster* tmpMonster = target->getMonster();
if (tmpMonster && tmpMonster->isHostile()) {
//We have fulfilled a requirement for shared experience
party->updatePlayerTicks(this, points);
}
}
}
}
void Player::onTargetCreatureGainHealth(Creature* target, int32_t points)
{
if (target && party) {
Player* tmpPlayer = nullptr;
if (target->getPlayer()) {
tmpPlayer = target->getPlayer();
} else if (Creature* targetMaster = target->getMaster()) {
if (Player* targetMasterPlayer = targetMaster->getPlayer()) {
tmpPlayer = targetMasterPlayer;
}
}
if (isPartner(tmpPlayer)) {
party->updatePlayerTicks(this, points);
}
}
}
bool Player::onKilledCreature(Creature* target, bool lastHit/* = true*/)
{
bool unjustified = false;
if (hasFlag(PlayerFlag_NotGenerateLoot)) {
target->setDropLoot(false);
}
Creature::onKilledCreature(target, lastHit);
if (Player* targetPlayer = target->getPlayer()) {
if (targetPlayer && targetPlayer->getZone() == ZONE_PVP) {
targetPlayer->setDropLoot(false);
targetPlayer->setLossSkill(false);
} else if (!hasFlag(PlayerFlag_NotGainInFight) && !isPartner(targetPlayer)) {
if (!Combat::isInPvpZone(this, targetPlayer) && hasAttacked(targetPlayer) && !targetPlayer->hasAttacked(this) && !isGuildMate(targetPlayer) && targetPlayer != this) {
if (targetPlayer->getSkull() == SKULL_NONE && !isInWar(targetPlayer)) {
unjustified = true;
addUnjustifiedDead(targetPlayer);
}
if (lastHit && hasCondition(CONDITION_INFIGHT)) {
pzLocked = true;
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_INFIGHT, g_config.getNumber(ConfigManager::WHITE_SKULL_TIME), 0);
addCondition(condition);
}
}
}
}
return unjustified;
}
void Player::gainExperience(uint64_t gainExp, Creature* source)
{
if (hasFlag(PlayerFlag_NotGainExperience) || gainExp == 0 || staminaMinutes == 0) {
return;
}
addExperience(source, gainExp, true);
}
void Player::onGainExperience(uint64_t gainExp, Creature* target)
{
if (hasFlag(PlayerFlag_NotGainExperience)) {
return;
}
if (target && !target->getPlayer() && party && party->isSharedExperienceActive() && party->isSharedExperienceEnabled()) {
party->shareExperience(gainExp, target);
//We will get a share of the experience through the sharing mechanism
return;
}
Creature::onGainExperience(gainExp, target);
gainExperience(gainExp, target);
}
void Player::onGainSharedExperience(uint64_t gainExp, Creature* source)
{
gainExperience(gainExp, source);
}
bool Player::isImmune(CombatType_t type) const
{
if (hasFlag(PlayerFlag_CannotBeAttacked)) {
return true;
}
return Creature::isImmune(type);
}
bool Player::isImmune(ConditionType_t type) const
{
if (hasFlag(PlayerFlag_CannotBeAttacked)) {
return true;
}
return Creature::isImmune(type);
}
bool Player::isAttackable() const
{
return !hasFlag(PlayerFlag_CannotBeAttacked);
}
bool Player::lastHitIsPlayer(Creature* lastHitCreature)
{
if (!lastHitCreature) {
return false;
}
if (lastHitCreature->getPlayer()) {
return true;
}
Creature* lastHitMaster = lastHitCreature->getMaster();
return lastHitMaster && lastHitMaster->getPlayer();
}
void Player::changeHealth(int32_t healthChange, bool sendHealthChange/* = true*/)
{
Creature::changeHealth(healthChange, sendHealthChange);
sendStats();
}
void Player::changeMana(int32_t manaChange)
{
if (!hasFlag(PlayerFlag_HasInfiniteMana)) {
Creature::changeMana(manaChange);
}
sendStats();
}
void Player::changeSoul(int32_t soulChange)
{
if (soulChange > 0) {
soul += std::min<int32_t>(soulChange, vocation->getSoulMax() - soul);
} else {
soul = std::max<int32_t>(0, soul + soulChange);
}
sendStats();
}
bool Player::canWear(uint32_t lookType, uint8_t addons) const
{
if (group->access) {
return true;
}
const Outfit* outfit = Outfits::getInstance().getOutfitByLookType(sex, lookType);
if (!outfit) {
return false;
}
if (outfit->premium && !isPremium()) {
return false;
}
if (outfit->unlocked && addons == 0) {
return true;
}
for (const OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType != lookType) {
continue;
}
return (outfitEntry.addons & addons) == addons;
}
return false;
}
bool Player::canLogout()
{
if (isConnecting) {
return false;
}
if (getTile()->hasFlag(TILESTATE_NOLOGOUT)) {
return false;
}
if (getTile()->hasFlag(TILESTATE_PROTECTIONZONE)) {
return true;
}
return !isPzLocked() && !hasCondition(CONDITION_INFIGHT);
}
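// Each owned outfit is encoded as (lookType << 16) | addons into consecutive keys of the reserved outfit storage range.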
void Player::genReservedStorageRange()
{
//generate outfits range
uint32_t base_key = PSTRG_OUTFITS_RANGE_START;
for (const OutfitEntry& entry : outfits) {
storageMap[++base_key] = (entry.lookType << 16) | entry.addons;
}
}
void Player::addOutfit(uint16_t lookType, uint8_t addons)
{
for (OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType == lookType) {
outfitEntry.addons |= addons;
return;
}
}
outfits.emplace_back(lookType, addons);
}
bool Player::removeOutfit(uint16_t lookType)
{
for (auto it = outfits.begin(), end = outfits.end(); it != end; ++it) {
OutfitEntry& entry = *it;
if (entry.lookType == lookType) {
outfits.erase(it);
return true;
}
}
return false;
}
bool Player::removeOutfitAddon(uint16_t lookType, uint8_t addons)
{
for (OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType == lookType) {
outfitEntry.addons &= ~addons;
return true;
}
}
return false;
}
bool Player::getOutfitAddons(const Outfit& outfit, uint8_t& addons) const
{
if (group->access) {
addons = 3;
return true;
}
if (outfit.premium && !isPremium()) {
return false;
}
for (const OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType != outfit.lookType) {
continue;
}
addons = outfitEntry.addons;
return true;
}
if (!outfit.unlocked) {
return false;
}
addons = 0;
return true;
}
void Player::setSex(PlayerSex_t newSex)
{
sex = newSex;
}
Skulls_t Player::getSkull() const
{
if (hasFlag(PlayerFlag_NotGainInFight)) {
return SKULL_NONE;
}
return skull;
}
Skulls_t Player::getSkullClient(const Creature* creature) const
{
if (!creature || g_game.getWorldType() != WORLD_TYPE_PVP) {
return SKULL_NONE;
}
const Player* player = creature->getPlayer();
if (player && player->getSkull() == SKULL_NONE) {
if (isInWar(player)) {
return SKULL_GREEN;
}
if (!player->getGuildWarVector().empty() && guild == player->getGuild()) {
return SKULL_GREEN;
}
if (player->hasAttacked(this)) {
return SKULL_YELLOW;
}
if (isPartner(player)) {
return SKULL_GREEN;
}
}
return Creature::getSkullClient(creature);
}
bool Player::hasAttacked(const Player* attacked) const
{
if (hasFlag(PlayerFlag_NotGainInFight) || !attacked) {
return false;
}
return attackedSet.find(attacked->guid) != attackedSet.end();
}
void Player::addAttacked(const Player* attacked)
{
if (hasFlag(PlayerFlag_NotGainInFight) || !attacked || attacked == this) {
return;
}
attackedSet.insert(attacked->guid);
}
void Player::clearAttacked()
{
attackedSet.clear();
}
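// An unjustified kill notifies the player, extends the skull timer and may escalate the skull to red or
// black once the configured kill thresholds are reached.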
void Player::addUnjustifiedDead(const Player* attacked)
{
if (hasFlag(PlayerFlag_NotGainInFight) || attacked == this || g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) {
return;
}
sendTextMessage(MESSAGE_EVENT_ADVANCE, "Warning! The murder of " + attacked->getName() + " was not justified.");
skullTicks += g_config.getNumber(ConfigManager::FRAG_TIME);
if (getSkull() != SKULL_BLACK) {
if (g_config.getNumber(ConfigManager::KILLS_TO_BLACK) != 0 && skullTicks > (g_config.getNumber(ConfigManager::KILLS_TO_BLACK) - 1) * static_cast<int64_t>(g_config.getNumber(ConfigManager::FRAG_TIME))) {
setSkull(SKULL_BLACK);
} else if (getSkull() != SKULL_RED && g_config.getNumber(ConfigManager::KILLS_TO_RED) != 0 && skullTicks > (g_config.getNumber(ConfigManager::KILLS_TO_RED) - 1) * static_cast<int64_t>(g_config.getNumber(ConfigManager::FRAG_TIME))) {
setSkull(SKULL_RED);
}
}
}
void Player::checkSkullTicks(int32_t ticks)
{
int32_t newTicks = skullTicks - ticks;
if (newTicks < 0) {
skullTicks = 0;
} else {
skullTicks = newTicks;
}
if ((skull == SKULL_RED || skull == SKULL_BLACK) && skullTicks < 1000 && !hasCondition(CONDITION_INFIGHT)) {
setSkull(SKULL_NONE);
}
}
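// A vocation counts as promoted when it has no further promotion available and is not the base (none) vocation.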
bool Player::isPromoted() const
{
uint16_t promotedVocation = g_vocations.getPromotedVocation(vocation->getId());
return promotedVocation == VOCATION_NONE && vocation->getId() != promotedVocation;
}
double Player::getLostPercent() const
{
int32_t blessingCount = std::bitset<5>(blessings).count();
int32_t deathLosePercent = g_config.getNumber(ConfigManager::DEATH_LOSE_PERCENT);
if (deathLosePercent != -1) {
if (isPromoted()) {
deathLosePercent -= 3;
}
deathLosePercent -= blessingCount;
return std::max<int32_t>(0, deathLosePercent) / 100.;
}
double lossPercent;
if (level >= 25) {
double tmpLevel = level + (levelPercent / 100.);
lossPercent = static_cast<double>((tmpLevel + 50) * 50 * ((tmpLevel * tmpLevel) - (5 * tmpLevel) + 8)) / experience;
} else {
lossPercent = 10;
}
if (isPromoted()) {
lossPercent *= 0.7;
}
return lossPercent * pow(0.92, blessingCount) / 100;
}
void Player::learnInstantSpell(const std::string& spellName)
{
if (!hasLearnedInstantSpell(spellName)) {
learnedInstantSpellList.push_front(spellName);
}
}
void Player::forgetInstantSpell(const std::string& spellName)
{
learnedInstantSpellList.remove(spellName);
}
bool Player::hasLearnedInstantSpell(const std::string& spellName) const
{
if (hasFlag(PlayerFlag_CannotUseSpells)) {
return false;
}
if (hasFlag(PlayerFlag_IgnoreSpellCheck)) {
return true;
}
for (const auto& learnedSpellName : learnedInstantSpellList) {
if (strcasecmp(learnedSpellName.c_str(), spellName.c_str()) == 0) {
return true;
}
}
return false;
}
bool Player::isInWar(const Player* player) const
{
if (!player || !guild) {
return false;
}
const Guild* playerGuild = player->getGuild();
if (!playerGuild) {
return false;
}
return isInWarList(playerGuild->getId()) && player->isInWarList(guild->getId());
}
bool Player::isInWarList(uint32_t guildId) const
{
return std::find(guildWarVector.begin(), guildWarVector.end(), guildId) != guildWarVector.end();
}
bool Player::isPremium() const
{
if (g_config.getBoolean(ConfigManager::FREE_PREMIUM) || hasFlag(PlayerFlag_IsAlwaysPremium)) {
return true;
}
return premiumDays > 0;
}
void Player::setPremiumDays(int32_t v)
{
premiumDays = v;
sendBasicData();
}
PartyShields_t Player::getPartyShield(const Player* player) const
{
if (!player) {
return SHIELD_NONE;
}
if (party) {
if (party->getLeader() == player) {
if (party->isSharedExperienceActive()) {
if (party->isSharedExperienceEnabled()) {
return SHIELD_YELLOW_SHAREDEXP;
}
if (party->canUseSharedExperience(player)) {
return SHIELD_YELLOW_NOSHAREDEXP;
}
return SHIELD_YELLOW_NOSHAREDEXP_BLINK;
}
return SHIELD_YELLOW;
}
if (player->party == party) {
if (party->isSharedExperienceActive()) {
if (party->isSharedExperienceEnabled()) {
return SHIELD_BLUE_SHAREDEXP;
}
if (party->canUseSharedExperience(player)) {
return SHIELD_BLUE_NOSHAREDEXP;
}
return SHIELD_BLUE_NOSHAREDEXP_BLINK;
}
return SHIELD_BLUE;
}
if (isInviting(player)) {
return SHIELD_WHITEBLUE;
}
}
if (player->isInviting(this)) {
return SHIELD_WHITEYELLOW;
}
if (player->party) {
return SHIELD_GRAY;
}
return SHIELD_NONE;
}
bool Player::isInviting(const Player* player) const
{
if (!player || !party || party->getLeader() != this) {
return false;
}
return party->isPlayerInvited(player);
}
bool Player::isPartner(const Player* player) const
{
if (!player || !party) {
return false;
}
return party == player->party;
}
bool Player::isGuildMate(const Player* player) const
{
if (!player || !guild) {
return false;
}
return guild == player->guild;
}
void Player::sendPlayerPartyIcons(Player* player)
{
sendCreatureShield(player);
sendCreatureSkull(player);
}
bool Player::addPartyInvitation(Party* party)
{
auto it = std::find(invitePartyList.begin(), invitePartyList.end(), party);
if (it != invitePartyList.end()) {
return false;
}
invitePartyList.push_front(party);
return true;
}
void Player::removePartyInvitation(Party* party)
{
invitePartyList.remove(party);
}
void Player::clearPartyInvitations()
{
for (Party* invitingParty : invitePartyList) {
invitingParty->removeInvite(*this, false);
}
invitePartyList.clear();
}
GuildEmblems_t Player::getGuildEmblem(const Player* player) const
{
if (!player) {
return GUILDEMBLEM_NONE;
}
const Guild* playerGuild = player->getGuild();
if (!playerGuild) {
return GUILDEMBLEM_NONE;
}
if (player->getGuildWarVector().empty()) {
if (guild == playerGuild) {
return GUILDEMBLEM_MEMBER;
} else {
return GUILDEMBLEM_OTHER;
}
} else if (guild == playerGuild) {
return GUILDEMBLEM_ALLY;
} else if (isInWar(player)) {
return GUILDEMBLEM_ENEMY;
}
return GUILDEMBLEM_NEUTRAL;
}
uint8_t Player::getCurrentMount() const
{
int32_t value;
if (getStorageValue(PSTRG_MOUNTS_CURRENTMOUNT, value)) {
return value;
}
return 0;
}
void Player::setCurrentMount(uint8_t mount)
{
addStorageValue(PSTRG_MOUNTS_CURRENTMOUNT, mount);
}
bool Player::toggleMount(bool mount)
{
if ((OTSYS_TIME() - lastToggleMount) < 3000 && !wasMounted) {
sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED);
return false;
}
if (mount) {
if (isMounted()) {
return false;
}
if (!group->access && tile->hasFlag(TILESTATE_PROTECTIONZONE)) {
sendCancelMessage(RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE);
return false;
}
const Outfit* playerOutfit = Outfits::getInstance().getOutfitByLookType(getSex(), defaultOutfit.lookType);
if (!playerOutfit) {
return false;
}
uint8_t currentMountId = getCurrentMount();
if (currentMountId == 0) {
sendOutfitWindow();
return false;
}
Mount* currentMount = g_game.mounts.getMountByID(currentMountId);
if (!currentMount) {
return false;
}
if (!hasMount(currentMount)) {
setCurrentMount(0);
sendOutfitWindow();
return false;
}
if (currentMount->premium && !isPremium()) {
sendCancelMessage(RETURNVALUE_YOUNEEDPREMIUMACCOUNT);
return false;
}
if (hasCondition(CONDITION_OUTFIT)) {
sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return false;
}
defaultOutfit.lookMount = currentMount->clientId;
if (currentMount->speed != 0) {
g_game.changeSpeed(this, currentMount->speed);
}
} else {
if (!isMounted()) {
return false;
}
dismount();
}
g_game.internalCreatureChangeOutfit(this, defaultOutfit);
lastToggleMount = OTSYS_TIME();
return true;
}
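// Tamed mounts are stored as bit flags packed 31 per storage value, starting at PSTRG_MOUNTS_RANGE_START.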
bool Player::tameMount(uint8_t mountId)
{
if (!g_game.mounts.getMountByID(mountId)) {
return false;
}
const uint8_t tmpMountId = mountId - 1;
const uint32_t key = PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31);
int32_t value;
if (getStorageValue(key, value)) {
value |= (1 << (tmpMountId % 31));
} else {
value = (1 << (tmpMountId % 31));
}
addStorageValue(key, value);
return true;
}
bool Player::untameMount(uint8_t mountId)
{
if (!g_game.mounts.getMountByID(mountId)) {
return false;
}
const uint8_t tmpMountId = mountId - 1;
const uint32_t key = PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31);
int32_t value;
if (!getStorageValue(key, value)) {
return true;
}
value &= ~(1 << (tmpMountId % 31));
addStorageValue(key, value);
if (getCurrentMount() == mountId) {
if (isMounted()) {
dismount();
g_game.internalCreatureChangeOutfit(this, defaultOutfit);
}
setCurrentMount(0);
}
return true;
}
bool Player::hasMount(const Mount* mount) const
{
if (isAccessPlayer()) {
return true;
}
if (mount->premium && !isPremium()) {
return false;
}
const uint8_t tmpMountId = mount->id - 1;
int32_t value;
if (!getStorageValue(PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31), value)) {
return false;
}
return ((1 << (tmpMountId % 31)) & value) != 0;
}
void Player::dismount()
{
Mount* mount = g_game.mounts.getMountByID(getCurrentMount());
if (mount && mount->speed > 0) {
g_game.changeSpeed(this, -mount->speed);
}
defaultOutfit.lookMount = 0;
}
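// Applies offline-training tries to a skill (or magic level), advancing levels while enough tries remain,
// then reports the exact progress change to the client.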
bool Player::addOfflineTrainingTries(skills_t skill, uint64_t tries)
{
if (tries == 0 || skill == SKILL_LEVEL) {
return false;
}
bool sendUpdate = false;
uint32_t oldSkillValue, newSkillValue;
long double oldPercentToNextLevel, newPercentToNextLevel;
if (skill == SKILL_MAGLEVEL) {
uint64_t currReqMana = vocation->getReqMana(magLevel);
uint64_t nextReqMana = vocation->getReqMana(magLevel + 1);
if (currReqMana >= nextReqMana) {
return false;
}
oldSkillValue = magLevel;
oldPercentToNextLevel = static_cast<long double>(manaSpent * 100) / nextReqMana;
g_events->eventPlayerOnGainSkillTries(this, SKILL_MAGLEVEL, tries);
uint32_t currMagLevel = magLevel;
while ((manaSpent + tries) >= nextReqMana) {
tries -= nextReqMana - manaSpent;
magLevel++;
manaSpent = 0;
g_creatureEvents->playerAdvance(this, SKILL_MAGLEVEL, magLevel - 1, magLevel);
sendUpdate = true;
currReqMana = nextReqMana;
nextReqMana = vocation->getReqMana(magLevel + 1);
if (currReqMana >= nextReqMana) {
tries = 0;
break;
}
}
manaSpent += tries;
if (magLevel != currMagLevel) {
std::ostringstream ss;
ss << "You advanced to magic level " << magLevel << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
}
uint8_t newPercent;
if (nextReqMana > currReqMana) {
newPercent = Player::getPercentLevel(manaSpent, nextReqMana);
newPercentToNextLevel = static_cast<long double>(manaSpent * 100) / nextReqMana;
} else {
newPercent = 0;
newPercentToNextLevel = 0;
}
if (newPercent != magLevelPercent) {
magLevelPercent = newPercent;
sendUpdate = true;
}
newSkillValue = magLevel;
} else {
uint64_t currReqTries = vocation->getReqSkillTries(skill, skills[skill].level);
uint64_t nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1);
if (currReqTries >= nextReqTries) {
return false;
}
oldSkillValue = skills[skill].level;
oldPercentToNextLevel = static_cast<long double>(skills[skill].tries * 100) / nextReqTries;
g_events->eventPlayerOnGainSkillTries(this, skill, tries);
uint32_t currSkillLevel = skills[skill].level;
while ((skills[skill].tries + tries) >= nextReqTries) {
tries -= nextReqTries - skills[skill].tries;
skills[skill].level++;
skills[skill].tries = 0;
skills[skill].percent = 0;
g_creatureEvents->playerAdvance(this, skill, (skills[skill].level - 1), skills[skill].level);
sendUpdate = true;
currReqTries = nextReqTries;
nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1);
if (currReqTries >= nextReqTries) {
tries = 0;
break;
}
}
skills[skill].tries += tries;
if (currSkillLevel != skills[skill].level) {
std::ostringstream ss;
ss << "You advanced to " << getSkillName(skill) << " level " << skills[skill].level << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
}
uint8_t newPercent;
if (nextReqTries > currReqTries) {
newPercent = Player::getPercentLevel(skills[skill].tries, nextReqTries);
newPercentToNextLevel = static_cast<long double>(skills[skill].tries * 100) / nextReqTries;
} else {
newPercent = 0;
newPercentToNextLevel = 0;
}
if (skills[skill].percent != newPercent) {
skills[skill].percent = newPercent;
sendUpdate = true;
}
newSkillValue = skills[skill].level;
}
if (sendUpdate) {
sendSkills();
}
std::ostringstream ss;
ss << std::fixed << std::setprecision(2) << "Your " << ucwords(getSkillName(skill)) << " skill changed from level " << oldSkillValue << " (with " << oldPercentToNextLevel << "% progress towards level " << (oldSkillValue + 1) << ") to level " << newSkillValue << " (with " << newPercentToNextLevel << "% progress towards level " << (newSkillValue + 1) << ')';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
return sendUpdate;
}
bool Player::hasModalWindowOpen(uint32_t modalWindowId) const
{
return find(modalWindows.begin(), modalWindows.end(), modalWindowId) != modalWindows.end();
}
void Player::onModalWindowHandled(uint32_t modalWindowId)
{
modalWindows.remove(modalWindowId);
}
void Player::sendModalWindow(const ModalWindow& modalWindow)
{
if (!client) {
return;
}
modalWindows.push_front(modalWindow.id);
client->sendModalWindow(modalWindow);
}
void Player::clearModalWindows()
{
modalWindows.clear();
}
uint16_t Player::getHelpers() const
{
uint16_t helpers;
if (guild && party) {
std::unordered_set<Player*> helperSet;
const auto& guildMembers = guild->getMembersOnline();
helperSet.insert(guildMembers.begin(), guildMembers.end());
const auto& partyMembers = party->getMembers();
helperSet.insert(partyMembers.begin(), partyMembers.end());
const auto& partyInvitees = party->getInvitees();
helperSet.insert(partyInvitees.begin(), partyInvitees.end());
helperSet.insert(party->getLeader());
helpers = helperSet.size();
} else if (guild) {
helpers = guild->getMembersOnline().size();
} else if (party) {
helpers = party->getMemberCount() + party->getInvitationCount() + 1;
} else {
helpers = 0;
}
return helpers;
}
void Player::sendClosePrivate(uint16_t channelId)
{
if (channelId == CHANNEL_GUILD || channelId == CHANNEL_PARTY) {
g_chat->removeUserFromChannel(*this, channelId);
}
if (client) {
client->sendClosePrivate(channelId);
}
}
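// Sums the worth of all currency items the player carries, iterating nested containers with an explicit
// worklist instead of recursion.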
uint64_t Player::getMoney() const
{
std::vector<const Container*> containers;
uint64_t moneyCount = 0;
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
Item* item = inventory[i];
if (!item) {
continue;
}
const Container* container = item->getContainer();
if (container) {
containers.push_back(container);
} else {
moneyCount += item->getWorth();
}
}
size_t i = 0;
while (i < containers.size()) {
const Container* container = containers[i++];
for (const Item* item : container->getItemList()) {
const Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
} else {
moneyCount += item->getWorth();
}
}
}
return moneyCount;
}
size_t Player::getMaxVIPEntries() const
{
if (group->maxVipEntries != 0) {
return group->maxVipEntries;
} else if (isPremium()) {
return 100;
}
return 20;
}
size_t Player::getMaxDepotItems() const
{
if (group->maxDepotItems != 0) {
return group->maxDepotItems;
} else if (isPremium()) {
return 2000;
}
return 1000;
}
std::forward_list<Condition*> Player::getMuteConditions() const
{
std::forward_list<Condition*> muteConditions;
for (Condition* condition : conditions) {
if (condition->getTicks() <= 0) {
continue;
}
ConditionType_t type = condition->getType();
if (type != CONDITION_MUTED && type != CONDITION_CHANNELMUTEDTICKS && type != CONDITION_YELLTICKS) {
continue;
}
muteConditions.push_front(condition);
}
return muteConditions;
}
void Player::setGuild(Guild* guild)
{
if (guild == this->guild) {
return;
}
Guild* oldGuild = this->guild;
this->guildNick.clear();
this->guild = nullptr;
this->guildRank = nullptr;
if (guild) {
const GuildRank* rank = guild->getRankByLevel(1);
if (!rank) {
return;
}
this->guild = guild;
this->guildRank = rank;
guild->addMember(this);
}
if (oldGuild) {
oldGuild->removeMember(this);
}
}
| 1 | 13,830 | Note that `hasAttacked`, which is called before this function, already performs this check. You probably don't need to check `hasAttacked` again here. | otland-forgottenserver | cpp |
@@ -0,0 +1,10 @@
+// This object is imported into the documentation site. An example for the documentation site should be part of the pull request for the component. The object key is the kabob case of the "URL folder". In the case of `http://localhost:8080/components/app-launcher/`, `app-launcher` is the `key`. The folder name is created by `components.component` value in `package.json`. The following uses webpack's raw-loader plugin to get "text files" that will be eval()'d by CodeMirror within the documentation site on page load.
+
+/* eslint-env node */
+/* eslint-disable global-require */
+
+const siteStories = [
+ require('raw-loader!@salesforce/design-system-react/components/carousel/__examples__/default.jsx'),
+];
+
+module.exports = siteStories; | 1 | 1 | 12,138 | Unfortunately, the site example only has access to imports from `components/index.js` that means that you can't import the items file `carousel-items.js`. You can import that file in all the other storybook files though. | salesforce-design-system-react | js |
|
@@ -79,7 +79,7 @@ setup(
'locale/*/LC_MESSAGES/electrum.mo',
]
},
- scripts=['electrum'],
+ scripts=['electrum', 'privkey2electrum'],
data_files=data_files,
description="Lightweight Bitcoin Wallet",
author="Thomas Voegtlin", | 1 | #!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
from setuptools import setup
import os
import sys
import platform
import imp
import argparse
version = imp.load_source('version', 'lib/version.py')
if sys.version_info[:3] < (3, 4, 0):
sys.exit("Error: Electrum requires Python version >= 3.4.0...")
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
parser = argparse.ArgumentParser()
parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
opts, _ = parser.parse_known_args(sys.argv[1:])
usr_share = os.path.join(sys.prefix, "share")
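# Fall back to the user's XDG data directory for the .desktop file and icon when the install prefix is not writable.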
if not os.access(opts.root_path + usr_share, os.W_OK) and \
not os.access(opts.root_path, os.W_OK):
if 'XDG_DATA_HOME' in os.environ.keys():
usr_share = os.environ['XDG_DATA_HOME']
else:
usr_share = os.path.expanduser('~/.local/share')
data_files += [
(os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
(os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
]
setup(
name="Electrum",
version=version.ELECTRUM_VERSION,
install_requires=[
'pyaes>=0.1a1',
'ecdsa>=0.9',
'pbkdf2',
'requests',
'qrcode',
'protobuf',
'dnspython',
'jsonrpclib-pelix',
'PySocks>=1.6.6',
],
packages=[
'electrum',
'electrum_gui',
'electrum_gui.qt',
'electrum_plugins',
'electrum_plugins.audio_modem',
'electrum_plugins.cosigner_pool',
'electrum_plugins.email_requests',
'electrum_plugins.greenaddress_instant',
'electrum_plugins.hw_wallet',
'electrum_plugins.keepkey',
'electrum_plugins.labels',
'electrum_plugins.ledger',
'electrum_plugins.trezor',
'electrum_plugins.digitalbitbox',
'electrum_plugins.trustedcoin',
'electrum_plugins.virtualkeyboard',
],
package_dir={
'electrum': 'lib',
'electrum_gui': 'gui',
'electrum_plugins': 'plugins',
},
package_data={
'electrum': [
'servers.json',
'servers_testnet.json',
'currencies.json',
'www/index.html',
'wordlist/*.txt',
'locale/*/LC_MESSAGES/electrum.mo',
]
},
scripts=['electrum'],
data_files=data_files,
description="Lightweight Bitcoin Wallet",
author="Thomas Voegtlin",
author_email="[email protected]",
license="MIT Licence",
url="https://electrum.org",
long_description="""Lightweight Bitcoin Wallet"""
)
| 1 | 11,885 | Leave this out and move the script to the scripts folder. | spesmilo-electrum | py |
@@ -1656,7 +1656,7 @@ class TargetLocator {
window(nameOrHandle) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_WINDOW).
- setParameter('name', nameOrHandle),
+ setParameter('handle', nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
}
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview The heart of the WebDriver JavaScript API.
*/
'use strict';
const actions = require('./actions');
const by = require('./by');
const Capabilities = require('./capabilities').Capabilities;
const command = require('./command');
const error = require('./error');
const input = require('./input');
const logging = require('./logging');
const Session = require('./session').Session;
const Symbols = require('./symbols');
const promise = require('./promise');
/**
* Defines a condition for use with WebDriver's {@linkplain WebDriver#wait wait
* command}.
*
* @template OUT
*/
class Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): OUT} fn The condition function to
* evaluate on each iteration of the wait loop.
*/
constructor(message, fn) {
/** @private {string} */
this.description_ = 'Waiting ' + message;
/** @type {function(!WebDriver): OUT} */
this.fn = fn;
}
/** @return {string} A description of this condition. */
description() {
return this.description_;
}
}
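// Illustrative only (not part of the original source): a custom wait condition can be built directly
// from this class, e.g.
//   new Condition('until the title contains "Home"',
//       driver => driver.getTitle().then(title => title.indexOf('Home') !== -1));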
/**
* Defines a condition that will result in a {@link WebElement}.
*
* @extends {Condition<!(WebElement|promise.Promise<!WebElement>)>}
*/
class WebElementCondition extends Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): !(WebElement|promise.Promise<!WebElement>)}
* fn The condition function to evaluate on each iteration of the wait
* loop.
*/
constructor(message, fn) {
super(message, fn);
}
}
//////////////////////////////////////////////////////////////////////////////
//
// WebDriver
//
//////////////////////////////////////////////////////////////////////////////
/**
* Translates a command to its wire-protocol representation before passing it
* to the given `executor` for execution.
* @param {!command.Executor} executor The executor to use.
* @param {!command.Command} command The command to execute.
* @return {!Promise} A promise that will resolve with the command response.
*/
function executeCommand(executor, command) {
return toWireValue(command.getParameters()).
then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
});
}
/**
* Converts an object to its JSON representation in the WebDriver wire protocol.
* When converting values of type object, the following steps will be taken:
* <ol>
* <li>if the object is a WebElement, the return value will be the element's
* server ID
* <li>if the object defines a {@link Symbols.serialize} method, this algorithm
* will be recursively applied to the object's serialized representation
* <li>if the object provides a "toJSON" function, this algorithm will
* recursively be applied to the result of that function
* <li>otherwise, the value of each key will be recursively converted according
* to the rules above.
* </ol>
*
* @param {*} obj The object to convert.
* @return {!Promise<?>} A promise that will resolve to the input value's JSON
* representation.
*/
function toWireValue(obj) {
if (promise.isPromise(obj)) {
return Promise.resolve(obj).then(toWireValue);
}
return Promise.resolve(convertValue(obj));
}
function convertValue(value) {
if (value === void 0 || value === null) {
return value;
}
if (typeof value === 'boolean'
|| typeof value === 'number'
|| typeof value === 'string') {
return value;
}
if (Array.isArray(value)) {
return convertKeys(value);
}
if (typeof value === 'function') {
return '' + value;
}
if (typeof value[Symbols.serialize] === 'function') {
return toWireValue(value[Symbols.serialize]());
} else if (typeof value.toJSON === 'function') {
return toWireValue(value.toJSON());
}
return convertKeys(value);
}
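// Converts every entry of an array or object, waiting for any promised values, and resolves with the
// fully converted copy once all keys have been processed.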
function convertKeys(obj) {
const isArray = Array.isArray(obj);
const numKeys = isArray ? obj.length : Object.keys(obj).length;
const ret = isArray ? new Array(numKeys) : {};
if (!numKeys) {
return Promise.resolve(ret);
}
let numResolved = 0;
function forEachKey(obj, fn) {
if (Array.isArray(obj)) {
for (let i = 0, n = obj.length; i < n; i++) {
fn(obj[i], i);
}
} else {
for (let key in obj) {
fn(obj[key], key);
}
}
}
return new Promise(function(done, reject) {
forEachKey(obj, function(value, key) {
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
value = convertValue(value);
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
setValue(value);
}
}
function setValue(value) {
ret[key] = value;
maybeFulfill();
}
});
function maybeFulfill() {
if (++numResolved === numKeys) {
done(ret);
}
}
});
}
/**
* Converts a value from its JSON representation according to the WebDriver wire
* protocol. Any JSON object that defines a WebElement ID will be decoded to a
* {@link WebElement} object. All other values will be passed through as is.
*
* @param {!WebDriver} driver The driver to use as the parent of any unwrapped
* {@link WebElement} values.
* @param {*} value The value to convert.
* @return {*} The converted value.
*/
function fromWireValue(driver, value) {
if (Array.isArray(value)) {
value = value.map(v => fromWireValue(driver, v));
} else if (WebElement.isId(value)) {
let id = WebElement.extractId(value);
value = new WebElement(driver, id);
} else if (value && typeof value === 'object') {
let result = {};
for (let key in value) {
if (value.hasOwnProperty(key)) {
result[key] = fromWireValue(driver, value[key]);
}
}
value = result;
}
return value;
}
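// Illustrative sketch: fromWireValue() undoes the element encoding performed by
// toWireValue(). A hypothetical wire response holding an encoded element ID is
// unwrapped into a live WebElement bound to the given driver.
//
//   let raw = {'element-6066-11e4-a52e-4f735466cecf': 'abc-123'};
//   let el = fromWireValue(driver, raw);  // el instanceof WebElement === true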
/**
* Creates a new WebDriver client, which provides control over a browser.
*
 * Every WebDriver command returns a {@link promise.Promise} that
* represents the result of that command. Callbacks may be registered on this
* object to manipulate the command result or catch an expected error. Any
* commands scheduled with a callback are considered sub-commands and will
* execute before the next command in the current frame. For example:
*
* var message = [];
* driver.call(message.push, message, 'a').then(function() {
* driver.call(message.push, message, 'b');
* });
* driver.call(message.push, message, 'c');
* driver.call(function() {
* alert('message is abc? ' + (message.join('') == 'abc'));
* });
*
*/
class WebDriver {
/**
* @param {!(Session|promise.Promise<!Session>)} session Either a
* known session or a promise that will be resolved to a session.
* @param {!command.Executor} executor The executor to use when sending
* commands to the browser.
* @param {promise.ControlFlow=} opt_flow The flow to
* schedule commands through. Defaults to the active flow object.
*/
constructor(session, executor, opt_flow) {
/** @private {!promise.Promise<!Session>} */
this.session_ = promise.fulfilled(session);
/** @private {!command.Executor} */
this.executor_ = executor;
/** @private {!promise.ControlFlow} */
this.flow_ = opt_flow || promise.controlFlow();
/** @private {input.FileDetector} */
this.fileDetector_ = null;
}
/**
* Creates a new WebDriver client for an existing session.
* @param {!command.Executor} executor Command executor to use when querying
* for session details.
* @param {string} sessionId ID of the session to attach to.
* @param {promise.ControlFlow=} opt_flow The control flow all
* driver commands should execute under. Defaults to the
* {@link promise.controlFlow() currently active} control flow.
* @return {!WebDriver} A new client for the specified session.
*/
static attachToSession(executor, sessionId, opt_flow) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.DESCRIBE_SESSION)
.setParameter('sessionId', sessionId);
let session = flow.execute(
() => executeCommand(executor, cmd).catch(err => {
// The DESCRIBE_SESSION command is not supported by the W3C spec, so
// if we get back an unknown command, just return a session with
// unknown capabilities.
if (err instanceof error.UnknownCommandError) {
return new Session(sessionId, new Capabilities);
}
throw err;
}),
'WebDriver.attachToSession()');
return new WebDriver(session, executor, flow);
}
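// Usage sketch for attachToSession(); `executor` and the session ID are
// placeholders for values supplied by the caller, not names defined here.
//
//   let driver = WebDriver.attachToSession(executor, 'existing-session-id');
//   driver.getCurrentUrl().then(url => console.log(url));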
/**
* Creates a new WebDriver session.
*
* By default, the requested session `capabilities` are merely "desired" and
* the remote end will still create a new session even if it cannot satisfy
* all of the requested capabilities. You can query which capabilities a
* session actually has using the
* {@linkplain #getCapabilities() getCapabilities()} method on the returned
* WebDriver instance.
*
* To define _required capabilities_, provide the `capabilities` as an object
* literal with `required` and `desired` keys. The `desired` key may be
* omitted if all capabilities are required, and vice versa. If the server
* cannot create a session with all of the required capabilities, it will
* return an {@linkplain error.SessionNotCreatedError}.
*
* let required = new Capabilities().set('browserName', 'firefox');
* let desired = new Capabilities().set('version', '45');
* let driver = WebDriver.createSession(executor, {required, desired});
*
* This function will always return a WebDriver instance. If there is an error
* creating the session, such as the aforementioned SessionNotCreatedError,
* the driver will have a rejected {@linkplain #getSession session} promise.
* It is recommended that this promise is left _unhandled_ so it will
* propagate through the {@linkplain promise.ControlFlow control flow} and
* cause subsequent commands to fail.
*
* let required = Capabilities.firefox();
* let driver = WebDriver.createSession(executor, {required});
*
 *     // If the createSession operation failed, then this command will also
 *     // fail, propagating the creation failure.
* driver.get('http://www.google.com').catch(e => console.log(e));
*
* @param {!command.Executor} executor The executor to create the new session
* with.
* @param {(!Capabilities|
* {desired: (Capabilities|undefined),
* required: (Capabilities|undefined)})} capabilities The desired
* capabilities for the new session.
* @param {promise.ControlFlow=} opt_flow The control flow all driver
* commands should execute under, including the initial session creation.
* Defaults to the {@link promise.controlFlow() currently active}
* control flow.
* @return {!WebDriver} The driver for the newly created session.
*/
static createSession(executor, capabilities, opt_flow) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.NEW_SESSION);
if (capabilities && (capabilities.desired || capabilities.required)) {
cmd.setParameter('desiredCapabilities', capabilities.desired);
cmd.setParameter('requiredCapabilities', capabilities.required);
} else {
cmd.setParameter('desiredCapabilities', capabilities);
}
let session = flow.execute(
() => executeCommand(executor, cmd),
'WebDriver.createSession()');
return new WebDriver(session, executor, flow);
}
/**
* @return {!promise.ControlFlow} The control flow used by this
* instance.
*/
controlFlow() {
return this.flow_;
}
/**
* Schedules a {@link command.Command} to be executed by this driver's
* {@link command.Executor}.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Promise<T>} A promise that will be resolved
* with the command result.
* @template T
*/
schedule(command, description) {
var self = this;
checkHasNotQuit();
command.setParameter('sessionId', this.session_);
// If any of the command parameters are rejected promises, those
// rejections may be reported as unhandled before the control flow
// attempts to execute the command. To ensure parameter errors
// propagate through the command itself, we resolve all of the
// command parameters now, but suppress any errors until the ControlFlow
// actually executes the command. This addresses scenarios like catching
// an element not found error in:
//
// driver.findElement(By.id('foo')).click().catch(function(e) {
// if (e instanceof NoSuchElementError) {
// // Do something.
// }
// });
var prepCommand = toWireValue(command.getParameters());
prepCommand.catch(function() {});
var flow = this.flow_;
var executor = this.executor_;
return flow.execute(function() {
// A call to WebDriver.quit() may have been scheduled in the same event
// loop as this |command|, which would prevent us from detecting that the
// driver has quit above. Therefore, we need to make another quick check.
// We still check above so we can fail as early as possible.
checkHasNotQuit();
// Retrieve resolved command parameters; any previously suppressed errors
// will now propagate up through the control flow as part of the command
// execution.
return prepCommand.then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
}).then(value => fromWireValue(self, value));
}, description);
function checkHasNotQuit() {
if (!self.session_) {
throw new error.NoSuchSessionError(
'This driver instance does not have a valid session ID ' +
'(did you call WebDriver.quit()?) and may no longer be ' +
'used.');
}
}
}
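// Usage sketch for schedule(): most callers use the higher-level methods below,
// but a raw command may be scheduled directly. The command name is one already
// used in this file; the description string is arbitrary.
//
//   driver.schedule(
//       new command.Command(command.Name.GET_TITLE),
//       'example: fetch the title directly').then(title => console.log(title));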
/**
* Sets the {@linkplain input.FileDetector file detector} that should be
* used with this instance.
* @param {input.FileDetector} detector The detector to use or {@code null}.
*/
setFileDetector(detector) {
this.fileDetector_ = detector;
}
/**
* @return {!command.Executor} The command executor used by this instance.
*/
getExecutor() {
return this.executor_;
}
/**
* @return {!promise.Promise<!Session>} A promise for this client's
* session.
*/
getSession() {
return this.session_;
}
/**
* @return {!promise.Promise<!Capabilities>} A promise
 *     that will resolve with this instance's capabilities.
*/
getCapabilities() {
return this.session_.then(session => session.getCapabilities());
}
/**
* Schedules a command to quit the current session. After calling quit, this
* instance will be invalidated and may no longer be used to issue commands
* against the browser.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
quit() {
var result = this.schedule(
new command.Command(command.Name.QUIT),
'WebDriver.quit()');
// Delete our session ID when the quit command finishes; this will allow us
// to throw an error when attempting to use a driver post-quit.
return result.finally(() => delete this.session_);
}
/**
* Creates a new action sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.ActionSequence#perform} is
* called. Example:
*
* driver.actions().
* mouseDown(element1).
* mouseMove(element2).
* mouseUp().
* perform();
*
* @return {!actions.ActionSequence} A new action sequence for this instance.
*/
actions() {
return new actions.ActionSequence(this);
}
/**
* Creates a new touch sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.TouchSequence#perform} is
* called. Example:
*
* driver.touchActions().
* tap(element1).
* doubleTap(element2).
* perform();
*
* @return {!actions.TouchSequence} A new touch sequence for this instance.
*/
touchActions() {
return new actions.TouchSequence(this);
}
/**
* Schedules a command to execute JavaScript in the context of the currently
* selected frame or window. The script fragment will be executed as the body
* of an anonymous function. If the script is provided as a function object,
* that function will be converted to a string for injection into the target
* window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@linkplain WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* The script may refer to any variables accessible from the current window.
* Furthermore, the script will execute in the window's context, thus
* {@code document} may be used to refer to the current document. Any local
* variables will not be available once the script has finished executing,
* though global variables will persist.
*
* If the script has a return value (i.e. if the script contains a return
* statement), then the following steps will be taken for resolving this
 * function's return value:
*
 * - For an HTML element, the value will resolve to a {@linkplain WebElement}
 * - Null and undefined return values will resolve to null
 * - Booleans, numbers, and strings will resolve as is
 * - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!promise.Promise<T>} A promise that will resolve to the
 *     script's return value.
* @template T
*/
executeScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args =
arguments.length > 1 ? Array.prototype.slice.call(arguments, 1) : [];
return this.schedule(
new command.Command(command.Name.EXECUTE_SCRIPT).
setParameter('script', script).
setParameter('args', args),
'WebDriver.executeScript()');
}
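// Usage sketch for executeScript(), showing both the string and function forms;
// `someElement` is a placeholder for a previously located WebElement.
//
//   driver.executeScript('return document.title;').then(t => console.log(t));
//   driver.executeScript(function(el, cls) {
//     return el.classList.contains(cls);
//   }, someElement, 'active').then(has => console.log(has));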
/**
* Schedules a command to execute asynchronous JavaScript in the context of the
* currently selected frame or window. The script fragment will be executed as
* the body of an anonymous function. If the script is provided as a function
* object, that function will be converted to a string for injection into the
* target window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@code WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* Unlike executing synchronous JavaScript with {@link #executeScript},
* scripts executed with this function must explicitly signal they are finished
* by invoking the provided callback. This callback will always be injected
* into the executed function as the last argument, and thus may be referenced
* with {@code arguments[arguments.length - 1]}. The following steps will be
 * taken for resolving this function's return value against the first argument
* to the script's callback function:
*
 * - For an HTML element, the value will resolve to a
* {@link WebElement}
* - Null and undefined return values will resolve to null
* - Booleans, numbers, and strings will resolve as is
* - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* __Example #1:__ Performing a sleep that is synchronized with the currently
* selected window:
*
* var start = new Date().getTime();
* driver.executeAsyncScript(
* 'window.setTimeout(arguments[arguments.length - 1], 500);').
* then(function() {
* console.log(
* 'Elapsed time: ' + (new Date().getTime() - start) + ' ms');
* });
*
* __Example #2:__ Synchronizing a test with an AJAX application:
*
* var button = driver.findElement(By.id('compose-button'));
* button.click();
* driver.executeAsyncScript(
* 'var callback = arguments[arguments.length - 1];' +
* 'mailClient.getComposeWindowWidget().onload(callback);');
* driver.switchTo().frame('composeWidget');
* driver.findElement(By.id('to')).sendKeys('[email protected]');
*
* __Example #3:__ Injecting a XMLHttpRequest and waiting for the result. In
* this example, the inject script is specified with a function literal. When
* using this format, the function is converted to a string for injection, so it
* should not reference any symbols not defined in the scope of the page under
* test.
*
* driver.executeAsyncScript(function() {
* var callback = arguments[arguments.length - 1];
* var xhr = new XMLHttpRequest();
* xhr.open("GET", "/resource/data.json", true);
* xhr.onreadystatechange = function() {
* if (xhr.readyState == 4) {
* callback(xhr.responseText);
* }
* };
* xhr.send('');
* }).then(function(str) {
* console.log(JSON.parse(str)['food']);
* });
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!promise.Promise<T>} A promise that will resolve to the
 *     script's return value.
* @template T
*/
executeAsyncScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args = Array.prototype.slice.call(arguments, 1);
return this.schedule(
new command.Command(command.Name.EXECUTE_ASYNC_SCRIPT).
setParameter('script', script).
setParameter('args', args),
'WebDriver.executeAsyncScript()');
}
/**
* Schedules a command to execute a custom function.
* @param {function(...): (T|promise.Promise<T>)} fn The function to
* execute.
* @param {Object=} opt_scope The object in whose scope to execute the function.
* @param {...*} var_args Any arguments to pass to the function.
 * @return {!promise.Promise<T>} A promise that will be resolved
* with the function's result.
* @template T
*/
call(fn, opt_scope, var_args) {
let args = Array.prototype.slice.call(arguments, 2);
let flow = this.flow_;
return flow.execute(function() {
return promise.fullyResolved(args).then(function(args) {
if (promise.isGenerator(fn)) {
args.unshift(fn, opt_scope);
return promise.consume.apply(null, args);
}
return fn.apply(opt_scope, args);
});
}, 'WebDriver.call(' + (fn.name || 'function') + ')');
}
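// Usage sketch for call(): injects a plain function (which may return a promise)
// into the control flow between WebDriver commands. `fetchTestFixture` is a
// hypothetical helper, not part of this library.
//
//   driver.call(() => fetchTestFixture())
//       .then(fixture => driver.get(fixture.url));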
/**
* Schedules a command to wait for a condition to hold. The condition may be
* specified by a {@link Condition}, as a custom function, or as any
* promise-like thenable.
*
* For a {@link Condition} or function, the wait will repeatedly
* evaluate the condition until it returns a truthy value. If any errors occur
* while evaluating the condition, they will be allowed to propagate. In the
* event a condition returns a {@link promise.Promise promise}, the polling
* loop will wait for it to be resolved and use the resolved value for whether
 * the condition has been satisfied. Note the resolution time for a promise
* is factored into whether a wait has timed out.
*
* Note, if the provided condition is a {@link WebElementCondition}, then
* the wait will return a {@link WebElementPromise} that will resolve to the
 * element that satisfied the condition.
*
* _Example:_ waiting up to 10 seconds for an element to be present on the
* page.
*
* var button = driver.wait(until.elementLocated(By.id('foo')), 10000);
* button.click();
*
* This function may also be used to block the command flow on the resolution
* of any thenable promise object. When given a promise, the command will
* simply wait for its resolution before completing. A timeout may be provided
* to fail the command if the promise does not resolve before the timeout
* expires.
*
* _Example:_ Suppose you have a function, `startTestServer`, that returns a
* promise for when a server is ready for requests. You can block a WebDriver
* client on this promise with:
*
* var started = startTestServer();
* driver.wait(started, 5 * 1000, 'Server should start within 5 seconds');
* driver.get(getServerUrl());
*
* @param {!(promise.Promise<T>|
* Condition<T>|
* function(!WebDriver): T)} condition The condition to
* wait on, defined as a promise, condition object, or a function to
* evaluate as a condition.
* @param {number=} opt_timeout How long to wait for the condition to be true.
* @param {string=} opt_message An optional message to use if the wait times
* out.
* @return {!(promise.Promise<T>|WebElementPromise)} A promise that will be
* resolved with the first truthy value returned by the condition
* function, or rejected if the condition times out. If the input
 *     condition is an instance of a {@link WebElementCondition},
* the returned value will be a {@link WebElementPromise}.
* @template T
*/
wait(condition, opt_timeout, opt_message) {
if (promise.isPromise(condition)) {
return this.flow_.wait(
/** @type {!promise.Promise} */(condition),
opt_timeout, opt_message);
}
var message = opt_message;
var fn = /** @type {!Function} */(condition);
if (condition instanceof Condition) {
message = message || condition.description();
fn = condition.fn;
}
var driver = this;
var result = this.flow_.wait(function() {
if (promise.isGenerator(fn)) {
return promise.consume(fn, null, [driver]);
}
return fn(driver);
}, opt_timeout, message);
if (condition instanceof WebElementCondition) {
result = new WebElementPromise(this, result.then(function(value) {
if (!(value instanceof WebElement)) {
throw TypeError(
'WebElementCondition did not resolve to a WebElement: '
+ Object.prototype.toString.call(value));
}
return value;
}));
}
return result;
}
/**
* Schedules a command to make the driver sleep for the given amount of time.
* @param {number} ms The amount of time, in milliseconds, to sleep.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the sleep has finished.
*/
sleep(ms) {
return this.flow_.timeout(ms, 'WebDriver.sleep(' + ms + ')');
}
/**
* Schedules a command to retrieve the current window handle.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current window handle.
*/
getWindowHandle() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_WINDOW_HANDLE),
'WebDriver.getWindowHandle()');
}
/**
* Schedules a command to retrieve the current list of available window handles.
* @return {!promise.Promise.<!Array<string>>} A promise that will
* be resolved with an array of window handles.
*/
getAllWindowHandles() {
return this.schedule(
new command.Command(command.Name.GET_WINDOW_HANDLES),
'WebDriver.getAllWindowHandles()');
}
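// Usage sketch: enumerating window handles and switching to the most recently
// opened window (see TargetLocator#window further below).
//
//   driver.getAllWindowHandles().then(handles => {
//     return driver.switchTo().window(handles[handles.length - 1]);
//   });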
/**
* Schedules a command to retrieve the current page's source. The page source
* returned is a representation of the underlying DOM: do not expect it to be
* formatted or escaped in the same way as the response sent from the web
* server.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current page source.
*/
getPageSource() {
return this.schedule(
new command.Command(command.Name.GET_PAGE_SOURCE),
'WebDriver.getPageSource()');
}
/**
* Schedules a command to close the current window.
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
close() {
return this.schedule(new command.Command(command.Name.CLOSE),
'WebDriver.close()');
}
/**
* Schedules a command to navigate to the given URL.
* @param {string} url The fully qualified URL to open.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the document has finished loading.
*/
get(url) {
return this.navigate().to(url);
}
/**
* Schedules a command to retrieve the URL of the current page.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current URL.
*/
getCurrentUrl() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_URL),
'WebDriver.getCurrentUrl()');
}
/**
* Schedules a command to retrieve the current page's title.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current page's title.
*/
getTitle() {
return this.schedule(new command.Command(command.Name.GET_TITLE),
'WebDriver.getTitle()');
}
/**
* Schedule a command to find an element on the page. If the element cannot be
* found, a {@link bot.ErrorCode.NO_SUCH_ELEMENT} result will be returned
* by the driver. Unlike other commands, this error cannot be suppressed. In
* other words, scheduling a command to find an element doubles as an assert
* that the element is present on the page. To test whether an element is
 * present on the page, use {@link #findElements} instead.
*
* The search criteria for an element may be defined using one of the
* factories in the {@link webdriver.By} namespace, or as a short-hand
* {@link webdriver.By.Hash} object. For example, the following two statements
* are equivalent:
*
* var e1 = driver.findElement(By.id('foo'));
* var e2 = driver.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = driver.findElement(firstVisibleLink);
*
* function firstVisibleLink(driver) {
* var links = driver.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
let id;
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
id = this.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule(cmd, 'WebDriver.findElement(' + locator + ')');
}
return new WebElementPromise(this, id);
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search
* context.
* @return {!promise.Promise.<!WebElement>} A
 *     promise that will resolve to a single WebElement.
* @private
*/
findElementInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (Array.isArray(result)) {
result = result[0];
}
if (!(result instanceof WebElement)) {
throw new TypeError('Custom locator did not return a WebElement');
}
return result;
});
}
/**
* Schedule a command to search for multiple elements on the page.
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!promise.Promise.<!Array.<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
return this.findElementsInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
let res = this.schedule(cmd, 'WebDriver.findElements(' + locator + ')');
return res.catch(function(e) {
if (e instanceof error.NoSuchElementError) {
return [];
}
throw e;
});
}
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search context.
* @return {!promise.Promise<!Array<!WebElement>>} A promise that
* will resolve to an array of WebElements.
* @private
*/
findElementsInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (result instanceof WebElement) {
return [result];
}
if (!Array.isArray(result)) {
return [];
}
return result.filter(function(item) {
return item instanceof WebElement;
});
});
}
/**
* Schedule a command to take a screenshot. The driver makes a best effort to
* return a screenshot of the following, in order of preference:
*
* 1. Entire page
* 2. Current window
* 3. Visible portion of the current frame
* 4. The entire display containing the browser
*
* @return {!promise.Promise<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot() {
return this.schedule(new command.Command(command.Name.SCREENSHOT),
'WebDriver.takeScreenshot()');
}
/**
* @return {!Options} The options interface for this instance.
*/
manage() {
return new Options(this);
}
/**
* @return {!Navigation} The navigation interface for this instance.
*/
navigate() {
return new Navigation(this);
}
/**
* @return {!TargetLocator} The target locator interface for this
* instance.
*/
switchTo() {
return new TargetLocator(this);
}
}
/**
* Interface for navigating back and forth in the browser history.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.navigate()
*
* @see WebDriver#navigate()
*/
class Navigation {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to navigate to a new URL.
* @param {string} url The URL to navigate to.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the URL has been loaded.
*/
to(url) {
return this.driver_.schedule(
new command.Command(command.Name.GET).
setParameter('url', url),
'WebDriver.navigate().to(' + url + ')');
}
/**
* Schedules a command to move backwards in the browser history.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
back() {
return this.driver_.schedule(
new command.Command(command.Name.GO_BACK),
'WebDriver.navigate().back()');
}
/**
* Schedules a command to move forwards in the browser history.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
forward() {
return this.driver_.schedule(
new command.Command(command.Name.GO_FORWARD),
'WebDriver.navigate().forward()');
}
/**
* Schedules a command to refresh the current page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
refresh() {
return this.driver_.schedule(
new command.Command(command.Name.REFRESH),
'WebDriver.navigate().refresh()');
}
}
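// Usage sketch for the Navigation interface:
//
//   driver.navigate().to('https://example.com/');
//   driver.navigate().refresh();
//   driver.navigate().back();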
/**
* Provides methods for managing browser and driver state.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with {@linkplain WebDriver#manage() webdriver.manage()}.
*/
class Options {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to add a cookie.
*
* __Sample Usage:__
*
* // Set a basic cookie.
 *     driver.manage().addCookie({name: 'foo', value: 'bar'});
*
* // Set a cookie that expires in 10 minutes.
* let expiry = new Date(Date.now() + (10 * 60 * 1000));
 *     driver.manage().addCookie({name: 'foo', value: 'bar', expiry});
*
* // The cookie expiration may also be specified in seconds since epoch.
 *     driver.manage().addCookie({
* name: 'foo',
* value: 'bar',
* expiry: Math.floor(Date.now() / 1000)
* });
*
* @param {!Options.Cookie} spec Defines the cookie to add.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the cookie has been added to the page.
* @throws {error.InvalidArgumentError} if any of the cookie parameters are
* invalid.
* @throws {TypeError} if `spec` is not a cookie object.
*/
addCookie(spec) {
if (!spec || typeof spec !== 'object') {
throw TypeError('addCookie called with non-cookie parameter');
}
// We do not allow '=' or ';' in the name.
let name = spec.name;
if (/[;=]/.test(name)) {
throw new error.InvalidArgumentError(
'Invalid cookie name "' + name + '"');
}
// We do not allow ';' in value.
let value = spec.value;
if (/;/.test(value)) {
throw new error.InvalidArgumentError(
'Invalid cookie value "' + value + '"');
}
let cookieString = name + '=' + value +
(spec.domain ? ';domain=' + spec.domain : '') +
(spec.path ? ';path=' + spec.path : '') +
(spec.secure ? ';secure' : '');
let expiry;
if (typeof spec.expiry === 'number') {
expiry = Math.floor(spec.expiry);
cookieString += ';expires=' + new Date(spec.expiry * 1000).toUTCString();
} else if (spec.expiry instanceof Date) {
let date = /** @type {!Date} */(spec.expiry);
expiry = Math.floor(date.getTime() / 1000);
cookieString += ';expires=' + date.toUTCString();
}
return this.driver_.schedule(
new command.Command(command.Name.ADD_COOKIE).
setParameter('cookie', {
'name': name,
'value': value,
'path': spec.path,
'domain': spec.domain,
'secure': !!spec.secure,
'expiry': expiry
}),
'WebDriver.manage().addCookie(' + cookieString + ')');
}
/**
* Schedules a command to delete all cookies visible to the current page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when all cookies have been deleted.
*/
deleteAllCookies() {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_ALL_COOKIES),
'WebDriver.manage().deleteAllCookies()');
}
/**
* Schedules a command to delete the cookie with the given name. This command
* is a no-op if there is no cookie with the given name visible to the current
* page.
* @param {string} name The name of the cookie to delete.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the cookie has been deleted.
*/
deleteCookie(name) {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_COOKIE).
setParameter('name', name),
'WebDriver.manage().deleteCookie(' + name + ')');
}
/**
* Schedules a command to retrieve all cookies visible to the current page.
* Each cookie will be returned as a JSON object as described by the WebDriver
* wire protocol.
* @return {!promise.Promise<!Array<!Options.Cookie>>} A promise that will be
* resolved with the cookies visible to the current browsing context.
*/
getCookies() {
return this.driver_.schedule(
new command.Command(command.Name.GET_ALL_COOKIES),
'WebDriver.manage().getCookies()');
}
/**
* Schedules a command to retrieve the cookie with the given name. Returns null
* if there is no such cookie. The cookie will be returned as a JSON object as
* described by the WebDriver wire protocol.
*
* @param {string} name The name of the cookie to retrieve.
* @return {!promise.Promise<?Options.Cookie>} A promise that will be resolved
* with the named cookie, or `null` if there is no such cookie.
*/
getCookie(name) {
return this.getCookies().then(function(cookies) {
for (let cookie of cookies) {
if (cookie && cookie['name'] === name) {
return cookie;
}
}
return null;
});
}
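// Usage sketch for cookie management; the cookie name and value are placeholders.
//
//   driver.manage().addCookie({name: 'session', value: 'abc'});
//   driver.manage().getCookie('session').then(c => console.log(c && c.value));
//   driver.manage().deleteCookie('session');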
/**
* @return {!Logs} The interface for managing driver
* logs.
*/
logs() {
return new Logs(this.driver_);
}
/**
* @return {!Timeouts} The interface for managing driver timeouts.
*/
timeouts() {
return new Timeouts(this.driver_);
}
/**
* @return {!Window} The interface for managing the current window.
*/
window() {
return new Window(this.driver_);
}
}
/**
* A record object describing a browser cookie.
*
* @record
*/
Options.Cookie = function() {};
/**
* The name of the cookie.
*
* @type {string}
*/
Options.Cookie.prototype.name;
/**
* The cookie value.
*
* @type {string}
*/
Options.Cookie.prototype.value;
/**
* The cookie path. Defaults to "/" when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.path;
/**
* The domain the cookie is visible to. Defaults to the current browsing
* context's document's URL when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.domain;
/**
* Whether the cookie is a secure cookie. Defaults to false when adding a new
* cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.secure;
/**
* Whether the cookie is an HTTP only cookie. Defaults to false when adding a
* new cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.httpOnly;
/**
* When the cookie expires.
*
* When {@linkplain Options#addCookie() adding a cookie}, this may be specified
* in _seconds_ since Unix epoch (January 1, 1970). The expiry will default to
* 20 years in the future if omitted.
*
* The expiry is always returned in seconds since epoch when
* {@linkplain Options#getCookies() retrieving cookies} from the browser.
*
* @type {(!Date|number|undefined)}
*/
Options.Cookie.prototype.expiry;
/**
* An interface for managing timeout behavior for WebDriver instances.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().timeouts()
*
* @see WebDriver#manage()
* @see Options#timeouts()
*/
class Timeouts {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Specifies the amount of time the driver should wait when searching for an
* element if it is not immediately present.
*
* When searching for a single element, the driver should poll the page
* until the element has been found, or this timeout expires before failing
* with a {@link bot.ErrorCode.NO_SUCH_ELEMENT} error. When searching
* for multiple elements, the driver should poll the page until at least one
* element has been found or this timeout has expired.
*
 * Setting the wait timeout to 0 (its default value) disables implicit
* waiting.
*
* Increasing the implicit wait timeout should be used judiciously as it
* will have an adverse effect on test run time, especially when used with
* slower location strategies like XPath.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the implicit wait timeout has been set.
*/
implicitlyWait(ms) {
return this._scheduleCommand(ms, 'implicit', 'implicitlyWait');
}
/**
* Sets the amount of time to wait, in milliseconds, for an asynchronous
* script to finish execution before returning an error. If the timeout is
* less than or equal to 0, the script will be allowed to run indefinitely.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the script timeout has been set.
*/
setScriptTimeout(ms) {
return this._scheduleCommand(ms, 'script', 'setScriptTimeout');
}
/**
* Sets the amount of time to wait for a page load to complete before
* returning an error. If the timeout is negative, page loads may be
* indefinite.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the timeout has been set.
*/
pageLoadTimeout(ms) {
return this._scheduleCommand(ms, 'page load', 'pageLoadTimeout');
}
_scheduleCommand(ms, timeoutIdentifier, timeoutName) {
return this.driver_.schedule(
new command.Command(command.Name.SET_TIMEOUT).
setParameter('type', timeoutIdentifier).
setParameter('ms', ms),
`WebDriver.manage().timeouts().${timeoutName}(${ms})`);
}
}
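// Usage sketch for the Timeouts interface; the millisecond values are arbitrary.
//
//   driver.manage().timeouts().implicitlyWait(5000);
//   driver.manage().timeouts().setScriptTimeout(10000);
//   driver.manage().timeouts().pageLoadTimeout(30000);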
/**
* An interface for managing the current window.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().window()
*
* @see WebDriver#manage()
* @see Options#window()
*/
class Window {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Retrieves the window's current position, relative to the top left corner of
* the screen.
* @return {!promise.Promise.<{x: number, y: number}>} A promise
* that will be resolved with the window's position in the form of a
* {x:number, y:number} object literal.
*/
getPosition() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_POSITION).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getPosition()');
}
/**
* Repositions the current window.
* @param {number} x The desired horizontal position, relative to the left
* side of the screen.
 * @param {number} y The desired vertical position, relative to the top of
 *     the screen.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
setPosition(x, y) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_POSITION).
setParameter('windowHandle', 'current').
setParameter('x', x).
setParameter('y', y),
'WebDriver.manage().window().setPosition(' + x + ', ' + y + ')');
}
/**
* Retrieves the window's current size.
* @return {!promise.Promise<{width: number, height: number}>} A
* promise that will be resolved with the window's size in the form of a
* {width:number, height:number} object literal.
*/
getSize() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_SIZE).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getSize()');
}
/**
* Resizes the current window.
* @param {number} width The desired window width.
* @param {number} height The desired window height.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
setSize(width, height) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_SIZE).
setParameter('windowHandle', 'current').
setParameter('width', width).
setParameter('height', height),
'WebDriver.manage().window().setSize(' + width + ', ' + height + ')');
}
/**
* Maximizes the current window.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
maximize() {
return this.driver_.schedule(
new command.Command(command.Name.MAXIMIZE_WINDOW).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().maximize()');
}
}
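// Usage sketch for the Window interface; the coordinates and dimensions are
// arbitrary example values.
//
//   driver.manage().window().setPosition(0, 0);
//   driver.manage().window().setSize(1280, 800);
//   driver.manage().window().getSize().then(size => console.log(size.width));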
/**
* Interface for managing WebDriver log records.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.manage().logs()
*
* @see WebDriver#manage()
* @see Options#logs()
*/
class Logs {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Fetches available log entries for the given type.
*
* Note that log buffers are reset after each call, meaning that available
* log entries correspond to those entries not yet returned for a given log
* type. In practice, this means that this call will return the available log
* entries since the last call, or from the start of the session.
*
* @param {!logging.Type} type The desired log type.
* @return {!promise.Promise.<!Array.<!logging.Entry>>} A
* promise that will resolve to a list of log entries for the specified
* type.
*/
get(type) {
let cmd = new command.Command(command.Name.GET_LOG).
setParameter('type', type);
return this.driver_.schedule(
cmd, 'WebDriver.manage().logs().get(' + type + ')').
then(function(entries) {
return entries.map(function(entry) {
if (!(entry instanceof logging.Entry)) {
return new logging.Entry(
entry['level'], entry['message'], entry['timestamp'],
entry['type']);
}
return entry;
});
});
}
/**
* Retrieves the log types available to this driver.
* @return {!promise.Promise<!Array<!logging.Type>>} A
* promise that will resolve to a list of available log types.
*/
getAvailableLogTypes() {
return this.driver_.schedule(
new command.Command(command.Name.GET_AVAILABLE_LOG_TYPES),
'WebDriver.manage().logs().getAvailableLogTypes()');
}
}
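// Usage sketch for the Logs interface. The 'browser' log type is a common
// logging.Type, but support varies by driver implementation.
//
//   driver.manage().logs().getAvailableLogTypes().then(types => console.log(types));
//   driver.manage().logs().get('browser').then(entries => {
//     entries.forEach(entry => console.log(entry.message));
//   });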
/**
* An interface for changing the focus of the driver to another frame or window.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.switchTo()
*
* @see WebDriver#switchTo()
*/
class TargetLocator {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
 * Schedules a command to retrieve the {@code document.activeElement} element on
* the current document, or {@code document.body} if activeElement is not
* available.
* @return {!WebElementPromise} The active element.
*/
activeElement() {
var id = this.driver_.schedule(
new command.Command(command.Name.GET_ACTIVE_ELEMENT),
'WebDriver.switchTo().activeElement()');
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to switch focus of all future commands to the topmost
* frame on the page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the default content.
*/
defaultContent() {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', null),
'WebDriver.switchTo().defaultContent()');
}
/**
* Schedules a command to switch the focus of all future commands to another
* frame on the page. The target frame may be specified as one of the
* following:
*
* - A number that specifies a (zero-based) index into [window.frames](
* https://developer.mozilla.org/en-US/docs/Web/API/Window.frames).
 * - A {@link WebElement} reference, which corresponds to a `frame` or `iframe`
* DOM element.
* - The `null` value, to select the topmost frame on the page. Passing `null`
* is the same as calling {@link #defaultContent defaultContent()}.
*
* If the specified frame can not be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchFrameError}.
*
* @param {(number|WebElement|null)} id The frame locator.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the specified frame.
*/
frame(id) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', id),
'WebDriver.switchTo().frame(' + id + ')');
}
/**
* Schedules a command to switch the focus of all future commands to another
* window. Windows may be specified by their {@code window.name} attribute or
 * by their handle (as returned by {@link WebDriver#getAllWindowHandles}).
*
* If the specified window cannot be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchWindowError}.
*
* @param {string} nameOrHandle The name or window handle of the window to
* switch focus to.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the specified window.
*/
window(nameOrHandle) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_WINDOW).
setParameter('name', nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
}
/**
* Schedules a command to change focus to the active modal dialog, such as
* those opened by `window.alert()`, `window.confirm()`, and
* `window.prompt()`. The returned promise will be rejected with a
* {@linkplain error.NoSuchAlertError} if there are no open alerts.
*
* @return {!AlertPromise} The open alert.
*/
alert() {
var text = this.driver_.schedule(
new command.Command(command.Name.GET_ALERT_TEXT),
'WebDriver.switchTo().alert()');
var driver = this.driver_;
return new AlertPromise(driver, text.then(function(text) {
return new Alert(driver, text);
}));
}
}
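// Usage sketch for TargetLocator: switching into a frame, back to the top-level
// document, and accepting an alert; the locator is a placeholder.
//
//   driver.switchTo().frame(driver.findElement(By.css('iframe')));
//   driver.switchTo().defaultContent();
//   driver.switchTo().alert().then(alert => alert.accept());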
//////////////////////////////////////////////////////////////////////////////
//
// WebElement
//
//////////////////////////////////////////////////////////////////////////////
const LEGACY_ELEMENT_ID_KEY = 'ELEMENT';
const ELEMENT_ID_KEY = 'element-6066-11e4-a52e-4f735466cecf';
/**
* Represents a DOM element. WebElements can be found by searching from the
* document root using a {@link WebDriver} instance, or by searching
* under another WebElement:
*
* driver.get('http://www.google.com');
* var searchForm = driver.findElement(By.tagName('form'));
* var searchBox = searchForm.findElement(By.name('q'));
* searchBox.sendKeys('webdriver');
*/
class WebElement {
/**
* @param {!WebDriver} driver the parent WebDriver instance for this element.
* @param {(!IThenable<string>|string)} id The server-assigned opaque ID for
* the underlying DOM element.
*/
constructor(driver, id) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Promise<string>} */
this.id_ = promise.fulfilled(id);
}
/**
* @param {string} id The raw ID.
* @param {boolean=} opt_noLegacy Whether to exclude the legacy element key.
* @return {!Object} The element ID for use with WebDriver's wire protocol.
*/
static buildId(id, opt_noLegacy) {
return opt_noLegacy
? {[ELEMENT_ID_KEY]: id}
: {[ELEMENT_ID_KEY]: id, [LEGACY_ELEMENT_ID_KEY]: id};
}
/**
* Extracts the encoded WebElement ID from the object.
*
* @param {?} obj The object to extract the ID from.
* @return {string} the extracted ID.
* @throws {TypeError} if the object is not a valid encoded ID.
*/
static extractId(obj) {
if (obj && typeof obj === 'object') {
if (typeof obj[ELEMENT_ID_KEY] === 'string') {
return obj[ELEMENT_ID_KEY];
} else if (typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string') {
return obj[LEGACY_ELEMENT_ID_KEY];
}
}
throw new TypeError('object is not a WebElement ID');
}
/**
* @param {?} obj the object to test.
* @return {boolean} whether the object is a valid encoded WebElement ID.
*/
static isId(obj) {
return obj && typeof obj === 'object'
&& (typeof obj[ELEMENT_ID_KEY] === 'string'
|| typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string');
}
/**
* Compares two WebElements for equality.
*
* @param {!WebElement} a A WebElement.
* @param {!WebElement} b A WebElement.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved to whether the two WebElements are equal.
*/
static equals(a, b) {
if (a === b) {
return promise.fulfilled(true);
}
let ids = [a.getId(), b.getId()];
return promise.all(ids).then(function(ids) {
// If the two elements have the same ID, they should be considered
// equal. Otherwise, they may still be equivalent, but we'll need to
// ask the server to check for us.
if (ids[0] === ids[1]) {
return true;
}
let cmd = new command.Command(command.Name.ELEMENT_EQUALS);
cmd.setParameter('id', ids[0]);
cmd.setParameter('other', ids[1]);
return a.driver_.schedule(cmd, 'WebElement.equals()');
});
}
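// Usage sketch for WebElement.equals(); elementA and elementB are placeholders
// for two previously located elements.
//
//   WebElement.equals(elementA, elementB).then(same => {
//     console.log(same ? 'same underlying DOM node' : 'different nodes');
//   });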
/** @return {!WebDriver} The parent driver for this instance. */
getDriver() {
return this.driver_;
}
/**
* @return {!promise.Promise<string>} A promise that resolves to
* the server-assigned opaque ID assigned to this element.
*/
getId() {
return this.id_;
}
/**
* @return {!Object} Returns the serialized representation of this WebElement.
*/
[Symbols.serialize]() {
return this.getId().then(WebElement.buildId);
}
/**
* Schedules a command that targets this element with the parent WebDriver
* instance. Will ensure this element's ID is included in the command
* parameters under the "id" key.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Promise<T>} A promise that will be resolved
* with the command result.
* @template T
* @see WebDriver#schedule
* @private
*/
schedule_(command, description) {
command.setParameter('id', this.getId());
return this.driver_.schedule(command, description);
}
/**
* Schedule a command to find a descendant of this element. If the element
* cannot be found, the returned promise will be rejected with a
* {@linkplain error.NoSuchElementError NoSuchElementError}.
*
* The search criteria for an element may be defined using one of the static
* factories on the {@link by.By} class, or as a short-hand
* {@link ./by.ByHash} object. For example, the following two statements
* are equivalent:
*
* var e1 = element.findElement(By.id('foo'));
* var e2 = element.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = element.findElement(firstVisibleLink);
*
* function firstVisibleLink(element) {
* var links = element.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
id = this.driver_.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule_(cmd, 'WebElement.findElement(' + locator + ')');
}
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to find all of the descendants of this element that
* match the given search criteria.
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!promise.Promise<!Array<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
return this.driver_.findElementsInternal_(locator, this);
} else {
var cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
return this.schedule_(cmd, 'WebElement.findElements(' + locator + ')');
}
}
/**
* Schedules a command to click on this element.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the click command has completed.
*/
click() {
return this.schedule_(
new command.Command(command.Name.CLICK_ELEMENT),
'WebElement.click()');
}
/**
* Schedules a command to type a sequence on the DOM element represented by
* this instance.
*
* Modifier keys (SHIFT, CONTROL, ALT, META) are stateful; once a modifier is
 * processed in the key sequence, that key state is toggled until one of the
* following occurs:
*
* - The modifier key is encountered again in the sequence. At this point the
* state of the key is toggled (along with the appropriate keyup/down
* events).
* - The {@link input.Key.NULL} key is encountered in the sequence. When
* this key is encountered, all modifier keys current in the down state are
* released (with accompanying keyup events). The NULL key can be used to
* simulate common keyboard shortcuts:
*
* element.sendKeys("text was",
* Key.CONTROL, "a", Key.NULL,
* "now text is");
* // Alternatively:
* element.sendKeys("text was",
* Key.chord(Key.CONTROL, "a"),
* "now text is");
*
 * - The end of the key sequence is encountered. When there are no more keys
* to type, all depressed modifier keys are released (with accompanying
* keyup events).
*
* If this element is a file input ({@code <input type="file">}), the
* specified key sequence should specify the path to the file to attach to
 * the element. This is analogous to the user clicking "Browse..." and entering
* the path into the file select dialog.
*
* var form = driver.findElement(By.css('form'));
* var element = form.findElement(By.css('input[type=file]'));
* element.sendKeys('/path/to/file.txt');
* form.submit();
*
* For uploads to function correctly, the entered path must reference a file
* on the _browser's_ machine, not the local machine running this script. When
* running against a remote Selenium server, a {@link input.FileDetector}
* may be used to transparently copy files to the remote machine before
* attempting to upload them in the browser.
*
* __Note:__ On browsers where native keyboard events are not supported
* (e.g. Firefox on OS X), key events will be synthesized. Special
 * punctuation keys will be synthesized according to a standard QWERTY en-us
* keyboard layout.
*
* @param {...(number|string|!IThenable<(number|string)>)} var_args The
* sequence of keys to type. Number keys may be referenced numerically or
* by string (1 or '1'). All arguments will be joined into a single
* sequence.
* @return {!promise.Promise<void>} A promise that will be resolved
* when all keys have been typed.
*/
sendKeys(var_args) {
let keys = Promise.all(Array.prototype.slice.call(arguments, 0)).
then(keys => {
let ret = [];
keys.forEach(key => {
let type = typeof key;
if (type === 'number') {
key = String(key);
} else if (type !== 'string') {
throw TypeError(
'each key must be a number or string; got ' + type);
}
// The W3C protocol requires keys to be specified as an array where
// each element is a single key.
ret.push.apply(ret, key.split(''));
});
return ret;
});
if (!this.driver_.fileDetector_) {
return this.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('value', keys),
'WebElement.sendKeys()');
}
// Suppress unhandled rejection errors until the flow executes the command.
keys.catch(function() {});
var element = this;
return this.driver_.flow_.execute(function() {
return keys.then(function(keys) {
return element.driver_.fileDetector_
.handleFile(element.driver_, keys.join(''));
}).then(function(keys) {
return element.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('value', keys.split('')),
'WebElement.sendKeys()');
});
}, 'WebElement.sendKeys()');
}
/**
* Schedules a command to query for the tag/node name of this element.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the element's tag name.
*/
getTagName() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TAG_NAME),
'WebElement.getTagName()');
}
/**
* Schedules a command to query for the computed style of the element
* represented by this instance. If the element inherits the named style from
* its parent, the parent will be queried for its value. Where possible, color
* values will be converted to their hex representation (e.g. #00ff00 instead
* of rgb(0, 255, 0)).
*
* _Warning:_ the value returned will be as the browser interprets it, so
* it may be tricky to form a proper assertion.
*
* @param {string} cssStyleProperty The name of the CSS style property to look
* up.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the requested CSS value.
*/
getCssValue(cssStyleProperty) {
var name = command.Name.GET_ELEMENT_VALUE_OF_CSS_PROPERTY;
return this.schedule_(
new command.Command(name).
setParameter('propertyName', cssStyleProperty),
'WebElement.getCssValue(' + cssStyleProperty + ')');
}
/**
* Schedules a command to query for the value of the given attribute of the
* element. Will return the current value, even if it has been modified after
* the page has been loaded. More exactly, this method will return the value
* of the given attribute, unless that attribute is not present, in which case
* the value of the property with the same name is returned. If neither value
* is set, null is returned (for example, the "value" property of a textarea
* element). The "style" attribute is converted as best can be to a
* text representation with a trailing semi-colon. The following are deemed to
* be "boolean" attributes and will return either "true" or null:
*
* async, autofocus, autoplay, checked, compact, complete, controls, declare,
* defaultchecked, defaultselected, defer, disabled, draggable, ended,
* formnovalidate, hidden, indeterminate, iscontenteditable, ismap, itemscope,
* loop, multiple, muted, nohref, noresize, noshade, novalidate, nowrap, open,
* paused, pubdate, readonly, required, reversed, scoped, seamless, seeking,
* selected, spellcheck, truespeed, willvalidate
*
* Finally, the following commonly mis-capitalized attribute/property names
* are evaluated as expected:
*
* - "class"
* - "readonly"
*
* @param {string} attributeName The name of the attribute to query.
* @return {!promise.Promise<?string>} A promise that will be
* resolved with the attribute's value. The returned value will always be
* either a string or null.
*/
getAttribute(attributeName) {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_ATTRIBUTE).
setParameter('name', attributeName),
'WebElement.getAttribute(' + attributeName + ')');
}
/**
* Get the visible (i.e. not hidden by CSS) innerText of this element,
* including sub-elements, without any leading or trailing whitespace.
*
* @return {!promise.Promise<string>} A promise that will be
* resolved with the element's visible text.
*/
getText() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TEXT),
'WebElement.getText()');
}
/**
* Schedules a command to compute the size of this element's bounding box, in
* pixels.
* @return {!promise.Promise.<{width: number, height: number}>} A
* promise that will be resolved with the element's size as a
* {@code {width:number, height:number}} object.
*/
getSize() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_SIZE),
'WebElement.getSize()');
}
/**
* Schedules a command to compute the location of this element in page space.
* @return {!promise.Promise.<{x: number, y: number}>} A promise that
* will be resolved to the element's location as a
* {@code {x:number, y:number}} object.
*/
getLocation() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_LOCATION),
'WebElement.getLocation()');
}
/**
* Schedules a command to query whether the DOM element represented by this
   * instance is enabled, as dictated by the {@code disabled} attribute.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently enabled.
*/
isEnabled() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_ENABLED),
'WebElement.isEnabled()');
}
/**
* Schedules a command to query whether this element is selected.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently selected.
*/
isSelected() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_SELECTED),
'WebElement.isSelected()');
}
/**
* Schedules a command to submit the form containing this element (or this
* element if it is a FORM element). This command is a no-op if the element is
* not contained in a form.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the form has been submitted.
*/
submit() {
return this.schedule_(
new command.Command(command.Name.SUBMIT_ELEMENT),
'WebElement.submit()');
}
/**
* Schedules a command to clear the `value` of this element. This command has
* no effect if the underlying DOM element is neither a text INPUT element
* nor a TEXTAREA element.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the element has been cleared.
*/
clear() {
return this.schedule_(
new command.Command(command.Name.CLEAR_ELEMENT),
'WebElement.clear()');
}
/**
* Schedules a command to test whether this element is currently displayed.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently visible on the page.
*/
isDisplayed() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_DISPLAYED),
'WebElement.isDisplayed()');
}
/**
* Take a screenshot of the visible region encompassed by this element's
* bounding rectangle.
*
* @param {boolean=} opt_scroll Optional argument that indicates whether the
* element should be scrolled into view before taking a screenshot.
* Defaults to false.
* @return {!promise.Promise<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot(opt_scroll) {
var scroll = !!opt_scroll;
return this.schedule_(
new command.Command(command.Name.TAKE_ELEMENT_SCREENSHOT)
.setParameter('scroll', scroll),
'WebElement.takeScreenshot(' + scroll + ')');
}
}
/**
* WebElementPromise is a promise that will be fulfilled with a WebElement.
* This serves as a forward proxy on WebElement, allowing calls to be
 * scheduled directly on this instance before the underlying
* WebElement has been fulfilled. In other words, the following two statements
* are equivalent:
*
* driver.findElement({id: 'my-button'}).click();
* driver.findElement({id: 'my-button'}).then(function(el) {
* return el.click();
* });
*
* @implements {promise.Thenable<!WebElement>}
* @final
*/
class WebElementPromise extends WebElement {
/**
* @param {!WebDriver} driver The parent WebDriver instance for this
* element.
* @param {!promise.Promise<!WebElement>} el A promise
* that will resolve to the promised element.
*/
constructor(driver, el) {
super(driver, 'unused');
/** @override */
this.cancel = el.cancel.bind(el);
/** @override */
this.isPending = el.isPending.bind(el);
/** @override */
this.then = el.then.bind(el);
/** @override */
this.catch = el.catch.bind(el);
/** @override */
this.finally = el.finally.bind(el);
/**
* Defers returning the element ID until the wrapped WebElement has been
* resolved.
* @override
*/
this.getId = function() {
return el.then(function(el) {
return el.getId();
});
};
}
}
promise.Thenable.addImplementation(WebElementPromise);
//////////////////////////////////////////////////////////////////////////////
//
// Alert
//
//////////////////////////////////////////////////////////////////////////////
/**
* Represents a modal dialog such as {@code alert}, {@code confirm}, or
* {@code prompt}. Provides functions to retrieve the message displayed with
* the alert, accept or dismiss the alert, and set the response text (in the
* case of {@code prompt}).
*/
class Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this alert
* is attached to.
* @param {string} text The message text displayed with this alert.
*/
constructor(driver, text) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Promise<string>} */
this.text_ = promise.fulfilled(text);
}
/**
* Retrieves the message text displayed with this alert. For instance, if the
* alert were opened with alert("hello"), then this would return "hello".
*
* @return {!promise.Promise<string>} A promise that will be
* resolved to the text displayed with this alert.
*/
getText() {
return this.text_;
}
/**
* Sets the username and password in an alert prompting for credentials (such
* as a Basic HTTP Auth prompt). This method will implicitly
* {@linkplain #accept() submit} the dialog.
*
* @param {string} username The username to send.
* @param {string} password The password to send.
* @return {!promise.Promise<void>} A promise that will be resolved when this
* command has completed.
*/
authenticateAs(username, password) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_CREDENTIALS),
'WebDriver.switchTo().alert()'
+ `.authenticateAs("${username}", "${password}")`);
}
/**
* Accepts this alert.
*
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
accept() {
return this.driver_.schedule(
new command.Command(command.Name.ACCEPT_ALERT),
'WebDriver.switchTo().alert().accept()');
}
/**
* Dismisses this alert.
*
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
dismiss() {
return this.driver_.schedule(
new command.Command(command.Name.DISMISS_ALERT),
'WebDriver.switchTo().alert().dismiss()');
}
/**
* Sets the response text on this alert. This command will return an error if
* the underlying alert does not support response text (e.g. window.alert and
* window.confirm).
*
* @param {string} text The text to set.
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
sendKeys(text) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_TEXT).
setParameter('text', text),
'WebDriver.switchTo().alert().sendKeys(' + text + ')');
}
}
/**
* AlertPromise is a promise that will be fulfilled with an Alert. This promise
* serves as a forward proxy on an Alert, allowing calls to be scheduled
* directly on this instance before the underlying Alert has been fulfilled. In
* other words, the following two statements are equivalent:
*
* driver.switchTo().alert().dismiss();
* driver.switchTo().alert().then(function(alert) {
* return alert.dismiss();
* });
*
* @implements {promise.Thenable.<!webdriver.Alert>}
* @final
*/
class AlertPromise extends Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this
* alert is attached to.
* @param {!promise.Thenable<!Alert>} alert A thenable
* that will be fulfilled with the promised alert.
*/
constructor(driver, alert) {
super(driver, 'unused');
/** @override */
this.cancel = alert.cancel.bind(alert);
/** @override */
this.isPending = alert.isPending.bind(alert);
/** @override */
this.then = alert.then.bind(alert);
/** @override */
this.catch = alert.catch.bind(alert);
/** @override */
this.finally = alert.finally.bind(alert);
/**
* Defer returning text until the promised alert has been resolved.
* @override
*/
this.getText = function() {
return alert.then(function(alert) {
return alert.getText();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.authenticateAs = function(username, password) {
return alert.then(function(alert) {
return alert.authenticateAs(username, password);
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.accept = function() {
return alert.then(function(alert) {
return alert.accept();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.dismiss = function() {
return alert.then(function(alert) {
return alert.dismiss();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.sendKeys = function(text) {
return alert.then(function(alert) {
return alert.sendKeys(text);
});
};
}
}
promise.Thenable.addImplementation(AlertPromise);
// PUBLIC API
module.exports = {
Alert: Alert,
AlertPromise: AlertPromise,
Condition: Condition,
Logs: Logs,
Navigation: Navigation,
Options: Options,
TargetLocator: TargetLocator,
Timeouts: Timeouts,
WebDriver: WebDriver,
WebElement: WebElement,
WebElementCondition: WebElementCondition,
WebElementPromise: WebElementPromise,
Window: Window
};
| 1 | 13,587 | This should only be sent if the driver is speaking to a W3C conformant remote, so we need an if-condition check like we have in the Python bindings. | SeleniumHQ-selenium | java |
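The review note above suggests splitting the key sequence into single characters only when the remote end speaks the W3C protocol, the way the Python bindings already gate it. A minimal JavaScript sketch of that idea follows; it is not the actual selenium-webdriver implementation, and the `isW3C` argument is a hypothetical stand-in for however the driver would expose its protocol dialect.

// Hypothetical helper: encode a key sequence for the remote end. W3C
// conformant remotes expect `value` to be an array of single characters,
// while legacy (JSON wire protocol) remotes take the joined string.
// The `isW3C` flag is an assumption for illustration, not a real
// selenium-webdriver API.
function encodeKeysForRemote(keys, isW3C) {
  const joined = keys.join('');
  return isW3C ? joined.split('') : joined;
}

// Example usage:
//   encodeKeysForRemote(['a', 'b', 'c'], true)  -> ['a', 'b', 'c']
//   encodeKeysForRemote(['a', 'b', 'c'], false) -> 'abc'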
@@ -0,0 +1,17 @@
+using System.Collections.Generic;
+using System.IO.Pipelines;
+using System.Threading.Tasks;
+using Microsoft.AspNetCore.Http.Features;
+
+namespace Microsoft.AspNetCore.Connections
+{
+ public interface IConnection
+ {
+ IDuplexPipe Transport { get; }
+ IFeatureCollection Features { get; }
+
+ Task StartAsync();
+ Task StartAsync(TransferFormat transferFormat);
+ Task DisposeAsync();
+ }
+} | 1 | 1 | 15,380 | TransferFormat.Text in general purpose connection interface? :/ | aspnet-KestrelHttpServer | .cs |
|
@@ -36,10 +36,17 @@ module Selenium
SOCKET_LOCK_TIMEOUT = 45
STOP_TIMEOUT = 20
+ @executable = nil
+ @missing = ''
+
+ class << self
+ attr_accessor :executable, :missing_text
+ end
+
attr_accessor :host
def initialize(executable_path, port, *extra_args)
- @executable_path = executable_path
+ @executable_path = binary_path(executable_path)
@host = Platform.localhost
@port = Integer(port)
@extra_args = extra_args | 1 | # encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
#
# Base class implementing default behavior of service object,
# responsible for starting and stopping driver implementations.
#
# Subclasses must implement the following private methods:
# * #start_process
# * #stop_server
# * #cannot_connect_error_text
#
# @api private
#
class Service
START_TIMEOUT = 20
SOCKET_LOCK_TIMEOUT = 45
STOP_TIMEOUT = 20
attr_accessor :host
def initialize(executable_path, port, *extra_args)
@executable_path = executable_path
@host = Platform.localhost
@port = Integer(port)
@extra_args = extra_args
raise Error::WebDriverError, "invalid port: #{@port}" if @port < 1
end
def start
if process_running?
raise "already started: #{uri.inspect} #{@executable_path.inspect}"
end
Platform.exit_hook { stop } # make sure we don't leave the server running
socket_lock.locked do
find_free_port
start_process
connect_until_stable
end
end
def stop
stop_server
@process.poll_for_exit STOP_TIMEOUT
rescue ChildProcess::TimeoutError
ensure
stop_process
end
def uri
@uri ||= URI.parse("http://#{@host}:#{@port}")
end
private
def connect_to_server
Net::HTTP.start(@host, @port) do |http|
http.open_timeout = STOP_TIMEOUT / 2
http.read_timeout = STOP_TIMEOUT / 2
yield http
end
end
def find_free_port
@port = PortProber.above(@port)
end
def start_process
raise NotImplementedError, 'subclass responsibility'
end
def stop_process
return if process_exited?
@process.stop STOP_TIMEOUT
end
def stop_server
connect_to_server { |http| http.get('/shutdown') }
end
def process_running?
@process && @process.alive?
end
def process_exited?
@process.nil? || @process.exited?
end
def connect_until_stable
socket_poller = SocketPoller.new @host, @port, START_TIMEOUT
return if socket_poller.connected?
raise Error::WebDriverError, cannot_connect_error_text
end
def cannot_connect_error_text
raise NotImplementedError, 'subclass responsibility'
end
def socket_lock
@socket_lock ||= SocketLock.new(@port - 1, SOCKET_LOCK_TIMEOUT)
end
end # Service
end # WebDriver
end # Selenium
| 1 | 13,879 | Is this `@missing_text` ? | SeleniumHQ-selenium | py |
@@ -485,6 +485,12 @@ func (c *client) processRouteInfo(info *Info) {
if !s.getOpts().Cluster.NoAdvertise {
s.addClientConnectURLsAndSendINFOToClients(info.ClientConnectURLs)
}
+
+ // This will allow us to determine the initial RTT without having to
+ // wait for first timer based PING.
+ c.mu.Lock()
+ c.sendPing()
+ c.mu.Unlock()
} else {
c.Debugf("Detected duplicate remote route %q", info.ID)
c.closeConnection(DuplicateRoute) | 1 | // Copyright 2013-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"math/rand"
"net"
"net/url"
"runtime"
"strconv"
"strings"
"sync/atomic"
"time"
)
// RouteType designates the router type
type RouteType int
// Type of Route
const (
// This route we learned from speaking to other routes.
Implicit RouteType = iota
// This route was explicitly configured.
Explicit
)
const (
// RouteProtoZero is the original Route protocol from 2009.
// http://nats.io/documentation/internals/nats-protocol/
RouteProtoZero = iota
	// RouteProtoInfo signals a route can receive more than the original INFO block.
// This can be used to update remote cluster permissions, etc...
RouteProtoInfo
// RouteProtoV2 is the new route/cluster protocol that provides account support.
RouteProtoV2
)
// Include the space for the proto
var (
aSubBytes = []byte{'A', '+', ' '}
aUnsubBytes = []byte{'A', '-', ' '}
rSubBytes = []byte{'R', 'S', '+', ' '}
rUnsubBytes = []byte{'R', 'S', '-', ' '}
)
// Used by tests
var testRouteProto = RouteProtoV2
type route struct {
remoteID string
didSolicit bool
retry bool
routeType RouteType
url *url.URL
authRequired bool
tlsRequired bool
connectURLs []string
replySubs map[*subscription]*time.Timer
gatewayURL string
leafnodeURL string
}
type connectInfo struct {
Echo bool `json:"echo"`
Verbose bool `json:"verbose"`
Pedantic bool `json:"pedantic"`
User string `json:"user,omitempty"`
Pass string `json:"pass,omitempty"`
TLS bool `json:"tls_required"`
Name string `json:"name"`
Gateway string `json:"gateway,omitempty"`
}
// Route protocol constants
const (
ConProto = "CONNECT %s" + _CRLF_
InfoProto = "INFO %s" + _CRLF_
)
// Used to decide if the sending of the route SUBs list should be
// done in place or in separate go routine.
const sendRouteSubsInGoRoutineThreshold = 1024 * 1024 // 1MB
// Warning when user configures cluster TLS insecure
const clusterTLSInsecureWarning = "TLS certificate chain and hostname of solicited routes will not be verified. DO NOT USE IN PRODUCTION!"
// Can be changed for tests
var routeConnectDelay = DEFAULT_ROUTE_CONNECT
// This will add a timer to watch over remote reply subjects in case
// they fail to receive a response. The duration will be taken from the
// accounts map timeout to match.
// Lock should be held upon entering.
func (c *client) addReplySubTimeout(acc *Account, sub *subscription, d time.Duration) {
if c.route.replySubs == nil {
c.route.replySubs = make(map[*subscription]*time.Timer)
}
rs := c.route.replySubs
rs[sub] = time.AfterFunc(d, func() {
c.mu.Lock()
delete(rs, sub)
sub.max = 0
c.mu.Unlock()
c.unsubscribe(acc, sub, true, true)
})
}
// removeReplySub is called when we trip the max on remoteReply subs.
func (c *client) removeReplySub(sub *subscription) {
if sub == nil {
return
}
// Lookup the account based on sub.sid.
if i := bytes.Index(sub.sid, []byte(" ")); i > 0 {
// First part of SID for route is account name.
if acc, _ := c.srv.LookupAccount(string(sub.sid[:i])); acc != nil {
acc.sl.Remove(sub)
}
c.mu.Lock()
c.removeReplySubTimeout(sub)
delete(c.subs, string(sub.sid))
c.mu.Unlock()
}
}
// removeReplySubTimeout will remove a timer if it exists.
// Lock should be held upon entering.
func (c *client) removeReplySubTimeout(sub *subscription) {
// Remove any reply sub timer if it exists.
if c.route == nil || c.route.replySubs == nil {
return
}
if t, ok := c.route.replySubs[sub]; ok {
t.Stop()
delete(c.route.replySubs, sub)
}
}
func (c *client) processAccountSub(arg []byte) error {
c.traceInOp("A+", arg)
accName := string(arg)
if c.kind == GATEWAY {
return c.processGatewayAccountSub(accName)
}
return nil
}
func (c *client) processAccountUnsub(arg []byte) {
c.traceInOp("A-", arg)
accName := string(arg)
if c.kind == GATEWAY {
c.processGatewayAccountUnsub(accName)
}
}
// Process an inbound RMSG specification from the remote route.
func (c *client) processRoutedMsgArgs(trace bool, arg []byte) error {
if trace {
c.traceInOp("RMSG", arg)
}
// Unroll splitArgs to avoid runtime/heap issues
a := [MAX_MSG_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t', '\r', '\n':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
c.pa.arg = arg
switch len(args) {
case 0, 1, 2:
return fmt.Errorf("processRoutedMsgArgs Parse Error: '%s'", args)
case 3:
c.pa.reply = nil
c.pa.queues = nil
c.pa.szb = args[2]
c.pa.size = parseSize(args[2])
case 4:
c.pa.reply = args[2]
c.pa.queues = nil
c.pa.szb = args[3]
c.pa.size = parseSize(args[3])
default:
// args[2] is our reply indicator. Should be + or | normally.
if len(args[2]) != 1 {
return fmt.Errorf("processRoutedMsgArgs Bad or Missing Reply Indicator: '%s'", args[2])
}
switch args[2][0] {
case '+':
c.pa.reply = args[3]
case '|':
c.pa.reply = nil
default:
return fmt.Errorf("processRoutedMsgArgs Bad or Missing Reply Indicator: '%s'", args[2])
}
// Grab size.
c.pa.szb = args[len(args)-1]
c.pa.size = parseSize(c.pa.szb)
// Grab queue names.
if c.pa.reply != nil {
c.pa.queues = args[4 : len(args)-1]
} else {
c.pa.queues = args[3 : len(args)-1]
}
}
if c.pa.size < 0 {
return fmt.Errorf("processRoutedMsgArgs Bad or Missing Size: '%s'", args)
}
// Common ones processed after check for arg length
c.pa.account = args[0]
c.pa.subject = args[1]
c.pa.pacache = arg[:len(args[0])+len(args[1])+1]
return nil
}
// processInboundRoutedMsg is called to process an inbound msg from a route.
func (c *client) processInboundRoutedMsg(msg []byte) {
// Update statistics
c.in.msgs++
// The msg includes the CR_LF, so pull back out for accounting.
c.in.bytes += int32(len(msg) - LEN_CR_LF)
if c.trace {
c.traceMsg(msg)
}
if c.opts.Verbose {
c.sendOK()
}
// Mostly under testing scenarios.
if c.srv == nil {
return
}
acc, r := c.getAccAndResultFromCache()
if acc == nil {
c.Debugf("Unknown account %q for routed message on subject: %q", c.pa.account, c.pa.subject)
return
}
// Check to see if we need to map/route to another account.
if acc.imports.services != nil {
c.checkForImportServices(acc, msg)
}
// Check for no interest, short circuit if so.
// This is the fanout scale.
if len(r.psubs)+len(r.qsubs) == 0 {
return
}
// Check to see if we have a routed message with a service reply.
if isServiceReply(c.pa.reply) && acc != nil {
// Need to add a sub here for local interest to send a response back
// to the originating server/requestor where it will be re-mapped.
sid := make([]byte, 0, len(acc.Name)+len(c.pa.reply)+1)
sid = append(sid, acc.Name...)
sid = append(sid, ' ')
sid = append(sid, c.pa.reply...)
// Copy off the reply since otherwise we are referencing a buffer that will be reused.
reply := make([]byte, len(c.pa.reply))
copy(reply, c.pa.reply)
sub := &subscription{client: c, subject: reply, sid: sid, max: 1}
if err := acc.sl.Insert(sub); err != nil {
c.Errorf("Could not insert subscription: %v", err)
} else {
ttl := acc.AutoExpireTTL()
c.mu.Lock()
c.subs[string(sid)] = sub
c.addReplySubTimeout(acc, sub, ttl)
c.mu.Unlock()
}
}
c.processMsgResults(acc, r, msg, c.pa.subject, c.pa.reply, pmrNoFlag)
}
// Helper function for routes and gateways and leafnodes to create qfilters
// needed for converted subs from imports, etc.
func (c *client) makeQFilter(qsubs [][]*subscription) {
qs := make([][]byte, 0, len(qsubs))
for _, qsub := range qsubs {
if len(qsub) > 0 {
qs = append(qs, qsub[0].queue)
}
}
c.pa.queues = qs
}
// Lock should be held entering here.
func (c *client) sendRouteConnect(tlsRequired bool) {
var user, pass string
if userInfo := c.route.url.User; userInfo != nil {
user = userInfo.Username()
pass, _ = userInfo.Password()
}
cinfo := connectInfo{
Echo: true,
Verbose: false,
Pedantic: false,
User: user,
Pass: pass,
TLS: tlsRequired,
Name: c.srv.info.ID,
}
b, err := json.Marshal(cinfo)
if err != nil {
c.Errorf("Error marshaling CONNECT to route: %v\n", err)
c.closeConnection(ProtocolViolation)
return
}
c.sendProto([]byte(fmt.Sprintf(ConProto, b)), true)
}
// Process the info message if we are a route.
func (c *client) processRouteInfo(info *Info) {
// We may need to update route permissions and will need the account
// sublist. Since getting the account requires server lock, do the
// lookup now.
// FIXME(dlc) - Add account scoping.
gacc := c.srv.globalAccount()
gacc.mu.RLock()
sl := gacc.sl
gacc.mu.RUnlock()
c.mu.Lock()
// Connection can be closed at any time (by auth timeout, etc).
// Does not make sense to continue here if connection is gone.
if c.route == nil || c.nc == nil {
c.mu.Unlock()
return
}
s := c.srv
remoteID := c.route.remoteID
// Check if this is an INFO for gateways...
if info.Gateway != "" {
c.mu.Unlock()
// If this server has no gateway configured, report error and return.
if !s.gateway.enabled {
// FIXME: Should this be a Fatalf()?
s.Errorf("Received information about gateway %q from %s, but gateway is not configured",
info.Gateway, remoteID)
return
}
s.processGatewayInfoFromRoute(info, remoteID, c)
return
}
// We receive an INFO from a server that informs us about another server,
// so the info.ID in the INFO protocol does not match the ID of this route.
if remoteID != "" && remoteID != info.ID {
c.mu.Unlock()
// Process this implicit route. We will check that it is not an explicit
// route and/or that it has not been connected already.
s.processImplicitRoute(info)
return
}
// Need to set this for the detection of the route to self to work
// in closeConnection().
c.route.remoteID = info.ID
// Get the route's proto version
c.opts.Protocol = info.Proto
// Detect route to self.
if c.route.remoteID == s.info.ID {
c.mu.Unlock()
c.closeConnection(DuplicateRoute)
return
}
// Copy over important information.
c.route.authRequired = info.AuthRequired
c.route.tlsRequired = info.TLSRequired
c.route.gatewayURL = info.GatewayURL
// When sent through route INFO, if the field is set, it should be of size 1.
if len(info.LeafNodeURLs) == 1 {
c.route.leafnodeURL = info.LeafNodeURLs[0]
}
// If this is an update due to config reload on the remote server,
// need to possibly send local subs to the remote server.
if c.flags.isSet(infoReceived) {
c.updateRemoteRoutePerms(sl, info)
c.mu.Unlock()
return
}
// Copy over permissions as well.
c.opts.Import = info.Import
c.opts.Export = info.Export
// If we do not know this route's URL, construct one on the fly
// from the information provided.
if c.route.url == nil {
// Add in the URL from host and port
hp := net.JoinHostPort(info.Host, strconv.Itoa(info.Port))
url, err := url.Parse(fmt.Sprintf("nats-route://%s/", hp))
if err != nil {
c.Errorf("Error parsing URL from INFO: %v\n", err)
c.mu.Unlock()
c.closeConnection(ParseError)
return
}
c.route.url = url
}
// Mark that the INFO protocol has been received. Will allow
// to detect INFO updates.
c.flags.set(infoReceived)
// Check to see if we have this remote already registered.
// This can happen when both servers have routes to each other.
c.mu.Unlock()
if added, sendInfo := s.addRoute(c, info); added {
c.Debugf("Registering remote route %q", info.ID)
// Send our subs to the other side.
s.sendSubsToRoute(c)
// Send info about the known gateways to this route.
s.sendGatewayConfigsToRoute(c)
// sendInfo will be false if the route that we just accepted
// is the only route there is.
if sendInfo {
// The incoming INFO from the route will have IP set
// if it has Cluster.Advertise. In that case, use that
			// otherwise construct it from the remote TCP address.
if info.IP == "" {
// Need to get the remote IP address.
c.mu.Lock()
switch conn := c.nc.(type) {
case *net.TCPConn, *tls.Conn:
addr := conn.RemoteAddr().(*net.TCPAddr)
info.IP = fmt.Sprintf("nats-route://%s/", net.JoinHostPort(addr.IP.String(),
strconv.Itoa(info.Port)))
default:
info.IP = c.route.url.String()
}
c.mu.Unlock()
}
// Now let the known servers know about this new route
s.forwardNewRouteInfoToKnownServers(info)
}
// Unless disabled, possibly update the server's INFO protocol
// and send to clients that know how to handle async INFOs.
if !s.getOpts().Cluster.NoAdvertise {
s.addClientConnectURLsAndSendINFOToClients(info.ClientConnectURLs)
}
} else {
c.Debugf("Detected duplicate remote route %q", info.ID)
c.closeConnection(DuplicateRoute)
}
}
// Possibly sends local subscriptions interest to this route
// based on changes in the remote's Export permissions.
// Lock assumed held on entry
func (c *client) updateRemoteRoutePerms(sl *Sublist, info *Info) {
// Interested only on Export permissions for the remote server.
// Create "fake" clients that we will use to check permissions
// using the old permissions...
oldPerms := &RoutePermissions{Export: c.opts.Export}
oldPermsTester := &client{}
oldPermsTester.setRoutePermissions(oldPerms)
// and the new ones.
newPerms := &RoutePermissions{Export: info.Export}
newPermsTester := &client{}
newPermsTester.setRoutePermissions(newPerms)
c.opts.Import = info.Import
c.opts.Export = info.Export
var (
_localSubs [4096]*subscription
localSubs = _localSubs[:0]
)
sl.localSubs(&localSubs)
c.sendRouteSubProtos(localSubs, false, func(sub *subscription) bool {
subj := string(sub.subject)
// If the remote can now export but could not before, and this server can import this
// subject, then send SUB protocol.
if newPermsTester.canExport(subj) && !oldPermsTester.canExport(subj) && c.canImport(subj) {
return true
}
return false
})
}
// sendAsyncInfoToClients sends an INFO protocol to all
// connected clients that accept async INFO updates.
// The server lock is held on entry.
func (s *Server) sendAsyncInfoToClients() {
// If there are no clients supporting async INFO protocols, we are done.
// Also don't send if we are shutting down...
if s.cproto == 0 || s.shutdown {
return
}
for _, c := range s.clients {
c.mu.Lock()
// Here, we are going to send only to the clients that are fully
// registered (server has received CONNECT and first PING). For
// clients that are not at this stage, this will happen in the
// processing of the first PING (see client.processPing)
if c.opts.Protocol >= ClientProtoInfo && c.flags.isSet(firstPongSent) {
// sendInfo takes care of checking if the connection is still
// valid or not, so don't duplicate tests here.
c.sendInfo(c.generateClientInfoJSON(s.copyInfo()))
}
c.mu.Unlock()
}
}
// This will process implicit route information received from another server.
// We will check to see if we have configured or are already connected,
// and if so we will ignore. Otherwise we will attempt to connect.
func (s *Server) processImplicitRoute(info *Info) {
remoteID := info.ID
s.mu.Lock()
defer s.mu.Unlock()
// Don't connect to ourself
if remoteID == s.info.ID {
return
}
// Check if this route already exists
if _, exists := s.remotes[remoteID]; exists {
return
}
// Check if we have this route as a configured route
if s.hasThisRouteConfigured(info) {
return
}
// Initiate the connection, using info.IP instead of info.URL here...
r, err := url.Parse(info.IP)
if err != nil {
s.Errorf("Error parsing URL from INFO: %v\n", err)
return
}
// Snapshot server options.
opts := s.getOpts()
if info.AuthRequired {
r.User = url.UserPassword(opts.Cluster.Username, opts.Cluster.Password)
}
s.startGoRoutine(func() { s.connectToRoute(r, false, true) })
}
// hasThisRouteConfigured returns true if info.Host:info.Port is present
// in the server's opts.Routes, false otherwise.
// Server lock is assumed to be held by caller.
func (s *Server) hasThisRouteConfigured(info *Info) bool {
urlToCheckExplicit := strings.ToLower(net.JoinHostPort(info.Host, strconv.Itoa(info.Port)))
for _, ri := range s.getOpts().Routes {
if strings.ToLower(ri.Host) == urlToCheckExplicit {
return true
}
}
return false
}
// forwardNewRouteInfoToKnownServers sends the INFO protocol of the new route
// to all routes known by this server. In turn, each server will contact this
// new route.
func (s *Server) forwardNewRouteInfoToKnownServers(info *Info) {
s.mu.Lock()
defer s.mu.Unlock()
b, _ := json.Marshal(info)
infoJSON := []byte(fmt.Sprintf(InfoProto, b))
for _, r := range s.routes {
r.mu.Lock()
if r.route.remoteID != info.ID {
r.sendInfo(infoJSON)
}
r.mu.Unlock()
}
}
// canImport is whether or not we will send a SUB for interest to the other side.
// This is for ROUTER connections only.
// Lock is held on entry.
func (c *client) canImport(subject string) bool {
// Use pubAllowed() since this checks Publish permissions which
// is what Import maps to.
return c.pubAllowedFullCheck(subject, false)
}
// canExport is whether or not we will accept a SUB from the remote for a given subject.
// This is for ROUTER connections only.
// Lock is held on entry
func (c *client) canExport(subject string) bool {
// Use canSubscribe() since this checks Subscribe permissions which
// is what Export maps to.
return c.canSubscribe(subject)
}
// Initialize or reset cluster's permissions.
// This is for ROUTER connections only.
// Client lock is held on entry
func (c *client) setRoutePermissions(perms *RoutePermissions) {
// Reset if some were set
if perms == nil {
c.perms = nil
c.mperms = nil
return
}
// Convert route permissions to user permissions.
// The Import permission is mapped to Publish
// and Export permission is mapped to Subscribe.
// For meaning of Import/Export, see canImport and canExport.
p := &Permissions{
Publish: perms.Import,
Subscribe: perms.Export,
}
c.setPermissions(p)
}
// Type used to hold a list of subs on a per account basis.
type asubs struct {
acc *Account
subs []*subscription
}
// removeRemoteSubs will walk the subs and remove them from the appropriate account.
func (c *client) removeRemoteSubs() {
// We need to gather these on a per account basis.
// FIXME(dlc) - We should be smarter about this..
as := map[string]*asubs{}
c.mu.Lock()
srv := c.srv
subs := c.subs
c.subs = make(map[string]*subscription)
c.mu.Unlock()
for key, sub := range subs {
c.mu.Lock()
sub.max = 0
c.mu.Unlock()
// Grab the account
accountName := strings.Fields(key)[0]
ase := as[accountName]
if ase == nil {
acc, _ := srv.LookupAccount(accountName)
if acc == nil {
continue
}
as[accountName] = &asubs{acc: acc, subs: []*subscription{sub}}
} else {
ase.subs = append(ase.subs, sub)
}
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(accountName, sub, -1)
}
}
// Now remove the subs by batch for each account sublist.
for _, ase := range as {
c.Debugf("Removing %d subscriptions for account %q", len(ase.subs), ase.acc.Name)
ase.acc.sl.RemoveBatch(ase.subs)
}
}
func (c *client) parseUnsubProto(arg []byte) (string, []byte, []byte, error) {
c.traceInOp("RS-", arg)
// Indicate any activity, so pub and sub or unsubs.
c.in.subs++
args := splitArg(arg)
var queue []byte
switch len(args) {
case 2:
case 3:
queue = args[2]
default:
return "", nil, nil, fmt.Errorf("parse error: '%s'", arg)
}
return string(args[0]), args[1], queue, nil
}
// Indicates no more interest in the given account/subject for the remote side.
func (c *client) processRemoteUnsub(arg []byte) (err error) {
srv := c.srv
if srv == nil {
return nil
}
accountName, subject, _, err := c.parseUnsubProto(arg)
if err != nil {
return fmt.Errorf("processRemoteUnsub %s", err.Error())
}
// Lookup the account
acc, _ := c.srv.LookupAccount(accountName)
if acc == nil {
c.Debugf("Unknown account %q for subject %q", accountName, subject)
// Mark this account as not interested since we received a RS- and we
// do not have any record of it.
return nil
}
c.mu.Lock()
if c.nc == nil {
c.mu.Unlock()
return nil
}
updateGWs := false
// We store local subs by account and subject and optionally queue name.
// RS- will have the arg exactly as the key.
key := string(arg)
sub, ok := c.subs[key]
if ok {
delete(c.subs, key)
acc.sl.Remove(sub)
c.removeReplySubTimeout(sub)
updateGWs = srv.gateway.enabled
}
c.mu.Unlock()
if updateGWs {
srv.gatewayUpdateSubInterest(accountName, sub, -1)
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, -1)
if c.opts.Verbose {
c.sendOK()
}
return nil
}
func (c *client) processRemoteSub(argo []byte) (err error) {
c.traceInOp("RS+", argo)
// Indicate activity.
c.in.subs++
srv := c.srv
if srv == nil {
return nil
}
// Copy so we do not reference a potentially large buffer
arg := make([]byte, len(argo))
copy(arg, argo)
args := splitArg(arg)
sub := &subscription{client: c}
switch len(args) {
case 2:
sub.queue = nil
case 4:
sub.queue = args[2]
sub.qw = int32(parseSize(args[3]))
default:
return fmt.Errorf("processRemoteSub Parse Error: '%s'", arg)
}
sub.subject = args[1]
// Lookup the account
// FIXME(dlc) - This may start having lots of contention?
accountName := string(args[0])
acc, _ := c.srv.LookupAccount(accountName)
if acc == nil {
if !srv.NewAccountsAllowed() {
c.Debugf("Unknown account %q for subject %q", accountName, sub.subject)
return nil
}
acc, _ = srv.LookupOrRegisterAccount(accountName)
}
c.mu.Lock()
if c.nc == nil {
c.mu.Unlock()
return nil
}
// Check permissions if applicable.
if !c.canExport(string(sub.subject)) {
c.mu.Unlock()
c.Debugf("Can not export %q, ignoring remote subscription request", sub.subject)
return nil
}
// Check if we have a maximum on the number of subscriptions.
if c.subsAtLimit() {
c.mu.Unlock()
c.maxSubsExceeded()
return nil
}
// We store local subs by account and subject and optionally queue name.
// If we have a queue it will have a trailing weight which we do not want.
if sub.queue != nil {
sub.sid = arg[:len(arg)-len(args[3])-1]
} else {
sub.sid = arg
}
key := string(sub.sid)
osub := c.subs[key]
updateGWs := false
if osub == nil {
c.subs[key] = sub
// Now place into the account sl.
if err = acc.sl.Insert(sub); err != nil {
delete(c.subs, key)
c.mu.Unlock()
c.Errorf("Could not insert subscription: %v", err)
c.sendErr("Invalid Subscription")
return nil
}
updateGWs = srv.gateway.enabled
} else if sub.queue != nil {
// For a queue we need to update the weight.
atomic.StoreInt32(&osub.qw, sub.qw)
acc.sl.UpdateRemoteQSub(osub)
}
c.mu.Unlock()
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, 1)
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, 1)
if c.opts.Verbose {
c.sendOK()
}
return nil
}
// sendSubsToRoute will send over our subject interest to
// the remote side. For each account we will send the
// complete interest for all subjects, both normal as a binary
// and queue group weights.
func (s *Server) sendSubsToRoute(route *client) {
s.mu.Lock()
// Estimated size of all protocols. It does not have to be accurate at all.
eSize := 0
// Send over our account subscriptions.
// copy accounts into array first
accs := make([]*Account, 0, 32)
s.accounts.Range(func(k, v interface{}) bool {
a := v.(*Account)
accs = append(accs, a)
a.mu.RLock()
// Proto looks like: "RS+ <account name> <subject>[ <queue weight>]\r\n"
// If we wanted to have better estimates (or even accurate), we would
// collect the subs here instead of capturing the accounts and then
// later going over each account.
eSize += len(a.rm) * (4 + len(a.Name) + 256)
a.mu.RUnlock()
return true
})
s.mu.Unlock()
sendSubs := func(accs []*Account) {
var raw [32]*subscription
var closed bool
route.mu.Lock()
for _, a := range accs {
subs := raw[:0]
a.mu.RLock()
c := a.randomClient()
if c == nil {
nsubs := len(a.rm)
accName := a.Name
a.mu.RUnlock()
if nsubs > 0 {
route.Warnf("Ignoring account %q with %d subs, no clients", accName, nsubs)
}
continue
}
for key, n := range a.rm {
// FIXME(dlc) - Just pass rme around.
// Construct a sub on the fly. We need to place
// a client (or im) to properly set the account.
var subj, qn []byte
s := strings.Split(key, " ")
subj = []byte(s[0])
if len(s) > 1 {
qn = []byte(s[1])
}
// TODO(dlc) - This code needs to change, but even if left alone could be more
// efficient with these tmp subs.
sub := &subscription{client: c, subject: subj, queue: qn, qw: n}
subs = append(subs, sub)
}
a.mu.RUnlock()
closed = route.sendRouteSubProtos(subs, false, route.importFilter)
if closed {
route.mu.Unlock()
return
}
}
route.mu.Unlock()
if !closed {
route.Debugf("Sent local subscriptions to route")
}
}
// Decide if we call above function in go routine or in place.
if eSize > sendRouteSubsInGoRoutineThreshold {
s.startGoRoutine(func() {
sendSubs(accs)
s.grWG.Done()
})
} else {
sendSubs(accs)
}
}
// Sends SUBs protocols for the given subscriptions. If a filter is specified, it is
// invoked for each subscription. If the filter returns false, the subscription is skipped.
// This function may release the route's lock due to flushing of outbound data. A boolean
// is returned to indicate if the connection has been closed during this call.
// Lock is held on entry.
func (c *client) sendRouteSubProtos(subs []*subscription, trace bool, filter func(sub *subscription) bool) bool {
return c.sendRouteSubOrUnSubProtos(subs, true, trace, filter)
}
// Sends UNSUBs protocols for the given subscriptions. If a filter is specified, it is
// invoked for each subscription. If the filter returns false, the subscription is skipped.
// This function may release the route's lock due to flushing of outbound data. A boolean
// is returned to indicate if the connection has been closed during this call.
// Lock is held on entry.
func (c *client) sendRouteUnSubProtos(subs []*subscription, trace bool, filter func(sub *subscription) bool) bool {
return c.sendRouteSubOrUnSubProtos(subs, false, trace, filter)
}
// Low-level function that sends RS+ or RS- protocols for the given subscriptions.
// Use sendRouteSubProtos or sendRouteUnSubProtos instead for clarity.
// Lock is held on entry.
func (c *client) sendRouteSubOrUnSubProtos(subs []*subscription, isSubProto, trace bool, filter func(sub *subscription) bool) bool {
var (
_buf [1024]byte // array on stack
buf = _buf[:0] // our buffer will initially point to the stack buffer
mbs = maxBufSize * 2 // max size of the buffer
mpMax = int(c.out.mp / 2) // 50% of max_pending
closed bool
)
// We need to make sure that we stay below the user defined max pending bytes.
if mbs > mpMax {
mbs = mpMax
}
for _, sub := range subs {
if filter != nil && !filter(sub) {
continue
}
// Determine the account. If sub has an ImportMap entry, use that, otherwise scoped to
// client. Default to global if all else fails.
var accName string
if sub.client != nil && sub.client != c {
sub.client.mu.Lock()
}
if sub.im != nil {
accName = sub.im.acc.Name
} else if sub.client != nil && sub.client.acc != nil {
accName = sub.client.acc.Name
} else {
c.Debugf("Falling back to default account for sending subs")
accName = globalAccountName
}
if sub.client != nil && sub.client != c {
sub.client.mu.Unlock()
}
// Check if proto is going to fit.
curSize := len(buf)
// "RS+/- " + account + " " + subject + " " [+ queue + " " + weight] + CRLF
curSize += 4 + len(accName) + 1 + len(sub.subject) + 1 + 2
if len(sub.queue) > 0 {
curSize += len(sub.queue)
if isSubProto {
// Estimate weightlen in 1000s
curSize += 1 + 4
}
}
if curSize >= mbs {
if c.queueOutbound(buf) {
// Need to allocate new array
buf = make([]byte, 0, mbs)
} else {
// We can reuse previous buffer
buf = buf[:0]
}
// Update last activity because flushOutbound() will release
// the lock, which could cause pingTimer to think that this
// connection is stale otherwise.
c.last = time.Now()
if !c.flushOutbound() {
// Another go-routine has set this and is either
// doing the write or waiting to re-acquire the
// lock post write. Release lock to give it a
// chance to complete.
c.mu.Unlock()
runtime.Gosched()
c.mu.Lock()
}
if closed = c.flags.isSet(clearConnection); closed {
break
}
}
as := len(buf)
if isSubProto {
buf = append(buf, rSubBytes...)
} else {
buf = append(buf, rUnsubBytes...)
}
buf = append(buf, accName...)
buf = append(buf, ' ')
buf = append(buf, sub.subject...)
if len(sub.queue) > 0 {
buf = append(buf, ' ')
buf = append(buf, sub.queue...)
// Send our weight if we are a sub proto
if isSubProto {
buf = append(buf, ' ')
var b [12]byte
var i = len(b)
for l := sub.qw; l > 0; l /= 10 {
i--
b[i] = digits[l%10]
}
buf = append(buf, b[i:]...)
}
}
if trace {
c.traceOutOp("", buf[as:])
}
buf = append(buf, CR_LF...)
}
if !closed && len(buf) > 0 {
c.queueOutbound(buf)
c.flushOutbound()
closed = c.flags.isSet(clearConnection)
}
return closed
}
func (s *Server) createRoute(conn net.Conn, rURL *url.URL) *client {
// Snapshot server options.
opts := s.getOpts()
didSolicit := rURL != nil
r := &route{didSolicit: didSolicit}
for _, route := range opts.Routes {
if rURL != nil && (strings.EqualFold(rURL.Host, route.Host)) {
r.routeType = Explicit
}
}
c := &client{srv: s, nc: conn, opts: clientOpts{}, kind: ROUTER, msubs: -1, mpay: -1, route: r}
// Grab server variables
s.mu.Lock()
s.generateRouteInfoJSON()
infoJSON := s.routeInfoJSON
authRequired := s.routeInfo.AuthRequired
tlsRequired := s.routeInfo.TLSRequired
s.mu.Unlock()
// Grab lock
c.mu.Lock()
// Initialize
c.initClient()
if didSolicit {
// Do this before the TLS code, otherwise, in case of failure
// and if route is explicit, it would try to reconnect to 'nil'...
r.url = rURL
}
// Check for TLS
if tlsRequired {
// Copy off the config to add in ServerName if we need to.
tlsConfig := opts.Cluster.TLSConfig.Clone()
// If we solicited, we will act like the client, otherwise the server.
if didSolicit {
c.Debugf("Starting TLS route client handshake")
// Specify the ServerName we are expecting.
host, _, _ := net.SplitHostPort(rURL.Host)
tlsConfig.ServerName = host
c.nc = tls.Client(c.nc, tlsConfig)
} else {
c.Debugf("Starting TLS route server handshake")
c.nc = tls.Server(c.nc, tlsConfig)
}
conn := c.nc.(*tls.Conn)
// Setup the timeout
ttl := secondsToDuration(opts.Cluster.TLSTimeout)
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(ttl))
c.mu.Unlock()
if err := conn.Handshake(); err != nil {
c.Errorf("TLS route handshake error: %v", err)
c.sendErr("Secure Connection - TLS Required")
c.closeConnection(TLSHandshakeError)
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
// Verify that the connection did not go away while we released the lock.
if c.nc == nil {
c.mu.Unlock()
return nil
}
}
// Do final client initialization
// Initialize the per-account cache.
c.in.pacache = make(map[string]*perAccountCache)
if didSolicit {
// Set permissions associated with the route user (if applicable).
// No lock needed since we are already under client lock.
c.setRoutePermissions(opts.Cluster.Permissions)
}
// Set the Ping timer
c.setPingTimer()
// For routes, the "client" is added to s.routes only when processing
// the INFO protocol, that is much later.
	// In the meantime, if the server shuts down, there would be no reference
	// to the client (connection) to be closed, leaving this readLoop
	// uninterrupted, causing the Shutdown() to wait indefinitely.
// We need to store the client in a special map, under a special lock.
if !s.addToTempClients(c.cid, c) {
c.mu.Unlock()
c.setNoReconnect()
c.closeConnection(ServerShutdown)
return nil
}
// Check for Auth required state for incoming connections.
// Make sure to do this before spinning up readLoop.
if authRequired && !didSolicit {
ttl := secondsToDuration(opts.Cluster.AuthTimeout)
c.setAuthTimer(ttl)
}
// Spin up the read loop.
s.startGoRoutine(func() { c.readLoop() })
// Spin up the write loop.
s.startGoRoutine(func() { c.writeLoop() })
if tlsRequired {
c.Debugf("TLS handshake complete")
cs := c.nc.(*tls.Conn).ConnectionState()
c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
}
// Queue Connect proto if we solicited the connection.
if didSolicit {
c.Debugf("Route connect msg sent")
c.sendRouteConnect(tlsRequired)
}
// Send our info to the other side.
// Our new version requires dynamic information for accounts and a nonce.
c.sendInfo(infoJSON)
c.mu.Unlock()
c.Noticef("Route connection created")
return c
}
const (
_CRLF_ = "\r\n"
_EMPTY_ = ""
)
func (s *Server) addRoute(c *client, info *Info) (bool, bool) {
id := c.route.remoteID
sendInfo := false
s.mu.Lock()
if !s.running {
s.mu.Unlock()
return false, false
}
remote, exists := s.remotes[id]
if !exists {
s.routes[c.cid] = c
s.remotes[id] = c
c.mu.Lock()
c.route.connectURLs = info.ClientConnectURLs
cid := c.cid
c.mu.Unlock()
// Now that we have registered the route, we can remove from the temp map.
s.removeFromTempClients(cid)
// we don't need to send if the only route is the one we just accepted.
sendInfo = len(s.routes) > 1
// If the INFO contains a Gateway URL, add it to the list for our cluster.
if info.GatewayURL != "" {
s.addGatewayURL(info.GatewayURL)
}
// Add the remote's leafnodeURL to our list of URLs and send the update
// to all LN connections. (Note that when coming from a route, LeafNodeURLs
// is an array of size 1 max).
if len(info.LeafNodeURLs) == 1 && s.addLeafNodeURL(info.LeafNodeURLs[0]) {
s.sendAsyncLeafNodeInfo()
}
}
s.mu.Unlock()
if exists {
var r *route
c.mu.Lock()
// upgrade to solicited?
if c.route.didSolicit {
// Make a copy
rs := *c.route
r = &rs
}
// Since this duplicate route is going to be removed, make sure we clear
// c.route.leafnodeURL, otherwise, when processing the disconnect, this
// would cause the leafnode URL for that remote server to be removed
// from our list.
c.route.leafnodeURL = _EMPTY_
c.mu.Unlock()
remote.mu.Lock()
// r will be not nil if c.route.didSolicit was true
if r != nil {
// If we upgrade to solicited, we still want to keep the remote's
// connectURLs. So transfer those.
r.connectURLs = remote.route.connectURLs
remote.route = r
}
// This is to mitigate the issue where both sides add the route
// on the opposite connection, and therefore end-up with both
// connections being dropped.
remote.route.retry = true
remote.mu.Unlock()
}
return !exists, sendInfo
}
// Import filter check.
func (c *client) importFilter(sub *subscription) bool {
return c.canImport(string(sub.subject))
}
// updateRouteSubscriptionMap will make sure to update the route map for the subscription. Will
// also forward to all routes if needed.
func (s *Server) updateRouteSubscriptionMap(acc *Account, sub *subscription, delta int32) {
if acc == nil || sub == nil {
return
}
// We only store state on local subs for transmission across all other routes.
if sub.client == nil || (sub.client.kind != CLIENT && sub.client.kind != SYSTEM && sub.client.kind != LEAF) {
return
}
// Copy to hold outside acc lock.
var n int32
var ok bool
acc.mu.Lock()
// This is non-nil when we know we are in cluster mode.
rm, lqws := acc.rm, acc.lqws
if rm == nil {
acc.mu.Unlock()
return
}
// Create the fast key which will use the subject or 'subject<spc>queue' for queue subscribers.
key := keyFromSub(sub)
isq := len(sub.queue) > 0
// Decide whether we need to send an update out to all the routes.
update := isq
// This is where we do update to account. For queues we need to take
// special care that this order of updates is same as what is sent out
// over routes.
if n, ok = rm[key]; ok {
n += delta
if n <= 0 {
delete(rm, key)
if isq {
delete(lqws, key)
}
update = true // Update for deleting (N->0)
} else {
rm[key] = n
}
} else if delta > 0 {
n = delta
rm[key] = delta
update = true // Adding a new entry for normal sub means update (0->1)
}
acc.mu.Unlock()
if !update {
return
}
// If we are sending a queue sub, make a copy and place in the queue weight.
// FIXME(dlc) - We can be smarter here and avoid copying and acquiring the lock.
if isq {
sub.client.mu.Lock()
nsub := *sub
sub.client.mu.Unlock()
nsub.qw = n
sub = &nsub
}
// We need to send out this update. Gather routes
var _routes [32]*client
routes := _routes[:0]
s.mu.Lock()
for _, route := range s.routes {
routes = append(routes, route)
}
trace := atomic.LoadInt32(&s.logging.trace) == 1
s.mu.Unlock()
// If we are a queue subscriber we need to make sure our updates are serialized from
// potential multiple connections. We want to make sure that the order above is preserved
// here but not necessarily all updates need to be sent. We need to block and recheck the
// n count with the lock held through sending here. We will suppress duplicate sends of same qw.
if isq {
acc.mu.Lock()
defer acc.mu.Unlock()
n = rm[key]
sub.qw = n
// Check the last sent weight here. If same, then someone
// beat us to it and we can just return here. Otherwise update
if ls, ok := lqws[key]; ok && ls == n {
return
} else {
lqws[key] = n
}
}
// Snapshot into array
subs := []*subscription{sub}
// Deliver to all routes.
for _, route := range routes {
route.mu.Lock()
// Note that queue unsubs where n > 0 are still
// subscribes with a smaller weight.
route.sendRouteSubOrUnSubProtos(subs, n > 0, trace, route.importFilter)
route.mu.Unlock()
}
}
func (s *Server) routeAcceptLoop(ch chan struct{}) {
defer func() {
if ch != nil {
close(ch)
}
}()
// Snapshot server options.
opts := s.getOpts()
port := opts.Cluster.Port
if port == -1 {
port = 0
}
hp := net.JoinHostPort(opts.Cluster.Host, strconv.Itoa(port))
l, e := net.Listen("tcp", hp)
if e != nil {
s.Fatalf("Error listening on router port: %d - %v", opts.Cluster.Port, e)
return
}
s.Noticef("Listening for route connections on %s",
net.JoinHostPort(opts.Cluster.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port)))
s.mu.Lock()
// For tests, we want to be able to make this server behave
// as an older server so we use the variable which we can override.
proto := testRouteProto
// Check for TLSConfig
tlsReq := opts.Cluster.TLSConfig != nil
info := Info{
ID: s.info.ID,
Version: s.info.Version,
GoVersion: runtime.Version(),
AuthRequired: false,
TLSRequired: tlsReq,
TLSVerify: tlsReq,
MaxPayload: s.info.MaxPayload,
Proto: proto,
GatewayURL: s.getGatewayURL(),
}
	// Set this only if advertise is not disabled
if !opts.Cluster.NoAdvertise {
info.ClientConnectURLs = s.clientConnectURLs
}
// If we have selected a random port...
if port == 0 {
// Write resolved port back to options.
opts.Cluster.Port = l.Addr().(*net.TCPAddr).Port
}
// Check for Auth items
if opts.Cluster.Username != "" {
info.AuthRequired = true
}
// Check for permissions.
if opts.Cluster.Permissions != nil {
info.Import = opts.Cluster.Permissions.Import
info.Export = opts.Cluster.Permissions.Export
}
// If this server has a LeafNode accept loop, s.leafNodeInfo.IP is,
// at this point, set to the host:port for the leafnode accept URL,
// taking into account possible advertise setting. Use the LeafNodeURLs
// and set this server's leafnode accept URL. This will be sent to
// routed servers.
if !opts.LeafNode.NoAdvertise && s.leafNodeInfo.IP != _EMPTY_ {
info.LeafNodeURLs = []string{s.leafNodeInfo.IP}
}
s.routeInfo = info
// Possibly override Host/Port and set IP based on Cluster.Advertise
if err := s.setRouteInfoHostPortAndIP(); err != nil {
s.Fatalf("Error setting route INFO with Cluster.Advertise value of %s, err=%v", s.opts.Cluster.Advertise, err)
l.Close()
s.mu.Unlock()
return
}
// Setup state that can enable shutdown
s.routeListener = l
// Warn if using Cluster.Insecure
if tlsReq && opts.Cluster.TLSConfig.InsecureSkipVerify {
s.Warnf(clusterTLSInsecureWarning)
}
s.mu.Unlock()
// Let them know we are up
close(ch)
ch = nil
tmpDelay := ACCEPT_MIN_SLEEP
for s.isRunning() {
conn, err := l.Accept()
if err != nil {
tmpDelay = s.acceptError("Route", err, tmpDelay)
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
s.startGoRoutine(func() {
s.createRoute(conn, nil)
s.grWG.Done()
})
}
s.Debugf("Router accept loop exiting..")
s.done <- true
}
// Similar to setInfoHostPortAndGenerateJSON, but for routeInfo.
func (s *Server) setRouteInfoHostPortAndIP() error {
if s.opts.Cluster.Advertise != "" {
advHost, advPort, err := parseHostPort(s.opts.Cluster.Advertise, s.opts.Cluster.Port)
if err != nil {
return err
}
s.routeInfo.Host = advHost
s.routeInfo.Port = advPort
s.routeInfo.IP = fmt.Sprintf("nats-route://%s/", net.JoinHostPort(advHost, strconv.Itoa(advPort)))
} else {
s.routeInfo.Host = s.opts.Cluster.Host
s.routeInfo.Port = s.opts.Cluster.Port
s.routeInfo.IP = ""
}
// (re)generate the routeInfoJSON byte array
s.generateRouteInfoJSON()
return nil
}
// StartRouting will start the accept loop on the cluster host:port
// and will actively try to connect to listed routes.
func (s *Server) StartRouting(clientListenReady chan struct{}) {
defer s.grWG.Done()
// Wait for the client listen port to be opened, and
// the possible ephemeral port to be selected.
<-clientListenReady
// Spin up the accept loop
ch := make(chan struct{})
go s.routeAcceptLoop(ch)
<-ch
// Solicit Routes if needed.
s.solicitRoutes(s.getOpts().Routes)
}
func (s *Server) reConnectToRoute(rURL *url.URL, rtype RouteType) {
tryForEver := rtype == Explicit
// If A connects to B, and B to A (regardless if explicit or
// implicit - due to auto-discovery), and if each server first
// registers the route on the opposite TCP connection, the
// two connections will end-up being closed.
// Add some random delay to reduce risk of repeated failures.
delay := time.Duration(rand.Intn(100)) * time.Millisecond
if tryForEver {
delay += DEFAULT_ROUTE_RECONNECT
}
select {
case <-time.After(delay):
case <-s.quitCh:
s.grWG.Done()
return
}
s.connectToRoute(rURL, tryForEver, false)
}
// Checks to make sure the route is still valid.
func (s *Server) routeStillValid(rURL *url.URL) bool {
for _, ri := range s.getOpts().Routes {
if urlsAreEqual(ri, rURL) {
return true
}
}
return false
}
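// connectToRoute attempts to establish a TCP connection to the given route
// URL, retrying forever when tryForEver is set, otherwise up to the
// configured number of ConnectRetries, and stopping early if the route is
// removed from the configuration or the server shuts down.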
func (s *Server) connectToRoute(rURL *url.URL, tryForEver, firstConnect bool) {
// Snapshot server options.
opts := s.getOpts()
defer s.grWG.Done()
const connErrFmt = "Error trying to connect to route (attempt %v): %v"
attempts := 0
for s.isRunning() && rURL != nil {
if tryForEver && !s.routeStillValid(rURL) {
return
}
s.Debugf("Trying to connect to route on %s", rURL.Host)
conn, err := net.DialTimeout("tcp", rURL.Host, DEFAULT_ROUTE_DIAL)
if err != nil {
attempts++
if s.shouldReportConnectErr(firstConnect, attempts) {
s.Errorf(connErrFmt, attempts, err)
} else {
s.Debugf(connErrFmt, attempts, err)
}
if !tryForEver {
if opts.Cluster.ConnectRetries <= 0 {
return
}
if attempts > opts.Cluster.ConnectRetries {
return
}
}
select {
case <-s.quitCh:
return
case <-time.After(routeConnectDelay):
continue
}
}
if tryForEver && !s.routeStillValid(rURL) {
conn.Close()
return
}
// We have a route connection here.
// Go ahead and create it and exit this func.
s.createRoute(conn, rURL)
return
}
}
func (c *client) isSolicitedRoute() bool {
c.mu.Lock()
defer c.mu.Unlock()
return c.kind == ROUTER && c.route != nil && c.route.didSolicit
}
func (s *Server) solicitRoutes(routes []*url.URL) {
for _, r := range routes {
route := r
s.startGoRoutine(func() { s.connectToRoute(route, true, true) })
}
}
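// processRouteConnect parses the CONNECT protocol received on a route
// connection, rejects clients and gateways that dialed the route port by
// mistake, and records the remote route's ID and permissions.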
func (c *client) processRouteConnect(srv *Server, arg []byte, lang string) error {
// Way to detect clients that incorrectly connect to the route listen
// port. Clients provide Lang in the CONNECT protocol while ROUTEs don't.
if lang != "" {
c.sendErrAndErr(ErrClientConnectedToRoutePort.Error())
c.closeConnection(WrongPort)
return ErrClientConnectedToRoutePort
}
// Unmarshal as a route connect protocol
proto := &connectInfo{}
if err := json.Unmarshal(arg, proto); err != nil {
return err
}
// Reject if this has Gateway which means that it would be from a gateway
// connection that incorrectly connects to the Route port.
if proto.Gateway != "" {
errTxt := fmt.Sprintf("Rejecting connection from gateway %q on the Route port", proto.Gateway)
c.Errorf(errTxt)
c.sendErr(errTxt)
c.closeConnection(WrongGateway)
return ErrWrongGateway
}
var perms *RoutePermissions
if srv != nil {
perms = srv.getOpts().Cluster.Permissions
}
// Grab connection name of remote route.
c.mu.Lock()
c.route.remoteID = c.opts.Name
c.setRoutePermissions(perms)
c.mu.Unlock()
return nil
}
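// removeRoute unregisters a route connection from the server's route and
// remote maps and cleans up any gateway and leafnode URL state associated
// with it.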
func (s *Server) removeRoute(c *client) {
var rID string
var lnURL string
c.mu.Lock()
cid := c.cid
r := c.route
if r != nil {
rID = r.remoteID
lnURL = r.leafnodeURL
}
c.mu.Unlock()
s.mu.Lock()
delete(s.routes, cid)
if r != nil {
rc, ok := s.remotes[rID]
// Only delete it if it is us..
if ok && c == rc {
delete(s.remotes, rID)
}
s.removeGatewayURL(r.gatewayURL)
// Remove the remote's leafNode URL from
// our list and send update to LN connections.
if lnURL != _EMPTY_ && s.removeLeafNodeURL(lnURL) {
s.sendAsyncLeafNodeInfo()
}
}
s.removeFromTempClients(cid)
s.mu.Unlock()
}
| 1 | 9,296 | Does this work for both sides? | nats-io-nats-server | go
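A note on the reconnect logic in the file above: the comment inside reConnectToRoute explains that when two servers solicit routes to each other at the same moment, each side can keep closing the other's freshly registered connection, so a small random delay is added before re-dialing. A minimal, self-contained Go sketch of that jittered-delay idea (illustrative only; the names and values below are not taken from the server code):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// reconnectDelay returns the base reconnect interval plus up to 100ms of
// random jitter, so two peers that dial each other do not retry in lockstep.
func reconnectDelay(base time.Duration) time.Duration {
	jitter := time.Duration(rand.Intn(100)) * time.Millisecond
	return base + jitter
}

func main() {
	rand.Seed(time.Now().UnixNano())
	for i := 0; i < 3; i++ {
		fmt.Println("next reconnect attempt in", reconnectDelay(time.Second))
	}
}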
@@ -32,9 +32,12 @@ import {
isDataZeroSearchConsole,
} from '../dashboard/util';
-const { __, sprintf } = wp.i18n;
-const { map } = lodash;
-const { Component } = wp.element;
+/**
+ * WordPress dependencies
+ */
+import { __, sprintf } from '@wordpress/i18n';
+import { map } from 'lodash';
+import { Component } from '@wordpress/element';
class DashboardWidgetPopularKeywordsTable extends Component {
static renderLayout( component ) { | 1 | /**
* DashboardWidgetPopularKeywordsTable component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import withData from 'GoogleComponents/higherorder/withdata';
import { TYPE_MODULES } from 'GoogleComponents/data';
import { getTimeInSeconds, numberFormat } from 'GoogleUtil';
import { getDataTableFromData, TableOverflowContainer } from 'GoogleComponents/data-table';
import PreviewTable from 'GoogleComponents/preview-table';
import Layout from 'GoogleComponents/layout/layout';
/**
* Internal dependencies
*/
import {
isDataZeroSearchConsole,
} from '../dashboard/util';
const { __, sprintf } = wp.i18n;
const { map } = lodash;
const { Component } = wp.element;
class DashboardWidgetPopularKeywordsTable extends Component {
static renderLayout( component ) {
return (
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-6-desktop
mdc-layout-grid__cell--span-4-tablet
">
<Layout
className="googlesitekit-popular-content"
footer
footerCtaLabel={ __( 'Search Console', 'google-site-kit' ) }
footerCtaLink={
sprintf( 'https://search.google.com/u/1/search-console?resource_id=%s', googlesitekit.admin.siteURL )
}
fill
>
{ component }
</Layout>
</div>
);
}
render() {
const { data } = this.props;
if ( ! data || ! data.length ) {
return null;
}
const headers = [
{
title: __( 'Top search queries for your site', 'google-site-kit' ),
tooltip: __( 'Most searched for keywords related to your content', 'google-site-kit' ),
},
{
title: __( 'Clicks', 'google-site-kit' ),
tooltip: __( 'Number of times users clicked on your content in search results', 'google-site-kit' ),
},
{
title: __( 'Impressions', 'google-site-kit' ),
tooltip: __( 'Counted each time your content appears in search results', 'google-site-kit' ),
},
];
const domain = googlesitekit.admin.siteURL;
const links = [];
const dataMapped = map( data, ( row, i ) => {
const query = row.keys[ 0 ];
links[ i ] = sprintf(
'https://search.google.com/search-console/performance/search-analytics?resource_id=%s&query=!%s&num_of_days=28',
domain,
query
);
return [
query,
numberFormat( row.clicks ),
numberFormat( row.impressions ),
];
} );
const options = {
hideHeader: false,
chartsEnabled: false,
links,
};
const dataTable = getDataTableFromData( dataMapped, headers, options );
return (
DashboardWidgetPopularKeywordsTable.renderLayout(
<TableOverflowContainer>
{ dataTable }
</TableOverflowContainer>
)
);
}
}
export default withData(
DashboardWidgetPopularKeywordsTable,
[
{
type: TYPE_MODULES,
identifier: 'search-console',
datapoint: 'searchanalytics',
data: {
dimensions: 'query',
limit: 10,
},
priority: 1,
maxAge: getTimeInSeconds( 'day' ),
context: [ 'Dashboard' ],
},
],
DashboardWidgetPopularKeywordsTable.renderLayout(
<PreviewTable padding />
),
{
inGrid: true,
createGrid: true,
},
isDataZeroSearchConsole
);
| 1 | 24,754 | `lodash` shouldn't be grouped under WordPress dependencies | google-site-kit-wp | js |
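The note above flags that lodash is an external (non-WordPress) package, so it should not sit under the "WordPress dependencies" doc block introduced by the patch. A small illustrative sketch of how the grouping could look instead, reusing the file's existing "External dependencies" block (a suggestion-style sketch, not the actual follow-up change):

/**
 * External dependencies
 */
import { map } from 'lodash';

/**
 * WordPress dependencies
 */
import { __, sprintf } from '@wordpress/i18n';
import { Component } from '@wordpress/element';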
@@ -117,6 +117,13 @@ class ComputeTest(unittest_utils.ForsetiTestCase):
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_firewall_rules(self.project_id))
+
+ def test_get_global_operations(self):
+ """Test get_global_operations"""
+ http_mocks.mock_http_response(fake_compute.GLOBAL_OPERATIONS_RESPONSE)
+ results = self.gce_api_client.get_global_operations(
+ self.project_id, operation_id='operation-1234')
+
def test_get_quota(self):
"""Test get quota."""
http_mocks.mock_http_response(fake_compute.GET_PROJECT_RESPONSE) | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the Compute client."""
import unittest
import mock
from oauth2client import client
import parameterized
from tests import unittest_utils
from tests.common.gcp_api.test_data import fake_compute_responses as fake_compute
from tests.common.gcp_api.test_data import http_mocks
from google.cloud.security.common.gcp_api import compute
from google.cloud.security.common.gcp_api import errors as api_errors
ERROR_TEST_CASES = [
('api_not_enabled', fake_compute.API_NOT_ENABLED, '403',
api_errors.ApiNotEnabledError),
('access_denied', fake_compute.ACCESS_DENIED, '403',
api_errors.ApiExecutionError),
]
class ComputeTest(unittest_utils.ForsetiTestCase):
"""Test the Compute client."""
@classmethod
@mock.patch.object(client, 'GoogleCredentials', spec=True)
def setUpClass(cls, mock_google_credential):
"""Set up."""
fake_global_configs = {'max_compute_api_calls_per_second': 2000}
cls.gce_api_client = compute.ComputeClient(
global_configs=fake_global_configs)
cls.project_id = fake_compute.FAKE_PROJECT_ID
@mock.patch.object(client, 'GoogleCredentials')
def test_no_quota(self, mock_google_credential):
"""Verify no rate limiter is used if the configuration is missing."""
gce_api_client = compute.ComputeClient(global_configs={})
self.assertEqual(None, gce_api_client.repository._rate_limiter)
def test_get_backend_services(self):
"""Test get backend services."""
mock_responses = []
for page in fake_compute.LIST_BACKEND_SERVICES_RESPONSES:
mock_responses.append(({'status': '200'}, page))
http_mocks.mock_http_response_sequence(mock_responses)
results = self.gce_api_client.get_backend_services(self.project_id)
self.assertEquals(fake_compute.EXPECTED_BACKEND_SERVICES_NAMES,
[r.get('name') for r in results])
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_backend_services_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get backend services."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_backend_services(self.project_id))
def test_get_disks(self):
"""Test get disks."""
http_mocks.mock_http_response(
fake_compute.DISKS_AGGREGATED_LIST)
results = self.gce_api_client.get_disks(self.project_id)
self.assertEquals(fake_compute.EXPECTED_DISKS_SELFLINKS,
[r.get('selfLink') for r in results])
def test_get_disks_by_zone(self):
"""Test get disks rules by zone."""
http_mocks.mock_http_response(fake_compute.DISKS_LIST)
results = self.gce_api_client.get_disks(
self.project_id, fake_compute.FAKE_DISK_ZONE)
self.assertEquals(fake_compute.EXPECTED_DISKS_SELFLINKS,
[r.get('selfLink') for r in results])
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_disks_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get disks."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_disks(self.project_id))
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_disks(
self.project_id, fake_compute.FAKE_DISK_ZONE))
def test_get_firewall_rules(self):
"""Test get firewall rules."""
mock_responses = []
for page in fake_compute.LIST_FIREWALLS_RESPONSES:
mock_responses.append(({'status': '200'}, page))
http_mocks.mock_http_response_sequence(mock_responses)
results = self.gce_api_client.get_firewall_rules(self.project_id)
self.assertEquals(fake_compute.EXPECTED_FIREWALL_NAMES,
[r.get('name') for r in results])
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_firewall_rules_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get firewall rules."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_firewall_rules(self.project_id))
def test_get_quota(self):
"""Test get quota."""
http_mocks.mock_http_response(fake_compute.GET_PROJECT_RESPONSE)
results = self.gce_api_client.get_quota(self.project_id,
metric='SNAPSHOTS')
self.assertEquals(fake_compute.GET_QUOTA_RESPONSE, results)
def test_get_quota_no_metric(self):
"""Test get quota with no metrics"""
http_mocks.mock_http_response(fake_compute.GET_PROJECT_RESPONSE)
with self.assertRaises(KeyError):
list(self.gce_api_client.get_quota(self.project_id, metric=' '))
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_quota_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get quota."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_quota(self.project_id, metric=None))
def test_get_firewall_quota(self):
"""Test get firewall quota"""
http_mocks.mock_http_response(fake_compute.GET_PROJECT_RESPONSE)
results = self.gce_api_client.get_firewall_quota(self.project_id)
self.assertEquals(fake_compute.GET_FIREWALL_QUOTA_RESPONSE, results)
def test_get_forwarding_rules(self):
"""Test get forwarding rules."""
http_mocks.mock_http_response(
fake_compute.FORWARDING_RULES_AGGREGATED_LIST)
results = self.gce_api_client.get_forwarding_rules(self.project_id)
self.assertEquals(fake_compute.EXPECTED_FORWARDING_RULE_NAMES,
[r.get('name') for r in results])
def test_get_forwarding_rules_by_region(self):
"""Test get forwarding rules by region."""
http_mocks.mock_http_response(fake_compute.FORWARDING_RULES_LIST)
results = self.gce_api_client.get_forwarding_rules(
self.project_id, fake_compute.FAKE_FORWARDING_RULE_REGION)
self.assertEquals(fake_compute.EXPECTED_FORWARDING_RULE_NAMES,
[r.get('name') for r in results])
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_forwarding_rules_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get forwarding rules."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_forwarding_rules(self.project_id))
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_forwarding_rules(
self.project_id, fake_compute.FAKE_FORWARDING_RULE_REGION))
def test_get_instance_group_instances_by_zone(self):
"""Test get instances group instances by zone."""
http_mocks.mock_http_response(
fake_compute.INSTANCE_GROUP_LIST_INSTANCES)
results = self.gce_api_client.get_instance_group_instances(
self.project_id, fake_compute.FAKE_INSTANCE_GROUP,
zone=fake_compute.FAKE_INSTANCE_ZONE)
self.assertEquals(fake_compute.EXPECTED_INSTANCE_GROUP_ZONE_URLS,
list(results))
def test_get_instance_group_instances_by_region(self):
"""Test get instance group instances by region."""
http_mocks.mock_http_response(
fake_compute.REGION_INSTANCE_GROUP_LIST_INSTANCES)
results = self.gce_api_client.get_instance_group_instances(
self.project_id, fake_compute.FAKE_INSTANCE_GROUP,
region=fake_compute.FAKE_INSTANCE_GROUP_REGION)
self.assertEquals(fake_compute.EXPECTED_INSTANCE_GROUP_REGION_URLS,
list(results))
def test_get_instance_group_instances_region_and_zone_raises(self):
"""Verify passing both or neither a region and a zone raises exception."""
with self.assertRaises(ValueError):
self.gce_api_client.get_instance_group_instances(
self.project_id, fake_compute.FAKE_INSTANCE_GROUP,
zone=fake_compute.FAKE_INSTANCE_ZONE,
region=fake_compute.FAKE_INSTANCE_GROUP_REGION)
with self.assertRaises(ValueError):
self.gce_api_client.get_instance_group_instances(
self.project_id, fake_compute.FAKE_INSTANCE_GROUP)
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_instance_group_instances_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get instance group instances."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_instance_group_instances(
self.project_id, fake_compute.FAKE_INSTANCE_GROUP,
zone=fake_compute.FAKE_INSTANCE_ZONE))
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_instance_group_instances(
self.project_id, fake_compute.FAKE_INSTANCE_GROUP,
region=fake_compute.FAKE_INSTANCE_GROUP_REGION))
def test_get_instance_group_managers(self):
"""Test get instance group managers."""
http_mocks.mock_http_response(
fake_compute.INSTANCE_GROUP_MANAGERS_AGGREGATED_LIST)
results = self.gce_api_client.get_instance_group_managers(
self.project_id)
self.assertEquals(fake_compute.EXPECTED_INSTANCE_GROUP_MANAGER_NAMES,
[r.get('name') for r in results])
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_instance_group_managers_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get instance group managers."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
self.gce_api_client.get_instance_group_managers(self.project_id)
def test_get_instance_groups(self):
"""Test get instance groups."""
mock_responses = [
({'status': '200'},
fake_compute.INSTANCE_GROUPS_AGGREGATED_LIST),
({'status': '200'},
fake_compute.INSTANCE_GROUP_LIST_INSTANCES),
({'status': '200'},
fake_compute.REGION_INSTANCE_GROUP_LIST_INSTANCES)
]
http_mocks.mock_http_response_sequence(mock_responses)
results = self.gce_api_client.get_instance_groups(self.project_id)
self.assertEquals(fake_compute.EXPECTED_INSTANCE_GROUP_NAMES,
[r.get('name') for r in results])
self.assertEquals(fake_compute.EXPECTED_INSTANCE_GROUP_URLS,
[r.get('instance_urls') for r in results])
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_instance_groups_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get instance groups."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_instance_groups(self.project_id))
def test_get_instance_templates(self):
"""Test get instance templates."""
http_mocks.mock_http_response(fake_compute.INSTANCE_TEMPLATES_LIST)
results = self.gce_api_client.get_instance_templates(self.project_id)
self.assertEquals(fake_compute.EXPECTED_INSTANCE_TEMPLATE_NAMES,
[r.get('name') for r in results])
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_instance_templates_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get instance templates."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_instance_templates(self.project_id))
def test_get_instances(self):
"""Test get instances."""
http_mocks.mock_http_response(fake_compute.INSTANCES_AGGREGATED_LIST)
results = self.gce_api_client.get_instances(self.project_id)
self.assertEquals(fake_compute.EXPECTED_INSTANCE_NAMES,
[r.get('name') for r in results])
def test_get_instances_by_zone(self):
"""Test get instances by zone."""
http_mocks.mock_http_response(fake_compute.INSTANCES_LIST)
results = self.gce_api_client.get_instances(
self.project_id, fake_compute.FAKE_INSTANCE_ZONE)
self.assertEquals(fake_compute.EXPECTED_INSTANCE_NAMES,
[r.get('name') for r in results])
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_instances_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get instances."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_instances(self.project_id))
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_instances(
self.project_id, fake_compute.FAKE_INSTANCE_ZONE))
def test_get_networks(self):
"""Test get networks."""
mock_responses = []
for page in fake_compute.LIST_NETWORKS_RESPONSES:
mock_responses.append(({'status': '200'}, page))
http_mocks.mock_http_response_sequence(mock_responses)
results = self.gce_api_client.get_networks(self.project_id)
self.assertEquals(fake_compute.EXPECTED_NETWORK_NAME,
[r.get('name') for r in results])
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_networks_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get networks."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_networks(self.project_id))
def test_get_project(self):
"""Test get project."""
http_mocks.mock_http_response(
fake_compute.GET_PROJECT_RESPONSE)
results = self.gce_api_client.get_project(self.project_id)
self.assertEquals(self.project_id, results.get('name'))
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_project_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get instance templates."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
self.gce_api_client.get_project(self.project_id)
def test_get_subnetworks(self):
"""Test get subnetworks."""
mock_responses = []
for page in fake_compute.SUBNETWORKS_AGGREGATED_LIST:
mock_responses.append(({'status': '200'}, page))
http_mocks.mock_http_response_sequence(mock_responses)
results = self.gce_api_client.get_subnetworks(self.project_id)
self.assertEquals(
fake_compute.EXPECTED_SUBNETWORKS_AGGREGATEDLIST_SELFLINKS,
frozenset([r.get('selfLink') for r in results]))
def test_get_subnetworks_by_region(self):
"""Test get subnetworks by region."""
http_mocks.mock_http_response(fake_compute.SUBNETWORKS_LIST)
results = self.gce_api_client.get_subnetworks(
self.project_id, fake_compute.FAKE_SUBNETWORK_REGION)
self.assertEquals(fake_compute.EXPECTED_SUBNETWORKS_LIST_SELFLINKS,
frozenset([r.get('selfLink') for r in results]))
@parameterized.parameterized.expand(ERROR_TEST_CASES)
def test_get_subnetworks_errors(self, name, response, status,
expected_exception):
"""Verify error conditions for get subnetworks."""
http_mocks.mock_http_response(response, status)
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_subnetworks(self.project_id))
with self.assertRaises(expected_exception):
list(self.gce_api_client.get_subnetworks(
self.project_id, fake_compute.FAKE_SUBNETWORK_REGION))
def test_is_api_enabled_true(self):
"""Verify that a positive response from the API returns True."""
http_mocks.mock_http_response(
fake_compute.GET_PROJECT_NAME_RESPONSE)
results = self.gce_api_client.is_api_enabled(self.project_id)
self.assertTrue(results)
def test_is_api_enabled_false(self):
"""Verify that a positive response from the API returns True."""
http_mocks.mock_http_response(fake_compute.API_NOT_ENABLED, '403')
results = self.gce_api_client.is_api_enabled(self.project_id)
self.assertFalse(results)
def test_is_api_enabled_error(self):
"""Verify that a positive response from the API returns True."""
http_mocks.mock_http_response(fake_compute.ACCESS_DENIED, '403')
with self.assertRaises(api_errors.ApiExecutionError):
self.gce_api_client.is_api_enabled(self.project_id)
if __name__ == '__main__':
unittest.main()
| 1 | 27,776 | Include the standard parameterized error tests as well. | forseti-security-forseti-security | py |
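The reviewer asks for the standard parameterized error tests to accompany the new test_get_global_operations case. Following the ERROR_TEST_CASES / parameterized.expand pattern used by the other tests in this class, such a test might look like the sketch below (the call signature is assumed from the new test in the patch, and wrapping the call in list() mirrors the other error tests in case the client returns a generator):

    @parameterized.parameterized.expand(ERROR_TEST_CASES)
    def test_get_global_operations_errors(self, name, response, status,
                                          expected_exception):
        """Verify error conditions for get global operations."""
        http_mocks.mock_http_response(response, status)
        with self.assertRaises(expected_exception):
            list(self.gce_api_client.get_global_operations(
                self.project_id, operation_id='operation-1234'))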
@@ -242,6 +242,16 @@ func runWeb(ctx *cli.Context) {
m.Patch("/hooks/:id:int", bind(api.EditHookOption{}), v1.EditRepoHook)
m.Get("/raw/*", middleware.RepoRef(), v1.GetRepoRawFile)
m.Get("/archive/*", v1.GetRepoArchive)
+ m.Post("/forks", v1.ForkRepo)
+
+ m.Get("/commits/:commitId", middleware.RepoRef(), v1.CommitById)
+ m.Get("/commits/current", middleware.RepoRef(), v1.CurrentCommit)
+
+ m.Group("/releases", func() {
+ m.Combo("").Get(v1.ListReleases).
+ Post(bindIgnErr(api.CreateReleaseOption{}), v1.CreateRelease)
+ m.Get("/:release", v1.ReleaseByName)
+ }, middleware.RepoRef())
}, middleware.ApiRepoAssignment(), middleware.ApiReqToken())
})
| 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package cmd
import (
"crypto/tls"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"net/http/fcgi"
"os"
"path"
"strings"
"github.com/Unknwon/macaron"
"github.com/codegangsta/cli"
"github.com/go-xorm/xorm"
"github.com/macaron-contrib/binding"
"github.com/macaron-contrib/cache"
"github.com/macaron-contrib/captcha"
"github.com/macaron-contrib/csrf"
"github.com/macaron-contrib/i18n"
"github.com/macaron-contrib/oauth2"
"github.com/macaron-contrib/session"
"github.com/macaron-contrib/toolbox"
"github.com/mcuadros/go-version"
"gopkg.in/ini.v1"
api "github.com/gogits/go-gogs-client"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/modules/auth"
"github.com/gogits/gogs/modules/auth/apiv1"
"github.com/gogits/gogs/modules/avatar"
"github.com/gogits/gogs/modules/base"
"github.com/gogits/gogs/modules/bindata"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/middleware"
"github.com/gogits/gogs/modules/setting"
"github.com/gogits/gogs/routers"
"github.com/gogits/gogs/routers/admin"
"github.com/gogits/gogs/routers/api/v1"
"github.com/gogits/gogs/routers/dev"
"github.com/gogits/gogs/routers/org"
"github.com/gogits/gogs/routers/repo"
"github.com/gogits/gogs/routers/user"
)
var CmdWeb = cli.Command{
Name: "web",
Usage: "Start Gogs web server",
Description: `Gogs web server is the only thing you need to run,
and it takes care of all the other things for you`,
Action: runWeb,
Flags: []cli.Flag{
cli.StringFlag{"port, p", "3000", "Temporary port number to prevent conflict", ""},
cli.StringFlag{"config, c", "custom/conf/app.ini", "Custom configuration file path", ""},
},
}
type VerChecker struct {
ImportPath string
Version func() string
Expected string
}
// checkVersion checks if the binary matches the version of the template files.
func checkVersion() {
// Templates.
data, err := ioutil.ReadFile(setting.StaticRootPath + "/templates/.VERSION")
if err != nil {
log.Fatal(4, "Fail to read 'templates/.VERSION': %v", err)
}
if string(data) != setting.AppVer {
log.Fatal(4, "Binary and template file version does not match, did you forget to recompile?")
}
// Check dependency version.
checkers := []VerChecker{
{"github.com/go-xorm/xorm", func() string { return xorm.Version }, "0.4.3.0806"},
{"github.com/Unknwon/macaron", macaron.Version, "0.5.4"},
{"github.com/macaron-contrib/binding", binding.Version, "0.1.0"},
{"github.com/macaron-contrib/cache", cache.Version, "0.1.2"},
{"github.com/macaron-contrib/csrf", csrf.Version, "0.0.3"},
{"github.com/macaron-contrib/i18n", i18n.Version, "0.0.7"},
{"github.com/macaron-contrib/session", session.Version, "0.1.6"},
{"gopkg.in/ini.v1", ini.Version, "1.3.4"},
}
for _, c := range checkers {
if !version.Compare(c.Version(), c.Expected, ">=") {
log.Fatal(4, "Package '%s' version is too old(%s -> %s), did you forget to update?", c.ImportPath, c.Version(), c.Expected)
}
}
}
// newMacaron initializes Macaron instance.
func newMacaron() *macaron.Macaron {
m := macaron.New()
if !setting.DisableRouterLog {
m.Use(macaron.Logger())
}
m.Use(macaron.Recovery())
if setting.EnableGzip {
m.Use(macaron.Gziper())
}
if setting.Protocol == setting.FCGI {
m.SetURLPrefix(setting.AppSubUrl)
}
m.Use(macaron.Static(
path.Join(setting.StaticRootPath, "public"),
macaron.StaticOptions{
SkipLogging: setting.DisableRouterLog,
},
))
m.Use(macaron.Static(
setting.AvatarUploadPath,
macaron.StaticOptions{
Prefix: "avatars",
SkipLogging: setting.DisableRouterLog,
},
))
m.Use(macaron.Renderer(macaron.RenderOptions{
Directory: path.Join(setting.StaticRootPath, "templates"),
Funcs: []template.FuncMap{base.TemplateFuncs},
IndentJSON: macaron.Env != macaron.PROD,
}))
localeNames, err := bindata.AssetDir("conf/locale")
if err != nil {
log.Fatal(4, "Fail to list locale files: %v", err)
}
localFiles := make(map[string][]byte)
for _, name := range localeNames {
localFiles[name] = bindata.MustAsset("conf/locale/" + name)
}
m.Use(i18n.I18n(i18n.Options{
SubURL: setting.AppSubUrl,
Files: localFiles,
CustomDirectory: path.Join(setting.CustomPath, "conf/locale"),
Langs: setting.Langs,
Names: setting.Names,
Redirect: true,
}))
m.Use(cache.Cacher(cache.Options{
Adapter: setting.CacheAdapter,
AdapterConfig: setting.CacheConn,
Interval: setting.CacheInternal,
}))
m.Use(captcha.Captchaer(captcha.Options{
SubURL: setting.AppSubUrl,
}))
m.Use(session.Sessioner(setting.SessionConfig))
m.Use(csrf.Csrfer(csrf.Options{
Secret: setting.SecretKey,
SetCookie: true,
Header: "X-Csrf-Token",
CookiePath: setting.AppSubUrl,
}))
m.Use(toolbox.Toolboxer(m, toolbox.Options{
HealthCheckFuncs: []*toolbox.HealthCheckFuncDesc{
&toolbox.HealthCheckFuncDesc{
Desc: "Database connection",
Func: models.Ping,
},
},
}))
// OAuth 2.
if setting.OauthService != nil {
for _, info := range setting.OauthService.OauthInfos {
m.Use(oauth2.NewOAuth2Provider(info.Options, info.AuthUrl, info.TokenUrl))
}
}
m.Use(middleware.Contexter())
return m
}
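// runWeb initializes the global environment, registers all web and API
// routes, and starts serving over HTTP, HTTPS or FCGI depending on the
// configured protocol.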
func runWeb(ctx *cli.Context) {
if ctx.IsSet("config") {
setting.CustomConf = ctx.String("config")
}
routers.GlobalInit()
checkVersion()
m := newMacaron()
reqSignIn := middleware.Toggle(&middleware.ToggleOptions{SignInRequire: true})
ignSignIn := middleware.Toggle(&middleware.ToggleOptions{SignInRequire: setting.Service.RequireSignInView})
ignSignInAndCsrf := middleware.Toggle(&middleware.ToggleOptions{DisableCsrf: true})
reqSignOut := middleware.Toggle(&middleware.ToggleOptions{SignOutRequire: true})
bind := binding.Bind
bindIgnErr := binding.BindIgnErr
// Routers.
m.Get("/", ignSignIn, routers.Home)
m.Get("/explore", ignSignIn, routers.Explore)
m.Combo("/install", routers.InstallInit).Get(routers.Install).
Post(bindIgnErr(auth.InstallForm{}), routers.InstallPost)
m.Get("/:type(issues|pulls)", reqSignIn, user.Issues)
// ***** START: API *****
// FIXME: custom form error response.
m.Group("/api", func() {
m.Group("/v1", func() {
// Miscellaneous.
m.Post("/markdown", bindIgnErr(apiv1.MarkdownForm{}), v1.Markdown)
m.Post("/markdown/raw", v1.MarkdownRaw)
// Users.
m.Group("/users", func() {
m.Get("/search", v1.SearchUsers)
m.Group("/:username", func() {
m.Get("", v1.GetUserInfo)
m.Group("/tokens", func() {
m.Combo("").Get(v1.ListAccessTokens).
Post(bind(v1.CreateAccessTokenForm{}), v1.CreateAccessToken)
}, middleware.ApiReqBasicAuth())
})
})
// Repositories.
m.Combo("/user/repos", middleware.ApiReqToken()).Get(v1.ListMyRepos).
Post(bind(api.CreateRepoOption{}), v1.CreateRepo)
m.Post("/org/:org/repos", middleware.ApiReqToken(), bind(api.CreateRepoOption{}), v1.CreateOrgRepo)
m.Group("/repos", func() {
m.Get("/search", v1.SearchRepos)
m.Group("", func() {
m.Post("/migrate", bindIgnErr(auth.MigrateRepoForm{}), v1.MigrateRepo)
}, middleware.ApiReqToken())
m.Group("/:username/:reponame", func() {
m.Combo("/hooks").Get(v1.ListRepoHooks).
Post(bind(api.CreateHookOption{}), v1.CreateRepoHook)
m.Patch("/hooks/:id:int", bind(api.EditHookOption{}), v1.EditRepoHook)
m.Get("/raw/*", middleware.RepoRef(), v1.GetRepoRawFile)
m.Get("/archive/*", v1.GetRepoArchive)
}, middleware.ApiRepoAssignment(), middleware.ApiReqToken())
})
m.Any("/*", func(ctx *middleware.Context) {
ctx.HandleAPI(404, "Page not found")
})
})
}, ignSignIn)
// ***** END: API *****
// ***** START: User *****
m.Group("/user", func() {
m.Get("/login", user.SignIn)
m.Post("/login", bindIgnErr(auth.SignInForm{}), user.SignInPost)
m.Get("/info/:name", user.SocialSignIn)
m.Get("/sign_up", user.SignUp)
m.Post("/sign_up", bindIgnErr(auth.RegisterForm{}), user.SignUpPost)
m.Get("/reset_password", user.ResetPasswd)
m.Post("/reset_password", user.ResetPasswdPost)
}, reqSignOut)
m.Group("/user/settings", func() {
m.Get("", user.Settings)
m.Post("", bindIgnErr(auth.UpdateProfileForm{}), user.SettingsPost)
m.Post("/avatar", binding.MultipartForm(auth.UploadAvatarForm{}), user.SettingsAvatar)
m.Get("/email", user.SettingsEmails)
m.Post("/email", bindIgnErr(auth.AddEmailForm{}), user.SettingsEmailPost)
m.Get("/password", user.SettingsPassword)
m.Post("/password", bindIgnErr(auth.ChangePasswordForm{}), user.SettingsPasswordPost)
m.Combo("/ssh").Get(user.SettingsSSHKeys).
Post(bindIgnErr(auth.AddSSHKeyForm{}), user.SettingsSSHKeysPost)
m.Post("/ssh/delete", user.DeleteSSHKey)
m.Get("/social", user.SettingsSocial)
m.Combo("/applications").Get(user.SettingsApplications).
Post(bindIgnErr(auth.NewAccessTokenForm{}), user.SettingsApplicationsPost)
m.Post("/applications/delete", user.SettingsDeleteApplication)
m.Route("/delete", "GET,POST", user.SettingsDelete)
}, reqSignIn, func(ctx *middleware.Context) {
ctx.Data["PageIsUserSettings"] = true
ctx.Data["HasOAuthService"] = setting.OauthService != nil
})
m.Group("/user", func() {
// r.Get("/feeds", binding.Bind(auth.FeedsForm{}), user.Feeds)
m.Any("/activate", user.Activate)
m.Any("/activate_email", user.ActivateEmail)
m.Get("/email2user", user.Email2User)
m.Get("/forget_password", user.ForgotPasswd)
m.Post("/forget_password", user.ForgotPasswdPost)
m.Get("/logout", user.SignOut)
})
// ***** END: User *****
// Gravatar service.
avt := avatar.CacheServer("public/img/avatar/", "public/img/avatar_default.jpg")
os.MkdirAll("public/img/avatar/", os.ModePerm)
m.Get("/avatar/:hash", avt.ServeHTTP)
adminReq := middleware.Toggle(&middleware.ToggleOptions{SignInRequire: true, AdminRequire: true})
// ***** START: Admin *****
m.Group("/admin", func() {
m.Get("", adminReq, admin.Dashboard)
m.Get("/config", admin.Config)
m.Get("/monitor", admin.Monitor)
m.Group("/users", func() {
m.Get("", admin.Users)
m.Get("/new", admin.NewUser)
m.Post("/new", bindIgnErr(auth.RegisterForm{}), admin.NewUserPost)
m.Get("/:userid", admin.EditUser)
m.Post("/:userid", bindIgnErr(auth.AdminEditUserForm{}), admin.EditUserPost)
m.Post("/:userid/delete", admin.DeleteUser)
})
m.Group("/orgs", func() {
m.Get("", admin.Organizations)
})
m.Group("/repos", func() {
m.Get("", admin.Repositories)
})
m.Group("/auths", func() {
m.Get("", admin.Authentications)
m.Get("/new", admin.NewAuthSource)
m.Post("/new", bindIgnErr(auth.AuthenticationForm{}), admin.NewAuthSourcePost)
m.Get("/:authid", admin.EditAuthSource)
m.Post("/:authid", bindIgnErr(auth.AuthenticationForm{}), admin.EditAuthSourcePost)
m.Post("/:authid/delete", admin.DeleteAuthSource)
})
m.Group("/notices", func() {
m.Get("", admin.Notices)
m.Get("/:id:int/delete", admin.DeleteNotice)
})
}, adminReq)
// ***** END: Admin *****
m.Group("", func() {
m.Get("/:username", user.Profile)
m.Get("/attachments/:uuid", func(ctx *middleware.Context) {
attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
if err != nil {
if models.IsErrAttachmentNotExist(err) {
ctx.Error(404)
} else {
ctx.Handle(500, "GetAttachmentByUUID", err)
}
return
}
fr, err := os.Open(attach.LocalPath())
if err != nil {
ctx.Handle(500, "Open", err)
return
}
defer fr.Close()
ctx.Header().Set("Cache-Control", "public,max-age=86400")
// Fix #312. Attachments with a comma in their name are not handled correctly by Google Chrome.
// We must manually wrap the name in double quotes.
if err = repo.ServeData(ctx, "\""+attach.Name+"\"", fr); err != nil {
ctx.Handle(500, "ServeData", err)
return
}
})
m.Post("/issues/attachments", repo.UploadIssueAttachment)
}, ignSignIn)
if macaron.Env == macaron.DEV {
m.Get("/template/*", dev.TemplatePreview)
}
reqRepoAdmin := middleware.RequireRepoAdmin()
// ***** START: Organization *****
m.Group("/org", func() {
m.Get("/create", org.Create)
m.Post("/create", bindIgnErr(auth.CreateOrgForm{}), org.CreatePost)
m.Group("/:org", func() {
m.Get("/dashboard", user.Dashboard)
m.Get("/:type(issues|pulls)", user.Issues)
m.Get("/members", org.Members)
m.Get("/members/action/:action", org.MembersAction)
m.Get("/teams", org.Teams)
m.Get("/teams/:team", org.TeamMembers)
m.Get("/teams/:team/repositories", org.TeamRepositories)
m.Get("/teams/:team/action/:action", org.TeamsAction)
m.Get("/teams/:team/action/repo/:action", org.TeamsRepoAction)
}, middleware.OrgAssignment(true, true))
m.Group("/:org", func() {
m.Get("/teams/new", org.NewTeam)
m.Post("/teams/new", bindIgnErr(auth.CreateTeamForm{}), org.NewTeamPost)
m.Get("/teams/:team/edit", org.EditTeam)
m.Post("/teams/:team/edit", bindIgnErr(auth.CreateTeamForm{}), org.EditTeamPost)
m.Post("/teams/:team/delete", org.DeleteTeam)
m.Group("/settings", func() {
m.Combo("").Get(org.Settings).
Post(bindIgnErr(auth.UpdateOrgSettingForm{}), org.SettingsPost)
m.Post("/avatar", binding.MultipartForm(auth.UploadAvatarForm{}), org.SettingsAvatar)
m.Group("/hooks", func() {
m.Get("", org.Webhooks)
m.Post("/delete", org.DeleteWebhook)
m.Get("/:type/new", repo.WebhooksNew)
m.Post("/gogs/new", bindIgnErr(auth.NewWebhookForm{}), repo.WebHooksNewPost)
m.Post("/slack/new", bindIgnErr(auth.NewSlackHookForm{}), repo.SlackHooksNewPost)
m.Get("/:id", repo.WebHooksEdit)
m.Post("/gogs/:id", bindIgnErr(auth.NewWebhookForm{}), repo.WebHooksEditPost)
m.Post("/slack/:id", bindIgnErr(auth.NewSlackHookForm{}), repo.SlackHooksEditPost)
})
m.Route("/delete", "GET,POST", org.SettingsDelete)
})
m.Route("/invitations/new", "GET,POST", org.Invitation)
}, middleware.OrgAssignment(true, true, true))
}, reqSignIn)
m.Group("/org", func() {
m.Get("/:org", org.Home)
}, ignSignIn, middleware.OrgAssignment(true))
// ***** END: Organization *****
// ***** START: Repository *****
m.Group("/repo", func() {
m.Get("/create", repo.Create)
m.Post("/create", bindIgnErr(auth.CreateRepoForm{}), repo.CreatePost)
m.Get("/migrate", repo.Migrate)
m.Post("/migrate", bindIgnErr(auth.MigrateRepoForm{}), repo.MigratePost)
m.Combo("/fork/:repoid").Get(repo.Fork).
Post(bindIgnErr(auth.CreateRepoForm{}), repo.ForkPost)
}, reqSignIn)
m.Group("/:username/:reponame", func() {
m.Group("/settings", func() {
m.Combo("").Get(repo.Settings).
Post(bindIgnErr(auth.RepoSettingForm{}), repo.SettingsPost)
m.Route("/collaboration", "GET,POST", repo.Collaboration)
m.Group("/hooks", func() {
m.Get("", repo.Webhooks)
m.Post("/delete", repo.DeleteWebhook)
m.Get("/:type/new", repo.WebhooksNew)
m.Post("/gogs/new", bindIgnErr(auth.NewWebhookForm{}), repo.WebHooksNewPost)
m.Post("/slack/new", bindIgnErr(auth.NewSlackHookForm{}), repo.SlackHooksNewPost)
m.Get("/:id", repo.WebHooksEdit)
m.Post("/gogs/:id", bindIgnErr(auth.NewWebhookForm{}), repo.WebHooksEditPost)
m.Post("/slack/:id", bindIgnErr(auth.NewSlackHookForm{}), repo.SlackHooksEditPost)
m.Group("/git", func() {
m.Get("", repo.GitHooks)
m.Combo("/:name").Get(repo.GitHooksEdit).
Post(repo.GitHooksEditPost)
}, middleware.GitHookService())
})
m.Group("/keys", func() {
m.Combo("").Get(repo.DeployKeys).
Post(bindIgnErr(auth.AddSSHKeyForm{}), repo.DeployKeysPost)
m.Post("/delete", repo.DeleteDeployKey)
})
})
}, reqSignIn, middleware.RepoAssignment(true), reqRepoAdmin)
m.Group("/:username/:reponame", func() {
m.Get("/action/:action", repo.Action)
m.Group("/issues", func() {
m.Combo("/new").Get(repo.NewIssue).
Post(bindIgnErr(auth.CreateIssueForm{}), repo.NewIssuePost)
m.Combo("/:index/comments").Post(bindIgnErr(auth.CreateCommentForm{}), repo.NewComment)
m.Group("/:index", func() {
m.Post("/label", repo.UpdateIssueLabel)
m.Post("/milestone", repo.UpdateIssueMilestone)
m.Post("/assignee", repo.UpdateIssueAssignee)
}, reqRepoAdmin)
m.Group("/:index", func() {
m.Post("/title", repo.UpdateIssueTitle)
m.Post("/content", repo.UpdateIssueContent)
})
})
m.Post("/comments/:id", repo.UpdateCommentContent)
m.Group("/labels", func() {
m.Post("/new", bindIgnErr(auth.CreateLabelForm{}), repo.NewLabel)
m.Post("/edit", bindIgnErr(auth.CreateLabelForm{}), repo.UpdateLabel)
m.Post("/delete", repo.DeleteLabel)
}, reqRepoAdmin)
m.Group("/milestones", func() {
m.Get("/new", repo.NewMilestone)
m.Post("/new", bindIgnErr(auth.CreateMilestoneForm{}), repo.NewMilestonePost)
m.Get("/:id/edit", repo.EditMilestone)
m.Post("/:id/edit", bindIgnErr(auth.CreateMilestoneForm{}), repo.EditMilestonePost)
m.Get("/:id/:action", repo.ChangeMilestonStatus)
m.Post("/delete", repo.DeleteMilestone)
}, reqRepoAdmin)
m.Group("/releases", func() {
m.Get("/new", repo.NewRelease)
m.Post("/new", bindIgnErr(auth.NewReleaseForm{}), repo.NewReleasePost)
m.Get("/edit/:tagname", repo.EditRelease)
m.Post("/edit/:tagname", bindIgnErr(auth.EditReleaseForm{}), repo.EditReleasePost)
}, reqRepoAdmin, middleware.RepoRef())
m.Combo("/compare/*").Get(repo.CompareAndPullRequest).
Post(bindIgnErr(auth.CreateIssueForm{}), repo.CompareAndPullRequestPost)
}, reqSignIn, middleware.RepoAssignment(true))
m.Group("/:username/:reponame", func() {
m.Get("/releases", middleware.RepoRef(), repo.Releases)
m.Get("/:type(issues|pulls)", repo.RetrieveLabels, repo.Issues)
m.Get("/:type(issues|pulls)/:index", repo.ViewIssue)
m.Get("/labels/", repo.RetrieveLabels, repo.Labels)
m.Get("/milestones", repo.Milestones)
m.Get("/branches", repo.Branches)
m.Get("/archive/*", repo.Download)
m.Group("/pulls/:index", func() {
m.Get("/commits", repo.ViewPullCommits)
m.Get("/files", repo.ViewPullFiles)
m.Post("/merge", reqRepoAdmin, repo.MergePullRequest)
})
m.Group("", func() {
m.Get("/src/*", repo.Home)
m.Get("/raw/*", repo.SingleDownload)
m.Get("/commits/*", repo.RefCommits)
m.Get("/commit/*", repo.Diff)
}, middleware.RepoRef())
m.Get("/compare/:before([a-z0-9]{40})...:after([a-z0-9]{40})", repo.CompareDiff)
}, ignSignIn, middleware.RepoAssignment(true))
m.Group("/:username", func() {
m.Group("/:reponame", func() {
m.Get("", repo.Home)
m.Get("\\.git$", repo.Home)
}, ignSignIn, middleware.RepoAssignment(true, true), middleware.RepoRef())
m.Group("/:reponame", func() {
m.Any("/*", ignSignInAndCsrf, repo.Http)
m.Head("/hooks/trigger", repo.TriggerHook)
})
})
// ***** END: Repository *****
// robots.txt
m.Get("/robots.txt", func(ctx *middleware.Context) {
if setting.HasRobotsTxt {
ctx.ServeFileContent(path.Join(setting.CustomPath, "robots.txt"))
} else {
ctx.Error(404)
}
})
// Not found handler.
m.NotFound(routers.NotFound)
// Flag for port number in case of a first-run port conflict.
if ctx.IsSet("port") {
setting.AppUrl = strings.Replace(setting.AppUrl, setting.HttpPort, ctx.String("port"), 1)
setting.HttpPort = ctx.String("port")
}
var err error
listenAddr := fmt.Sprintf("%s:%s", setting.HttpAddr, setting.HttpPort)
log.Info("Listen: %v://%s%s", setting.Protocol, listenAddr, setting.AppSubUrl)
switch setting.Protocol {
case setting.HTTP:
err = http.ListenAndServe(listenAddr, m)
case setting.HTTPS:
server := &http.Server{Addr: listenAddr, TLSConfig: &tls.Config{MinVersion: tls.VersionTLS10}, Handler: m}
err = server.ListenAndServeTLS(setting.CertFile, setting.KeyFile)
case setting.FCGI:
err = fcgi.Serve(nil, m)
default:
log.Fatal(4, "Invalid protocol: %s", setting.Protocol)
}
if err != nil {
log.Fatal(4, "Fail to start server: %v", err)
}
}
| 1 | 9,348 | All lowercase in route rule. | gogs-gogs | go |
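The reviewer's point is that the :commitId placeholder added in the patch should use all-lowercase route rule syntax. An illustrative sketch of the renamed rules (handlers as in the patch, only the placeholder casing changes):

m.Get("/commits/:commitid", middleware.RepoRef(), v1.CommitById)
m.Get("/commits/current", middleware.RepoRef(), v1.CurrentCommit)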
@@ -61,8 +61,16 @@ export const actions = {
const { dispatch } = yield Data.commonActions.getRegistry();
yield actions.receiveAccounts( response.accounts );
- dispatch( STORE_NAME ).receiveProperties( response.properties );
- dispatch( STORE_NAME ).receiveProfiles( response.profiles );
+
+ if ( response.properties.length && response.properties[ 0 ] && response.properties[ 0 ].accountId ) {
+ const accountID = response.properties[ 0 ].accountId;
+ dispatch( STORE_NAME ).receiveProperties( response.properties, { accountID } );
+ }
+
+ if ( response.properties.length && response.properties[ 0 ] && response.properties[ 0 ].id ) {
+ const propertyID = response.properties[ 0 ].id;
+ dispatch( STORE_NAME ).receiveProfiles( response.profiles, { propertyID } );
+ }
if ( response.matchedProperty ) {
dispatch( STORE_NAME ).receiveMatchedProperty( response.matchedProperty ); | 1 | /**
* modules/analytics data store: accounts.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import invariant from 'invariant';
/**
* Internal dependencies
*/
import API from 'googlesitekit-api';
import Data from 'googlesitekit-data';
import { isValidAccountSelection } from '../util';
import { STORE_NAME, ACCOUNT_CREATE, PROPERTY_CREATE } from './constants';
import { actions as tagActions } from './tags';
// Actions
const FETCH_ACCOUNTS_PROPERTIES_PROFILES = 'FETCH_ACCOUNTS_PROPERTIES_PROFILES';
const START_FETCH_ACCOUNTS_PROPERTIES_PROFILES = 'START_FETCH_ACCOUNTS_PROPERTIES_PROFILES';
const FINISH_FETCH_ACCOUNTS_PROPERTIES_PROFILES = 'FINISH_FETCH_ACCOUNTS_PROPERTIES_PROFILES';
const CATCH_FETCH_ACCOUNTS_PROPERTIES_PROFILES = 'CATCH_FETCH_ACCOUNTS_PROPERTIES_PROFILES';
const RECEIVE_ACCOUNTS = 'RECEIVE_ACCOUNTS';
const RESET_ACCOUNTS = 'RESET_ACCOUNTS';
export const INITIAL_STATE = {
accounts: undefined,
isFetchingAccountsPropertiesProfiles: false,
};
export const actions = {
*fetchAccountsPropertiesProfiles( data ) {
let response, error;
yield {
payload: { data },
type: START_FETCH_ACCOUNTS_PROPERTIES_PROFILES,
};
try {
response = yield {
payload: { data },
type: FETCH_ACCOUNTS_PROPERTIES_PROFILES,
};
const { dispatch } = yield Data.commonActions.getRegistry();
yield actions.receiveAccounts( response.accounts );
dispatch( STORE_NAME ).receiveProperties( response.properties );
dispatch( STORE_NAME ).receiveProfiles( response.profiles );
if ( response.matchedProperty ) {
dispatch( STORE_NAME ).receiveMatchedProperty( response.matchedProperty );
}
yield {
payload: { data },
type: FINISH_FETCH_ACCOUNTS_PROPERTIES_PROFILES,
};
} catch ( e ) {
error = e;
yield {
payload: {
data,
error,
},
type: CATCH_FETCH_ACCOUNTS_PROPERTIES_PROFILES,
};
}
return { response, error };
},
/**
* Creates an action for receiving accounts.
*
* @since n.e.x.t
* @private
*
* @param {Array} accounts Accounts to receive.
* @return {Object} action object.
*/
receiveAccounts( accounts ) {
invariant( Array.isArray( accounts ), 'accounts must be an array.' );
return {
payload: { accounts },
type: RECEIVE_ACCOUNTS,
};
},
*resetAccounts() {
const registry = yield Data.commonActions.getRegistry();
yield { type: RESET_ACCOUNTS };
return registry.stores[ STORE_NAME ].getActions()
.invalidateResolutionForStoreSelector( 'getAccounts' );
},
*selectAccount( accountID ) {
invariant( isValidAccountSelection( accountID ), 'A valid accountID is required to select.' );
const registry = yield Data.commonActions.getRegistry();
registry.dispatch( STORE_NAME ).setAccountID( accountID );
registry.dispatch( STORE_NAME ).setPropertyID( '' );
registry.dispatch( STORE_NAME ).setInternalWebPropertyID( '' );
registry.dispatch( STORE_NAME ).setProfileID( '' );
if ( ACCOUNT_CREATE === accountID ) {
return;
}
// Trigger cascading selections.
const properties = registry.select( STORE_NAME ).getProperties( accountID );
if ( properties === undefined ) {
return; // Selection will happen in resolver.
}
const property = properties[ 0 ] || { id: PROPERTY_CREATE };
registry.dispatch( STORE_NAME ).selectProperty( property.id );
},
};
export const controls = {
[ FETCH_ACCOUNTS_PROPERTIES_PROFILES ]: ( { payload } ) => {
return API.get( 'modules', 'analytics', 'accounts-properties-profiles', payload.data, {
useCache: false,
} );
},
};
export const reducer = ( state, { type, payload } ) => {
switch ( type ) {
case START_FETCH_ACCOUNTS_PROPERTIES_PROFILES: {
return {
...state,
isFetchingAccountsPropertiesProfiles: true,
};
}
case RECEIVE_ACCOUNTS: {
const { accounts } = payload;
return {
...state,
accounts: [ ...accounts ],
};
}
case FINISH_FETCH_ACCOUNTS_PROPERTIES_PROFILES: {
return {
...state,
isFetchingAccountsPropertiesProfiles: false,
};
}
case CATCH_FETCH_ACCOUNTS_PROPERTIES_PROFILES: {
const { error } = payload;
return {
...state,
error,
isFetchingAccountsPropertiesProfiles: false,
};
}
case RESET_ACCOUNTS: {
return {
...state,
accounts: undefined,
settings: {
...state.settings,
accountID: undefined,
propertyID: undefined,
internalWebPropertyID: undefined,
profileID: undefined,
},
};
}
default: {
return { ...state };
}
}
};
export const resolvers = {
*getAccounts() {
const registry = yield Data.commonActions.getRegistry();
const existingAccounts = registry.select( STORE_NAME ).getAccounts();
let matchedProperty = registry.select( STORE_NAME ).getMatchedProperty();
// Only fetch accounts if there are none in the store.
if ( ! existingAccounts ) {
yield tagActions.waitForExistingTag();
const existingTag = registry.select( STORE_NAME ).getExistingTag();
const { response } = yield actions.fetchAccountsPropertiesProfiles( {
existingPropertyID: existingTag,
} );
if ( response ) {
( { matchedProperty } = response );
}
}
const accountID = registry.select( STORE_NAME ).getAccountID();
// Pre-select values from the matched property if no account is selected.
if ( matchedProperty && ! accountID ) {
registry.dispatch( STORE_NAME ).setAccountID( matchedProperty.accountId ); // Capitalization rule exception: accountId
registry.dispatch( STORE_NAME ).selectProperty( matchedProperty.id, matchedProperty.internalWebPropertyId ); // Capitalization rule exception: internalWebPropertyId
}
},
};
export const selectors = {
/**
* Gets all Google Analytics accounts this user can access.
*
* Returns an array of all analytics accounts.
*
* Returns `undefined` if accounts have not yet loaded.
*
* @since n.e.x.t
*
* @param {Object} state Data store's state.
* @return {?Array.<Object>} An array of Analytics accounts; `undefined` if not loaded.
*/
getAccounts( state ) {
const { accounts } = state;
return accounts;
},
/**
* Gets an error encountered by this store or its side effects.
*
* Returns an object with the shape when there is an error:
* ```
* {
* code,
* message,
* }
* ```
*
* Returns `null` if there was no error.
*
* Marked as private, because in the future we'll have more robust error
* handling.
*
* @since n.e.x.t
* @private
*
* @param {Object} state Data store's state.
* @return {?Object} Any error encountered with requests in state.
*/
getError( state ) {
const { error } = state;
return error || null;
},
/**
* Checks whether accounts are currently being fetched.
*
* @since n.e.x.t
* @private
*
* @param {Object} state Data store's state.
* @return {boolean} Whether accounts are currently being fetched or not.
*/
isDoingGetAccounts( state ) {
return !! state.isFetchingAccountsPropertiesProfiles;
},
};
export default {
INITIAL_STATE,
actions,
controls,
reducer,
resolvers,
selectors,
};
| 1 | 28,518 | I'd suggest we use `response.profiles[ 0 ].webPropertyId` here instead, to make sure it's based on the profiles retrieved. The value _should_ be the same, but I think it's a bit more accurate like that. | google-site-kit-wp | js |
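The suggestion above is to key the received profiles off the profiles themselves rather than off the first property in the response. A sketch of the adjusted guard, assuming the webPropertyId field on profile objects that the reviewer mentions:

if ( response.profiles.length && response.profiles[ 0 ] && response.profiles[ 0 ].webPropertyId ) {
	const propertyID = response.profiles[ 0 ].webPropertyId;
	dispatch( STORE_NAME ).receiveProfiles( response.profiles, { propertyID } );
}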
@@ -314,8 +314,8 @@ public class OverviewFragment extends Fragment implements View.OnClickListener,
bgGraph.setOnLongClickListener(new View.OnLongClickListener() {
@Override
public boolean onLongClick(View v) {
- rangeToDisplay += 6;
- rangeToDisplay = rangeToDisplay > 24 ? 6 : rangeToDisplay;
+ rangeToDisplay += 3;
+ rangeToDisplay = rangeToDisplay > 24 ? 3 : rangeToDisplay;
SP.putInt(R.string.key_rangetodisplay, rangeToDisplay);
updateGUI("rangeChange");
return false; | 1 | package info.nightscout.androidaps.plugins.Overview;
import android.annotation.SuppressLint;
import android.app.Activity;
import android.app.NotificationManager;
import android.content.ActivityNotFoundException;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.graphics.Color;
import android.graphics.Paint;
import android.os.Bundle;
import android.os.Handler;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentActivity;
import android.support.v4.app.FragmentManager;
import android.support.v4.content.res.ResourcesCompat;
import android.support.v7.app.AlertDialog;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.PopupMenu;
import android.support.v7.widget.RecyclerView;
import android.text.SpannableString;
import android.text.style.ForegroundColorSpan;
import android.util.DisplayMetrics;
import android.util.TypedValue;
import android.view.ContextMenu;
import android.view.HapticFeedbackConstants;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.CheckBox;
import android.widget.CompoundButton;
import android.widget.ImageButton;
import android.widget.LinearLayout;
import android.widget.TextView;
import com.crashlytics.android.answers.CustomEvent;
import com.jjoe64.graphview.GraphView;
import com.squareup.otto.Subscribe;
import org.json.JSONException;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.text.DecimalFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import info.nightscout.androidaps.Config;
import info.nightscout.androidaps.Constants;
import info.nightscout.androidaps.MainApp;
import info.nightscout.androidaps.R;
import info.nightscout.androidaps.data.DetailedBolusInfo;
import info.nightscout.androidaps.data.GlucoseStatus;
import info.nightscout.androidaps.data.IobTotal;
import info.nightscout.androidaps.data.Profile;
import info.nightscout.androidaps.data.QuickWizardEntry;
import info.nightscout.androidaps.db.BgReading;
import info.nightscout.androidaps.db.CareportalEvent;
import info.nightscout.androidaps.db.DatabaseHelper;
import info.nightscout.androidaps.db.ExtendedBolus;
import info.nightscout.androidaps.db.Source;
import info.nightscout.androidaps.db.TempTarget;
import info.nightscout.androidaps.db.TemporaryBasal;
import info.nightscout.androidaps.events.EventCareportalEventChange;
import info.nightscout.androidaps.events.EventExtendedBolusChange;
import info.nightscout.androidaps.events.EventInitializationChanged;
import info.nightscout.androidaps.events.EventPreferenceChange;
import info.nightscout.androidaps.events.EventProfileSwitchChange;
import info.nightscout.androidaps.events.EventPumpStatusChanged;
import info.nightscout.androidaps.events.EventRefreshOverview;
import info.nightscout.androidaps.events.EventTempBasalChange;
import info.nightscout.androidaps.events.EventTempTargetChange;
import info.nightscout.androidaps.events.EventTreatmentChange;
import info.nightscout.androidaps.interfaces.Constraint;
import info.nightscout.androidaps.interfaces.PluginType;
import info.nightscout.androidaps.interfaces.PumpDescription;
import info.nightscout.androidaps.interfaces.PumpInterface;
import info.nightscout.androidaps.plugins.Careportal.CareportalFragment;
import info.nightscout.androidaps.plugins.Careportal.Dialogs.NewNSTreatmentDialog;
import info.nightscout.androidaps.plugins.Careportal.OptionsToShow;
import info.nightscout.androidaps.plugins.ConfigBuilder.ConfigBuilderPlugin;
import info.nightscout.androidaps.plugins.ConstraintsObjectives.ObjectivesPlugin;
import info.nightscout.androidaps.plugins.IobCobCalculator.CobInfo;
import info.nightscout.androidaps.plugins.IobCobCalculator.IobCobCalculatorPlugin;
import info.nightscout.androidaps.plugins.IobCobCalculator.events.EventAutosensCalculationFinished;
import info.nightscout.androidaps.plugins.IobCobCalculator.events.EventIobCalculationProgress;
import info.nightscout.androidaps.plugins.Loop.LoopPlugin;
import info.nightscout.androidaps.plugins.Loop.events.EventNewOpenLoopNotification;
import info.nightscout.androidaps.plugins.NSClientInternal.data.NSDeviceStatus;
import info.nightscout.androidaps.plugins.Overview.Dialogs.CalibrationDialog;
import info.nightscout.androidaps.plugins.Overview.Dialogs.ErrorHelperActivity;
import info.nightscout.androidaps.plugins.Overview.Dialogs.NewCarbsDialog;
import info.nightscout.androidaps.plugins.Overview.Dialogs.NewInsulinDialog;
import info.nightscout.androidaps.plugins.Overview.Dialogs.NewTreatmentDialog;
import info.nightscout.androidaps.plugins.Overview.Dialogs.WizardDialog;
import info.nightscout.androidaps.plugins.Overview.activities.QuickWizardListActivity;
import info.nightscout.androidaps.plugins.Overview.events.EventSetWakeLock;
import info.nightscout.androidaps.plugins.Overview.graphData.GraphData;
import info.nightscout.androidaps.plugins.Overview.notifications.NotificationRecyclerViewAdapter;
import info.nightscout.androidaps.plugins.Overview.notifications.NotificationStore;
import info.nightscout.androidaps.plugins.Source.SourceDexcomG5Plugin;
import info.nightscout.androidaps.plugins.Source.SourceXdripPlugin;
import info.nightscout.androidaps.plugins.Treatments.TreatmentsPlugin;
import info.nightscout.androidaps.plugins.Treatments.fragments.ProfileViewerDialog;
import info.nightscout.androidaps.queue.Callback;
import info.nightscout.utils.BolusWizard;
import info.nightscout.utils.DateUtil;
import info.nightscout.utils.DecimalFormatter;
import info.nightscout.utils.FabricPrivacy;
import info.nightscout.utils.NSUpload;
import info.nightscout.utils.OKDialog;
import info.nightscout.utils.Profiler;
import info.nightscout.utils.SP;
import info.nightscout.utils.SingleClickButton;
import info.nightscout.utils.ToastUtils;
public class OverviewFragment extends Fragment implements View.OnClickListener, View.OnLongClickListener {
private static Logger log = LoggerFactory.getLogger(OverviewFragment.class);
TextView timeView;
TextView bgView;
TextView arrowView;
TextView timeAgoView;
TextView deltaView;
TextView avgdeltaView;
TextView baseBasalView;
TextView extendedBolusView;
TextView activeProfileView;
TextView iobView;
TextView cobView;
TextView apsModeView;
TextView tempTargetView;
TextView pumpStatusView;
TextView pumpDeviceStatusView;
TextView openapsDeviceStatusView;
TextView uploaderDeviceStatusView;
TextView iobCalculationProgressView;
LinearLayout loopStatusLayout;
LinearLayout pumpStatusLayout;
GraphView bgGraph;
GraphView iobGraph;
ImageButton chartButton;
TextView iage;
TextView cage;
TextView sage;
TextView pbage;
RecyclerView notificationsView;
LinearLayoutManager llm;
LinearLayout acceptTempLayout;
SingleClickButton acceptTempButton;
SingleClickButton treatmentButton;
SingleClickButton wizardButton;
SingleClickButton calibrationButton;
SingleClickButton insulinButton;
SingleClickButton carbsButton;
SingleClickButton cgmButton;
SingleClickButton quickWizardButton;
CheckBox lockScreen;
boolean smallWidth;
boolean smallHeight;
public static boolean shorttextmode = false;
private boolean accepted;
private int rangeToDisplay = 6; // for graph
Handler sLoopHandler = new Handler();
Runnable sRefreshLoop = null;
final Object updateSync = new Object();
public enum CHARTTYPE {PRE, BAS, IOB, COB, DEV, SEN, DEVSLOPE}
private static final ScheduledExecutorService worker = Executors.newSingleThreadScheduledExecutor();
private static ScheduledFuture<?> scheduledUpdate = null;
public OverviewFragment() {
super();
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
try {
//check screen width
final DisplayMetrics dm = new DisplayMetrics();
getActivity().getWindowManager().getDefaultDisplay().getMetrics(dm);
int screen_width = dm.widthPixels;
int screen_height = dm.heightPixels;
smallWidth = screen_width <= Constants.SMALL_WIDTH;
smallHeight = screen_height <= Constants.SMALL_HEIGHT;
boolean landscape = screen_height < screen_width;
View view;
if (MainApp.sResources.getBoolean(R.bool.isTablet) && (Config.NSCLIENT || Config.G5UPLOADER)) {
view = inflater.inflate(R.layout.overview_fragment_nsclient_tablet, container, false);
} else if (Config.NSCLIENT || Config.G5UPLOADER) {
view = inflater.inflate(R.layout.overview_fragment_nsclient, container, false);
shorttextmode = true;
} else if (smallHeight || landscape) {
view = inflater.inflate(R.layout.overview_fragment_smallheight, container, false);
} else {
view = inflater.inflate(R.layout.overview_fragment, container, false);
}
timeView = (TextView) view.findViewById(R.id.overview_time);
bgView = (TextView) view.findViewById(R.id.overview_bg);
arrowView = (TextView) view.findViewById(R.id.overview_arrow);
if (smallWidth) {
arrowView.setTextSize(TypedValue.COMPLEX_UNIT_DIP, 35);
}
timeAgoView = (TextView) view.findViewById(R.id.overview_timeago);
deltaView = (TextView) view.findViewById(R.id.overview_delta);
avgdeltaView = (TextView) view.findViewById(R.id.overview_avgdelta);
baseBasalView = (TextView) view.findViewById(R.id.overview_basebasal);
extendedBolusView = (TextView) view.findViewById(R.id.overview_extendedbolus);
activeProfileView = (TextView) view.findViewById(R.id.overview_activeprofile);
pumpStatusView = (TextView) view.findViewById(R.id.overview_pumpstatus);
pumpDeviceStatusView = (TextView) view.findViewById(R.id.overview_pump);
openapsDeviceStatusView = (TextView) view.findViewById(R.id.overview_openaps);
uploaderDeviceStatusView = (TextView) view.findViewById(R.id.overview_uploader);
iobCalculationProgressView = (TextView) view.findViewById(R.id.overview_iobcalculationprogess);
loopStatusLayout = (LinearLayout) view.findViewById(R.id.overview_looplayout);
pumpStatusLayout = (LinearLayout) view.findViewById(R.id.overview_pumpstatuslayout);
pumpStatusView.setBackgroundColor(MainApp.sResources.getColor(R.color.colorInitializingBorder));
iobView = (TextView) view.findViewById(R.id.overview_iob);
cobView = (TextView) view.findViewById(R.id.overview_cob);
apsModeView = (TextView) view.findViewById(R.id.overview_apsmode);
tempTargetView = (TextView) view.findViewById(R.id.overview_temptarget);
iage = (TextView) view.findViewById(R.id.careportal_insulinage);
cage = (TextView) view.findViewById(R.id.careportal_canulaage);
sage = (TextView) view.findViewById(R.id.careportal_sensorage);
pbage = (TextView) view.findViewById(R.id.careportal_pbage);
bgGraph = (GraphView) view.findViewById(R.id.overview_bggraph);
iobGraph = (GraphView) view.findViewById(R.id.overview_iobgraph);
treatmentButton = (SingleClickButton) view.findViewById(R.id.overview_treatmentbutton);
treatmentButton.setOnClickListener(this);
wizardButton = (SingleClickButton) view.findViewById(R.id.overview_wizardbutton);
wizardButton.setOnClickListener(this);
insulinButton = (SingleClickButton) view.findViewById(R.id.overview_insulinbutton);
if (insulinButton != null)
insulinButton.setOnClickListener(this);
carbsButton = (SingleClickButton) view.findViewById(R.id.overview_carbsbutton);
if (carbsButton != null)
carbsButton.setOnClickListener(this);
acceptTempButton = (SingleClickButton) view.findViewById(R.id.overview_accepttempbutton);
if (acceptTempButton != null)
acceptTempButton.setOnClickListener(this);
quickWizardButton = (SingleClickButton) view.findViewById(R.id.overview_quickwizardbutton);
quickWizardButton.setOnClickListener(this);
quickWizardButton.setOnLongClickListener(this);
calibrationButton = (SingleClickButton) view.findViewById(R.id.overview_calibrationbutton);
if (calibrationButton != null)
calibrationButton.setOnClickListener(this);
cgmButton = (SingleClickButton) view.findViewById(R.id.overview_cgmbutton);
if (cgmButton != null)
cgmButton.setOnClickListener(this);
acceptTempLayout = (LinearLayout) view.findViewById(R.id.overview_accepttemplayout);
notificationsView = (RecyclerView) view.findViewById(R.id.overview_notifications);
notificationsView.setHasFixedSize(true);
llm = new LinearLayoutManager(view.getContext());
notificationsView.setLayoutManager(llm);
int axisWidth = 50;
if (dm.densityDpi <= 120)
axisWidth = 3;
else if (dm.densityDpi <= 160)
axisWidth = 10;
else if (dm.densityDpi <= 320)
axisWidth = 35;
else if (dm.densityDpi <= 420)
axisWidth = 50;
else if (dm.densityDpi <= 560)
axisWidth = 70;
else
axisWidth = 80;
bgGraph.getGridLabelRenderer().setGridColor(MainApp.sResources.getColor(R.color.graphgrid));
bgGraph.getGridLabelRenderer().reloadStyles();
iobGraph.getGridLabelRenderer().setGridColor(MainApp.sResources.getColor(R.color.graphgrid));
iobGraph.getGridLabelRenderer().reloadStyles();
iobGraph.getGridLabelRenderer().setHorizontalLabelsVisible(false);
bgGraph.getGridLabelRenderer().setLabelVerticalWidth(axisWidth);
iobGraph.getGridLabelRenderer().setLabelVerticalWidth(axisWidth);
iobGraph.getGridLabelRenderer().setNumVerticalLabels(5);
rangeToDisplay = SP.getInt(R.string.key_rangetodisplay, 6);
bgGraph.setOnLongClickListener(new View.OnLongClickListener() {
@Override
public boolean onLongClick(View v) {
rangeToDisplay += 6;
rangeToDisplay = rangeToDisplay > 24 ? 6 : rangeToDisplay;
SP.putInt(R.string.key_rangetodisplay, rangeToDisplay);
updateGUI("rangeChange");
return false;
}
});
setupChartMenu(view);
lockScreen = (CheckBox) view.findViewById(R.id.overview_lockscreen);
if (lockScreen != null) {
lockScreen.setChecked(SP.getBoolean("lockscreen", false));
lockScreen.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
@Override
public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) {
SP.putBoolean("lockscreen", isChecked);
MainApp.bus().post(new EventSetWakeLock(isChecked));
}
});
}
return view;
} catch (Exception e) {
FabricPrivacy.logException(e);
log.debug("Runtime Exception", e);
}
return null;
}
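// Builds the popup menu behind the chart button that toggles which graph series
// (predictions, basals, IOB, COB, deviations, sensitivity, deviation slope) are shown;
// the choices are persisted via SP and a GUI update is scheduled when they change.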
private void setupChartMenu(View view) {
chartButton = (ImageButton) view.findViewById(R.id.overview_chartMenuButton);
chartButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
final LoopPlugin.LastRun finalLastRun = LoopPlugin.lastRun;
final boolean predictionsAvailable = finalLastRun != null && finalLastRun.request.hasPredictions;
MenuItem item;
CharSequence title;
SpannableString s;
PopupMenu popup = new PopupMenu(v.getContext(), v);
if (predictionsAvailable) {
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.PRE.ordinal(), Menu.NONE, "Predictions");
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.prediction, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showprediction", true));
}
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.BAS.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_basals));
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.basal, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showbasals", true));
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.IOB.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_iob));
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.iob, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showiob", true));
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.COB.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_cob));
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.cob, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showcob", true));
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.DEV.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_deviations));
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.deviations, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showdeviations", false));
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.SEN.ordinal(), Menu.NONE, MainApp.gs(R.string.overview_show_sensitivity));
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.ratio, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showratios", false));
if (MainApp.devBranch) {
item = popup.getMenu().add(Menu.NONE, CHARTTYPE.DEVSLOPE.ordinal(), Menu.NONE, "Deviation slope");
title = item.getTitle();
s = new SpannableString(title);
s.setSpan(new ForegroundColorSpan(ResourcesCompat.getColor(getResources(), R.color.devslopepos, null)), 0, s.length(), 0);
item.setTitle(s);
item.setCheckable(true);
item.setChecked(SP.getBoolean("showdevslope", false));
}
popup.setOnMenuItemClickListener(new PopupMenu.OnMenuItemClickListener() {
@Override
public boolean onMenuItemClick(MenuItem item) {
if (item.getItemId() == CHARTTYPE.PRE.ordinal()) {
SP.putBoolean("showprediction", !item.isChecked());
} else if (item.getItemId() == CHARTTYPE.BAS.ordinal()) {
SP.putBoolean("showbasals", !item.isChecked());
} else if (item.getItemId() == CHARTTYPE.IOB.ordinal()) {
SP.putBoolean("showiob", !item.isChecked());
} else if (item.getItemId() == CHARTTYPE.COB.ordinal()) {
SP.putBoolean("showcob", !item.isChecked());
} else if (item.getItemId() == CHARTTYPE.DEV.ordinal()) {
SP.putBoolean("showdeviations", !item.isChecked());
} else if (item.getItemId() == CHARTTYPE.SEN.ordinal()) {
SP.putBoolean("showratios", !item.isChecked());
} else if (item.getItemId() == CHARTTYPE.DEVSLOPE.ordinal()) {
SP.putBoolean("showdevslope", !item.isChecked());
}
scheduleUpdateGUI("onGraphCheckboxesCheckedChanged");
return true;
}
});
chartButton.setImageResource(R.drawable.ic_arrow_drop_up_white_24dp);
popup.setOnDismissListener(new PopupMenu.OnDismissListener() {
@Override
public void onDismiss(PopupMenu menu) {
chartButton.setImageResource(R.drawable.ic_arrow_drop_down_white_24dp);
}
});
popup.show();
}
});
}
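// Context menu for the APS mode and active profile labels: offers enable/disable,
// suspend and pump disconnect actions for the loop, and view/switch actions for the profile.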
@Override
public void onCreateContextMenu(ContextMenu menu, View v, ContextMenu.ContextMenuInfo menuInfo) {
super.onCreateContextMenu(menu, v, menuInfo);
if (v == apsModeView) {
final LoopPlugin loopPlugin = LoopPlugin.getPlugin();
final PumpDescription pumpDescription = ConfigBuilderPlugin.getActivePump().getPumpDescription();
if (loopPlugin == null || !MainApp.getConfigBuilder().isProfileValid("ContextMenuCreation"))
return;
menu.setHeaderTitle(MainApp.gs(R.string.loop));
if (loopPlugin.isEnabled(PluginType.LOOP)) {
menu.add(MainApp.gs(R.string.disableloop));
if (!loopPlugin.isSuspended()) {
menu.add(MainApp.gs(R.string.suspendloopfor1h));
menu.add(MainApp.gs(R.string.suspendloopfor2h));
menu.add(MainApp.gs(R.string.suspendloopfor3h));
menu.add(MainApp.gs(R.string.suspendloopfor10h));
if (pumpDescription.tempDurationStep15mAllowed)
menu.add(MainApp.gs(R.string.disconnectpumpfor15m));
if (pumpDescription.tempDurationStep30mAllowed)
menu.add(MainApp.gs(R.string.disconnectpumpfor30m));
menu.add(MainApp.gs(R.string.disconnectpumpfor1h));
menu.add(MainApp.gs(R.string.disconnectpumpfor2h));
menu.add(MainApp.gs(R.string.disconnectpumpfor3h));
} else {
menu.add(MainApp.gs(R.string.resume));
}
}
if (!loopPlugin.isEnabled(PluginType.LOOP))
menu.add(MainApp.gs(R.string.enableloop));
} else if (v == activeProfileView) {
menu.setHeaderTitle(MainApp.gs(R.string.profile));
menu.add(MainApp.gs(R.string.danar_viewprofile));
if (MainApp.getConfigBuilder().getActiveProfileInterface().getProfile() != null) {
menu.add(MainApp.gs(R.string.careportal_profileswitch));
}
}
}
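// Handles the selection made in the context menu above: enables/disables or suspends
// the loop, disconnects the pump for the chosen duration, or opens the profile
// switch/viewer dialogs.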
@Override
public boolean onContextItemSelected(MenuItem item) {
final Profile profile = MainApp.getConfigBuilder().getProfile();
if (profile == null)
return true;
final LoopPlugin loopPlugin = LoopPlugin.getPlugin();
if (item.getTitle().equals(MainApp.gs(R.string.disableloop))) {
loopPlugin.setPluginEnabled(PluginType.LOOP, false);
loopPlugin.setFragmentVisible(PluginType.LOOP, false);
MainApp.getConfigBuilder().storeSettings("DisablingLoop");
updateGUI("suspendmenu");
ConfigBuilderPlugin.getCommandQueue().cancelTempBasal(true, new Callback() {
@Override
public void run() {
if (!result.success) {
ToastUtils.showToastInUiThread(MainApp.instance().getApplicationContext(), MainApp.gs(R.string.tempbasaldeliveryerror));
}
}
});
NSUpload.uploadOpenAPSOffline(24 * 60); // upload 24h, we don't know real duration
return true;
} else if (item.getTitle().equals(MainApp.gs(R.string.enableloop))) {
loopPlugin.setPluginEnabled(PluginType.LOOP, true);
loopPlugin.setFragmentVisible(PluginType.LOOP, true);
MainApp.getConfigBuilder().storeSettings("EnablingLoop");
updateGUI("suspendmenu");
NSUpload.uploadOpenAPSOffline(0);
return true;
} else if (item.getTitle().equals(MainApp.gs(R.string.resume))) {
loopPlugin.suspendTo(0L);
updateGUI("suspendmenu");
ConfigBuilderPlugin.getCommandQueue().cancelTempBasal(true, new Callback() {
@Override
public void run() {
if (!result.success) {
ToastUtils.showToastInUiThread(MainApp.instance().getApplicationContext(), MainApp.gs(R.string.tempbasaldeliveryerror));
}
}
});
NSUpload.uploadOpenAPSOffline(0);
return true;
} else if (item.getTitle().equals(MainApp.gs(R.string.suspendloopfor1h))) {
MainApp.getConfigBuilder().suspendLoop(60);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.gs(R.string.suspendloopfor2h))) {
MainApp.getConfigBuilder().suspendLoop(120);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.gs(R.string.suspendloopfor3h))) {
MainApp.getConfigBuilder().suspendLoop(180);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.gs(R.string.suspendloopfor10h))) {
MainApp.getConfigBuilder().suspendLoop(600);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.gs(R.string.disconnectpumpfor15m))) {
MainApp.getConfigBuilder().disconnectPump(15, profile);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.gs(R.string.disconnectpumpfor30m))) {
MainApp.getConfigBuilder().disconnectPump(30, profile);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.gs(R.string.disconnectpumpfor1h))) {
MainApp.getConfigBuilder().disconnectPump(60, profile);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.gs(R.string.disconnectpumpfor2h))) {
MainApp.getConfigBuilder().disconnectPump(120, profile);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.gs(R.string.disconnectpumpfor3h))) {
MainApp.getConfigBuilder().disconnectPump(180, profile);
updateGUI("suspendmenu");
return true;
} else if (item.getTitle().equals(MainApp.gs(R.string.careportal_profileswitch))) {
NewNSTreatmentDialog newDialog = new NewNSTreatmentDialog();
final OptionsToShow profileswitch = CareportalFragment.PROFILESWITCHDIRECT;
profileswitch.executeProfileSwitch = true;
newDialog.setOptions(profileswitch, R.string.careportal_profileswitch);
newDialog.show(getFragmentManager(), "NewNSTreatmentDialog");
} else if (item.getTitle().equals(MainApp.gs(R.string.danar_viewprofile))) {
ProfileViewerDialog pvd = ProfileViewerDialog.newInstance(System.currentTimeMillis());
FragmentManager manager = getFragmentManager();
pvd.show(manager, "ProfileViewDialog");
}
return super.onContextItemSelected(item);
}
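// Routes clicks from the overview buttons to the matching dialog or action
// (accept temp, quick wizard, bolus wizard, calibration, CGM app, treatment,
// insulin, carbs, pump status refresh).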
@Override
public void onClick(View v) {
boolean xdrip = MainApp.getSpecificPlugin(SourceXdripPlugin.class) != null && MainApp.getSpecificPlugin(SourceXdripPlugin.class).isEnabled(PluginType.BGSOURCE);
boolean g5 = MainApp.getSpecificPlugin(SourceDexcomG5Plugin.class) != null && MainApp.getSpecificPlugin(SourceDexcomG5Plugin.class).isEnabled(PluginType.BGSOURCE);
String units = MainApp.getConfigBuilder().getProfileUnits();
FragmentManager manager = getFragmentManager();
switch (v.getId()) {
case R.id.overview_accepttempbutton:
onClickAcceptTemp();
break;
case R.id.overview_quickwizardbutton:
onClickQuickwizard();
break;
case R.id.overview_wizardbutton:
WizardDialog wizardDialog = new WizardDialog();
wizardDialog.show(manager, "WizardDialog");
break;
case R.id.overview_calibrationbutton:
if (xdrip) {
CalibrationDialog calibrationDialog = new CalibrationDialog();
calibrationDialog.show(manager, "CalibrationDialog");
} else if (g5) {
try {
Intent i = new Intent("com.dexcom.cgm.activities.MeterEntryActivity");
startActivity(i);
} catch (ActivityNotFoundException e) {
ToastUtils.showToastInUiThread(getActivity(), MainApp.gs(R.string.g5appnotdetected));
}
}
break;
case R.id.overview_cgmbutton:
if (xdrip)
openCgmApp("com.eveningoutpost.dexdrip");
else if (g5 && units.equals(Constants.MGDL))
openCgmApp("com.dexcom.cgm.region5.mgdl");
else if (g5 && units.equals(Constants.MMOL))
openCgmApp("com.dexcom.cgm.region5.mmol");
break;
case R.id.overview_treatmentbutton:
NewTreatmentDialog treatmentDialogFragment = new NewTreatmentDialog();
treatmentDialogFragment.show(manager, "TreatmentDialog");
break;
case R.id.overview_insulinbutton:
new NewInsulinDialog().show(manager, "InsulinDialog");
break;
case R.id.overview_carbsbutton:
new NewCarbsDialog().show(manager, "CarbsDialog");
break;
case R.id.overview_pumpstatus:
if (ConfigBuilderPlugin.getActivePump().isSuspended() || !ConfigBuilderPlugin.getActivePump().isInitialized())
ConfigBuilderPlugin.getCommandQueue().readStatus("RefreshClicked", null);
break;
}
}
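// Attempts to launch the CGM application identified by packageName; shows an
// error dialog when no launch intent is available.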
public boolean openCgmApp(String packageName) {
PackageManager packageManager = getContext().getPackageManager();
try {
Intent intent = packageManager.getLaunchIntentForPackage(packageName);
if (intent == null) {
throw new ActivityNotFoundException();
}
intent.addCategory(Intent.CATEGORY_LAUNCHER);
getContext().startActivity(intent);
return true;
} catch (ActivityNotFoundException e) {
new AlertDialog.Builder(getContext())
.setMessage(R.string.error_starting_cgm)
.setPositiveButton("OK", null)
.show();
return false;
}
}
@Override
public boolean onLongClick(View v) {
switch (v.getId()) {
case R.id.overview_quickwizardbutton:
Intent i = new Intent(v.getContext(), QuickWizardListActivity.class);
startActivity(i);
return true;
}
return false;
}
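// Shows a confirmation dialog for the last open-loop suggestion and, when confirmed,
// applies the temp basal request, clears the notification and counts the manual
// enact for the Objectives plugin.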
private void onClickAcceptTemp() {
Profile profile = MainApp.getConfigBuilder().getProfile();
if (LoopPlugin.getPlugin().isEnabled(PluginType.LOOP) && profile != null) {
LoopPlugin.getPlugin().invoke("Accept temp button", false);
final LoopPlugin.LastRun finalLastRun = LoopPlugin.lastRun;
if (finalLastRun != null && finalLastRun.lastAPSRun != null && finalLastRun.constraintsProcessed.isChangeRequested()) {
AlertDialog.Builder builder = new AlertDialog.Builder(getContext());
builder.setTitle(MainApp.gs(R.string.confirmation));
builder.setMessage(MainApp.gs(R.string.setbasalquestion) + "\n" + finalLastRun.constraintsProcessed);
builder.setPositiveButton(MainApp.gs(R.string.ok), (dialog, id) -> {
hideTempRecommendation();
clearNotification();
MainApp.getConfigBuilder().applyTBRRequest(finalLastRun.constraintsProcessed, profile, new Callback() {
@Override
public void run() {
if (result.enacted) {
finalLastRun.tbrSetByPump = result;
finalLastRun.lastEnact = new Date();
finalLastRun.lastOpenModeAccept = new Date();
NSUpload.uploadDeviceStatus();
ObjectivesPlugin objectivesPlugin = MainApp.getSpecificPlugin(ObjectivesPlugin.class);
if (objectivesPlugin != null) {
ObjectivesPlugin.manualEnacts++;
ObjectivesPlugin.saveProgress();
}
}
scheduleUpdateGUI("onClickAcceptTemp");
}
});
FabricPrivacy.getInstance().logCustom(new CustomEvent("AcceptTemp"));
});
builder.setNegativeButton(MainApp.gs(R.string.cancel), null);
builder.show();
}
}
}
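// Executes the active QuickWizard entry: recalculates the bolus from the current BG,
// profile and temp target, applies bolus/carbs constraints, asks for confirmation and
// then queues the bolus (plus a super bolus temp basal when configured).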
void onClickQuickwizard() {
final BgReading actualBg = DatabaseHelper.actualBg();
final Profile profile = MainApp.getConfigBuilder().getProfile();
final TempTarget tempTarget = TreatmentsPlugin.getPlugin().getTempTargetFromHistory();
final QuickWizardEntry quickWizardEntry = OverviewPlugin.getPlugin().quickWizard.getActive();
if (quickWizardEntry != null && actualBg != null && profile != null) {
quickWizardButton.setVisibility(View.VISIBLE);
final BolusWizard wizard = quickWizardEntry.doCalc(profile, tempTarget, actualBg, true);
final JSONObject boluscalcJSON = new JSONObject();
try {
boluscalcJSON.put("eventTime", DateUtil.toISOString(new Date()));
boluscalcJSON.put("targetBGLow", wizard.targetBGLow);
boluscalcJSON.put("targetBGHigh", wizard.targetBGHigh);
boluscalcJSON.put("isf", wizard.sens);
boluscalcJSON.put("ic", wizard.ic);
boluscalcJSON.put("iob", -(wizard.insulingFromBolusIOB + wizard.insulingFromBasalsIOB));
boluscalcJSON.put("bolusiobused", true);
boluscalcJSON.put("basaliobused", true);
boluscalcJSON.put("bg", actualBg.valueToUnits(profile.getUnits()));
boluscalcJSON.put("insulinbg", wizard.insulinFromBG);
boluscalcJSON.put("insulinbgused", true);
boluscalcJSON.put("bgdiff", wizard.bgDiff);
boluscalcJSON.put("insulincarbs", wizard.insulinFromCarbs);
boluscalcJSON.put("carbs", quickWizardEntry.carbs());
boluscalcJSON.put("othercorrection", 0d);
boluscalcJSON.put("insulintrend", wizard.insulinFromTrend);
boluscalcJSON.put("insulin", wizard.calculatedTotalInsulin);
} catch (JSONException e) {
log.error("Unhandled exception", e);
}
if (wizard.calculatedTotalInsulin > 0d && quickWizardEntry.carbs() > 0d) {
DecimalFormat formatNumber2decimalplaces = new DecimalFormat("0.00");
String confirmMessage = MainApp.gs(R.string.entertreatmentquestion);
Double insulinAfterConstraints = MainApp.getConstraintChecker().applyBolusConstraints(new Constraint<>(wizard.calculatedTotalInsulin)).value();
Integer carbsAfterConstraints = MainApp.getConstraintChecker().applyCarbsConstraints(new Constraint<>(quickWizardEntry.carbs())).value();
confirmMessage += "\n" + MainApp.gs(R.string.bolus) + ": " + formatNumber2decimalplaces.format(insulinAfterConstraints) + "U";
confirmMessage += "\n" + MainApp.gs(R.string.carbs) + ": " + carbsAfterConstraints + "g";
if (!insulinAfterConstraints.equals(wizard.calculatedTotalInsulin) || !carbsAfterConstraints.equals(quickWizardEntry.carbs())) {
AlertDialog.Builder builder = new AlertDialog.Builder(getContext());
builder.setTitle(MainApp.gs(R.string.treatmentdeliveryerror));
builder.setMessage(MainApp.gs(R.string.constraints_violation) + "\n" + MainApp.gs(R.string.changeyourinput));
builder.setPositiveButton(MainApp.gs(R.string.ok), null);
builder.show();
return;
}
final Double finalInsulinAfterConstraints = insulinAfterConstraints;
final Integer finalCarbsAfterConstraints = carbsAfterConstraints;
final Context context = getContext();
final AlertDialog.Builder builder = new AlertDialog.Builder(context);
accepted = false;
builder.setTitle(MainApp.gs(R.string.confirmation));
builder.setMessage(confirmMessage);
builder.setPositiveButton(MainApp.gs(R.string.ok), (dialog, id) -> {
synchronized (builder) {
if (accepted) {
log.debug("guarding: already accepted");
return;
}
accepted = true;
if (finalInsulinAfterConstraints > 0 || finalCarbsAfterConstraints > 0) {
if (wizard.superBolus) {
final LoopPlugin loopPlugin = LoopPlugin.getPlugin();
if (loopPlugin.isEnabled(PluginType.LOOP)) {
loopPlugin.superBolusTo(System.currentTimeMillis() + 2 * 60L * 60 * 1000);
MainApp.bus().post(new EventRefreshOverview("WizardDialog"));
}
ConfigBuilderPlugin.getCommandQueue().tempBasalPercent(0, 120, true, profile, new Callback() {
@Override
public void run() {
if (!result.success) {
Intent i = new Intent(MainApp.instance(), ErrorHelperActivity.class);
i.putExtra("soundid", R.raw.boluserror);
i.putExtra("status", result.comment);
i.putExtra("title", MainApp.gs(R.string.tempbasaldeliveryerror));
i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
MainApp.instance().startActivity(i);
}
}
});
}
DetailedBolusInfo detailedBolusInfo = new DetailedBolusInfo();
detailedBolusInfo.eventType = CareportalEvent.BOLUSWIZARD;
detailedBolusInfo.insulin = finalInsulinAfterConstraints;
detailedBolusInfo.carbs = finalCarbsAfterConstraints;
detailedBolusInfo.context = context;
detailedBolusInfo.boluscalc = boluscalcJSON;
detailedBolusInfo.source = Source.USER;
if (finalInsulinAfterConstraints > 0 || ConfigBuilderPlugin.getActivePump().getPumpDescription().storesCarbInfo) {
ConfigBuilderPlugin.getCommandQueue().bolus(detailedBolusInfo, new Callback() {
@Override
public void run() {
if (!result.success) {
Intent i = new Intent(MainApp.instance(), ErrorHelperActivity.class);
i.putExtra("soundid", R.raw.boluserror);
i.putExtra("status", result.comment);
i.putExtra("title", MainApp.gs(R.string.treatmentdeliveryerror));
i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
MainApp.instance().startActivity(i);
}
}
});
} else {
TreatmentsPlugin.getPlugin().addToHistoryTreatment(detailedBolusInfo);
}
FabricPrivacy.getInstance().logCustom(new CustomEvent("QuickWizard"));
}
}
});
builder.setNegativeButton(MainApp.gs(R.string.cancel), null);
builder.show();
}
}
}
@Override
public void onPause() {
super.onPause();
MainApp.bus().unregister(this);
sLoopHandler.removeCallbacksAndMessages(null);
unregisterForContextMenu(apsModeView);
unregisterForContextMenu(activeProfileView);
}
@Override
public void onResume() {
super.onResume();
MainApp.bus().register(this);
sRefreshLoop = () -> {
scheduleUpdateGUI("refreshLoop");
sLoopHandler.postDelayed(sRefreshLoop, 60 * 1000L);
};
sLoopHandler.postDelayed(sRefreshLoop, 60 * 1000L);
registerForContextMenu(apsModeView);
registerForContextMenu(activeProfileView);
updateGUI("onResume");
}
@Subscribe
public void onStatusEvent(final EventInitializationChanged ev) {
scheduleUpdateGUI("EventInitializationChanged");
}
@Subscribe
public void onStatusEvent(final EventPreferenceChange ev) {
scheduleUpdateGUI("EventPreferenceChange");
}
@Subscribe
public void onStatusEvent(final EventRefreshOverview ev) {
scheduleUpdateGUI(ev.from);
}
@Subscribe
public void onStatusEvent(final EventAutosensCalculationFinished ev) {
scheduleUpdateGUI("EventAutosensCalculationFinished");
}
@Subscribe
public void onStatusEvent(final EventTreatmentChange ev) {
scheduleUpdateGUI("EventTreatmentChange");
}
@Subscribe
public void onStatusEvent(final EventCareportalEventChange ev) {
scheduleUpdateGUI("EventCareportalEventChange");
}
@Subscribe
public void onStatusEvent(final EventTempBasalChange ev) {
scheduleUpdateGUI("EventTempBasalChange");
}
@Subscribe
public void onStatusEvent(final EventExtendedBolusChange ev) {
scheduleUpdateGUI("EventExtendedBolusChange");
}
@Subscribe
public void onStatusEvent(final EventNewOpenLoopNotification ev) {
scheduleUpdateGUI("EventNewOpenLoopNotification");
}
@Subscribe
public void onStatusEvent(final EventTempTargetChange ev) {
scheduleUpdateGUI("EventTempTargetChange");
}
@Subscribe
public void onStatusEvent(final EventProfileSwitchChange ev) {
scheduleUpdateGUI("EventProfileSwitchChange");
}
@Subscribe
public void onStatusEvent(final EventPumpStatusChanged s) {
Activity activity = getActivity();
if (activity != null)
activity.runOnUiThread(() -> updatePumpStatus(s.textStatus()));
}
@Subscribe
public void onStatusEvent(final EventIobCalculationProgress e) {
Activity activity = getActivity();
if (activity != null)
activity.runOnUiThread(() -> {
if (iobCalculationProgressView != null)
iobCalculationProgressView.setText(e.progress);
});
}
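// Hides the accept-temp prompt on the UI thread.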
private void hideTempRecommendation() {
Activity activity = getActivity();
if (activity != null)
activity.runOnUiThread(() -> {
if (acceptTempLayout != null)
acceptTempLayout.setVisibility(View.GONE);
});
}
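// Cancels the system notification identified by Constants.notificationID.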
private void clearNotification() {
NotificationManager notificationManager =
(NotificationManager) MainApp.instance().getSystemService(Context.NOTIFICATION_SERVICE);
notificationManager.cancel(Constants.notificationID);
}
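// Shows the pump status banner while a non-empty status text is reported,
// otherwise shows the loop status row again.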
private void updatePumpStatus(String status) {
if (!status.equals("")) {
pumpStatusView.setText(status);
pumpStatusLayout.setVisibility(View.VISIBLE);
loopStatusLayout.setVisibility(View.GONE);
} else {
pumpStatusLayout.setVisibility(View.GONE);
loopStatusLayout.setVisibility(View.VISIBLE);
}
}
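// Debounces GUI refreshes: cancels any pending update and schedules a new one on the
// background worker so bursts of events result in a single updateGUI() call.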
public void scheduleUpdateGUI(final String from) {
class UpdateRunnable implements Runnable {
public void run() {
Activity activity = getActivity();
if (activity != null)
activity.runOnUiThread(() -> {
updateGUI(from);
scheduledUpdate = null;
});
}
}
// prepare task for execution in 500 msec
// cancel waiting task to prevent multiple updates
if (scheduledUpdate != null)
scheduledUpdate.cancel(false);
Runnable task = new UpdateRunnable();
final int msec = 500;
scheduledUpdate = worker.schedule(task, msec, TimeUnit.MILLISECONDS);
}
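// Full refresh of the overview screen: BG and deltas, loop/APS state, temp target,
// basal and extended bolus, IOB/COB, buttons, device statuses from NS and both graphs
// (graph data is prepared on a background thread).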
@SuppressLint("SetTextI18n")
public void updateGUI(final String from) {
log.debug("updateGUI entered from: " + from);
final Date updateGUIStart = new Date();
if (getActivity() == null)
return;
if (timeView != null) { // may not exist in all layouts
timeView.setText(DateUtil.timeString(new Date()));
}
if (!MainApp.getConfigBuilder().isProfileValid("Overview")) {
pumpStatusView.setText(R.string.noprofileset);
pumpStatusLayout.setVisibility(View.VISIBLE);
loopStatusLayout.setVisibility(View.GONE);
return;
}
pumpStatusLayout.setVisibility(View.GONE);
loopStatusLayout.setVisibility(View.VISIBLE);
updateNotifications();
CareportalFragment.updateAge(getActivity(), sage, iage, cage, pbage);
BgReading actualBG = DatabaseHelper.actualBg();
BgReading lastBG = DatabaseHelper.lastBg();
final PumpInterface pump = ConfigBuilderPlugin.getActivePump();
final Profile profile = MainApp.getConfigBuilder().getProfile();
final String units = profile.getUnits();
final double lowLine = OverviewPlugin.getPlugin().determineLowLine(units);
final double highLine = OverviewPlugin.getPlugin().determineHighLine(units);
//Start with updating the BG as it is unaffected by loop.
// **** BG value ****
if (lastBG != null) {
int color = MainApp.sResources.getColor(R.color.inrange);
if (lastBG.valueToUnits(units) < lowLine)
color = MainApp.sResources.getColor(R.color.low);
else if (lastBG.valueToUnits(units) > highLine)
color = MainApp.sResources.getColor(R.color.high);
bgView.setText(lastBG.valueToUnitsToString(units));
arrowView.setText(lastBG.directionToSymbol());
bgView.setTextColor(color);
arrowView.setTextColor(color);
GlucoseStatus glucoseStatus = GlucoseStatus.getGlucoseStatusData();
if (glucoseStatus != null) {
deltaView.setText("Δ " + Profile.toUnitsString(glucoseStatus.delta, glucoseStatus.delta * Constants.MGDL_TO_MMOLL, units) + " " + units);
if (avgdeltaView != null)
avgdeltaView.setText("øΔ15m: " + Profile.toUnitsString(glucoseStatus.short_avgdelta, glucoseStatus.short_avgdelta * Constants.MGDL_TO_MMOLL, units) +
" øΔ40m: " + Profile.toUnitsString(glucoseStatus.long_avgdelta, glucoseStatus.long_avgdelta * Constants.MGDL_TO_MMOLL, units));
} else {
deltaView.setText("Δ " + MainApp.gs(R.string.notavailable));
if (avgdeltaView != null)
avgdeltaView.setText("");
}
}
Constraint<Boolean> closedLoopEnabled = MainApp.getConstraintChecker().isClosedLoopAllowed();
// open loop mode
final LoopPlugin.LastRun finalLastRun = LoopPlugin.lastRun;
if (Config.APS && pump.getPumpDescription().isTempBasalCapable) {
apsModeView.setVisibility(View.VISIBLE);
apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.loopenabled));
apsModeView.setTextColor(Color.BLACK);
final LoopPlugin loopPlugin = LoopPlugin.getPlugin();
if (loopPlugin.isEnabled(PluginType.LOOP) && loopPlugin.isSuperBolus()) {
apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.looppumpsuspended));
apsModeView.setText(String.format(MainApp.gs(R.string.loopsuperbolusfor), loopPlugin.minutesToEndOfSuspend()));
apsModeView.setTextColor(Color.WHITE);
} else if (loopPlugin.isEnabled(PluginType.LOOP) && loopPlugin.isDisconnected()) {
apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.looppumpsuspended));
apsModeView.setText(String.format(MainApp.gs(R.string.loopdisconnectedfor), loopPlugin.minutesToEndOfSuspend()));
apsModeView.setTextColor(Color.WHITE);
} else if (loopPlugin.isEnabled(PluginType.LOOP) && loopPlugin.isSuspended()) {
apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.looppumpsuspended));
apsModeView.setText(String.format(MainApp.gs(R.string.loopsuspendedfor), loopPlugin.minutesToEndOfSuspend()));
apsModeView.setTextColor(Color.WHITE);
} else if (pump.isSuspended()) {
apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.looppumpsuspended));
apsModeView.setText(MainApp.gs(R.string.pumpsuspended));
apsModeView.setTextColor(Color.WHITE);
} else if (loopPlugin.isEnabled(PluginType.LOOP)) {
if (closedLoopEnabled.value()) {
apsModeView.setText(MainApp.gs(R.string.closedloop));
} else {
apsModeView.setText(MainApp.gs(R.string.openloop));
}
} else {
apsModeView.setBackgroundColor(MainApp.sResources.getColor(R.color.loopdisabled));
apsModeView.setText(MainApp.gs(R.string.disabledloop));
apsModeView.setTextColor(Color.WHITE);
}
} else {
apsModeView.setVisibility(View.GONE);
}
// temp target
TempTarget tempTarget = TreatmentsPlugin.getPlugin().getTempTargetFromHistory();
if (tempTarget != null) {
tempTargetView.setTextColor(Color.BLACK);
tempTargetView.setBackgroundColor(MainApp.sResources.getColor(R.color.tempTargetBackground));
tempTargetView.setVisibility(View.VISIBLE);
tempTargetView.setText(Profile.toTargetRangeString(tempTarget.low, tempTarget.high, Constants.MGDL, units) + " " + DateUtil.untilString(tempTarget.end()));
} else {
tempTargetView.setTextColor(Color.WHITE);
tempTargetView.setBackgroundColor(MainApp.sResources.getColor(R.color.tempTargetDisabledBackground));
tempTargetView.setText(Profile.toTargetRangeString(profile.getTargetLow(), profile.getTargetHigh(), units, units));
tempTargetView.setVisibility(View.VISIBLE);
}
// **** Temp button ****
if (acceptTempLayout != null) {
boolean showAcceptButton = !closedLoopEnabled.value(); // Open mode needed
showAcceptButton = showAcceptButton && finalLastRun != null && finalLastRun.lastAPSRun != null; // aps result must exist
showAcceptButton = showAcceptButton && (finalLastRun.lastOpenModeAccept == null || finalLastRun.lastOpenModeAccept.getTime() < finalLastRun.lastAPSRun.getTime()); // never accepted or before last result
showAcceptButton = showAcceptButton && finalLastRun.constraintsProcessed.isChangeRequested(); // change is requested
if (showAcceptButton && pump.isInitialized() && !pump.isSuspended() && LoopPlugin.getPlugin().isEnabled(PluginType.LOOP)) {
acceptTempLayout.setVisibility(View.VISIBLE);
acceptTempButton.setText(MainApp.gs(R.string.setbasalquestion) + "\n" + finalLastRun.constraintsProcessed);
} else {
acceptTempLayout.setVisibility(View.GONE);
}
}
// **** Calibration & CGM buttons ****
boolean xDripIsBgSource = MainApp.getSpecificPlugin(SourceXdripPlugin.class) != null && MainApp.getSpecificPlugin(SourceXdripPlugin.class).isEnabled(PluginType.BGSOURCE);
boolean g5IsBgSource = MainApp.getSpecificPlugin(SourceDexcomG5Plugin.class) != null && MainApp.getSpecificPlugin(SourceDexcomG5Plugin.class).isEnabled(PluginType.BGSOURCE);
boolean bgAvailable = DatabaseHelper.actualBg() != null;
if (calibrationButton != null) {
if ((xDripIsBgSource || g5IsBgSource) && bgAvailable && SP.getBoolean(R.string.key_show_calibration_button, true)) {
calibrationButton.setVisibility(View.VISIBLE);
} else {
calibrationButton.setVisibility(View.GONE);
}
}
if (cgmButton != null) {
if (xDripIsBgSource && SP.getBoolean(R.string.key_show_cgm_button, false)) {
cgmButton.setVisibility(View.VISIBLE);
} else if (g5IsBgSource && SP.getBoolean(R.string.key_show_cgm_button, false)) {
cgmButton.setVisibility(View.VISIBLE);
} else {
cgmButton.setVisibility(View.GONE);
}
}
final TemporaryBasal activeTemp = TreatmentsPlugin.getPlugin().getTempBasalFromHistory(System.currentTimeMillis());
String basalText = "";
if (shorttextmode) {
if (activeTemp != null) {
basalText = "T: " + activeTemp.toStringVeryShort();
} else {
basalText = DecimalFormatter.to2Decimal(profile.getBasal()) + "U/h";
}
baseBasalView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
String fullText = MainApp.gs(R.string.pump_basebasalrate_label) + ": " + DecimalFormatter.to2Decimal(profile.getBasal()) + "U/h\n";
if (activeTemp != null) {
fullText += MainApp.gs(R.string.pump_tempbasal_label) + ": " + activeTemp.toStringFull();
}
OKDialog.show(getActivity(), MainApp.gs(R.string.basal), fullText, null);
}
});
} else {
if (activeTemp != null) {
basalText = activeTemp.toStringFull() + " ";
}
if (Config.NSCLIENT || Config.G5UPLOADER)
basalText += "(" + DecimalFormatter.to2Decimal(profile.getBasal()) + " U/h)";
else if (pump.getPumpDescription().isTempBasalCapable) {
basalText += "(" + DecimalFormatter.to2Decimal(pump.getBaseBasalRate()) + "U/h)";
}
}
if (activeTemp != null) {
baseBasalView.setTextColor(MainApp.sResources.getColor(R.color.basal));
} else {
baseBasalView.setTextColor(Color.WHITE);
}
baseBasalView.setText(basalText);
final ExtendedBolus extendedBolus = TreatmentsPlugin.getPlugin().getExtendedBolusFromHistory(System.currentTimeMillis());
String extendedBolusText = "";
if (extendedBolusView != null) { // may not exist in all layouts
if (shorttextmode) {
if (extendedBolus != null && !pump.isFakingTempsByExtendedBoluses()) {
extendedBolusText = DecimalFormatter.to2Decimal(extendedBolus.absoluteRate()) + "U/h";
}
extendedBolusView.setText(extendedBolusText);
extendedBolusView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
OKDialog.show(getActivity(), MainApp.gs(R.string.extendedbolus), extendedBolus.toString(), null);
}
});
} else {
if (extendedBolus != null && !pump.isFakingTempsByExtendedBoluses()) {
extendedBolusText = extendedBolus.toString();
}
extendedBolusView.setText(extendedBolusText);
}
if (extendedBolusText.equals(""))
extendedBolusView.setVisibility(View.GONE);
else
extendedBolusView.setVisibility(View.VISIBLE);
}
activeProfileView.setText(MainApp.getConfigBuilder().getProfileName());
activeProfileView.setBackgroundColor(Color.GRAY);
tempTargetView.setOnLongClickListener(view -> {
view.performHapticFeedback(HapticFeedbackConstants.LONG_PRESS);
NewNSTreatmentDialog newTTDialog = new NewNSTreatmentDialog();
final OptionsToShow temptarget = CareportalFragment.TEMPTARGET;
temptarget.executeTempTarget = true;
newTTDialog.setOptions(temptarget, R.string.careportal_temporarytarget);
newTTDialog.show(getFragmentManager(), "NewNSTreatmentDialog");
return true;
});
tempTargetView.setLongClickable(true);
// QuickWizard button
QuickWizardEntry quickWizardEntry = OverviewPlugin.getPlugin().quickWizard.getActive();
if (quickWizardEntry != null && lastBG != null && pump.isInitialized() && !pump.isSuspended()) {
quickWizardButton.setVisibility(View.VISIBLE);
String text = quickWizardEntry.buttonText() + "\n" + DecimalFormatter.to0Decimal(quickWizardEntry.carbs()) + "g";
BolusWizard wizard = quickWizardEntry.doCalc(profile, tempTarget, lastBG, false);
text += " " + DecimalFormatter.toPumpSupportedBolus(wizard.calculatedTotalInsulin) + "U";
quickWizardButton.setText(text);
if (wizard.calculatedTotalInsulin <= 0)
quickWizardButton.setVisibility(View.GONE);
} else
quickWizardButton.setVisibility(View.GONE);
// **** Various treatment buttons ****
if (carbsButton != null) {
if (SP.getBoolean(R.string.key_show_carbs_button, true)
&& (!ConfigBuilderPlugin.getActivePump().getPumpDescription().storesCarbInfo ||
(pump.isInitialized() && !pump.isSuspended()))) {
carbsButton.setVisibility(View.VISIBLE);
} else {
carbsButton.setVisibility(View.GONE);
}
}
if (pump.isInitialized() && !pump.isSuspended()) {
if (treatmentButton != null) {
if (SP.getBoolean(R.string.key_show_treatment_button, false)) {
treatmentButton.setVisibility(View.VISIBLE);
} else {
treatmentButton.setVisibility(View.GONE);
}
}
if (wizardButton != null) {
if (SP.getBoolean(R.string.key_show_wizard_button, true)) {
wizardButton.setVisibility(View.VISIBLE);
} else {
wizardButton.setVisibility(View.GONE);
}
}
if (insulinButton != null) {
if (SP.getBoolean(R.string.key_show_insulin_button, true)) {
insulinButton.setVisibility(View.VISIBLE);
} else {
insulinButton.setVisibility(View.GONE);
}
}
}
// **** BG value ****
if (lastBG == null) { // exit early when there is no BG reading to display
return;
}
Integer flag = bgView.getPaintFlags();
if (actualBG == null) {
flag |= Paint.STRIKE_THRU_TEXT_FLAG;
} else
flag &= ~Paint.STRIKE_THRU_TEXT_FLAG;
bgView.setPaintFlags(flag);
timeAgoView.setText(DateUtil.minAgo(lastBG.date));
// iob
TreatmentsPlugin.getPlugin().updateTotalIOBTreatments();
TreatmentsPlugin.getPlugin().updateTotalIOBTempBasals();
final IobTotal bolusIob = TreatmentsPlugin.getPlugin().getLastCalculationTreatments().round();
final IobTotal basalIob = TreatmentsPlugin.getPlugin().getLastCalculationTempBasals().round();
if (shorttextmode) {
String iobtext = DecimalFormatter.to2Decimal(bolusIob.iob + basalIob.basaliob) + "U";
iobView.setText(iobtext);
iobView.setOnClickListener(v -> {
String iobtext1 = DecimalFormatter.to2Decimal(bolusIob.iob + basalIob.basaliob) + "U\n"
+ MainApp.gs(R.string.bolus) + ": " + DecimalFormatter.to2Decimal(bolusIob.iob) + "U\n"
+ MainApp.gs(R.string.basal) + ": " + DecimalFormatter.to2Decimal(basalIob.basaliob) + "U\n";
OKDialog.show(getActivity(), MainApp.gs(R.string.iob), iobtext1, null);
});
} else if (MainApp.sResources.getBoolean(R.bool.isTablet)) {
String iobtext = DecimalFormatter.to2Decimal(bolusIob.iob + basalIob.basaliob) + "U ("
+ MainApp.gs(R.string.bolus) + ": " + DecimalFormatter.to2Decimal(bolusIob.iob) + "U "
+ MainApp.gs(R.string.basal) + ": " + DecimalFormatter.to2Decimal(basalIob.basaliob) + "U)";
iobView.setText(iobtext);
} else {
String iobtext = DecimalFormatter.to2Decimal(bolusIob.iob + basalIob.basaliob) + "U ("
+ DecimalFormatter.to2Decimal(bolusIob.iob) + "/"
+ DecimalFormatter.to2Decimal(basalIob.basaliob) + ")";
iobView.setText(iobtext);
}
// cob
if (cobView != null) { // view may not exist in all layouts
String cobText = MainApp.gs(R.string.value_unavailable_short);
CobInfo cobInfo = IobCobCalculatorPlugin.getPlugin().getCobInfo(false, "Overview COB");
if (cobInfo.displayCob != null) {
cobText = DecimalFormatter.to0Decimal(cobInfo.displayCob);
if (cobInfo.futureCarbs > 0)
cobText += "(" + DecimalFormatter.to0Decimal(cobInfo.futureCarbs) + ")";
}
cobView.setText(cobText);
}
final boolean predictionsAvailable = finalLastRun != null && finalLastRun.request.hasPredictions;
// pump status from ns
if (pumpDeviceStatusView != null) {
pumpDeviceStatusView.setText(NSDeviceStatus.getInstance().getPumpStatus());
pumpDeviceStatusView.setOnClickListener(v -> OKDialog.show(getActivity(), MainApp.gs(R.string.pump), NSDeviceStatus.getInstance().getExtendedPumpStatus(), null));
}
// OpenAPS status from ns
if (openapsDeviceStatusView != null) {
openapsDeviceStatusView.setText(NSDeviceStatus.getInstance().getOpenApsStatus());
openapsDeviceStatusView.setOnClickListener(v -> OKDialog.show(getActivity(), MainApp.gs(R.string.openaps), NSDeviceStatus.getInstance().getExtendedOpenApsStatus(), null));
}
// Uploader status from ns
if (uploaderDeviceStatusView != null) {
uploaderDeviceStatusView.setText(NSDeviceStatus.getInstance().getUploaderStatus());
uploaderDeviceStatusView.setOnClickListener(v -> OKDialog.show(getActivity(), MainApp.gs(R.string.uploader), NSDeviceStatus.getInstance().getExtendedUploaderStatus(), null));
}
// ****** GRAPH *******
new Thread(() -> {
// align to the next full hour
Calendar calendar = Calendar.getInstance();
calendar.setTimeInMillis(System.currentTimeMillis());
calendar.set(Calendar.MILLISECOND, 0);
calendar.set(Calendar.SECOND, 0);
calendar.set(Calendar.MINUTE, 0);
calendar.add(Calendar.HOUR, 1);
int hoursToFetch;
final long toTime;
final long fromTime;
final long endTime;
if (predictionsAvailable && SP.getBoolean("showprediction", false)) {
int predHours = (int) Math.ceil((finalLastRun.constraintsProcessed.getLatestPredictionsTime() - System.currentTimeMillis()) / (60.0 * 60 * 1000));
predHours = Math.min(2, predHours);
predHours = Math.max(0, predHours);
hoursToFetch = rangeToDisplay - predHours;
toTime = calendar.getTimeInMillis() + 100000; // little bit more to avoid wrong rounding - Graphview specific
fromTime = toTime - hoursToFetch * 60 * 60 * 1000L;
endTime = toTime + predHours * 60 * 60 * 1000L;
} else {
hoursToFetch = rangeToDisplay;
toTime = calendar.getTimeInMillis() + 100000; // little bit more to avoid wrong rounding - Graphview specific
fromTime = toTime - hoursToFetch * 60 * 60 * 1000L;
endTime = toTime;
}
final long now = System.currentTimeMillis();
// ------------------ 1st graph
Profiler.log(log, from + " - 1st graph - START", updateGUIStart);
final GraphData graphData = new GraphData(bgGraph, IobCobCalculatorPlugin.getPlugin());
// **** In range Area ****
graphData.addInRangeArea(fromTime, endTime, lowLine, highLine);
// **** BG ****
if (predictionsAvailable && SP.getBoolean("showprediction", false))
graphData.addBgReadings(fromTime, toTime, lowLine, highLine,
finalLastRun.constraintsProcessed.getPredictions());
else
graphData.addBgReadings(fromTime, toTime, lowLine, highLine, null);
// set manual x bounds to have nice steps
graphData.formatAxis(fromTime, endTime);
// Treatments
graphData.addTreatments(fromTime, endTime);
// add basal data
if (pump.getPumpDescription().isTempBasalCapable && SP.getBoolean("showbasals", true)) {
graphData.addBasals(fromTime, now, lowLine / graphData.maxY / 1.2d);
}
// add target line
graphData.addTargetLine(fromTime, toTime, profile);
// **** NOW line ****
graphData.addNowLine(now);
// ------------------ 2nd graph
Profiler.log(log, from + " - 2nd graph - START", updateGUIStart);
final GraphData secondGraphData = new GraphData(iobGraph, IobCobCalculatorPlugin.getPlugin());
boolean useIobForScale = false;
boolean useCobForScale = false;
boolean useDevForScale = false;
boolean useRatioForScale = false;
boolean useDSForScale = false;
if (SP.getBoolean("showiob", true)) {
useIobForScale = true;
} else if (SP.getBoolean("showcob", true)) {
useCobForScale = true;
} else if (SP.getBoolean("showdeviations", false)) {
useDevForScale = true;
} else if (SP.getBoolean("showratios", false)) {
useRatioForScale = true;
} else if (SP.getBoolean("showdevslope", false)) {
useDSForScale = true;
}
if (SP.getBoolean("showiob", true))
secondGraphData.addIob(fromTime, now, useIobForScale, 1d);
if (SP.getBoolean("showcob", true))
secondGraphData.addCob(fromTime, now, useCobForScale, useCobForScale ? 1d : 0.5d);
if (SP.getBoolean("showdeviations", false))
secondGraphData.addDeviations(fromTime, now, useDevForScale, 1d);
if (SP.getBoolean("showratios", false))
secondGraphData.addRatio(fromTime, now, useRatioForScale, 1d);
if (SP.getBoolean("showdevslope", false))
secondGraphData.addDeviationSlope(fromTime, now, useDSForScale, 1d);
// **** NOW line ****
// set manual x bounds to have nice steps
secondGraphData.formatAxis(fromTime, endTime);
secondGraphData.addNowLine(now);
// do GUI update
FragmentActivity activity = getActivity();
if (activity != null) {
activity.runOnUiThread(() -> {
if (SP.getBoolean("showiob", true)
|| SP.getBoolean("showcob", true)
|| SP.getBoolean("showdeviations", false)
|| SP.getBoolean("showratios", false)
|| SP.getBoolean("showdevslope", false)) {
iobGraph.setVisibility(View.VISIBLE);
} else {
iobGraph.setVisibility(View.GONE);
}
// finally enforce drawing of graphs
graphData.performUpdate();
secondGraphData.performUpdate();
Profiler.log(log, from + " - onDataChanged", updateGUIStart);
});
}
}).start();
Profiler.log(log, from, updateGUIStart);
}
// Notifications: refresh the notification store and show or hide the list accordingly
void updateNotifications() {
NotificationStore nstore = OverviewPlugin.getPlugin().notificationStore;
nstore.removeExpired();
nstore.unSnooze();
if (nstore.store.size() > 0) {
NotificationRecyclerViewAdapter adapter = new NotificationRecyclerViewAdapter(nstore.store);
notificationsView.setAdapter(adapter);
notificationsView.setVisibility(View.VISIBLE);
} else {
notificationsView.setVisibility(View.GONE);
}
}
}
| 1 | 30,488 | This doesn't change the default of 6h, but will increase the displayed timeframe by 3h for each long press, before wrapping around to 3h. This requires 7 long-presses - quiet a journey ... | MilosKozak-AndroidAPS | java |
@@ -91,6 +91,7 @@ type nodeChainReader interface {
GetTipSetStateRoot(tsKey types.SortedCidSet) (cid.Cid, error)
HeadEvents() *ps.PubSub
Load(context.Context) error
+ SetHead(context.Context, types.TipSet) error
Stop()
}
| 1 | package node
import (
"context"
"encoding/json"
"fmt"
"os"
"sync"
"time"
ps "github.com/cskr/pubsub"
"github.com/ipfs/go-bitswap"
bsnet "github.com/ipfs/go-bitswap/network"
bserv "github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-hamt-ipld"
bstore "github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-exchange-interface"
"github.com/ipfs/go-ipfs-exchange-offline"
offroute "github.com/ipfs/go-ipfs-routing/offline"
logging "github.com/ipfs/go-log"
"github.com/ipfs/go-merkledag"
"github.com/libp2p/go-libp2p"
autonatsvc "github.com/libp2p/go-libp2p-autonat-svc"
circuit "github.com/libp2p/go-libp2p-circuit"
"github.com/libp2p/go-libp2p-host"
"github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p-kad-dht/opts"
p2pmetrics "github.com/libp2p/go-libp2p-metrics"
libp2ppeer "github.com/libp2p/go-libp2p-peer"
dhtprotocol "github.com/libp2p/go-libp2p-protocol"
libp2pps "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p-routing"
rhost "github.com/libp2p/go-libp2p/p2p/host/routed"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/actor/builtin"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/chain"
"github.com/filecoin-project/go-filecoin/config"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/core"
"github.com/filecoin-project/go-filecoin/flags"
"github.com/filecoin-project/go-filecoin/metrics"
"github.com/filecoin-project/go-filecoin/mining"
"github.com/filecoin-project/go-filecoin/net"
"github.com/filecoin-project/go-filecoin/net/pubsub"
"github.com/filecoin-project/go-filecoin/paths"
"github.com/filecoin-project/go-filecoin/plumbing"
"github.com/filecoin-project/go-filecoin/plumbing/cfg"
"github.com/filecoin-project/go-filecoin/plumbing/cst"
"github.com/filecoin-project/go-filecoin/plumbing/dag"
"github.com/filecoin-project/go-filecoin/plumbing/msg"
"github.com/filecoin-project/go-filecoin/plumbing/strgdls"
"github.com/filecoin-project/go-filecoin/porcelain"
"github.com/filecoin-project/go-filecoin/proofs"
"github.com/filecoin-project/go-filecoin/proofs/sectorbuilder"
"github.com/filecoin-project/go-filecoin/protocol/block"
"github.com/filecoin-project/go-filecoin/protocol/hello"
"github.com/filecoin-project/go-filecoin/protocol/retrieval"
"github.com/filecoin-project/go-filecoin/protocol/storage"
"github.com/filecoin-project/go-filecoin/repo"
"github.com/filecoin-project/go-filecoin/sampling"
"github.com/filecoin-project/go-filecoin/state"
"github.com/filecoin-project/go-filecoin/types"
vmerr "github.com/filecoin-project/go-filecoin/vm/errors"
"github.com/filecoin-project/go-filecoin/wallet"
)
const (
filecoinDHTProtocol dhtprotocol.ID = "/fil/kad/1.0.0"
)
var log = logging.Logger("node") // nolint: deadcode
var (
// ErrNoMinerAddress is returned when the node is not configured to have any miner addresses.
ErrNoMinerAddress = errors.New("no miner addresses configured")
)
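// pubSubProcessorFunc is the signature of the handlers used to process incoming pubsub messages.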
type pubSubProcessorFunc func(ctx context.Context, msg pubsub.Message) error
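// nodeChainReader is the subset of the chain store interface the node needs for reading chain state.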
type nodeChainReader interface {
GenesisCid() cid.Cid
GetBlock(context.Context, cid.Cid) (*types.Block, error)
GetHead() types.SortedCidSet
GetTipSet(types.SortedCidSet) (types.TipSet, error)
GetTipSetStateRoot(tsKey types.SortedCidSet) (cid.Cid, error)
HeadEvents() *ps.PubSub
Load(context.Context) error
Stop()
}
// Node represents a full Filecoin node.
type Node struct {
host host.Host
PeerHost host.Host
Consensus consensus.Protocol
ChainReader nodeChainReader
Syncer chain.Syncer
PowerTable consensus.PowerTableView
BlockMiningAPI *block.MiningAPI
PorcelainAPI *porcelain.API
RetrievalAPI *retrieval.API
StorageAPI *storage.API
// HeavyTipSetCh is a subscription to the heaviest tipset topic on the chain.
HeaviestTipSetCh chan interface{}
// HeavyTipSetHandled is a hook for tests because pubsub notifications
// arrive async. It's called after handling a new heaviest tipset.
// Remove this after replacing the tipset "pubsub" with a synchronous event bus:
// https://github.com/filecoin-project/go-filecoin/issues/2309
HeaviestTipSetHandled func()
// Incoming messages for block mining.
Inbox *core.Inbox
// Messages sent and not yet mined.
Outbox *core.Outbox
Wallet *wallet.Wallet
// Mining stuff.
AddNewlyMinedBlock newBlockFunc
blockTime time.Duration
cancelMining context.CancelFunc
MiningWorker mining.Worker
MiningScheduler mining.Scheduler
mining struct {
sync.Mutex
isMining bool
}
miningCtx context.Context
miningDoneWg *sync.WaitGroup
// Storage Market Interfaces
StorageMiner *storage.Miner
// Retrieval Interfaces
RetrievalMiner *retrieval.Miner
// Network Fields
BlockSub pubsub.Subscription
MessageSub pubsub.Subscription
HelloSvc *hello.Handler
Bootstrapper *net.Bootstrapper
// Data Storage Fields
// Repo is the repo this node was created with
// it contains all persistent artifacts of the filecoin node
Repo repo.Repo
// SectorBuilder is used by the miner to fill and seal sectors.
sectorBuilder sectorbuilder.SectorBuilder
// Fetcher is the interface for fetching data from nodes.
Fetcher *net.Fetcher
// Exchange is the interface for fetching data from other nodes.
Exchange exchange.Interface
// Blockstore is the un-networked blocks interface
Blockstore bstore.Blockstore
// Blockservice is a higher level interface for fetching data
blockservice bserv.BlockService
// CborStore is a temporary interface for interacting with IPLD objects.
cborStore *hamt.CborIpldStore
// cancelSubscriptionsCtx is a handle to cancel the block and message subscriptions.
cancelSubscriptionsCtx context.CancelFunc
// OfflineMode, when true, disables libp2p
OfflineMode bool
// Router is a router from IPFS
Router routing.IpfsRouting
}
// Config is a helper to aid in the construction of a filecoin node.
type Config struct {
BlockTime time.Duration
Libp2pOpts []libp2p.Option
OfflineMode bool
Verifier proofs.Verifier
Rewarder consensus.BlockRewarder
Repo repo.Repo
IsRelay bool
}
// ConfigOpt is a configuration option for a filecoin node.
type ConfigOpt func(*Config) error
// OfflineMode enables or disables offline mode.
func OfflineMode(offlineMode bool) ConfigOpt {
return func(c *Config) error {
c.OfflineMode = offlineMode
return nil
}
}
// IsRelay configures node to act as a libp2p relay.
func IsRelay() ConfigOpt {
return func(c *Config) error {
c.IsRelay = true
return nil
}
}
// BlockTime sets the blockTime.
func BlockTime(blockTime time.Duration) ConfigOpt {
return func(c *Config) error {
c.BlockTime = blockTime
return nil
}
}
// Libp2pOptions returns a node config option that sets up the libp2p node
func Libp2pOptions(opts ...libp2p.Option) ConfigOpt {
return func(nc *Config) error {
// Quietly having your options overridden leads to hair loss
if len(nc.Libp2pOpts) > 0 {
panic("Libp2pOptions can only be called once")
}
nc.Libp2pOpts = opts
return nil
}
}
// VerifierConfigOption returns a function that sets the verifier to use in the node consensus
func VerifierConfigOption(verifier proofs.Verifier) ConfigOpt {
return func(c *Config) error {
c.Verifier = verifier
return nil
}
}
// RewarderConfigOption returns a function that sets the rewarder to use in the node consensus
func RewarderConfigOption(rewarder consensus.BlockRewarder) ConfigOpt {
return func(c *Config) error {
c.Rewarder = rewarder
return nil
}
}
// New creates a new node.
func New(ctx context.Context, opts ...ConfigOpt) (*Node, error) {
n := &Config{}
for _, o := range opts {
if err := o(n); err != nil {
return nil, err
}
}
return n.Build(ctx)
}
type blankValidator struct{}
func (blankValidator) Validate(_ string, _ []byte) error { return nil }
func (blankValidator) Select(_ string, _ [][]byte) (int, error) { return 0, nil }
// readGenesisCid is a helper function that queries the provided datastore for
// an entry with the genesisKey cid, returning it if found.
func readGenesisCid(ds datastore.Datastore) (cid.Cid, error) {
bb, err := ds.Get(chain.GenesisKey)
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to read genesisKey")
}
var c cid.Cid
err = json.Unmarshal(bb, &c)
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to cast genesisCid")
}
return c, nil
}
// buildHost determines if we are publicly dialable. If so, use the public
// address; if not, configure the node to announce a relay address.
func (nc *Config) buildHost(ctx context.Context, makeDHT func(host host.Host) (routing.IpfsRouting, error)) (host.Host, error) {
// Node must build a host acting as a libp2p relay. Additionally it
// runs the autoNAT service which allows other nodes to check for their
// own dialability by having this node attempt to dial them.
makeDHTRightType := func(h host.Host) (routing.PeerRouting, error) {
return makeDHT(h)
}
if nc.IsRelay {
cfg := nc.Repo.Config()
publicAddr, err := ma.NewMultiaddr(cfg.Swarm.PublicRelayAddress)
if err != nil {
return nil, err
}
publicAddrFactory := func(lc *libp2p.Config) error {
lc.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr {
if cfg.Swarm.PublicRelayAddress == "" {
return addrs
}
return append(addrs, publicAddr)
}
return nil
}
relayHost, err := libp2p.New(
ctx,
libp2p.EnableRelay(circuit.OptHop),
libp2p.EnableAutoRelay(),
libp2p.Routing(makeDHTRightType),
publicAddrFactory,
libp2p.ChainOptions(nc.Libp2pOpts...),
)
if err != nil {
return nil, err
}
		// Set up autoNATService as a stream handler on the host.
_, err = autonatsvc.NewAutoNATService(ctx, relayHost)
if err != nil {
return nil, err
}
return relayHost, nil
}
return libp2p.New(
ctx,
libp2p.EnableAutoRelay(),
libp2p.Routing(makeDHTRightType),
libp2p.ChainOptions(nc.Libp2pOpts...),
)
}
// Build instantiates a filecoin Node from the settings specified in the config.
func (nc *Config) Build(ctx context.Context) (*Node, error) {
if nc.Repo == nil {
nc.Repo = repo.NewInMemoryRepo()
}
bs := bstore.NewBlockstore(nc.Repo.Datastore())
validator := blankValidator{}
var peerHost host.Host
var router routing.IpfsRouting
bandwidthTracker := p2pmetrics.NewBandwidthCounter()
nc.Libp2pOpts = append(nc.Libp2pOpts, libp2p.BandwidthReporter(bandwidthTracker))
if !nc.OfflineMode {
makeDHT := func(h host.Host) (routing.IpfsRouting, error) {
r, err := dht.New(
ctx,
h,
dhtopts.Datastore(nc.Repo.Datastore()),
dhtopts.NamespacedValidator("v", validator),
dhtopts.Protocols(filecoinDHTProtocol),
)
if err != nil {
return nil, errors.Wrap(err, "failed to setup routing")
}
router = r
return r, err
}
var err error
peerHost, err = nc.buildHost(ctx, makeDHT)
if err != nil {
return nil, err
}
} else {
router = offroute.NewOfflineRouter(nc.Repo.Datastore(), validator)
peerHost = rhost.Wrap(noopLibP2PHost{}, router)
}
// set up pinger
pingService := ping.NewPingService(peerHost)
// set up bitswap
nwork := bsnet.NewFromIpfsHost(peerHost, router)
//nwork := bsnet.NewFromIpfsHost(innerHost, router)
bswap := bitswap.New(ctx, nwork, bs)
bservice := bserv.New(bs, bswap)
fetcher := net.NewFetcher(ctx, bservice)
cstOffline := hamt.CborIpldStore{Blocks: bserv.New(bs, offline.Exchange(bs))}
genCid, err := readGenesisCid(nc.Repo.Datastore())
if err != nil {
return nil, err
}
// set up chainstore
chainStore := chain.NewDefaultStore(nc.Repo.ChainDatastore(), genCid)
chainState := cst.NewChainStateProvider(chainStore, &cstOffline)
powerTable := &consensus.MarketView{}
// set up processor
var processor consensus.Processor
if nc.Rewarder == nil {
processor = consensus.NewDefaultProcessor()
} else {
processor = consensus.NewConfiguredProcessor(consensus.NewDefaultMessageValidator(), nc.Rewarder)
}
// set up consensus
var nodeConsensus consensus.Protocol
if nc.Verifier == nil {
nodeConsensus = consensus.NewExpected(&cstOffline, bs, processor, powerTable, genCid, &proofs.RustVerifier{})
} else {
nodeConsensus = consensus.NewExpected(&cstOffline, bs, processor, powerTable, genCid, nc.Verifier)
}
// Set up libp2p network
fsub, err := libp2pps.NewFloodSub(ctx, peerHost)
if err != nil {
return nil, errors.Wrap(err, "failed to set up network")
}
backend, err := wallet.NewDSBackend(nc.Repo.WalletDatastore())
if err != nil {
return nil, errors.Wrap(err, "failed to set up wallet backend")
}
fcWallet := wallet.New(backend)
// only the syncer gets the storage which is online connected
chainSyncer := chain.NewDefaultSyncer(&cstOffline, nodeConsensus, chainStore, fetcher, chain.Syncing)
msgPool := core.NewMessagePool(nc.Repo.Config().Mpool, consensus.NewIngestionValidator(chainState, nc.Repo.Config().Mpool))
inbox := core.NewInbox(msgPool, core.InboxMaxAgeTipsets, chainStore)
msgQueue := core.NewMessageQueue()
outboxPolicy := core.NewMessageQueuePolicy(chainStore, core.OutboxMaxAgeRounds)
msgPublisher := newDefaultMessagePublisher(pubsub.NewPublisher(fsub), core.Topic, msgPool)
outbox := core.NewOutbox(fcWallet, consensus.NewOutboundMessageValidator(), msgQueue, msgPublisher, outboxPolicy, chainStore, chainState)
PorcelainAPI := porcelain.New(plumbing.New(&plumbing.APIDeps{
Bitswap: bswap,
Chain: chainState,
Config: cfg.NewConfig(nc.Repo),
DAG: dag.NewDAG(merkledag.NewDAGService(bservice)),
Deals: strgdls.New(nc.Repo.DealsDatastore()),
MsgPool: msgPool,
MsgPreviewer: msg.NewPreviewer(fcWallet, chainStore, &cstOffline, bs),
MsgQueryer: msg.NewQueryer(nc.Repo, fcWallet, chainStore, &cstOffline, bs),
MsgWaiter: msg.NewWaiter(chainStore, bs, &cstOffline),
Network: net.New(peerHost, pubsub.NewPublisher(fsub), pubsub.NewSubscriber(fsub), net.NewRouter(router), bandwidthTracker, net.NewPinger(peerHost, pingService)),
Outbox: outbox,
Wallet: fcWallet,
}))
nd := &Node{
blockservice: bservice,
Blockstore: bs,
cborStore: &cstOffline,
Consensus: nodeConsensus,
ChainReader: chainStore,
Syncer: chainSyncer,
PowerTable: powerTable,
PorcelainAPI: PorcelainAPI,
Fetcher: fetcher,
Exchange: bswap,
host: peerHost,
Inbox: inbox,
OfflineMode: nc.OfflineMode,
Outbox: outbox,
PeerHost: peerHost,
Repo: nc.Repo,
Wallet: fcWallet,
blockTime: nc.BlockTime,
Router: router,
}
// Bootstrapping network peers.
periodStr := nd.Repo.Config().Bootstrap.Period
period, err := time.ParseDuration(periodStr)
if err != nil {
return nil, errors.Wrapf(err, "couldn't parse bootstrap period %s", periodStr)
}
// Bootstrapper maintains connections to some subset of addresses
ba := nd.Repo.Config().Bootstrap.Addresses
bpi, err := net.PeerAddrsToPeerInfos(ba)
if err != nil {
return nil, errors.Wrapf(err, "couldn't parse bootstrap addresses [%s]", ba)
}
minPeerThreshold := nd.Repo.Config().Bootstrap.MinPeerThreshold
nd.Bootstrapper = net.NewBootstrapper(bpi, nd.Host(), nd.Host().Network(), nd.Router, minPeerThreshold, period)
return nd, nil
}
// Start boots up the node.
func (node *Node) Start(ctx context.Context) error {
if err := metrics.RegisterPrometheusEndpoint(node.Repo.Config().Observability.Metrics); err != nil {
return errors.Wrap(err, "failed to setup metrics")
}
if err := metrics.RegisterJaeger(node.host.ID().Pretty(), node.Repo.Config().Observability.Tracing); err != nil {
return errors.Wrap(err, "failed to setup tracing")
}
var err error
if err = node.ChainReader.Load(ctx); err != nil {
return err
}
// Only set these up if there is a miner configured.
if _, err := node.miningAddress(); err == nil {
if err := node.setupMining(ctx); err != nil {
log.Errorf("setup mining failed: %v", err)
return err
}
}
// Start up 'hello' handshake service
syncCallBack := func(pid libp2ppeer.ID, cids []cid.Cid, height uint64) {
cidSet := types.NewSortedCidSet(cids...)
err := node.Syncer.HandleNewTipset(context.Background(), cidSet)
if err != nil {
log.Infof("error handling blocks: %s", cidSet.String())
}
}
node.HelloSvc = hello.New(node.Host(), node.ChainReader.GenesisCid(), syncCallBack, node.PorcelainAPI.ChainHead, node.Repo.Config().Net, flags.Commit)
err = node.setupProtocols()
if err != nil {
return errors.Wrap(err, "failed to set up protocols:")
}
node.RetrievalMiner = retrieval.NewMiner(node)
// subscribe to block notifications
blkSub, err := node.PorcelainAPI.PubSubSubscribe(BlockTopic)
if err != nil {
return errors.Wrap(err, "failed to subscribe to blocks topic")
}
node.BlockSub = blkSub
// subscribe to message notifications
msgSub, err := node.PorcelainAPI.PubSubSubscribe(core.Topic)
if err != nil {
return errors.Wrap(err, "failed to subscribe to message topic")
}
node.MessageSub = msgSub
cctx, cancel := context.WithCancel(context.Background())
node.cancelSubscriptionsCtx = cancel
go node.handleSubscription(cctx, node.processBlock, "processBlock", node.BlockSub, "BlockSub")
go node.handleSubscription(cctx, node.processMessage, "processMessage", node.MessageSub, "MessageSub")
node.HeaviestTipSetHandled = func() {}
node.HeaviestTipSetCh = node.ChainReader.HeadEvents().Sub(chain.NewHeadTopic)
head, err := node.PorcelainAPI.ChainHead()
if err != nil {
return errors.Wrap(err, "failed to get chain head")
}
go node.handleNewHeaviestTipSet(cctx, head)
if !node.OfflineMode {
node.Bootstrapper.Start(context.Background())
}
if err := node.setupHeartbeatServices(ctx); err != nil {
return errors.Wrap(err, "failed to start heartbeat services")
}
return nil
}
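// setupHeartbeatServices starts the primary heartbeat service and, when the
// FIL_HEARTBEAT_ALERTS environment variable is set, an alerting heartbeat service.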
func (node *Node) setupHeartbeatServices(ctx context.Context) error {
mag := func() address.Address {
addr, err := node.miningAddress()
// the only error miningAddress() returns is ErrNoMinerAddress.
// if there is no configured miner address, simply send a zero
// address across the wire.
if err != nil {
return address.Undef
}
return addr
}
// start the primary heartbeat service
if len(node.Repo.Config().Heartbeat.BeatTarget) > 0 {
hbs := metrics.NewHeartbeatService(node.Host(), node.Repo.Config().Heartbeat, node.PorcelainAPI.ChainHead, metrics.WithMinerAddressGetter(mag))
go hbs.Start(ctx)
}
	// check if we want to connect to an alert service. An alerting service is a heartbeat
	// service that can trigger alerts based on the contents of heartbeats.
if alertTarget := os.Getenv("FIL_HEARTBEAT_ALERTS"); len(alertTarget) > 0 {
ahbs := metrics.NewHeartbeatService(node.Host(), &config.HeartbeatConfig{
BeatTarget: alertTarget,
BeatPeriod: "10s",
ReconnectPeriod: "10s",
Nickname: node.Repo.Config().Heartbeat.Nickname,
}, node.PorcelainAPI.ChainHead, metrics.WithMinerAddressGetter(mag))
go ahbs.Start(ctx)
}
return nil
}
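// setupMining initializes the node's sector builder so the miner can fill and seal sectors.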
func (node *Node) setupMining(ctx context.Context) error {
// initialize a sector builder
sectorBuilder, err := initSectorBuilderForNode(ctx, node)
if err != nil {
return errors.Wrap(err, "failed to initialize sector builder")
}
node.sectorBuilder = sectorBuilder
return nil
}
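// setIsMining sets the mining flag while holding the mining mutex.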
func (node *Node) setIsMining(isMining bool) {
node.mining.Lock()
defer node.mining.Unlock()
node.mining.isMining = isMining
}
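// handleNewMiningOutput consumes output from the mining scheduler, adding
// successfully mined blocks to the chain and stopping mining on error.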
func (node *Node) handleNewMiningOutput(miningOutCh <-chan mining.Output) {
defer func() {
node.miningDoneWg.Done()
}()
for {
select {
case <-node.miningCtx.Done():
return
case output, ok := <-miningOutCh:
if !ok {
return
}
if output.Err != nil {
log.Errorf("stopping mining. error: %s", output.Err.Error())
node.StopMining(context.Background())
} else {
node.miningDoneWg.Add(1)
go func() {
if node.IsMining() {
node.AddNewlyMinedBlock(node.miningCtx, output.NewBlock)
}
node.miningDoneWg.Done()
}()
}
}
}
}
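// handleNewHeaviestTipSet updates the outbox, inbox and storage miner whenever a
// new heaviest tipset is published on the chain's head events channel.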
func (node *Node) handleNewHeaviestTipSet(ctx context.Context, head types.TipSet) {
for {
select {
case ts, ok := <-node.HeaviestTipSetCh:
if !ok {
return
}
newHead, ok := ts.(types.TipSet)
if !ok {
log.Error("non-tipset published on heaviest tipset channel")
continue
}
if len(newHead) == 0 {
log.Error("tipset of size 0 published on heaviest tipset channel. ignoring and waiting for a new heaviest tipset.")
continue
}
if err := node.Outbox.HandleNewHead(ctx, head, newHead); err != nil {
log.Error("updating outbound message queue for new tipset", err)
}
if err := node.Inbox.HandleNewHead(ctx, head, newHead); err != nil {
log.Error("updating message pool for new tipset", err)
}
head = newHead
if node.StorageMiner != nil {
node.StorageMiner.OnNewHeaviestTipSet(newHead)
}
node.HeaviestTipSetHandled()
case <-ctx.Done():
return
}
}
}
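// cancelSubscriptions cancels and clears the block and message pubsub subscriptions.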
func (node *Node) cancelSubscriptions() {
if node.BlockSub != nil || node.MessageSub != nil {
node.cancelSubscriptionsCtx()
}
if node.BlockSub != nil {
node.BlockSub.Cancel()
node.BlockSub = nil
}
if node.MessageSub != nil {
node.MessageSub.Cancel()
node.MessageSub = nil
}
}
// Stop initiates the shutdown of the node.
func (node *Node) Stop(ctx context.Context) {
node.ChainReader.HeadEvents().Unsub(node.HeaviestTipSetCh)
node.StopMining(ctx)
node.cancelSubscriptions()
node.ChainReader.Stop()
if node.SectorBuilder() != nil {
if err := node.SectorBuilder().Close(); err != nil {
fmt.Printf("error closing sector builder: %s\n", err)
}
node.sectorBuilder = nil
}
if err := node.Host().Close(); err != nil {
fmt.Printf("error closing host: %s\n", err)
}
if err := node.Repo.Close(); err != nil {
fmt.Printf("error closing repo: %s\n", err)
}
node.Bootstrapper.Stop()
fmt.Println("stopping filecoin :(")
}
type newBlockFunc func(context.Context, *types.Block)
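// addNewlyMinedBlock adds a block mined by this node to the chain, logging a warning on failure.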
func (node *Node) addNewlyMinedBlock(ctx context.Context, b *types.Block) {
log.Debugf("Got a newly mined block from the mining worker: %s", b)
if err := node.AddNewBlock(ctx, b); err != nil {
log.Warningf("error adding new mined block: %s. err: %s", b.Cid().String(), err.Error())
}
}
// miningAddress returns the address of the mining actor mining on behalf of
// the node.
func (node *Node) miningAddress() (address.Address, error) {
addr := node.Repo.Config().Mining.MinerAddress
if addr.Empty() {
return address.Undef, ErrNoMinerAddress
}
return addr, nil
}
// MiningTimes returns the configured time it takes to mine a block, and also
// the mining delay duration, which is currently a fixed fraction of block time.
// Note this is mocked behavior; in production this time is determined by how
// long it takes to generate PoSTs.
func (node *Node) MiningTimes() (time.Duration, time.Duration) {
mineDelay := node.GetBlockTime() / mining.MineDelayConversionFactor
return node.GetBlockTime(), mineDelay
}
// GetBlockTime returns the current block time.
// TODO this should be surfaced somewhere in the plumbing API.
func (node *Node) GetBlockTime() time.Duration {
return node.blockTime
}
// SetBlockTime sets the block time.
func (node *Node) SetBlockTime(blockTime time.Duration) {
node.blockTime = blockTime
}
// StartMining causes the node to start feeding blocks to the mining worker and initializes
// the SectorBuilder for the mining address.
func (node *Node) StartMining(ctx context.Context) error {
if node.IsMining() {
return errors.New("Node is already mining")
}
minerAddr, err := node.miningAddress()
if err != nil {
return errors.Wrap(err, "failed to get mining address")
}
// ensure we have a sector builder
if node.SectorBuilder() == nil {
if err := node.setupMining(ctx); err != nil {
return err
}
}
minerOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr)
if err != nil {
return errors.Wrapf(err, "failed to get mining owner address for miner %s", minerAddr)
}
_, mineDelay := node.MiningTimes()
if node.MiningWorker == nil {
if node.MiningWorker, err = node.CreateMiningWorker(ctx); err != nil {
return err
}
}
if node.MiningScheduler == nil {
node.MiningScheduler = mining.NewScheduler(node.MiningWorker, mineDelay, node.PorcelainAPI.ChainHead)
}
// paranoid check
if !node.MiningScheduler.IsStarted() {
node.miningCtx, node.cancelMining = context.WithCancel(context.Background())
outCh, doneWg := node.MiningScheduler.Start(node.miningCtx)
node.miningDoneWg = doneWg
node.AddNewlyMinedBlock = node.addNewlyMinedBlock
node.miningDoneWg.Add(1)
go node.handleNewMiningOutput(outCh)
}
// initialize a storage miner
storageMiner, err := initStorageMinerForNode(ctx, node)
if err != nil {
return errors.Wrap(err, "failed to initialize storage miner")
}
node.StorageMiner = storageMiner
// loop, turning sealing-results into commitSector messages to be included
// in the chain
go func() {
for {
select {
case result := <-node.SectorBuilder().SectorSealResults():
if result.SealingErr != nil {
log.Errorf("failed to seal sector with id %d: %s", result.SectorID, result.SealingErr.Error())
} else if result.SealingResult != nil {
// TODO: determine these algorithmically by simulating call and querying historical prices
gasPrice := types.NewGasPrice(1)
gasUnits := types.NewGasUnits(300)
val := result.SealingResult
					// This call can fail due to, e.g., nonce collisions. Our miner's existence depends on this.
					// We should deal with this, but MessageSendWithRetry is problematic.
msgCid, err := node.PorcelainAPI.MessageSend(
node.miningCtx,
minerOwnerAddr,
minerAddr,
nil,
gasPrice,
gasUnits,
"commitSector",
val.SectorID,
val.CommD[:],
val.CommR[:],
val.CommRStar[:],
val.Proof[:],
)
if err != nil {
log.Errorf("failed to send commitSector message from %s to %s for sector with id %d: %s", minerOwnerAddr, minerAddr, val.SectorID, err)
continue
}
node.StorageMiner.OnCommitmentSent(val, msgCid, nil)
}
case <-node.miningCtx.Done():
return
}
}
}()
// schedules sealing of staged piece-data
if node.Repo.Config().Mining.AutoSealIntervalSeconds > 0 {
go func() {
for {
select {
case <-node.miningCtx.Done():
return
case <-time.After(time.Duration(node.Repo.Config().Mining.AutoSealIntervalSeconds) * time.Second):
log.Info("auto-seal has been triggered")
if err := node.SectorBuilder().SealAllStagedSectors(node.miningCtx); err != nil {
log.Errorf("scheduler received error from node.SectorBuilder.SealAllStagedSectors (%s) - exiting", err.Error())
return
}
}
}
}()
} else {
log.Debug("auto-seal is disabled")
}
node.setIsMining(true)
return nil
}
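// initSectorBuilderForNode creates a sector builder for the node's miner, configured with the
// miner's on-chain sector size, its last committed sector id and the repo's sector directories.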
func initSectorBuilderForNode(ctx context.Context, node *Node) (sectorbuilder.SectorBuilder, error) {
minerAddr, err := node.miningAddress()
if err != nil {
return nil, errors.Wrap(err, "failed to get node's mining address")
}
sectorSize, err := node.PorcelainAPI.MinerGetSectorSize(ctx, minerAddr)
if err != nil {
return nil, errors.Wrapf(err, "failed to get sector size for miner w/address %s", minerAddr.String())
}
lastUsedSectorID, err := node.PorcelainAPI.MinerGetLastCommittedSectorID(ctx, minerAddr)
if err != nil {
return nil, errors.Wrapf(err, "failed to get last used sector id for miner w/address %s", minerAddr.String())
}
	// TODO: Currently, we configure the RustSectorBuilder to store its
	// metadata in the staging directory; it should be in its own directory.
//
// Tracked here: https://github.com/filecoin-project/rust-fil-proofs/issues/402
repoPath, err := node.Repo.Path()
if err != nil {
return nil, err
}
sectorDir, err := paths.GetSectorPath(node.Repo.Config().SectorBase.RootDir, repoPath)
if err != nil {
return nil, err
}
stagingDir, err := paths.StagingDir(sectorDir)
if err != nil {
return nil, err
}
sealedDir, err := paths.SealedDir(sectorDir)
if err != nil {
return nil, err
}
cfg := sectorbuilder.RustSectorBuilderConfig{
BlockService: node.blockservice,
LastUsedSectorID: lastUsedSectorID,
MetadataDir: stagingDir,
MinerAddr: minerAddr,
SealedSectorDir: sealedDir,
StagedSectorDir: stagingDir,
SectorClass: types.NewSectorClass(sectorSize),
}
sb, err := sectorbuilder.NewRustSectorBuilder(cfg)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("failed to initialize sector builder for miner %s", minerAddr.String()))
}
return sb, nil
}
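// initStorageMinerForNode constructs a storage miner for the node's configured miner
// address and its owner address.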
func initStorageMinerForNode(ctx context.Context, node *Node) (*storage.Miner, error) {
minerAddr, err := node.miningAddress()
if err != nil {
return nil, errors.Wrap(err, "failed to get node's mining address")
}
miningOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr)
if err != nil {
return nil, errors.Wrap(err, "no mining owner available, skipping storage miner setup")
}
miner, err := storage.NewMiner(minerAddr, miningOwnerAddr, node, node.Repo.DealsDatastore(), node.PorcelainAPI)
if err != nil {
return nil, errors.Wrap(err, "failed to instantiate storage miner")
}
return miner, nil
}
// StopMining stops mining on new blocks.
func (node *Node) StopMining(ctx context.Context) {
node.setIsMining(false)
if node.cancelMining != nil {
node.cancelMining()
}
if node.miningDoneWg != nil {
node.miningDoneWg.Wait()
}
// TODO: stop node.StorageMiner
}
// NewAddress creates a new account address on the default wallet backend.
func (node *Node) NewAddress() (address.Address, error) {
return wallet.NewAddress(node.Wallet)
}
// miningOwnerAddress returns the owner of miningAddr.
// TODO: find a better home for this method
func (node *Node) miningOwnerAddress(ctx context.Context, miningAddr address.Address) (address.Address, error) {
ownerAddr, err := node.PorcelainAPI.MinerGetOwnerAddress(ctx, miningAddr)
if err != nil {
return address.Undef, errors.Wrap(err, "failed to get miner owner address")
}
return ownerAddr, nil
}
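// handleSubscription reads messages from a pubsub subscription and dispatches them to the
// given processor function, logging any errors, until the subscription is closed.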
func (node *Node) handleSubscription(ctx context.Context, f pubSubProcessorFunc, fname string, s pubsub.Subscription, sname string) {
for {
pubSubMsg, err := s.Next(ctx)
if err != nil {
log.Errorf("%s.Next(): %s", sname, err)
return
}
if err := f(ctx, pubSubMsg); err != nil {
if vmerr.ShouldRevert(err) {
log.Infof("%s(): %s", fname, err)
} else if err != context.Canceled {
log.Errorf("%s(): %s", fname, err)
}
}
}
}
// setupProtocols creates protocol clients and miners, then sets the node's APIs
// for each
func (node *Node) setupProtocols() error {
_, mineDelay := node.MiningTimes()
blockMiningAPI := block.New(
node.AddNewBlock,
node.ChainReader,
mineDelay,
node.StartMining,
node.StopMining,
node.CreateMiningWorker)
node.BlockMiningAPI = &blockMiningAPI
// set up retrieval client and api
retapi := retrieval.NewAPI(retrieval.NewClient(node.host, node.blockTime, node.PorcelainAPI))
node.RetrievalAPI = &retapi
// set up storage client and api
smc := storage.NewClient(node.blockTime, node.host, node.PorcelainAPI)
smcAPI := storage.NewAPI(smc)
node.StorageAPI = &smcAPI
return nil
}
// CreateMiningWorker creates a mining.Worker for the node using its configured
// getStateTree, getWeight, and getAncestors functions.
func (node *Node) CreateMiningWorker(ctx context.Context) (mining.Worker, error) {
processor := consensus.NewDefaultProcessor()
minerAddr, err := node.miningAddress()
if err != nil {
return nil, errors.Wrap(err, "failed to get mining address")
}
minerPubKey, err := node.PorcelainAPI.MinerGetKey(ctx, minerAddr)
if err != nil {
return nil, errors.Wrap(err, "could not get key from miner actor")
}
minerOwnerAddr, err := node.miningOwnerAddress(ctx, minerAddr)
if err != nil {
log.Errorf("could not get owner address of miner actor")
return nil, err
}
return mining.NewDefaultWorker(
node.Inbox.Pool(), node.getStateTree, node.getWeight, node.getAncestors, processor, node.PowerTable,
node.Blockstore, node.CborStore(), minerAddr, minerOwnerAddr, minerPubKey,
node.Wallet, node.blockTime), nil
}
// getStateFromKey returns the state tree of the tipset identified by the provided key tsKey.
func (node *Node) getStateFromKey(ctx context.Context, tsKey types.SortedCidSet) (state.Tree, error) {
stateCid, err := node.ChainReader.GetTipSetStateRoot(tsKey)
if err != nil {
return nil, err
}
return state.LoadStateTree(ctx, node.CborStore(), stateCid, builtin.Actors)
}
// getStateTree is the default GetStateTree function for the mining worker.
func (node *Node) getStateTree(ctx context.Context, ts types.TipSet) (state.Tree, error) {
return node.getStateFromKey(ctx, ts.ToSortedCidSet())
}
// getWeight is the default GetWeight function for the mining worker.
func (node *Node) getWeight(ctx context.Context, ts types.TipSet) (uint64, error) {
parent, err := ts.Parents()
if err != nil {
return uint64(0), err
}
// TODO handle genesis cid more gracefully
if parent.Len() == 0 {
return node.Consensus.Weight(ctx, ts, nil)
}
pSt, err := node.getStateFromKey(ctx, parent)
if err != nil {
return uint64(0), err
}
return node.Consensus.Weight(ctx, ts, pSt)
}
// getAncestors is the default GetAncestors function for the mining worker.
func (node *Node) getAncestors(ctx context.Context, ts types.TipSet, newBlockHeight *types.BlockHeight) ([]types.TipSet, error) {
ancestorHeight := types.NewBlockHeight(consensus.AncestorRoundsNeeded)
return chain.GetRecentAncestors(ctx, ts, node.ChainReader, newBlockHeight, ancestorHeight, sampling.LookbackParameter)
}
// -- Accessors
// Host returns the node's host.
func (node *Node) Host() host.Host {
return node.host
}
// SectorBuilder returns the node's sectorBuilder.
func (node *Node) SectorBuilder() sectorbuilder.SectorBuilder {
return node.sectorBuilder
}
// BlockService returns the node's blockservice.
func (node *Node) BlockService() bserv.BlockService {
return node.blockservice
}
// CborStore returns the node's cborStore.
func (node *Node) CborStore() *hamt.CborIpldStore {
return node.cborStore
}
// IsMining returns a boolean indicating whether the node is mining blocks.
func (node *Node) IsMining() bool {
node.mining.Lock()
defer node.mining.Unlock()
return node.mining.isMining
}
| 1 | 19,360 | We still need to figure out a way to get rid of this write method. Is this still necessary? | filecoin-project-venus | go |
@@ -265,6 +265,11 @@ class KNNClassifierTest(unittest.TestCase):
self.assertEquals(classifier.getNumPartitionIds(), 3)
+ # Check that the full set of partition ids is what we expect
+ self.assertItemsEqual(classifier.getPartitionIdPerPattern(),
+ [433, 213, np.inf, 433, 413])
+ self.assertItemsEqual(classifier.getPartitionIdList(),[433, 413, 213])
+
# Remove two rows - all indices shift down
self.assertEquals(classifier._removeRows([0,2]), 2)
self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(433), | 1 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import unittest
from nupic.algorithms.KNNClassifier import KNNClassifier
class KNNClassifierTest(unittest.TestCase):
def testDistanceMetrics(self):
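    """Tests each supported distance metric: L2 and L1 norms and the overlap variants."""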
classifier = KNNClassifier(distanceMethod="norm", distanceNorm=2.0)
dimensionality = 40
protoA = np.array([0, 1, 3, 7, 11], dtype=np.int32)
protoB = np.array([20, 28, 30], dtype=np.int32)
classifier.learn(protoA, 0, isSparse=dimensionality)
classifier.learn(protoB, 0, isSparse=dimensionality)
# input is an arbitrary point, close to protoA, orthogonal to protoB
input = np.zeros(dimensionality)
input[:4] = 1.0
# input0 is used to test that the distance from a point to itself is 0
input0 = np.zeros(dimensionality)
input0[protoA] = 1.0
# Test l2 norm metric
_, _, dist, _ = classifier.infer(input)
l2Distances = [0.65465367, 1.0]
for actual, predicted in zip(l2Distances, dist):
self.assertAlmostEqual(
actual, predicted, places=5,
msg="l2 distance norm is not calculated as expected.")
_, _, dist0, _ = classifier.infer(input0)
self.assertEqual(
0.0, dist0[0], msg="l2 norm did not calculate 0 distance as expected.")
# Test l1 norm metric
classifier.distanceNorm = 1.0
_, _, dist, _ = classifier.infer(input)
l1Distances = [0.42857143, 1.0]
for actual, predicted in zip(l1Distances, dist):
self.assertAlmostEqual(
actual, predicted, places=5,
msg="l1 distance norm is not calculated as expected.")
_, _, dist0, _ = classifier.infer(input0)
self.assertEqual(
0.0, dist0[0], msg="l1 norm did not calculate 0 distance as expected.")
# Test raw overlap metric
classifier.distanceMethod = "rawOverlap"
_, _, dist, _ = classifier.infer(input)
rawOverlaps = [1, 4]
for actual, predicted in zip(rawOverlaps, dist):
self.assertEqual(
actual, predicted, msg="Raw overlap is not calculated as expected.")
_, _, dist0, _ = classifier.infer(input0)
self.assertEqual(
0.0, dist0[0],
msg="Raw overlap did not calculate 0 distance as expected.")
# Test pctOverlapOfInput metric
classifier.distanceMethod = "pctOverlapOfInput"
_, _, dist, _ = classifier.infer(input)
pctOverlaps = [0.25, 1.0]
for actual, predicted in zip(pctOverlaps, dist):
self.assertAlmostEqual(
actual, predicted, places=5,
msg="pctOverlapOfInput is not calculated as expected.")
_, _, dist0, _ = classifier.infer(input0)
self.assertEqual(
0.0, dist0[0],
msg="pctOverlapOfInput did not calculate 0 distance as expected.")
# Test pctOverlapOfProto metric
classifier.distanceMethod = "pctOverlapOfProto"
_, _, dist, _ = classifier.infer(input)
pctOverlaps = [0.40, 1.0]
for actual, predicted in zip(pctOverlaps, dist):
self.assertAlmostEqual(
actual, predicted, places=5,
msg="pctOverlapOfProto is not calculated as expected.")
_, _, dist0, _ = classifier.infer(input0)
self.assertEqual(
0.0, dist0[0],
msg="pctOverlapOfProto did not calculate 0 distance as expected.")
# Test pctOverlapOfLarger metric
classifier.distanceMethod = "pctOverlapOfLarger"
_, _, dist, _ = classifier.infer(input)
pctOverlaps = [0.40, 1.0]
for actual, predicted in zip(pctOverlaps, dist):
self.assertAlmostEqual(
actual, predicted, places=5,
msg="pctOverlapOfLarger is not calculated as expected.")
_, _, dist0, _ = classifier.infer(input0)
self.assertEqual(
0.0, dist0[0],
msg="pctOverlapOfLarger did not calculate 0 distance as expected.")
def testOverlapDistanceMethodStandard(self):
"""Tests standard learning case for raw overlap"""
params = {"distanceMethod": "rawOverlap"}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
numPatterns = classifier.learn(a, 0, isSparse=dimensionality)
self.assertEquals(numPatterns, 1)
numPatterns = classifier.learn(b, 1, isSparse=dimensionality)
self.assertEquals(numPatterns, 2)
denseA = np.zeros(dimensionality)
denseA[a] = 1.0
cat, _, _, _ = classifier.infer(denseA)
self.assertEquals(cat, 0)
denseB = np.zeros(dimensionality)
denseB[b] = 1.0
cat, _, _, _ = classifier.infer(denseB)
self.assertEquals(cat, 1)
def testPartitionIdExcluded(self):
"""
    Tests that partitionId properly excludes training data points during
inference
"""
params = {"distanceMethod": "rawOverlap"}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
denseA = np.zeros(dimensionality)
denseA[a] = 1.0
denseB = np.zeros(dimensionality)
denseB[b] = 1.0
classifier.learn(a, 0, isSparse=dimensionality, partitionId=0)
classifier.learn(b, 1, isSparse=dimensionality, partitionId=1)
cat, _, _, _ = classifier.infer(denseA, partitionId=1)
self.assertEquals(cat, 0)
cat, _, _, _ = classifier.infer(denseA, partitionId=0)
self.assertEquals(cat, 1)
cat, _, _, _ = classifier.infer(denseB, partitionId=0)
self.assertEquals(cat, 1)
cat, _, _, _ = classifier.infer(denseB, partitionId=1)
self.assertEquals(cat, 0)
# Ensure it works even if you invoke learning again. To make it a bit more
# complex this time we insert A again but now with Id=2
classifier.learn(a, 0, isSparse=dimensionality, partitionId=2)
    # Even though the first A should be ignored, the second instance of A should
# not be ignored.
cat, _, _, _ = classifier.infer(denseA, partitionId=0)
self.assertEquals(cat, 0)
def testGetPartitionId(self):
"""
Test a sequence of calls to KNN to ensure we can retrieve partition Id:
- We first learn on some patterns (including one pattern with no
partitionId in the middle) and test that we can retrieve Ids.
- We then invoke inference and then check partitionId again.
- We check incorrect indices to ensure we get an exception.
- We check the case where the partitionId to be ignored is not in
the list.
- We learn on one more pattern and check partitionIds again
- We remove rows and ensure partitionIds still work
"""
params = {"distanceMethod": "rawOverlap"}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
c = np.array([1, 2, 3, 14, 16, 19, 22, 24, 33], dtype=np.int32)
d = np.array([2, 4, 8, 12, 14, 19, 22, 24, 33], dtype=np.int32)
e = np.array([1, 3, 7, 12, 14, 19, 22, 24, 33], dtype=np.int32)
denseA = np.zeros(dimensionality)
denseA[a] = 1.0
classifier.learn(a, 0, isSparse=dimensionality, partitionId=433)
classifier.learn(b, 1, isSparse=dimensionality, partitionId=213)
classifier.learn(c, 1, isSparse=dimensionality, partitionId=None)
classifier.learn(d, 1, isSparse=dimensionality, partitionId=433)
self.assertEquals(classifier.getPartitionId(0), 433)
self.assertEquals(classifier.getPartitionId(1), 213)
self.assertEquals(classifier.getPartitionId(2), None)
self.assertEquals(classifier.getPartitionId(3), 433)
cat, _, _, _ = classifier.infer(denseA, partitionId=213)
self.assertEquals(cat, 0)
    # Test with partitionId not in classifier
cat, _, _, _ = classifier.infer(denseA, partitionId=666)
self.assertEquals(cat, 0)
# Partition Ids should be maintained after inference
self.assertEquals(classifier.getPartitionId(0), 433)
self.assertEquals(classifier.getPartitionId(1), 213)
self.assertEquals(classifier.getPartitionId(2), None)
self.assertEquals(classifier.getPartitionId(3), 433)
# Should return exceptions if we go out of bounds
with self.assertRaises(RuntimeError):
classifier.getPartitionId(4)
with self.assertRaises(RuntimeError):
classifier.getPartitionId(-1)
# Learn again
classifier.learn(e, 4, isSparse=dimensionality, partitionId=413)
self.assertEquals(classifier.getPartitionId(4), 413)
# Test getPatternIndicesWithPartitionId
self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(433),
[0, 3])
self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(666),
[])
self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(413),
[4])
self.assertEquals(classifier.getNumPartitionIds(), 3)
# Remove two rows - all indices shift down
self.assertEquals(classifier._removeRows([0,2]), 2)
self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(433),
[1])
self.assertItemsEqual(classifier.getPatternIndicesWithPartitionId(413),
[2])
# Remove another row and check number of partitions have decreased
classifier._removeRows([0])
self.assertEquals(classifier.getNumPartitionIds(), 2)
def testGetPartitionIdWithNoIdsAtFirst(self):
"""
Tests that we can correctly retrieve partition Id even if the first few
vectors do not have Ids
"""
params = {"distanceMethod": "rawOverlap"}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
c = np.array([1, 2, 3, 14, 16, 19, 22, 24, 33], dtype=np.int32)
d = np.array([2, 4, 8, 12, 14, 19, 22, 24, 33], dtype=np.int32)
denseA = np.zeros(dimensionality)
denseA[a] = 1.0
denseD = np.zeros(dimensionality)
denseD[d] = 1.0
classifier.learn(a, 0, isSparse=dimensionality, partitionId=None)
classifier.learn(b, 1, isSparse=dimensionality, partitionId=None)
classifier.learn(c, 2, isSparse=dimensionality, partitionId=211)
classifier.learn(d, 1, isSparse=dimensionality, partitionId=405)
cat, _, _, _ = classifier.infer(denseA, partitionId=405)
self.assertEquals(cat, 0)
cat, _, _, _ = classifier.infer(denseD, partitionId=405)
self.assertEquals(cat, 2)
cat, _, _, _ = classifier.infer(denseD)
self.assertEquals(cat, 1)
def testOverlapDistanceMethodBadSparsity(self):
"""Sparsity (input dimensionality) less than input array"""
params = {"distanceMethod": "rawOverlap"}
classifier = KNNClassifier(**params)
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
# Learn with incorrect dimensionality, less than some bits (23, 29)
with self.assertRaises(RuntimeError):
classifier.learn(a, 0, isSparse=20)
def testOverlapDistanceMethodInconsistentDimensionality(self):
"""Inconsistent sparsity (input dimensionality)"""
params = {"distanceMethod": "rawOverlap"}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
# Learn with incorrect dimensionality, greater than largest ON bit, but
# inconsistent when inferring
numPatterns = classifier.learn(a, 0, isSparse=31)
self.assertEquals(numPatterns, 1)
denseA = np.zeros(dimensionality)
denseA[a] = 1.0
cat, _, _, _ = classifier.infer(denseA)
self.assertEquals(cat, 0)
def testOverlapDistanceMethodStandardUnsorted(self):
"""If sparse representation indices are unsorted expect error."""
params = {"distanceMethod": "rawOverlap"}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([29, 3, 7, 11, 13, 17, 19, 23, 1], dtype=np.int32)
b = np.array([2, 4, 20, 12, 14, 18, 8, 28, 30], dtype=np.int32)
with self.assertRaises(RuntimeError):
classifier.learn(a, 0, isSparse=dimensionality)
with self.assertRaises(RuntimeError):
classifier.learn(b, 1, isSparse=dimensionality)
def testOverlapDistanceMethodEmptyArray(self):
"""Tests case where pattern has no ON bits"""
params = {"distanceMethod": "rawOverlap"}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([], dtype=np.int32)
numPatterns = classifier.learn(a, 0, isSparse=dimensionality)
self.assertEquals(numPatterns, 1)
denseA = np.zeros(dimensionality)
denseA[a] = 1.0
cat, _, _, _ = classifier.infer(denseA)
self.assertEquals(cat, 0)
@unittest.skip("Finish when infer has options for sparse and dense "
"https://github.com/numenta/nupic/issues/2198")
def testOverlapDistanceMethod_ClassifySparse(self):
params = {"distanceMethod": "rawOverlap"}
classifier = KNNClassifier(**params)
dimensionality = 40
a = np.array([1, 3, 7, 11, 13, 17, 19, 23, 29], dtype=np.int32)
b = np.array([2, 4, 8, 12, 14, 18, 20, 28, 30], dtype=np.int32)
classifier.learn(a, 0, isSparse=dimensionality)
classifier.learn(b, 1, isSparse=dimensionality)
# TODO Test case where infer is passed a sparse representation after
# infer() has been extended to handle sparse and dense
cat, _, _, _ = classifier.infer(a)
self.assertEquals(cat, 0)
cat, _, _, _ = classifier.infer(b)
self.assertEquals(cat, 1)
if __name__ == "__main__":
unittest.main()
| 1 | 20,396 | `assertItemsEqual` compares the sequences after sorting them, is that intended? | numenta-nupic | py |
@@ -170,8 +170,8 @@ module Selenium
def macosx_path
path = "/Applications/Firefox.app/Contents/MacOS/firefox-bin"
- path = "~/Applications/Firefox.app/Contents/MacOS/firefox-bin" unless File.exist?(path)
- path = Platform.find_binary("firefox-bin") unless File.exist?(path)
+ path = "~/Applications/Firefox.app/Contents/MacOS/firefox-bin" unless File.exist?(File.expand_path(path))
+ path = Platform.find_binary("firefox-bin") unless File.exist?(File.expand_path?(path))
path
end | 1 | # encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
module Firefox
# @api private
class Binary
NO_FOCUS_LIBRARY_NAME = "x_ignore_nofocus.so"
NO_FOCUS_LIBRARIES = [
["#{WebDriver.root}/selenium/webdriver/firefox/native/linux/amd64/#{NO_FOCUS_LIBRARY_NAME}", "amd64/#{NO_FOCUS_LIBRARY_NAME}"],
["#{WebDriver.root}/selenium/webdriver/firefox/native/linux/x86/#{NO_FOCUS_LIBRARY_NAME}", "x86/#{NO_FOCUS_LIBRARY_NAME}"],
]
WAIT_TIMEOUT = 90
QUIT_TIMEOUT = 5
def start_with(profile, profile_path, *args)
if Platform.cygwin?
profile_path = Platform.cygwin_path(profile_path, :windows => true)
elsif Platform.windows?
profile_path = profile_path.gsub("/", "\\")
end
ENV['XRE_CONSOLE_LOG'] = profile.log_file if profile.log_file
ENV['XRE_PROFILE_PATH'] = profile_path
ENV['MOZ_NO_REMOTE'] = '1' # able to launch multiple instances
ENV['MOZ_CRASHREPORTER_DISABLE'] = '1' # disable breakpad
ENV['NO_EM_RESTART'] = '1' # prevent the binary from detaching from the console
if Platform.linux? && (profile.native_events? || profile.load_no_focus_lib?)
modify_link_library_path profile_path
end
execute(*args)
cope_with_mac_strangeness(args) if Platform.mac?
end
def quit
return unless @process
@process.poll_for_exit QUIT_TIMEOUT
rescue ChildProcess::TimeoutError
# ok, force quit
@process.stop QUIT_TIMEOUT
end
def wait
return unless @process
begin
@process.poll_for_exit(WAIT_TIMEOUT)
rescue ChildProcess::TimeoutError => e
@process.stop
raise e
end
end
private
def execute(*extra_args)
args = [self.class.path, "-no-remote"] + extra_args
@process = ChildProcess.build(*args)
@process.io.inherit! if $DEBUG
@process.start
end
def cope_with_mac_strangeness(args)
sleep 0.3
if @process.crashed?
# ok, trying a restart
sleep 7
execute(*args)
end
# ensure we're ok
sleep 0.3
if @process.crashed?
raise Error::WebDriverError, "unable to start Firefox cleanly, args: #{args.inspect}"
end
end
def modify_link_library_path(profile_path)
paths = []
NO_FOCUS_LIBRARIES.each do |from, to|
dest = File.join(profile_path, to)
FileUtils.mkdir_p File.dirname(dest)
FileUtils.cp from, dest
paths << File.expand_path(File.dirname(dest))
end
paths += ENV['LD_LIBRARY_PATH'].to_s.split(File::PATH_SEPARATOR)
ENV['LD_LIBRARY_PATH'] = paths.uniq.join(File::PATH_SEPARATOR)
ENV['LD_PRELOAD'] = NO_FOCUS_LIBRARY_NAME
end
class << self
#
# @api private
#
# @see Firefox.path=
#
def path=(path)
Platform.assert_executable(path)
@path = path
end
def path
@path ||= case Platform.os
when :macosx
macosx_path
when :windows
windows_path
when :linux, :unix
Platform.find_binary("firefox3", "firefox2", "firefox") || "/usr/bin/firefox"
else
raise Error::WebDriverError, "unknown platform: #{Platform.os}"
end
@path = Platform.cygwin_path(@path) if Platform.cygwin?
unless File.file?(@path.to_s)
raise Error::WebDriverError, "Could not find Firefox binary (os=#{Platform.os}). Make sure Firefox is installed or set the path manually with #{self}.path="
end
@path
end
def version
@version = case Platform.os
when :macosx
`#{path} -v`.strip[/[^\s]*$/][/^\d+/].to_i
when :windows
`\"#{path}\" -v | more`.strip[/[^\s]*$/][/^\d+/].to_i
when :linux
`#{path} -v`.strip[/[^\s]*$/][/^\d+/].to_i
else
0
end
end
private
def windows_path
windows_registry_path || Platform.find_in_program_files("\\Mozilla Firefox\\firefox.exe") || Platform.find_binary("firefox")
end
def macosx_path
path = "/Applications/Firefox.app/Contents/MacOS/firefox-bin"
path = "~/Applications/Firefox.app/Contents/MacOS/firefox-bin" unless File.exist?(path)
path = Platform.find_binary("firefox-bin") unless File.exist?(path)
path
end
def windows_registry_path
require 'win32/registry'
lm = Win32::Registry::HKEY_LOCAL_MACHINE
lm.open("SOFTWARE\\Mozilla\\Mozilla Firefox") do |reg|
main = lm.open("SOFTWARE\\Mozilla\\Mozilla Firefox\\#{reg.keys[0]}\\Main")
if entry = main.find { |key, type, data| key =~ /pathtoexe/i }
return entry.last
end
end
rescue LoadError
# older JRuby or IronRuby does not have win32/registry
rescue Win32::Registry::Error
end
end # class << self
end # Binary
end # Firefox
end # WebDriver
end # Selenium
| 1 | 12,881 | I don't think this one needs to be changed since the only previous path is already an absolute one. | SeleniumHQ-selenium | java |
@@ -13,8 +13,14 @@ namespace AutoRest.Swagger.Model.Utilities
{
public static class ValidationUtilities
{
- private static readonly string XmsPageable = "x-ms-pageable";
+ private const string XmsPageable = "x-ms-pageable";
private static readonly Regex TrackedResRegEx = new Regex(@".+/Resource$", RegexOptions.IgnoreCase);
+ private const string AzureHostSignature = "management.azure.com";
+
+ // if x-ms-parameterized-host extension is used or if the host is anything of the form management.azure, assume this to be an ARMServiceDefinition
+ // TODO: refine this definition for azure data plain also check x-ms-parameterized-host in detail to ensure this is ARM
+ public static bool IsARMServiceDefinition(ServiceDefinition serviceDefinition) =>
+ serviceDefinition.Extensions.ContainsKey("x-ms-parameterized-host") || serviceDefinition.Host?.EqualsIgnoreCase(AzureHostSignature) == true;
public static bool IsTrackedResource(Schema schema, Dictionary<string, Schema> definitions)
{ | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using AutoRest.Core.Utilities;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading.Tasks;
namespace AutoRest.Swagger.Model.Utilities
{
public static class ValidationUtilities
{
private static readonly string XmsPageable = "x-ms-pageable";
private static readonly Regex TrackedResRegEx = new Regex(@".+/Resource$", RegexOptions.IgnoreCase);
public static bool IsTrackedResource(Schema schema, Dictionary<string, Schema> definitions)
{
if (schema.AllOf != null)
{
foreach (Schema item in schema.AllOf)
{
if (TrackedResRegEx.IsMatch(item.Reference))
{
return true;
}
else
{
return IsTrackedResource(Schema.FindReferencedSchema(item.Reference, definitions), definitions);
}
}
}
return false;
}
// determine if the operation is xms pageable or returns an object of array type
public static bool IsXmsPageableOrArrayResponseOperation(Operation op, ServiceDefinition entity)
{
// if xmspageable type, return true
if (op.Extensions.GetValue<object>(XmsPageable) != null) return true;
// if a success response is not defined, we have nothing to check, return false
            if (op.Responses?.ContainsKey("200") != true) return false;
// if we have a non-null response schema, and the schema is of type array, return true
if (op.Responses["200"]?.Schema?.Reference?.Equals(string.Empty) == false)
{
var modelLink = op.Responses["200"].Schema.Reference;
                // if the object has two or more properties, we can assume it's a composite object
// that does not represent a collection of some type
if ((entity.Definitions[modelLink.StripDefinitionPath()].Properties?.Values?.Count ?? 2) >= 2)
{
return false;
}
// if the object is an allof on some other object, let's consider it to be a composite object
if (entity.Definitions[modelLink.StripDefinitionPath()].AllOf != null)
{
return false;
}
if (entity.Definitions[modelLink.StripDefinitionPath()].Properties?.Values?.Any(type => type.Type == DataType.Array)??false)
{
return true;
}
}
return false;
}
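        // returns all operations across the service definition's paths whose HTTP method matches the given id (case-insensitive)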
public static IEnumerable<Operation> GetOperationsByRequestMethod(string id, ServiceDefinition serviceDefinition)
{
return serviceDefinition.Paths.Values.Select(pathObj => pathObj.Where(pair=> pair.Key.ToLower().Equals(id.ToLower()))).SelectMany(pathPair => pathPair.Select(opPair => opPair.Value));
}
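        // collects the distinct model definitions referenced by 200-response schemas across all paths and custom paths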
public static IEnumerable<string> GetResponseModelDefinitions(ServiceDefinition serviceDefinition)
{
// for every path, check its response object and get its model definition
var respDefinitions = serviceDefinition.Paths.SelectMany(
pathPair => pathPair.Value.Select(
pathObj => pathObj.Value.Responses?.ContainsKey("200") == true ? pathObj.Value.Responses["200"]?.Schema?.Reference?.StripDefinitionPath() : string.Empty));
respDefinitions = respDefinitions.Concat(
serviceDefinition.CustomPaths.SelectMany(
pathPair => pathPair.Value.Select(
pathObj => pathObj.Value.Responses?.ContainsKey("200") == true ? pathObj.Value.Responses["200"]?.Schema?.Reference?.StripDefinitionPath() : string.Empty)));
respDefinitions = respDefinitions.Where(def => !string.IsNullOrWhiteSpace(def)).Distinct();
return respDefinitions;
}
/// <summary>
/// Returns whether a string follows camel case style.
/// </summary>
/// <param name="name">String to check for style</param>
/// <returns>true if "name" follows camel case style, false otherwise.</returns>
public static bool isNameCamelCase(string name)
{
Regex propNameRegEx = new Regex(@"^[a-z0-9]+([A-Z][a-z0-9]+)+|^[a-z0-9]+$|^[a-z0-9]+[A-Z]$");
return (propNameRegEx.IsMatch(name));
}
/// <summary>
/// Returns a suggestion of camel case styled string based on the string passed as parameter.
/// </summary>
/// <param name="name">String to convert to camel case style</param>
/// <returns>A string that conforms with camel case style based on the string passed as parameter.</returns>
public static string ToCamelCase(string name)
{
StringBuilder sb = new StringBuilder(name);
if (sb.Length > 0)
{
sb[0] = sb[0].ToString().ToLower()[0];
}
bool firstUpper = true;
for (int i = 1; i < name.Length; i++)
{
if (char.IsUpper(sb[i]) && firstUpper)
{
firstUpper = false;
}
else
{
firstUpper = true;
if (char.IsUpper(sb[i]))
{
sb[i] = sb[i].ToString().ToLower()[0];
}
}
}
return sb.ToString();
}
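        // returns the definitions marked with the x-ms-azure-resource extension set to true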
public static IEnumerable<KeyValuePair<string, Schema>> GetArmResources(ServiceDefinition serviceDefinition)
{
return serviceDefinition.Definitions.Where(defPair=> defPair.Value.Extensions?.ContainsKey("x-ms-azure-resource")==true && (bool?)defPair.Value.Extensions["x-ms-azure-resource"] == true);
}
}
}
| 1 | 24,031 | what's the goal of this? | Azure-autorest | java |
@@ -49,6 +49,8 @@ type Node struct {
// CborStore is a temporary interface for interacting with IPLD objects.
CborStore *hamt.CborIpldStore
+
+ pool *core.MessagePool
}
// Config is a helper to aid in the construction of a filecoin node. | 1 | package node
import (
"context"
"fmt"
"gx/ipfs/QmNh1kGFFdsPu79KNSaL4NUKUPb4Eiz4KHdMtFY6664RDp/go-libp2p"
"gx/ipfs/QmNmJZL7FQySMtE2BQuLMuZg2EB2CLEunJJUSVSc9YnnbV/go-libp2p-host"
ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log"
"gx/ipfs/QmSFihvoND3eDaAYRCeLgLPt62yCPgMZs1NSZmKFEtJQQw/go-libp2p-floodsub"
bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
"gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors"
nonerouting "gx/ipfs/QmZRcGYvxdauCd7hHnMYLYqcZRaDjv24c7eUNyJojAcdBb/go-ipfs-routing/none"
"gx/ipfs/QmdBXcN47jVwKLwSyN9e9xYVZ7WcAWgQ5N4cmNw7nzWq2q/go-hamt-ipld"
bserv "github.com/ipfs/go-ipfs/blockservice"
exchange "github.com/ipfs/go-ipfs/exchange"
bitswap "github.com/ipfs/go-ipfs/exchange/bitswap"
bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network"
"github.com/filecoin-project/go-filecoin/core"
"github.com/filecoin-project/go-filecoin/wallet"
)
var log = logging.Logger("node")
// Node represents a full Filecoin node.
type Node struct {
Host host.Host
ChainMgr *core.ChainManager
MsgPool *core.MessagePool
Wallet *wallet.Wallet
// Network Fields
PubSub *floodsub.PubSub
BlockSub *floodsub.Subscription
MessageSub *floodsub.Subscription
// Data Storage Fields
// Datastore is the underlying storage backend.
Datastore ds.Batching
// Exchange is the interface for fetching data from other nodes.
Exchange exchange.Interface
// CborStore is a temporary interface for interacting with IPLD objects.
CborStore *hamt.CborIpldStore
}
// Config is a helper to aid in the construction of a filecoin node.
type Config struct {
Libp2pOpts []libp2p.Option
Datastore ds.Batching
}
// ConfigOpt is a configuration option for a filecoin node.
type ConfigOpt func(*Config) error
// Libp2pOptions returns a node config option that sets up the libp2p node
func Libp2pOptions(opts ...libp2p.Option) ConfigOpt {
return func(nc *Config) error {
nc.Libp2pOpts = opts
return nil
}
}
// New creates a new node.
func New(ctx context.Context, opts ...ConfigOpt) (*Node, error) {
n := &Config{}
for _, o := range opts {
if err := o(n); err != nil {
return nil, err
}
}
return n.Build(ctx)
}
// Build instantiates a filecoin Node from the settings specified in the
// config.
func (nc *Config) Build(ctx context.Context) (*Node, error) {
host, err := libp2p.New(ctx, nc.Libp2pOpts...)
if err != nil {
return nil, err
}
if nc.Datastore == nil {
nc.Datastore = ds.NewMapDatastore()
}
bs := bstore.NewBlockstore(nc.Datastore)
// no content routing yet...
routing, _ := nonerouting.ConstructNilRouting(nil, nil, nil)
// set up bitswap
nwork := bsnet.NewFromIpfsHost(host, routing)
bswap := bitswap.New(ctx, host.ID(), nwork, bs, true)
bserv := bserv.New(bs, bswap)
cst := &hamt.CborIpldStore{bserv}
chainMgr := core.NewChainManager(cst)
// TODO: load state from disk
if err := chainMgr.Genesis(ctx, core.InitGenesis); err != nil {
return nil, err
}
// Set up libp2p pubsub
fsub, err := floodsub.NewFloodSub(ctx, host)
if err != nil {
return nil, errors.Wrap(err, "failed to set up floodsub")
}
return &Node{
CborStore: cst,
Host: host,
ChainMgr: chainMgr,
PubSub: fsub,
Datastore: nc.Datastore,
Exchange: bswap,
Wallet: wallet.New(),
MsgPool: core.NewMessagePool(),
}, nil
}
// Start boots up the node.
func (node *Node) Start() error {
// subscribe to block notifications
blkSub, err := node.PubSub.Subscribe(BlocksTopic)
if err != nil {
return errors.Wrap(err, "failed to subscribe to blocks topic")
}
node.BlockSub = blkSub
// subscribe to message notifications
msgSub, err := node.PubSub.Subscribe(MessageTopic)
if err != nil {
return errors.Wrap(err, "failed to subscribe to message topic")
}
node.MessageSub = msgSub
go node.handleBlockSubscription()
return nil
}
// Stop initiates the shutdown of the node.
func (node *Node) Stop() {
if node.BlockSub != nil {
node.BlockSub.Cancel()
node.BlockSub = nil
}
if err := node.Host.Close(); err != nil {
fmt.Printf("error closing host: %s\n", err)
}
fmt.Println("stopping filecoin :(")
}
| 1 | 9,966 | why is this here? We already have a message pool in the node. | filecoin-project-venus | go |
@@ -1107,7 +1107,7 @@ func (s *activityReplicatorSuite) TestSyncActivity_ActivityRunning() {
var versionHistories *persistence.VersionHistories
s.mockMutableState.EXPECT().GetVersionHistories().Return(versionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetReplicationState().Return(&persistence.ReplicationState{}).AnyTimes()
- s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return(enumsgenpb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_UNSPECIFIED).AnyTimes()
+ s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return(enumsgenpb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace}, | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"context"
"errors"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/pborman/uuid"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/uber-go/tally"
commonpb "go.temporal.io/temporal-proto/common/v1"
enumspb "go.temporal.io/temporal-proto/enums/v1"
"go.temporal.io/temporal-proto/serviceerror"
enumsgenpb "github.com/temporalio/temporal/.gen/proto/enums/v1"
"github.com/temporalio/temporal/.gen/proto/historyservice/v1"
"github.com/temporalio/temporal/.gen/proto/persistenceblobs/v1"
"github.com/temporalio/temporal/common"
"github.com/temporalio/temporal/common/cache"
"github.com/temporalio/temporal/common/clock"
"github.com/temporalio/temporal/common/cluster"
"github.com/temporalio/temporal/common/definition"
"github.com/temporalio/temporal/common/log"
"github.com/temporalio/temporal/common/metrics"
"github.com/temporalio/temporal/common/mocks"
"github.com/temporalio/temporal/common/payloads"
"github.com/temporalio/temporal/common/persistence"
)
type (
activityReplicatorSuite struct {
suite.Suite
*require.Assertions
controller *gomock.Controller
mockShard *shardContextTest
mockTxProcessor *MocktransferQueueProcessor
mockReplicationProcessor *MockReplicatorQueueProcessor
mockTimerProcessor *MocktimerQueueProcessor
mockNamespaceCache *cache.MockNamespaceCache
mockClusterMetadata *cluster.MockMetadata
mockMutableState *MockmutableState
mockExecutionMgr *mocks.ExecutionManager
logger log.Logger
historyCache *historyCache
nDCActivityReplicator nDCActivityReplicator
}
)
func TestActivityReplicatorSuite(t *testing.T) {
s := new(activityReplicatorSuite)
suite.Run(t, s)
}
func (s *activityReplicatorSuite) SetupSuite() {
}
func (s *activityReplicatorSuite) TearDownSuite() {
}
func (s *activityReplicatorSuite) SetupTest() {
s.Assertions = require.New(s.T())
s.controller = gomock.NewController(s.T())
s.mockMutableState = NewMockmutableState(s.controller)
s.mockTxProcessor = NewMocktransferQueueProcessor(s.controller)
s.mockReplicationProcessor = NewMockReplicatorQueueProcessor(s.controller)
s.mockTimerProcessor = NewMocktimerQueueProcessor(s.controller)
s.mockTxProcessor.EXPECT().NotifyNewTask(gomock.Any(), gomock.Any()).AnyTimes()
s.mockReplicationProcessor.EXPECT().notifyNewTask().AnyTimes()
s.mockTimerProcessor.EXPECT().NotifyNewTimers(gomock.Any(), gomock.Any()).AnyTimes()
s.mockShard = newTestShardContext(
s.controller,
&persistence.ShardInfoWithFailover{
ShardInfo: &persistenceblobs.ShardInfo{
ShardId: 0,
RangeId: 1,
TransferAckLevel: 0,
}},
NewDynamicConfigForTest(),
)
s.mockNamespaceCache = s.mockShard.resource.NamespaceCache
s.mockExecutionMgr = s.mockShard.resource.ExecutionMgr
s.mockClusterMetadata = s.mockShard.resource.ClusterMetadata
s.mockClusterMetadata.EXPECT().IsGlobalNamespaceEnabled().Return(true).AnyTimes()
s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes()
s.mockClusterMetadata.EXPECT().GetAllClusterInfo().Return(cluster.TestAllClusterInfo).AnyTimes()
s.logger = s.mockShard.GetLogger()
s.historyCache = newHistoryCache(s.mockShard)
engine := &historyEngineImpl{
currentClusterName: s.mockClusterMetadata.GetCurrentClusterName(),
shard: s.mockShard,
clusterMetadata: s.mockClusterMetadata,
executionManager: s.mockExecutionMgr,
historyCache: s.historyCache,
logger: s.logger,
tokenSerializer: common.NewProtoTaskTokenSerializer(),
metricsClient: s.mockShard.GetMetricsClient(),
timeSource: s.mockShard.GetTimeSource(),
historyEventNotifier: newHistoryEventNotifier(clock.NewRealTimeSource(), metrics.NewClient(tally.NoopScope, metrics.History), func(string) int { return 0 }),
txProcessor: s.mockTxProcessor,
replicatorProcessor: s.mockReplicationProcessor,
timerProcessor: s.mockTimerProcessor,
}
s.mockShard.SetEngine(engine)
s.nDCActivityReplicator = newNDCActivityReplicator(
s.mockShard,
s.historyCache,
s.logger,
)
}
func (s *activityReplicatorSuite) TearDownTest() {
s.controller.Finish()
s.mockShard.Finish(s.T())
}
func (s *activityReplicatorSuite) TestSyncActivity_WorkflowNotFound() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
version := int64(100)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
}
s.mockExecutionMgr.On("GetWorkflowExecution", &persistence.GetWorkflowExecutionRequest{
NamespaceID: namespaceID,
Execution: commonpb.WorkflowExecution{
WorkflowId: workflowID,
RunId: runID,
},
}).Return(nil, serviceerror.NewNotFound(""))
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
version,
nil,
), nil,
).AnyTimes()
err := s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Nil(err)
}
func (s *activityReplicatorSuite) TestSyncActivity_WorkflowClosed() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
version := int64(100)
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().AnyTimes()
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
}
s.mockMutableState.EXPECT().IsWorkflowExecutionRunning().Return(false).AnyTimes()
var versionHistories *persistence.VersionHistories
s.mockMutableState.EXPECT().GetVersionHistories().Return(versionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetReplicationState().Return(&persistence.ReplicationState{}).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
version,
nil,
), nil,
).AnyTimes()
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Nil(err)
}
func (s *activityReplicatorSuite) TestSyncActivity_IncomingScheduleIDLarger_IncomingVersionSmaller() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
scheduleID := int64(144)
version := int64(100)
lastWriteVersion := version + 100
nextEventID := scheduleID - 10
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().AnyTimes()
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
}
s.mockMutableState.EXPECT().IsWorkflowExecutionRunning().Return(true).AnyTimes()
s.mockMutableState.EXPECT().GetNextEventID().Return(nextEventID).AnyTimes()
s.mockMutableState.EXPECT().GetLastWriteVersion().Return(lastWriteVersion, nil)
var versionHistories *persistence.VersionHistories
s.mockMutableState.EXPECT().GetVersionHistories().Return(versionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetReplicationState().Return(&persistence.ReplicationState{}).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
lastWriteVersion,
nil,
), nil,
).AnyTimes()
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Nil(err)
}
func (s *activityReplicatorSuite) TestSyncActivity_IncomingScheduleIDLarger_IncomingVersionLarger() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
scheduleID := int64(144)
version := int64(100)
lastWriteVersion := version - 100
nextEventID := scheduleID - 10
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().AnyTimes()
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
}
s.mockMutableState.EXPECT().IsWorkflowExecutionRunning().Return(true).AnyTimes()
s.mockMutableState.EXPECT().GetNextEventID().Return(nextEventID).AnyTimes()
s.mockMutableState.EXPECT().GetLastWriteVersion().Return(lastWriteVersion, nil).AnyTimes()
var versionHistories *persistence.VersionHistories
s.mockMutableState.EXPECT().GetVersionHistories().Return(versionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetReplicationState().Return(&persistence.ReplicationState{}).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
lastWriteVersion,
nil,
), nil,
).AnyTimes()
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Equal(newRetryTaskErrorWithHint(ErrRetrySyncActivityMsg, namespaceID, workflowID, runID, nextEventID), err)
}
func (s *activityReplicatorSuite) TestSyncActivity_VersionHistories_IncomingVersionSmaller_DiscardTask() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
scheduleID := int64(144)
version := int64(99)
lastWriteVersion := version - 100
incomingVersionHistory := persistence.VersionHistory{
BranchToken: []byte{},
Items: []*persistence.VersionHistoryItem{
{
EventID: scheduleID - 1,
Version: version - 1,
},
{
EventID: scheduleID,
Version: version,
},
},
}
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().AnyTimes()
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
VersionHistory: incomingVersionHistory.ToProto(),
}
localVersionHistories := &persistence.VersionHistories{
CurrentVersionHistoryIndex: 0,
Histories: []*persistence.VersionHistory{
{
BranchToken: []byte{},
Items: []*persistence.VersionHistoryItem{
{
EventID: scheduleID - 1,
Version: version - 1,
},
{
EventID: scheduleID + 1,
Version: version + 1,
},
},
},
},
}
s.mockMutableState.EXPECT().GetVersionHistories().Return(localVersionHistories).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
lastWriteVersion,
nil,
), nil,
).AnyTimes()
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Nil(err)
}
func (s *activityReplicatorSuite) TestSyncActivity_DifferentVersionHistories_IncomingVersionLarger_ReturnRetryError() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
scheduleID := int64(144)
version := int64(100)
lastWriteVersion := version - 100
incomingVersionHistory := persistence.VersionHistory{
BranchToken: []byte{},
Items: []*persistence.VersionHistoryItem{
{
EventID: 50,
Version: 2,
},
{
EventID: scheduleID,
Version: version,
},
},
}
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().AnyTimes()
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
VersionHistory: incomingVersionHistory.ToProto(),
}
localVersionHistories := &persistence.VersionHistories{
CurrentVersionHistoryIndex: 0,
Histories: []*persistence.VersionHistory{
{
BranchToken: []byte{},
Items: []*persistence.VersionHistoryItem{
{
EventID: 100,
Version: 2,
},
},
},
},
}
s.mockMutableState.EXPECT().GetVersionHistories().Return(localVersionHistories).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
lastWriteVersion,
nil,
), nil,
).AnyTimes()
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Equal(newNDCRetryTaskErrorWithHint(
resendHigherVersionMessage,
namespaceID,
workflowID,
runID,
50,
2,
common.EmptyEventID,
common.EmptyVersion,
),
err,
)
}
func (s *activityReplicatorSuite) TestSyncActivity_VersionHistories_IncomingScheduleIDLarger_ReturnRetryError() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
scheduleID := int64(99)
version := int64(100)
lastWriteVersion := version - 100
incomingVersionHistory := persistence.VersionHistory{
BranchToken: []byte{},
Items: []*persistence.VersionHistoryItem{
{
EventID: 50,
Version: 2,
},
{
EventID: scheduleID,
Version: version,
},
{
EventID: scheduleID + 100,
Version: version + 100,
},
},
}
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().AnyTimes()
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
VersionHistory: incomingVersionHistory.ToProto(),
}
localVersionHistories := &persistence.VersionHistories{
CurrentVersionHistoryIndex: 0,
Histories: []*persistence.VersionHistory{
{
BranchToken: []byte{},
Items: []*persistence.VersionHistoryItem{
{
EventID: scheduleID - 10,
Version: version,
},
},
},
},
}
s.mockMutableState.EXPECT().GetVersionHistories().Return(localVersionHistories).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
lastWriteVersion,
nil,
), nil,
).AnyTimes()
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Equal(newNDCRetryTaskErrorWithHint(
resendMissingEventMessage,
namespaceID,
workflowID,
runID,
scheduleID-10,
version,
common.EmptyEventID,
common.EmptyVersion,
),
err,
)
}
func (s *activityReplicatorSuite) TestSyncActivity_VersionHistories_SameScheduleID() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
scheduleID := int64(99)
version := int64(100)
lastWriteVersion := version - 100
incomingVersionHistory := persistence.VersionHistory{
BranchToken: []byte{},
Items: []*persistence.VersionHistoryItem{
{
EventID: 50,
Version: 2,
},
{
EventID: scheduleID,
Version: version,
},
},
}
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().AnyTimes()
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
VersionHistory: incomingVersionHistory.ToProto(),
}
localVersionHistories := &persistence.VersionHistories{
CurrentVersionHistoryIndex: 0,
Histories: []*persistence.VersionHistory{
{
BranchToken: []byte{},
Items: []*persistence.VersionHistoryItem{
{
EventID: scheduleID,
Version: version,
},
},
},
},
}
s.mockMutableState.EXPECT().GetVersionHistories().Return(localVersionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetActivityInfo(scheduleID).Return(nil, false).AnyTimes()
s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return(enumsgenpb.WORKFLOW_EXECUTION_STATE_CREATED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
lastWriteVersion,
nil,
), nil,
).AnyTimes()
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Nil(err)
}
func (s *activityReplicatorSuite) TestSyncActivity_VersionHistories_LocalVersionHistoryWin() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
scheduleID := int64(99)
version := int64(100)
lastWriteVersion := version - 100
incomingVersionHistory := persistence.VersionHistory{
BranchToken: []byte{},
Items: []*persistence.VersionHistoryItem{
{
EventID: scheduleID,
Version: version,
},
},
}
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().AnyTimes()
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
VersionHistory: incomingVersionHistory.ToProto(),
}
localVersionHistories := &persistence.VersionHistories{
CurrentVersionHistoryIndex: 0,
Histories: []*persistence.VersionHistory{
{
BranchToken: []byte{},
Items: []*persistence.VersionHistoryItem{
{
EventID: scheduleID,
Version: version,
},
{
EventID: scheduleID + 1,
Version: version + 1,
},
},
},
},
}
s.mockMutableState.EXPECT().GetVersionHistories().Return(localVersionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetActivityInfo(scheduleID).Return(nil, false).AnyTimes()
s.mockMutableState.EXPECT().GetWorkflowStateStatus().
Return(enumsgenpb.WORKFLOW_EXECUTION_STATE_CREATED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
lastWriteVersion,
nil,
), nil,
).AnyTimes()
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Nil(err)
}
func (s *activityReplicatorSuite) TestSyncActivity_ActivityCompleted() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
scheduleID := int64(144)
version := int64(100)
lastWriteVersion := version
nextEventID := scheduleID + 10
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().AnyTimes()
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
}
s.mockMutableState.EXPECT().IsWorkflowExecutionRunning().Return(true).AnyTimes()
s.mockMutableState.EXPECT().GetNextEventID().Return(nextEventID).AnyTimes()
var versionHistories *persistence.VersionHistories
s.mockMutableState.EXPECT().GetVersionHistories().Return(versionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetReplicationState().Return(&persistence.ReplicationState{}).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
lastWriteVersion,
nil,
), nil,
).AnyTimes()
s.mockMutableState.EXPECT().GetActivityInfo(scheduleID).Return(nil, false).AnyTimes()
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Nil(err)
}
func (s *activityReplicatorSuite) TestSyncActivity_ActivityRunning_LocalActivityVersionLarger() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
scheduleID := int64(144)
version := int64(100)
lastWriteVersion := version + 10
nextEventID := scheduleID + 10
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().AnyTimes()
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
}
s.mockMutableState.EXPECT().IsWorkflowExecutionRunning().Return(true).AnyTimes()
s.mockMutableState.EXPECT().GetNextEventID().Return(nextEventID).AnyTimes()
var versionHistories *persistence.VersionHistories
s.mockMutableState.EXPECT().GetVersionHistories().Return(versionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetReplicationState().Return(&persistence.ReplicationState{}).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
lastWriteVersion,
nil,
), nil,
).AnyTimes()
s.mockMutableState.EXPECT().GetActivityInfo(scheduleID).Return(&persistence.ActivityInfo{
Version: lastWriteVersion - 1,
}, true).AnyTimes()
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Nil(err)
}
func (s *activityReplicatorSuite) TestSyncActivity_ActivityRunning_Update_SameVersionSameAttempt() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
version := int64(100)
scheduleID := int64(144)
scheduledTime := time.Now()
startedID := scheduleID + 1
startedTime := scheduledTime.Add(time.Minute)
heartBeatUpdatedTime := startedTime.Add(time.Minute)
attempt := int32(0)
details := payloads.EncodeString("some random activity heartbeat progress")
nextEventID := scheduleID + 10
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().Times(1)
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
ScheduledTime: scheduledTime.UnixNano(),
StartedId: startedID,
StartedTime: startedTime.UnixNano(),
Attempt: attempt,
LastHeartbeatTime: heartBeatUpdatedTime.UnixNano(),
Details: details,
}
s.mockMutableState.EXPECT().IsWorkflowExecutionRunning().Return(true).AnyTimes()
s.mockMutableState.EXPECT().GetNextEventID().Return(nextEventID).AnyTimes()
var versionHistories *persistence.VersionHistories
s.mockMutableState.EXPECT().GetVersionHistories().Return(versionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetReplicationState().Return(&persistence.ReplicationState{}).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
version,
nil,
), nil,
).AnyTimes()
activityInfo := &persistence.ActivityInfo{
Version: version,
ScheduleID: scheduleID,
Attempt: attempt,
}
s.mockMutableState.EXPECT().GetActivityInfo(scheduleID).Return(activityInfo, true).AnyTimes()
s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, activityInfo.Version).Return(true).AnyTimes()
expectedErr := errors.New("this error is used to bypass lots of mocking")
s.mockMutableState.EXPECT().ReplicateActivityInfo(request, false).Return(expectedErr).Times(1)
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Equal(expectedErr, err)
}
func (s *activityReplicatorSuite) TestSyncActivity_ActivityRunning_Update_SameVersionLargerAttempt() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
version := int64(100)
scheduleID := int64(144)
scheduledTime := time.Now()
startedID := scheduleID + 1
startedTime := scheduledTime.Add(time.Minute)
heartBeatUpdatedTime := startedTime.Add(time.Minute)
attempt := int32(100)
details := payloads.EncodeString("some random activity heartbeat progress")
nextEventID := scheduleID + 10
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().Times(1)
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
ScheduledTime: scheduledTime.UnixNano(),
StartedId: startedID,
StartedTime: startedTime.UnixNano(),
Attempt: attempt,
LastHeartbeatTime: heartBeatUpdatedTime.UnixNano(),
Details: details,
}
s.mockMutableState.EXPECT().IsWorkflowExecutionRunning().Return(true).AnyTimes()
s.mockMutableState.EXPECT().GetNextEventID().Return(nextEventID).AnyTimes()
var versionHistories *persistence.VersionHistories
s.mockMutableState.EXPECT().GetVersionHistories().Return(versionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetReplicationState().Return(&persistence.ReplicationState{}).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
version,
nil,
), nil,
).AnyTimes()
activityInfo := &persistence.ActivityInfo{
Version: version,
ScheduleID: scheduleID,
Attempt: attempt - 1,
}
s.mockMutableState.EXPECT().GetActivityInfo(scheduleID).Return(activityInfo, true).AnyTimes()
s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, activityInfo.Version).Return(true).AnyTimes()
expectedErr := errors.New("this error is used to bypass lots of mocking")
s.mockMutableState.EXPECT().ReplicateActivityInfo(request, true).Return(expectedErr).Times(1)
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Equal(expectedErr, err)
}
func (s *activityReplicatorSuite) TestSyncActivity_ActivityRunning_Update_LargerVersion() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
version := int64(100)
scheduleID := int64(144)
scheduledTime := time.Now()
startedID := scheduleID + 1
startedTime := scheduledTime.Add(time.Minute)
heartBeatUpdatedTime := startedTime.Add(time.Minute)
attempt := int32(100)
details := payloads.EncodeString("some random activity heartbeat progress")
nextEventID := scheduleID + 10
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
weContext.EXPECT().clear().Times(1)
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
ScheduledTime: scheduledTime.UnixNano(),
StartedId: startedID,
StartedTime: startedTime.UnixNano(),
Attempt: attempt,
LastHeartbeatTime: heartBeatUpdatedTime.UnixNano(),
Details: details,
}
s.mockMutableState.EXPECT().IsWorkflowExecutionRunning().Return(true).AnyTimes()
s.mockMutableState.EXPECT().GetNextEventID().Return(nextEventID).AnyTimes()
var versionHistories *persistence.VersionHistories
s.mockMutableState.EXPECT().GetVersionHistories().Return(versionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetReplicationState().Return(&persistence.ReplicationState{}).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
version,
nil,
), nil,
).AnyTimes()
activityInfo := &persistence.ActivityInfo{
Version: version - 1,
ScheduleID: scheduleID,
Attempt: attempt + 1,
}
s.mockMutableState.EXPECT().GetActivityInfo(scheduleID).Return(activityInfo, true).AnyTimes()
s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, activityInfo.Version).Return(false).AnyTimes()
expectedErr := errors.New("this error is used to bypass lots of mocking")
s.mockMutableState.EXPECT().ReplicateActivityInfo(request, true).Return(expectedErr).Times(1)
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.Equal(expectedErr, err)
}
func (s *activityReplicatorSuite) TestSyncActivity_ActivityRunning() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
version := int64(100)
scheduleID := int64(144)
scheduledTime := time.Now()
startedID := scheduleID + 1
startedTime := scheduledTime.Add(time.Minute)
heartBeatUpdatedTime := startedTime.Add(time.Minute)
attempt := int32(100)
details := payloads.EncodeString("some random activity heartbeat progress")
nextEventID := scheduleID + 10
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
ScheduledTime: scheduledTime.UnixNano(),
StartedId: startedID,
StartedTime: startedTime.UnixNano(),
Attempt: attempt,
LastHeartbeatTime: heartBeatUpdatedTime.UnixNano(),
Details: details,
}
s.mockMutableState.EXPECT().IsWorkflowExecutionRunning().Return(true).AnyTimes()
s.mockMutableState.EXPECT().GetNextEventID().Return(nextEventID).AnyTimes()
var versionHistories *persistence.VersionHistories
s.mockMutableState.EXPECT().GetVersionHistories().Return(versionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetReplicationState().Return(&persistence.ReplicationState{}).AnyTimes()
s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return(enumsgenpb.WORKFLOW_EXECUTION_STATE_RUNNING, enumspb.WORKFLOW_EXECUTION_STATUS_UNSPECIFIED).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
version,
nil,
), nil,
).AnyTimes()
activityInfo := &persistence.ActivityInfo{
Version: version - 1,
ScheduleID: scheduleID,
Attempt: attempt + 1,
}
s.mockMutableState.EXPECT().GetActivityInfo(scheduleID).Return(activityInfo, true).AnyTimes()
activityInfos := map[int64]*persistence.ActivityInfo{activityInfo.ScheduleID: activityInfo}
s.mockMutableState.EXPECT().GetPendingActivityInfos().Return(activityInfos).AnyTimes()
s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, activityInfo.Version).Return(false).AnyTimes()
s.mockMutableState.EXPECT().ReplicateActivityInfo(request, true).Return(nil).Times(1)
s.mockMutableState.EXPECT().UpdateActivity(activityInfo).Return(nil).Times(1)
s.mockMutableState.EXPECT().GetCurrentVersion().Return(int64(1)).Times(1)
s.mockMutableState.EXPECT().AddTimerTasks(gomock.Any()).Times(1)
now := time.Unix(0, request.GetLastHeartbeatTime())
weContext.EXPECT().updateWorkflowExecutionWithNew(
now,
persistence.UpdateWorkflowModeUpdateCurrent,
nil,
nil,
transactionPolicyPassive,
nil,
).Return(nil).Times(1)
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.NoError(err)
}
func (s *activityReplicatorSuite) TestSyncActivity_ActivityRunning_ZombieWorkflow() {
namespace := "some random namespace name"
namespaceID := testNamespaceID
workflowID := "some random workflow ID"
runID := uuid.New()
version := int64(100)
scheduleID := int64(144)
scheduledTime := time.Now()
startedID := scheduleID + 1
startedTime := scheduledTime.Add(time.Minute)
heartBeatUpdatedTime := startedTime.Add(time.Minute)
attempt := int32(100)
details := payloads.EncodeString("some random activity heartbeat progress")
nextEventID := scheduleID + 10
key := definition.NewWorkflowIdentifier(namespaceID, workflowID, runID)
weContext := NewMockworkflowExecutionContext(s.controller)
weContext.EXPECT().loadWorkflowExecution().Return(s.mockMutableState, nil).Times(1)
weContext.EXPECT().lock(gomock.Any()).Return(nil)
weContext.EXPECT().unlock().Times(1)
_, err := s.historyCache.PutIfNotExist(key, weContext)
s.NoError(err)
request := &historyservice.SyncActivityRequest{
NamespaceId: namespaceID,
WorkflowId: workflowID,
RunId: runID,
Version: version,
ScheduledId: scheduleID,
ScheduledTime: scheduledTime.UnixNano(),
StartedId: startedID,
StartedTime: startedTime.UnixNano(),
Attempt: attempt,
LastHeartbeatTime: heartBeatUpdatedTime.UnixNano(),
Details: details,
}
s.mockMutableState.EXPECT().IsWorkflowExecutionRunning().Return(true).AnyTimes()
s.mockMutableState.EXPECT().GetNextEventID().Return(nextEventID).AnyTimes()
var versionHistories *persistence.VersionHistories
s.mockMutableState.EXPECT().GetVersionHistories().Return(versionHistories).AnyTimes()
s.mockMutableState.EXPECT().GetReplicationState().Return(&persistence.ReplicationState{}).AnyTimes()
s.mockMutableState.EXPECT().GetWorkflowStateStatus().Return(enumsgenpb.WORKFLOW_EXECUTION_STATE_ZOMBIE, enumspb.WORKFLOW_EXECUTION_STATUS_UNSPECIFIED).AnyTimes()
s.mockNamespaceCache.EXPECT().GetNamespaceByID(namespaceID).Return(
cache.NewGlobalNamespaceCacheEntryForTest(
&persistenceblobs.NamespaceInfo{Id: namespaceID, Name: namespace},
&persistenceblobs.NamespaceConfig{RetentionDays: 1},
&persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: cluster.TestCurrentClusterName,
Clusters: []string{
cluster.TestCurrentClusterName,
cluster.TestAlternativeClusterName,
},
},
version,
nil,
), nil,
).AnyTimes()
activityInfo := &persistence.ActivityInfo{
Version: version - 1,
ScheduleID: scheduleID,
Attempt: attempt + 1,
}
s.mockMutableState.EXPECT().GetActivityInfo(scheduleID).Return(activityInfo, true).AnyTimes()
activityInfos := map[int64]*persistence.ActivityInfo{activityInfo.ScheduleID: activityInfo}
s.mockMutableState.EXPECT().GetPendingActivityInfos().Return(activityInfos).AnyTimes()
s.mockClusterMetadata.EXPECT().IsVersionFromSameCluster(version, activityInfo.Version).Return(false).AnyTimes()
s.mockMutableState.EXPECT().ReplicateActivityInfo(request, true).Return(nil).Times(1)
s.mockMutableState.EXPECT().UpdateActivity(activityInfo).Return(nil).Times(1)
s.mockMutableState.EXPECT().GetCurrentVersion().Return(int64(1)).Times(1)
s.mockMutableState.EXPECT().AddTimerTasks(gomock.Any()).Times(1)
now := time.Unix(0, request.GetLastHeartbeatTime())
weContext.EXPECT().updateWorkflowExecutionWithNew(
now,
persistence.UpdateWorkflowModeBypassCurrent,
nil,
nil,
transactionPolicyPassive,
nil,
).Return(nil).Times(1)
err = s.nDCActivityReplicator.SyncActivity(context.Background(), request)
s.NoError(err)
}
| 1 | 9,783 | Another mess after all renames and enum conversions. Doesn't break anything though. | temporalio-temporal | go |
@@ -17,7 +17,8 @@ class PluginSetup(unittest.TestCase):
def test_a_statsd_timer_is_used_for_history_if_configured(self):
settings = {
"statsd_url": "udp://127.0.0.1:8125",
- "includes": "kinto.plugins.history"
+ "includes": "kinto.plugins.history",
+ "storage_strict_json": True
}
config = testing.setUp(settings=settings)
with mock.patch('kinto.core.statsd.Client.timer') as mocked: | 1 | import json
import re
import unittest
import mock
from pyramid import testing
from kinto import main as kinto_main
from kinto.core.testing import get_user_headers, skip_if_no_statsd
from .. import support
class PluginSetup(unittest.TestCase):
@skip_if_no_statsd
def test_a_statsd_timer_is_used_for_history_if_configured(self):
settings = {
"statsd_url": "udp://127.0.0.1:8125",
"includes": "kinto.plugins.history"
}
config = testing.setUp(settings=settings)
with mock.patch('kinto.core.statsd.Client.timer') as mocked:
kinto_main(None, config=config)
mocked.assert_called_with('plugins.history')
class HistoryWebTest(support.BaseWebTest, unittest.TestCase):
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
settings['includes'] = 'kinto.plugins.history'
return settings
class HelloViewTest(HistoryWebTest):
def test_history_capability_if_enabled(self):
resp = self.app.get('/')
capabilities = resp.json['capabilities']
self.assertIn('history', capabilities)
class HistoryViewTest(HistoryWebTest):
def setUp(self):
self.bucket_uri = '/buckets/test'
self.app.put(self.bucket_uri, headers=self.headers)
self.collection_uri = self.bucket_uri + '/collections/col'
resp = self.app.put(self.collection_uri, headers=self.headers)
self.collection = resp.json['data']
self.group_uri = self.bucket_uri + '/groups/grp'
body = {'data': {'members': ['elle']}}
resp = self.app.put_json(self.group_uri, body, headers=self.headers)
self.group = resp.json['data']
self.record_uri = '/buckets/test/collections/col/records/rec'
body = {'data': {'foo': 42}}
resp = self.app.put_json(self.record_uri, body, headers=self.headers)
self.record = resp.json['data']
self.history_uri = '/buckets/test/history'
def test_only_get_and_delete_on_collection_are_allowed(self):
self.app.put(self.history_uri, headers=self.headers, status=405)
self.app.patch(self.history_uri, headers=self.headers, status=405)
def test_only_collection_endpoint_is_available(self):
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
url = '{}/{}'.format(self.bucket_uri, entry['id'])
self.app.get(url, headers=self.headers, status=404)
self.app.put(url, headers=self.headers, status=404)
self.app.patch(url, headers=self.headers, status=404)
self.app.delete(url, headers=self.headers, status=404)
def test_tracks_user_and_date(self):
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][-1]
assert entry['user_id'] == self.principal
assert re.match(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}',
entry['date'])
#
# Bucket
#
def test_history_contains_bucket_creation(self):
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][-1]
assert entry['resource_name'] == 'bucket'
assert entry['bucket_id'] == 'test'
assert entry['action'] == 'create'
assert entry['uri'] == '/buckets/test'
def test_history_supports_creation_via_plural_endpoint(self):
resp = self.app.post_json('/buckets', {'data': {'id': 'posted'}},
headers=self.headers)
resp = self.app.get('/buckets/posted/history', headers=self.headers)
entry = resp.json['data'][0]
assert entry['resource_name'] == 'bucket'
assert entry['bucket_id'] == 'posted'
assert entry['action'] == 'create'
assert entry['uri'] == '/buckets/posted'
def test_tracks_bucket_attributes_update(self):
body = {'data': {'foo': 'baz'}}
self.app.patch_json(self.bucket_uri, body,
headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'update'
assert entry['target']['data']['foo'] == 'baz'
def test_tracks_bucket_permissions_update(self):
body = {'permissions': {'read': ['admins']}}
self.app.patch_json(self.bucket_uri, body,
headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'update'
assert entry['target']['permissions']['read'] == ['admins']
def test_bucket_delete_destroys_its_history_entries(self):
self.app.delete(self.bucket_uri, headers=self.headers)
storage = self.app.app.registry.storage
stored_in_backend, _ = storage.get_all(parent_id='/buckets/test',
collection_id='history')
assert len(stored_in_backend) == 0
def test_delete_all_buckets_destroys_history_entries(self):
self.app.put_json('/buckets/1', {"data": {"a": 1}},
headers=self.headers)
self.app.delete('/buckets?a=1', headers=self.headers)
# Entries about deleted bucket are gone.
storage = self.app.app.registry.storage
stored_in_backend, _ = storage.get_all(parent_id='/buckets/1',
collection_id='history')
assert len(stored_in_backend) == 0
# Entries of other buckets are still here.
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][-1]
assert entry['bucket_id'] == 'test'
assert entry['action'] == 'create'
#
# Collection
#
def test_tracks_collection_creation(self):
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][2]
cid = self.collection['id']
assert entry['resource_name'] == 'collection'
assert 'bucket_id' not in entry
assert entry['collection_id'] == cid
assert entry['action'] == 'create'
assert entry['uri'] == '/buckets/test/collections/{}'.format(cid)
def test_tracks_collection_attributes_update(self):
body = {'data': {'foo': 'baz'}}
self.app.patch_json(self.collection_uri, body,
headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'update'
assert entry['target']['data']['foo'] == 'baz'
def test_tracks_collection_permissions_update(self):
body = {'permissions': {'read': ['admins']}}
self.app.patch_json(self.collection_uri, body,
headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'update'
assert entry['target']['permissions']['read'] == ['admins']
def test_tracks_collection_delete(self):
self.app.delete(self.collection_uri, headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'delete'
assert entry['target']['data']['deleted'] is True
def test_tracks_multiple_collections_delete(self):
self.app.put(self.bucket_uri + '/collections/col2',
headers=self.headers)
self.app.delete(self.bucket_uri + '/collections', headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'delete'
assert entry['target']['data']['id'] in (self.collection['id'], 'col2')
#
# Group
#
def test_tracks_group_creation(self):
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][1]
assert entry['resource_name'] == 'group'
assert 'bucket_id' not in entry
assert entry['group_id'] == self.group['id']
assert entry['action'] == 'create'
assert entry['uri'] == '/buckets/test/groups/{}'.format(self.group['id'])
def test_tracks_group_attributes_update(self):
body = {'data': {'foo': 'baz', 'members': ['lui']}}
self.app.patch_json(self.group_uri, body,
headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'update'
assert entry['target']['data']['foo'] == 'baz'
assert entry['target']['data']['members'] == ['lui']
def test_tracks_group_permissions_update(self):
body = {'permissions': {'read': ['admins']}}
self.app.patch_json(self.group_uri, body,
headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'update'
assert entry['target']['permissions']['read'] == ['admins']
def test_tracks_group_delete(self):
self.app.delete(self.group_uri, headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'delete'
assert entry['target']['data']['deleted'] is True
def test_tracks_multiple_groups_delete(self):
self.app.put_json(self.bucket_uri + '/groups/g2',
{"data": {"members": ["her"]}},
headers=self.headers)
self.app.delete(self.bucket_uri + '/groups', headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'delete'
assert entry['target']['data']['id'] in (self.group['id'], 'g2')
#
# Record
#
def test_tracks_record_creation(self):
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
cid = self.collection['id']
rid = self.record['id']
assert entry['resource_name'] == 'record'
assert 'bucket_id' not in entry
assert entry['collection_id'] == cid
assert entry['record_id'] == rid
assert entry['action'] == 'create'
assert entry['uri'] == '/buckets/test/collections/{}/records/{}'.format(cid, rid)
assert entry['target']['data']['foo'] == 42
assert entry['target']['permissions']['write'][0].startswith('basicauth:')
def test_tracks_record_attributes_update(self):
resp = self.app.patch_json(self.record_uri, {'data': {'foo': 'baz'}},
headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'update'
assert entry['target']['data']['foo'] == 'baz'
def test_tracks_record_permissions_update(self):
body = {'permissions': {'read': ['admins']}}
resp = self.app.patch_json(self.record_uri, body,
headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'update'
assert entry['target']['permissions']['read'] == ['admins']
def test_tracks_record_delete(self):
resp = self.app.delete(self.record_uri, headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'delete'
assert entry['target']['data']['deleted'] is True
def test_tracks_multiple_records_delete(self):
records_uri = self.collection_uri + '/records'
body = {'data': {'foo': 43}}
resp = self.app.post_json(records_uri, body, headers=self.headers)
rid = resp.json['data']['id']
self.app.delete(records_uri, headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
entry = resp.json['data'][0]
assert entry['action'] == 'delete'
assert entry['target']['data']['id'] in (self.record['id'], rid)
def test_does_not_track_records_during_massive_deletion(self):
body = {'data': {'pim': 'pam'}}
records_uri = self.collection_uri + '/records'
self.app.post_json(records_uri, body, headers=self.headers)
self.app.delete(self.collection_uri, headers=self.headers)
resp = self.app.get(self.history_uri, headers=self.headers)
deletion_entries = [e for e in resp.json['data'] if e['action'] == 'delete']
assert len(deletion_entries) == 1
class HistoryDeletionTest(HistoryWebTest):
def setUp(self):
self.app.put('/buckets/bid', headers=self.headers)
self.app.put('/buckets/bid/collections/cid',
headers=self.headers)
body = {'data': {'foo': 42}}
self.app.put_json('/buckets/bid/collections/cid/records/rid',
body,
headers=self.headers)
def test_full_deletion(self):
self.app.delete('/buckets/bid/history', headers=self.headers)
resp = self.app.get('/buckets/bid/history', headers=self.headers)
assert len(resp.json['data']) == 0
def test_partial_deletion(self):
resp = self.app.get('/buckets/bid/history', headers=self.headers)
before = int(json.loads(resp.headers['ETag']))
self.app.put('/buckets/bid/collections/cid2', headers=self.headers)
# Delete everything before the last entry (exclusive)
self.app.delete('/buckets/bid/history?_before={}'.format(before),
headers=self.headers)
resp = self.app.get('/buckets/bid/history', headers=self.headers)
assert len(resp.json['data']) == 2 # record + new collection
class FilteringTest(HistoryWebTest):
def setUp(self):
self.app.put('/buckets/bid', headers=self.headers)
self.app.put('/buckets/0', headers=self.headers)
self.app.put('/buckets/bid/collections/cid',
headers=self.headers)
self.app.put('/buckets/0/collections/1',
headers=self.headers)
body = {'data': {'foo': 42}}
self.app.put_json('/buckets/bid/collections/cid/records/rid',
body,
headers=self.headers)
body = {'data': {'foo': 0}}
self.app.put_json('/buckets/0/collections/1/records/2',
body,
headers=self.headers)
body = {'data': {'foo': 'bar'}}
self.app.patch_json('/buckets/bid/collections/cid/records/rid',
body,
headers=self.headers)
self.app.delete('/buckets/bid/collections/cid/records/rid',
headers=self.headers)
def test_filter_by_unknown_field_is_not_allowed(self):
self.app.get('/buckets/bid/history?movie=bourne',
headers=self.headers,
status=400)
def test_filter_by_action(self):
resp = self.app.get('/buckets/bid/history?action=delete',
headers=self.headers)
assert len(resp.json['data']) == 1
def test_filter_by_resource(self):
resp = self.app.get('/buckets/bid/history?resource_name=bucket',
headers=self.headers)
assert len(resp.json['data']) == 1
def test_filter_by_uri(self):
uri = '/buckets/bid/collections/cid/records/rid'
resp = self.app.get('/buckets/bid/history?uri={}'.format(uri),
headers=self.headers)
assert len(resp.json['data']) == 3 # create / update / delete
def test_allows_diff_between_two_versions_of_a_record(self):
uri = '/buckets/bid/collections/cid/records/rid'
querystring = '?uri={}&_limit=2&_sort=last_modified'.format(uri)
resp = self.app.get('/buckets/bid/history{}'.format(querystring),
headers=self.headers)
entries = resp.json['data']
version1 = entries[0]['target']['data']
version2 = entries[1]['target']['data']
diff = [(k, version1[k], version2[k])
for k in version1.keys()
if k != 'last_modified' and version2[k] != version1[k]]
assert diff == [('foo', 42, 'bar')]
def test_filter_by_bucket(self):
uri = '/buckets/bid/history?bucket_id=bid'
resp = self.app.get(uri,
headers=self.headers)
# This is equivalent to filtering by resource_name=bucket,
# since only entries for bucket have ``bucket_id`` attribute.
assert len(resp.json['data']) == 1
def test_filter_by_collection(self):
uri = '/buckets/bid/history?collection_id=cid'
resp = self.app.get(uri,
headers=self.headers)
assert len(resp.json['data']) == 4
def test_filter_by_numeric_bucket(self):
uri = '/buckets/0/history?bucket_id=0'
resp = self.app.get(uri,
headers=self.headers)
assert len(resp.json['data']) == 1
def test_filter_by_numeric_collection(self):
uri = '/buckets/0/history?collection_id=1'
resp = self.app.get(uri,
headers=self.headers)
assert len(resp.json['data']) == 2
def test_filter_by_numeric_record(self):
uri = '/buckets/0/history?record_id=2'
resp = self.app.get(uri,
headers=self.headers)
assert len(resp.json['data']) == 1
def test_filter_by_target_fields(self):
uri = '/buckets/bid/history?target.data.id=rid'
resp = self.app.get(uri,
headers=self.headers)
assert len(resp.json['data']) == 3 # create, update, delete
def test_limit_results(self):
resp = self.app.get('/buckets/bid/history?_limit=2',
headers=self.headers)
assert len(resp.json['data']) == 2
assert 'Next-Page' in resp.headers
def test_filter_returned_fields(self):
resp = self.app.get('/buckets/bid/history?_fields=uri,action',
headers=self.headers)
assert sorted(resp.json['data'][0].keys()) == ['action', 'id',
'last_modified', 'uri']
def test_sort_by_date(self):
resp = self.app.get('/buckets/bid/history?_sort=date',
headers=self.headers)
entries = resp.json['data']
assert entries[0]['date'] < entries[-1]['date']
class BulkTest(HistoryWebTest):
def setUp(self):
body = {
'defaults': {
'method': 'POST',
'path': '/buckets/bid/collections/cid/records',
},
'requests': [{
'path': '/buckets/bid',
'method': 'PUT'
}, {
'path': '/buckets/bid/collections',
'body': {'data': {'id': 'cid'}}
}, {
'body': {'data': {'id': 'a', 'attr': 1}},
}, {
'body': {'data': {'id': 'b', 'attr': 2}},
}, {
'body': {'data': {'id': 'c', 'attr': 3}}
}]
}
self.app.post_json('/batch', body, headers=self.headers)
def test_post_on_collection(self):
resp = self.app.get('/buckets/bid/history', headers=self.headers)
entries = resp.json['data']
assert len(entries) == 5
assert entries[0]['uri'] == '/buckets/bid/collections/cid/records/c'
assert entries[-2]['uri'] == '/buckets/bid/collections/cid'
def test_delete_on_collection(self):
body = {
'defaults': {
'method': 'DELETE',
},
'requests': [{
'path': '/buckets/bid/collections/cid/records/a',
}, {
'path': '/buckets/bid/collections/cid/records/b',
}, {
'path': '/buckets/bid/collections/cid/records/c',
}]
}
self.app.post_json('/batch', body, headers=self.headers)
resp = self.app.get('/buckets/bid/history', headers=self.headers)
entries = resp.json['data']
assert entries[0]['uri'] == '/buckets/bid/collections/cid/records/c'
assert entries[1]['uri'] == '/buckets/bid/collections/cid/records/b'
assert entries[2]['uri'] == '/buckets/bid/collections/cid/records/a'
def test_multiple_patch(self):
# Kinto/kinto#942
requests = [{
'method': 'PATCH',
'path': '/buckets/bid/collections/cid/records/{}'.format(l),
'body': {'data': {'label': l}}} for l in ('a', 'b', 'c')]
self.app.post_json('/batch', {'requests': requests}, headers=self.headers)
resp = self.app.get('/buckets/bid/history', headers=self.headers)
entries = resp.json['data']
for entry in entries:
if entry['resource_name'] != 'record':
continue
assert entry['record_id'] == entry['target']['data']['id']
class DefaultBucketTest(HistoryWebTest):
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
settings['includes'] = ('kinto.plugins.default_bucket '
'kinto.plugins.history')
return settings
def setUp(self):
resp = self.app.get('/', headers=self.headers)
self.bucket_id = resp.json['user']['bucket']
self.history_uri = '/buckets/{}/history'.format(self.bucket_id)
def test_history_can_be_accessed_via_default_alias(self):
self.app.get('/buckets/default/collections/blah',
headers=self.headers)
resp = self.app.get('/buckets/default/history', headers=self.headers)
assert len(resp.json['data']) == 2
def test_implicit_creations_are_listed(self):
body = {'data': {'foo': 42}}
resp = self.app.post_json('/buckets/default/collections/blah/records',
body,
headers=self.headers)
record = resp.json['data']
resp = self.app.get(self.history_uri, headers=self.headers)
entries = resp.json['data']
assert len(entries) == 3
bucket_uri = '/buckets/{}'.format(self.bucket_id)
assert entries[2]['resource_name'] == 'bucket'
assert entries[2]['bucket_id'] == self.bucket_id
assert entries[2]['uri'] == bucket_uri
assert entries[2]['target']['permissions']['write'][0] == self.principal
collection_uri = bucket_uri + '/collections/blah'
assert entries[1]['resource_name'] == 'collection'
assert 'bucket_id' not in entries[1]
assert entries[1]['collection_id'] == 'blah'
assert entries[1]['uri'] == collection_uri
assert entries[1]['target']['permissions']['write'][0] == self.principal
record_uri = collection_uri + '/records/{}'.format(record['id'])
assert entries[0]['resource_name'] == 'record'
assert 'bucket_id' not in entries[1]
assert entries[0]['collection_id'] == 'blah'
assert entries[0]['record_id'] == record['id']
assert entries[0]['uri'] == record_uri
assert entries[0]['target']['data']['foo'] == 42
assert entries[0]['target']['permissions']['write'][0] == self.principal
class PermissionsTest(HistoryWebTest):
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
settings['experimental_permissions_endpoint'] = 'true'
return settings
def setUp(self):
self.alice_headers = get_user_headers('alice')
self.bob_headers = get_user_headers('bob')
self.julia_headers = get_user_headers('julia')
self.alice_principal = ('basicauth:d5b0026601f1b251974e09548d44155e16'
'812e3c64ff7ae053fe3542e2ca1570')
self.bob_principal = ('basicauth:c031ced27503f788b102ca54269a062ec737'
'94bb075154c74a0d4311e74ca8b6')
self.julia_principal = ('basicauth:d8bab8d9fe0510fcaf9b5ad5942c027fc'
'2fdf80b6dc59cc3c48d12a2fcb18f1c')
bucket = {
'permissions': {
'read': [self.alice_principal]
}
}
collection = {
'permissions': {
'read': [self.julia_principal]
}
}
record = {
'permissions': {
'write': [self.bob_principal, self.alice_principal],
}
}
self.app.put('/buckets/author-only',
headers=self.headers)
self.app.put_json('/buckets/test',
bucket,
headers=self.headers)
self.app.put_json('/buckets/test/groups/admins',
{'data': {'members': []}},
headers=self.headers)
self.app.put_json('/buckets/test/collections/alice-julia',
collection,
headers=self.headers)
self.app.put_json('/buckets/test/collections/author-only',
headers=self.headers)
self.app.post_json('/buckets/test/collections/alice-julia/records',
record,
headers=self.headers)
self.app.post_json('/buckets/test/collections/alice-julia/records',
{'permissions': {'read': ['system.Authenticated']}},
headers=self.headers)
def test_author_can_read_everything(self):
resp = self.app.get('/buckets/test/history',
headers=self.headers)
entries = resp.json['data']
assert len(entries) == 6 # everything.
def test_read_permission_can_be_given_to_anybody_via_settings(self):
with mock.patch.dict(self.app.app.registry.settings,
[('history_read_principals', 'system.Everyone')]):
resp = self.app.get('/buckets/test/history',
headers=get_user_headers('tartan:pion'))
entries = resp.json['data']
assert len(entries) == 6 # everything.
def test_bucket_read_allows_whole_history(self):
resp = self.app.get('/buckets/test/history',
headers=self.alice_headers)
entries = resp.json['data']
assert len(entries) == 6 # everything.
self.app.get('/buckets/author-only/history',
headers=self.alice_headers,
status=403)
def test_collection_read_restricts_to_collection(self):
resp = self.app.get('/buckets/test/history',
headers=self.julia_headers)
entries = resp.json['data']
assert len(entries) == 3
assert entries[0]['resource_name'] == 'record'
assert entries[1]['resource_name'] == 'record'
assert entries[2]['resource_name'] == 'collection'
def test_write_on_record_restricts_to_record(self):
resp = self.app.get('/buckets/test/history',
headers=self.bob_headers)
entries = resp.json['data']
assert len(entries) == 2
assert ('system.Authenticated' in
entries[0]['target']['permissions']['read'])
assert entries[0]['resource_name'] == 'record'
assert (self.bob_principal in
entries[1]['target']['permissions']['write'])
assert entries[1]['resource_name'] == 'record'
def test_publicly_readable_record_allows_any_authenticated(self):
resp = self.app.get('/buckets/test/history',
headers=get_user_headers('jack:'))
entries = resp.json['data']
assert len(entries) == 1
assert ('system.Authenticated' in
entries[0]['target']['permissions']['read'])
assert entries[0]['resource_name'] == 'record'
def test_new_entries_are_not_readable_if_permission_is_removed(self):
resp = self.app.get('/buckets/test/history',
headers=self.alice_headers)
before = resp.headers['ETag']
# Remove alice from read permission.
self.app.patch_json('/buckets/test',
{'permissions': {'read': []}},
headers=self.headers)
# Create new collection.
self.app.put_json('/buckets/test/collections/new-one',
headers=self.headers)
# History did not evolve for alice.
resp = self.app.get('/buckets/test/history',
headers=self.alice_headers)
assert resp.headers['ETag'] != before
def test_history_entries_are_not_listed_in_permissions_endpoint(self):
resp = self.app.get('/permissions',
headers=self.headers)
entries = [e['resource_name'] == 'history' for e in resp.json["data"]]
assert not any(entries)
class ExcludeResourcesTest(HistoryWebTest):
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
settings['history.exclude_resources'] = ('/buckets/a '
'/buckets/b/collections/a '
'/buckets/b/groups/a')
return settings
def setUp(self):
group = {'data': {'members': []}}
self.app.put_json('/buckets/a', headers=self.headers)
self.app.put_json('/buckets/a/groups/admins', group, headers=self.headers)
self.app.put_json('/buckets/b', headers=self.headers)
self.app.put_json('/buckets/b/groups/a', group, headers=self.headers)
self.app.put_json('/buckets/b/collections/a', headers=self.headers)
self.app.put_json('/buckets/b/collections/a/records/1', headers=self.headers)
self.app.put_json('/buckets/b/collections/b', headers=self.headers)
self.app.put_json('/buckets/b/collections/b/records/1', headers=self.headers)
def test_whole_buckets_can_be_excluded(self):
resp = self.app.get('/buckets/a/history',
headers=self.headers)
entries = resp.json['data']
assert len(entries) == 0 # nothing.
def test_some_specific_collection_can_be_excluded(self):
resp = self.app.get('/buckets/b/history?collection_id=b',
headers=self.headers)
entries = resp.json['data']
assert len(entries) > 0
resp = self.app.get('/buckets/b/history?collection_id=a',
headers=self.headers)
entries = resp.json['data']
assert len(entries) == 0 # nothing.
def test_some_specific_object_can_be_excluded(self):
resp = self.app.get('/buckets/b/history?group_id=a',
headers=self.headers)
entries = resp.json['data']
assert len(entries) == 0 # nothing.
| 1 | 10,974 | I don't think it's necessary to change this for the history plugin tests? | Kinto-kinto | py |
@@ -328,8 +328,8 @@ bool TCPv4Transport::compare_locator_ip_and_port(
void TCPv4Transport::fill_local_ip(
Locator& loc) const
{
- IPLocator::setIPv4(loc, "127.0.0.1");
loc.kind = LOCATOR_KIND_TCPv4;
+ IPLocator::setIPv4(loc, "127.0.0.1");
}
ip::tcp::endpoint TCPv4Transport::generate_endpoint( | 1 | // Copyright 2018 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <rtps/transport/TCPv4Transport.h>
#include <utility>
#include <cstring>
#include <algorithm>
#include <asio.hpp>
#include <fastdds/dds/log/Log.hpp>
#include <fastdds/rtps/transport/TCPv4TransportDescriptor.h>
#include <fastrtps/utils/IPLocator.h>
using namespace std;
using namespace asio;
namespace eprosima {
namespace fastdds {
namespace rtps {
using IPFinder = fastrtps::rtps::IPFinder;
using octet = fastrtps::rtps::octet;
using IPLocator = fastrtps::rtps::IPLocator;
using Log = fastdds::dds::Log;
static void get_ipv4s(
std::vector<IPFinder::info_IP>& locNames,
bool return_loopback = false)
{
IPFinder::getIPs(&locNames, return_loopback);
auto new_end = remove_if(locNames.begin(),
locNames.end(),
[](IPFinder::info_IP ip)
{
return ip.type != IPFinder::IP4 && ip.type != IPFinder::IP4_LOCAL;
});
locNames.erase(new_end, locNames.end());
std::for_each(locNames.begin(), locNames.end(), [](IPFinder::info_IP& loc)
{
loc.locator.kind = LOCATOR_KIND_TCPv4;
});
}
static asio::ip::address_v4::bytes_type locator_to_native(
Locator& locator,
const octet* local_wan)
{
const octet* wan = IPLocator::getWan(locator);
if (IPLocator::hasWan(locator) && (memcmp(local_wan, wan, 4) != 0))
{
return{ { wan[0], wan[1], wan[2], wan[3]} };
}
else
{
return{ { IPLocator::getIPv4(locator)[0],
IPLocator::getIPv4(locator)[1],
IPLocator::getIPv4(locator)[2],
IPLocator::getIPv4(locator)[3]} };
}
}
TCPv4Transport::TCPv4Transport(
const TCPv4TransportDescriptor& descriptor)
: TCPTransportInterface(LOCATOR_KIND_TCPv4)
, configuration_(descriptor)
{
for (const auto& interface : descriptor.interfaceWhiteList)
{
interface_whitelist_.emplace_back(ip::address_v4::from_string(interface));
}
for (uint16_t port : configuration_.listening_ports)
{
Locator locator(LOCATOR_KIND_TCPv4, port);
create_acceptor_socket(locator);
}
#if !TLS_FOUND
if (descriptor.apply_security)
{
logError(RTCP_TLS, "Trying to use TCP Transport with TLS but TLS was not found.");
}
#endif // if !TLS_FOUND
}
TCPv4Transport::TCPv4Transport()
: TCPTransportInterface(LOCATOR_KIND_TCPv4)
{
}
TCPv4Transport::~TCPv4Transport()
{
clean();
}
TCPv4TransportDescriptor::TCPv4TransportDescriptor()
: TCPTransportDescriptor()
{
memset(wan_addr, 0, 4);
}
TCPv4TransportDescriptor::TCPv4TransportDescriptor(
const TCPv4TransportDescriptor& t)
: TCPTransportDescriptor(t)
{
memcpy(wan_addr, t.wan_addr, 4);
}
TCPv4TransportDescriptor& TCPv4TransportDescriptor::operator =(
const TCPv4TransportDescriptor& t)
{
*static_cast<TCPTransportDescriptor*>(this) = t;
memcpy(wan_addr, t.wan_addr, 4);
return *this;
}
bool TCPv4TransportDescriptor::operator ==(
const TCPv4TransportDescriptor& t) const
{
return (this->wan_addr[0] == t.wan_addr[0] &&
this->wan_addr[1] == t.wan_addr[1] &&
this->wan_addr[2] == t.wan_addr[2] &&
this->wan_addr[3] == t.wan_addr[3] &&
TCPTransportDescriptor::operator ==(t));
}
TransportInterface* TCPv4TransportDescriptor::create_transport() const
{
return new TCPv4Transport(*this);
}
void TCPv4Transport::AddDefaultOutputLocator(
LocatorList&)
{
}
const TCPTransportDescriptor* TCPv4Transport::configuration() const
{
return &configuration_;
}
TCPTransportDescriptor* TCPv4Transport::configuration()
{
return &configuration_;
}
void TCPv4Transport::get_ips(
std::vector<IPFinder::info_IP>& locNames,
bool return_loopback) const
{
get_ipv4s(locNames, return_loopback);
}
uint16_t TCPv4Transport::GetLogicalPortIncrement() const
{
return configuration_.logical_port_increment;
}
uint16_t TCPv4Transport::GetLogicalPortRange() const
{
return configuration_.logical_port_range;
}
uint16_t TCPv4Transport::GetMaxLogicalPort() const
{
return configuration_.max_logical_port;
}
std::vector<std::string> TCPv4Transport::get_binding_interfaces_list()
{
std::vector<std::string> vOutputInterfaces;
if (is_interface_whitelist_empty())
{
vOutputInterfaces.push_back(s_IPv4AddressAny);
}
else
{
for (auto& ip : interface_whitelist_)
{
vOutputInterfaces.push_back(ip.to_string());
}
}
return vOutputInterfaces;
}
bool TCPv4Transport::is_interface_whitelist_empty() const
{
return interface_whitelist_.empty();
}
bool TCPv4Transport::is_interface_allowed(
const std::string& interface) const
{
return is_interface_allowed(asio::ip::address_v4::from_string(interface));
}
bool TCPv4Transport::is_interface_allowed(
const ip::address_v4& ip) const
{
if (interface_whitelist_.empty())
{
return true;
}
if (ip == ip::address_v4::any())
{
return true;
}
return find(interface_whitelist_.begin(), interface_whitelist_.end(), ip) != interface_whitelist_.end();
}
LocatorList TCPv4Transport::NormalizeLocator(
const Locator& locator)
{
LocatorList list;
if (IPLocator::isAny(locator))
{
std::vector<IPFinder::info_IP> locNames;
get_ipv4s(locNames);
for (const auto& infoIP : locNames)
{
auto ip = asio::ip::address_v4::from_string(infoIP.name);
if (is_interface_allowed(ip))
{
Locator newloc(locator);
IPLocator::setIPv4(newloc, infoIP.locator);
list.push_back(newloc);
}
}
if (list.empty())
{
Locator newloc(locator);
IPLocator::setIPv4(newloc, "127.0.0.1");
list.push_back(newloc);
}
}
else
{
list.push_back(locator);
}
return list;
}
bool TCPv4Transport::is_local_locator(
const Locator& locator) const
{
assert(locator.kind == LOCATOR_KIND_TCPv4);
/*
* Check case: Remote WAN address isn't our WAN address.
*/
if (IPLocator::hasWan(locator))
{
const octet* wan = IPLocator::getWan(locator);
if (memcmp(wan, configuration_.wan_addr, 4 * sizeof(octet)) != 0)
{
return false; // WAN mismatch
}
}
/*
* Check case: Address is localhost
*/
if (IPLocator::isLocal(locator))
{
return true;
}
/*
* Check case: Address is one of our addresses.
*/
for (const IPFinder::info_IP& localInterface : current_interfaces_)
{
if (IPLocator::compareAddress(locator, localInterface.locator))
{
return true;
}
}
return false;
}
bool TCPv4Transport::is_locator_allowed(
const Locator& locator) const
{
if (!IsLocatorSupported(locator))
{
return false;
}
if (interface_whitelist_.empty())
{
return true;
}
return is_interface_allowed(IPLocator::toIPv4string(locator));
}
bool TCPv4Transport::compare_locator_ip(
const Locator& lh,
const Locator& rh) const
{
return IPLocator::compareAddress(lh, rh);
}
bool TCPv4Transport::compare_locator_ip_and_port(
const Locator& lh,
const Locator& rh) const
{
return IPLocator::compareAddressAndPhysicalPort(lh, rh);
}
void TCPv4Transport::fill_local_ip(
Locator& loc) const
{
IPLocator::setIPv4(loc, "127.0.0.1");
loc.kind = LOCATOR_KIND_TCPv4;
}
ip::tcp::endpoint TCPv4Transport::generate_endpoint(
const Locator& loc,
uint16_t port) const
{
asio::ip::address_v4::bytes_type remoteAddress;
IPLocator::copyIPv4(loc, remoteAddress.data());
return ip::tcp::endpoint(asio::ip::address_v4(remoteAddress), port);
}
ip::tcp::endpoint TCPv4Transport::generate_local_endpoint(
Locator& loc,
uint16_t port) const
{
return ip::tcp::endpoint(asio::ip::address_v4(locator_to_native(loc, configuration_.wan_addr)), port);
}
ip::tcp::endpoint TCPv4Transport::generate_endpoint(
uint16_t port) const
{
return asio::ip::tcp::endpoint(asio::ip::tcp::v4(), port);
}
asio::ip::tcp TCPv4Transport::generate_protocol() const
{
return asio::ip::tcp::v4();
}
bool TCPv4Transport::is_interface_allowed(
const Locator& loc) const
{
asio::ip::address_v4 ip = asio::ip::address_v4::from_string(IPLocator::toIPv4string(loc));
return is_interface_allowed(ip);
}
void TCPv4Transport::set_receive_buffer_size(
uint32_t size)
{
configuration_.receiveBufferSize = size;
}
void TCPv4Transport::set_send_buffer_size(
uint32_t size)
{
configuration_.sendBufferSize = size;
}
void TCPv4Transport::endpoint_to_locator(
const ip::tcp::endpoint& endpoint,
Locator& locator) const
{
locator.kind = LOCATOR_KIND_TCPv4;
IPLocator::setPhysicalPort(locator, endpoint.port());
auto ipBytes = endpoint.address().to_v4().to_bytes();
IPLocator::setIPv4(locator, ipBytes.data());
}
bool TCPv4Transport::fillMetatrafficUnicastLocator(
Locator& locator,
uint32_t metatraffic_unicast_port) const
{
bool result = TCPTransportInterface::fillMetatrafficUnicastLocator(locator, metatraffic_unicast_port);
IPLocator::setWan(locator,
configuration_.wan_addr[0], configuration_.wan_addr[1],
configuration_.wan_addr[2], configuration_.wan_addr[3]);
return result;
}
bool TCPv4Transport::fillUnicastLocator(
Locator& locator,
uint32_t well_known_port) const
{
bool result = TCPTransportInterface::fillUnicastLocator(locator, well_known_port);
IPLocator::setWan(locator,
configuration_.wan_addr[0], configuration_.wan_addr[1],
configuration_.wan_addr[2], configuration_.wan_addr[3]);
return result;
}
} // namespace rtps
} // namespace fastdds
} // namespace eprosima
| 1 | 23,501 | In general, I suggest using the `kind()` getter inhereted from `TransportInterface` | eProsima-Fast-DDS | cpp |
@@ -48,8 +48,6 @@ namespace NLog.Layouts
/// </remarks>
[Layout("Log4JXmlEventLayout")]
[ThreadAgnostic]
- [ThreadSafe]
- [AppDomainFixedOutput]
public class Log4JXmlEventLayout : Layout, IIncludeContext
{
/// <summary> | 1 | //
// Copyright (c) 2004-2021 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.Layouts
{
using System;
using System.Collections.Generic;
using System.Text;
using NLog.Config;
using NLog.LayoutRenderers;
using NLog.Targets;
/// <summary>
/// A specialized layout that renders Log4j-compatible XML events.
/// </summary>
/// <remarks>
/// This layout is not meant to be used explicitly. Instead you can use ${log4jxmlevent} layout renderer.
/// </remarks>
[Layout("Log4JXmlEventLayout")]
[ThreadAgnostic]
[ThreadSafe]
[AppDomainFixedOutput]
public class Log4JXmlEventLayout : Layout, IIncludeContext
{
/// <summary>
/// Initializes a new instance of the <see cref="Log4JXmlEventLayout" /> class.
/// </summary>
public Log4JXmlEventLayout()
{
Renderer = new Log4JXmlEventLayoutRenderer();
Parameters = new List<NLogViewerParameterInfo>();
Renderer.Parameters = Parameters;
}
/// <summary>
/// Gets the <see cref="Log4JXmlEventLayoutRenderer"/> instance that renders log events.
/// </summary>
public Log4JXmlEventLayoutRenderer Renderer { get; }
/// <summary>
/// Gets the collection of parameters. Each parameter contains a mapping
/// between NLog layout and a named parameter.
/// </summary>
/// <docgen category='Payload Options' order='10' />
[ArrayParameter(typeof(NLogViewerParameterInfo), "parameter")]
public IList<NLogViewerParameterInfo> Parameters { get => Renderer.Parameters; set => Renderer.Parameters = value; }
/// <summary>
/// Gets or sets the option to include all properties from the log events
/// </summary>
/// <docgen category='Payload Options' order='10' />
public bool IncludeEventProperties
{
get => Renderer.IncludeEventProperties;
set => Renderer.IncludeEventProperties = value;
}
/// <summary>
/// Gets or sets whether to include the contents of the <see cref="ScopeContext"/> properties-dictionary.
/// </summary>
/// <docgen category='Payload Options' order='10' />
public bool IncludeScopeProperties
{
get => Renderer.IncludeScopeProperties;
set => Renderer.IncludeScopeProperties = value;
}
/// <summary>
/// Gets or sets whether to include log4j:NDC in output from <see cref="ScopeContext"/> nested context.
/// </summary>
/// <docgen category='Payload Options' order='10' />
public bool IncludeScopeNested
{
get => Renderer.IncludeScopeNested;
set => Renderer.IncludeScopeNested = value;
}
/// <summary>
/// Gets or sets the option to include all properties from the log events
/// </summary>
/// <docgen category='Payload Options' order='10' />
[Obsolete("Replaced by IncludeEventProperties. Marked obsolete on NLog 5.0")]
public bool IncludeAllProperties { get => IncludeEventProperties; set => IncludeEventProperties = value; }
/// <summary>
/// Gets or sets a value indicating whether to include contents of the <see cref="MappedDiagnosticsContext"/> dictionary.
/// </summary>
/// <docgen category='Payload Options' order='10' />
[Obsolete("Replaced by IncludeScopeProperties. Marked obsolete on NLog 5.0")]
public bool IncludeMdc { get => Renderer.IncludeMdc; set => Renderer.IncludeMdc = value; }
/// <summary>
/// Gets or sets whether to include log4j:NDC in output from <see cref="ScopeContext"/> nested context.
/// </summary>
/// <docgen category='Payload Options' order='10' />
public bool IncludeNdc { get => Renderer.IncludeNdc; set => Renderer.IncludeNdc = value; }
/// <summary>
/// Gets or sets a value indicating whether to include contents of the <see cref="MappedDiagnosticsLogicalContext"/> dictionary.
/// </summary>
/// <docgen category='Payload Options' order='10' />
[Obsolete("Replaced by IncludeScopeProperties. Marked obsolete on NLog 5.0")]
public bool IncludeMdlc { get => Renderer.IncludeMdlc; set => Renderer.IncludeMdlc = value; }
/// <summary>
/// Gets or sets a value indicating whether to include contents of the <see cref="NestedDiagnosticsLogicalContext"/> stack.
/// </summary>
/// <docgen category='Payload Options' order='10' />
[Obsolete("Replaced by IncludeScopeNested. Marked obsolete on NLog 5.0")]
public bool IncludeNdlc { get => Renderer.IncludeNdlc; set => Renderer.IncludeNdlc = value; }
/// <summary>
/// Gets or sets the log4j:event logger-xml-attribute (Default ${logger})
/// </summary>
/// <docgen category='Payload Options' order='10' />
public Layout LoggerName
{
get => Renderer.LoggerName;
set => Renderer.LoggerName = value;
}
/// <summary>
/// Gets or sets the AppInfo field. By default it's the friendly name of the current AppDomain.
/// </summary>
/// <docgen category='Payload Options' order='10' />
public Layout AppInfo
{
get => Renderer.AppInfo;
set => Renderer.AppInfo = value;
}
/// <summary>
/// Gets or sets whether the log4j:throwable xml-element should be written as CDATA
/// </summary>
/// <docgen category='Payload Options' order='10' />
public bool WriteThrowableCData
{
get => Renderer.WriteThrowableCData;
set => Renderer.WriteThrowableCData = value;
}
/// <summary>
/// Gets or sets a value indicating whether to include call site (class and method name) in the information sent over the network.
/// </summary>
/// <docgen category='Payload Options' order='10' />
public bool IncludeCallSite
{
get => Renderer.IncludeCallSite;
set => Renderer.IncludeCallSite = value;
}
/// <summary>
/// Gets or sets a value indicating whether to include source info (file name and line number) in the information sent over the network.
/// </summary>
/// <docgen category='Payload Options' order='10' />
public bool IncludeSourceInfo
{
get => Renderer.IncludeSourceInfo;
set => Renderer.IncludeSourceInfo = value;
}
internal override void PrecalculateBuilder(LogEventInfo logEvent, StringBuilder target)
{
PrecalculateBuilderInternal(logEvent, target);
}
/// <summary>
/// Renders the layout for the specified logging event by invoking layout renderers.
/// </summary>
/// <param name="logEvent">The logging event.</param>
/// <returns>The rendered layout.</returns>
protected override string GetFormattedMessage(LogEventInfo logEvent)
{
return RenderAllocateBuilder(logEvent);
}
/// <summary>
/// Renders the layout for the specified logging event by invoking layout renderers.
/// </summary>
/// <param name="logEvent">The logging event.</param>
/// <param name="target"><see cref="StringBuilder"/> for the result</param>
protected override void RenderFormattedMessage(LogEventInfo logEvent, StringBuilder target)
{
Renderer.RenderAppendBuilder(logEvent, target);
}
}
} | 1 | 22,766 | Is the removal of `[AppDomainFixedOutput]` on purpose? (looks like a copy-paste...euh..remove error) | NLog-NLog | .cs |
@@ -1,4 +1,4 @@
-//snippet-sourcedescription:[ListUsers.java demonstrates how to list all AWS Identity and Access Management (IAM) users.]
+//snippet-sourcedescription:[ListUsers.java demonstrates how to list all AWS Identity and Access Management (AWS IAM) users.]
//snippet-keyword:[AWS SDK for Java v2]
//snippet-keyword:[Code Sample]
//snippet-service:[AWS IAM] | 1 | //snippet-sourcedescription:[ListUsers.java demonstrates how to list all AWS Identity and Access Management (IAM) users.]
//snippet-keyword:[AWS SDK for Java v2]
//snippet-keyword:[Code Sample]
//snippet-service:[AWS IAM]
//snippet-sourcetype:[full-example]
//snippet-sourcedate:[11/02/2020]
//snippet-sourceauthor:[scmacdon-aws]
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package com.example.iam;
// snippet-start:[iam.java2.list_users.import]
import software.amazon.awssdk.services.iam.model.IamException;
import software.amazon.awssdk.services.iam.model.ListUsersRequest;
import software.amazon.awssdk.services.iam.model.ListUsersResponse;
import software.amazon.awssdk.services.iam.model.User;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.iam.IamClient;
// snippet-end:[iam.java2.list_users.import]
public class ListUsers {
public static void main(String[] args) {
Region region = Region.AWS_GLOBAL;
IamClient iam = IamClient.builder()
.region(region)
.build();
listAllUsers(iam );
System.out.println("Done");
iam.close();
}
// snippet-start:[iam.java2.list_users.main]
public static void listAllUsers(IamClient iam ) {
try {
boolean done = false;
String newMarker = null;
while(!done) {
ListUsersResponse response;
if (newMarker == null) {
ListUsersRequest request = ListUsersRequest.builder().build();
response = iam.listUsers(request);
} else {
ListUsersRequest request = ListUsersRequest.builder()
.marker(newMarker).build();
response = iam.listUsers(request);
}
for(User user : response.users()) {
System.out.format("\n Retrieved user %s", user.userName());
}
if(!response.isTruncated()) {
done = true;
} else {
newMarker = response.marker();
}
}
} catch (IamException e) {
System.err.println(e.awsErrorDetails().errorMessage());
System.exit(1);
}
// snippet-end:[iam.java2.list_users.main]
}
}
| 1 | 18,249 | AWS Identity and Access Management (IAM) | awsdocs-aws-doc-sdk-examples | rb |
@@ -163,6 +163,7 @@ type APIV1POSTTransactionSignRequest struct {
WalletHandleToken string `json:"wallet_handle_token"`
// swagger:strfmt byte
Transaction []byte `json:"transaction"`
+ PublicKey crypto.PublicKey `json:"public_key"`
WalletPassword string `json:"wallet_password"`
}
| 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package kmdapi
import (
"github.com/algorand/go-algorand/crypto"
)
// APIV1Request is the interface that all API V1 requests must satisfy
//
// swagger:ignore
type APIV1Request interface{} // we need to tell swagger to ignore due to bug (go-swagger/issues/1436)
// APIV1RequestEnvelope is a common envelope that all API V1 requests must embed
//
// swagger:ignore
type APIV1RequestEnvelope struct { // we need to tell swagger to ignore due to bug (go-swagger/issues/1436)
_struct struct{} `codec:",omitempty,omitemptyarray"`
}
// VersionsRequest is the request for `GET /versions`
//
// swagger:model VersionsRequest
type VersionsRequest struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
}
// APIV1GETWalletsRequest is the request for `GET /v1/wallets`
//
// swagger:model ListWalletsRequest
type APIV1GETWalletsRequest struct {
APIV1RequestEnvelope
}
// APIV1POSTWalletRequest is the request for `POST /v1/wallet`
//
// swagger:model CreateWalletRequest
type APIV1POSTWalletRequest struct {
APIV1RequestEnvelope
WalletName string `json:"wallet_name"`
WalletDriverName string `json:"wallet_driver_name"`
WalletPassword string `json:"wallet_password"`
MasterDerivationKey APIV1MasterDerivationKey `json:"master_derivation_key"`
}
// APIV1POSTWalletInitRequest is the request for `POST /v1/wallet/init`
//
// swagger:model InitWalletHandleTokenRequest
type APIV1POSTWalletInitRequest struct {
APIV1RequestEnvelope
WalletID string `json:"wallet_id"`
WalletPassword string `json:"wallet_password"`
}
// APIV1POSTWalletReleaseRequest is the request for `POST /v1/wallet/release`
//
// swagger:model ReleaseWalletHandleTokenRequest
type APIV1POSTWalletReleaseRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
}
// APIV1POSTWalletRenewRequest is the request for `POST /v1/wallet/renew`
//
// swagger:model RenewWalletHandleTokenRequest
type APIV1POSTWalletRenewRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
}
// APIV1POSTWalletRenameRequest is the request for `POST /v1/wallet/rename`
//
// swagger:model RenameWalletRequest
type APIV1POSTWalletRenameRequest struct {
APIV1RequestEnvelope
WalletID string `json:"wallet_id"`
WalletPassword string `json:"wallet_password"`
NewWalletName string `json:"wallet_name"`
}
// APIV1POSTWalletInfoRequest is the request for `POST /v1/wallet/info`
//
// swagger:model WalletInfoRequest
type APIV1POSTWalletInfoRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
}
// APIV1POSTMasterKeyExportRequest is the request for `POST /v1/master-key/export`
//
// swagger:model ExportMasterKeyRequest
type APIV1POSTMasterKeyExportRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
WalletPassword string `json:"wallet_password"`
}
// APIV1POSTKeyImportRequest is the request for `POST /v1/key/import`
//
// swagger:model ImportKeyRequest
type APIV1POSTKeyImportRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
PrivateKey crypto.PrivateKey `json:"private_key"`
}
// APIV1POSTKeyExportRequest is the request for `POST /v1/key/export`
//
// swagger:model ExportKeyRequest
type APIV1POSTKeyExportRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
Address string `json:"address"`
WalletPassword string `json:"wallet_password"`
}
// APIV1POSTKeyRequest is the request for `POST /v1/key`
//
// swagger:model GenerateKeyRequest
type APIV1POSTKeyRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
DisplayMnemonic bool `json:"display_mnemonic"`
}
// APIV1DELETEKeyRequest is the request for `DELETE /v1/key`
//
// swagger:model DeleteKeyRequest
type APIV1DELETEKeyRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
Address string `json:"address"`
WalletPassword string `json:"wallet_password"`
}
// APIV1POSTKeyListRequest is the request for `POST /v1/key/list`
//
// swagger:model ListKeysRequest
type APIV1POSTKeyListRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
}
// APIV1POSTTransactionSignRequest is the request for `POST /v1/transaction/sign`
//
// swagger:model SignTransactionRequest
type APIV1POSTTransactionSignRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
// swagger:strfmt byte
Transaction []byte `json:"transaction"`
WalletPassword string `json:"wallet_password"`
}
// APIV1POSTProgramSignRequest is the request for `POST /v1/program/sign`
//
// swagger:model SignProgramRequest
type APIV1POSTProgramSignRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
Address string `json:"address"`
// swagger:strfmt byte
Program []byte `json:"data"`
WalletPassword string `json:"wallet_password"`
}
// APIV1POSTMultisigListRequest is the request for `POST /v1/multisig/list`
//
// swagger:model ListMultisigRequest
type APIV1POSTMultisigListRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
}
// APIV1POSTMultisigImportRequest is the request for `POST /v1/multisig/import`
//
// swagger:model ImportMultisigRequest
type APIV1POSTMultisigImportRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
Version uint8 `json:"multisig_version"`
Threshold uint8 `json:"threshold"`
PKs []crypto.PublicKey `json:"pks"`
}
// APIV1POSTMultisigExportRequest is the request for `POST /v1/multisig/export`
//
// swagger:model ExportMultisigRequest
type APIV1POSTMultisigExportRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
Address string `json:"address"`
}
// APIV1DELETEMultisigRequest is the request for `DELETE /v1/multisig`
//
// swagger:model DeleteMultisigRequest
type APIV1DELETEMultisigRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
Address string `json:"address"`
WalletPassword string `json:"wallet_password"`
}
// APIV1POSTMultisigTransactionSignRequest is the request for `POST /v1/multisig/sign`
//
// swagger:model SignMultisigRequest
type APIV1POSTMultisigTransactionSignRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
// swagger:strfmt byte
Transaction []byte `json:"transaction"`
PublicKey crypto.PublicKey `json:"public_key"`
PartialMsig crypto.MultisigSig `json:"partial_multisig"`
WalletPassword string `json:"wallet_password"`
}
// APIV1POSTMultisigProgramSignRequest is the request for `POST /v1/multisig/signprogram`
//
// swagger:model SignProgramMultisigRequest
type APIV1POSTMultisigProgramSignRequest struct {
APIV1RequestEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
Address string `json:"address"`
// swagger:strfmt byte
Program []byte `json:"data"`
PublicKey crypto.PublicKey `json:"public_key"`
PartialMsig crypto.MultisigSig `json:"partial_multisig"`
WalletPassword string `json:"wallet_password"`
}
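For illustration only (not taken from the repository): gofmt re-aligns the names, types and struct tags of consecutive fields, so the request type extended by the diff above would read roughly as below once formatted, with the newly added PublicKey field in place. All referenced types are ones this file already imports.

// Sketch of the gofmt-aligned struct; column widths are approximate.
type APIV1POSTTransactionSignRequest struct {
	APIV1RequestEnvelope
	WalletHandleToken string `json:"wallet_handle_token"`
	// swagger:strfmt byte
	Transaction    []byte           `json:"transaction"`
	PublicKey      crypto.PublicKey `json:"public_key"`
	WalletPassword string           `json:"wallet_password"`
}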
| 1 | 38,154 | gofmt everything pls | algorand-go-algorand | go |
@@ -243,6 +243,8 @@ type PodIOChaosStatus struct {
}
// +kubebuilder:object:root=true
+// +kubebuilder:printcolumn:name="action",type=string,JSONPath=`.spec.action`
+// +kubebuilder:printcolumn:name="duration",type=string,JSONPath=`.spec.duration`
// PodIOChaosList contains a list of PodIOChaos
type PodIOChaosList struct { | 1 | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package v1alpha1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// PodIOChaosSpec defines the desired state of IOChaos
type PodIOChaosSpec struct {
// VolumeMountPath represents the target mount path
// It must be a root of mount path now.
// TODO: search the mount parent of any path automatically.
// TODO: support multiple different volume mount path in one pod
VolumeMountPath string `json:"volumeMountPath"`
// TODO: support multiple different container to inject in one pod
// +optional
Container *string `json:"container,omitempty"`
// Actions are a list of IOChaos actions
// +optional
Actions []IOChaosAction `json:"actions,omitempty"`
}
// IOChaosAction defines a possible action of IOChaos
type IOChaosAction struct {
Type IOChaosType `json:"type"`
Filter `json:",inline"`
// Faults represents the fault to inject
// +optional
Faults []IoFault `json:"faults,omitempty"`
// Latency represents the latency to inject
// +optional
Latency string `json:"latency,omitempty"`
// AttrOverride represents the attribution to override
// +optional
*AttrOverrideSpec `json:",inline"`
// MistakeSpec represents the mistake to inject
// +optional
*MistakeSpec `json:"mistake,omitempty"`
// Source represents the source of current rules
Source string `json:"source,omitempty"`
}
// IOChaosType represents the type of an IOChaos Action
type IOChaosType string
const (
// IoLatency represents injecting latency for io operation
IoLatency IOChaosType = "latency"
// IoFaults represents injecting faults for io operation
IoFaults IOChaosType = "fault"
// IoAttrOverride represents replacing attribution for io operation
IoAttrOverride IOChaosType = "attrOverride"
// IoMistake represents injecting incorrect read or write for io operation
IoMistake IOChaosType = "mistake"
)
// Filter represents a filter of IOChaos action, which will define the
// scope of an IOChaosAction
type Filter struct {
// Path represents a glob of injecting path
Path string `json:"path"`
// Methods represents the method that the action will inject in
// +optional
Methods []IoMethod `json:"methods,omitempty"`
// Percent represents the percent probability of injecting this action
Percent int `json:"percent"`
}
// IoFault represents the fault to inject and their weight
type IoFault struct {
Errno uint32 `json:"errno"`
Weight int32 `json:"weight"`
}
// AttrOverrideSpec represents an override of attribution
type AttrOverrideSpec struct {
//+optional
Ino *uint64 `json:"ino,omitempty"`
//+optional
Size *uint64 `json:"size,omitempty"`
//+optional
Blocks *uint64 `json:"blocks,omitempty"`
//+optional
Atime *Timespec `json:"atime,omitempty"`
//+optional
Mtime *Timespec `json:"mtime,omitempty"`
//+optional
Ctime *Timespec `json:"ctime,omitempty"`
//+optional
Kind *FileType `json:"kind,omitempty"`
//+optional
Perm *uint16 `json:"perm,omitempty"`
//+optional
Nlink *uint32 `json:"nlink,omitempty"`
//+optional
UID *uint32 `json:"uid,omitempty"`
//+optional
GID *uint32 `json:"gid,omitempty"`
//+optional
Rdev *uint32 `json:"rdev,omitempty"`
}
// MistakeSpec represents one type of mistake
type MistakeSpec struct {
// Filling determines what is filled in the mistake data.
// +optional
// +kubebuilder:validation:Enum=zero;random
Filling FillingType `json:"filling,omitempty"`
// There will be [1, MaxOccurrences] segments of wrong data.
// +optional
// +kubebuilder:validation:Minimum=1
MaxOccurrences int64 `json:"maxOccurrences,omitempty"`
// Max length of each wrong data segment in bytes
// +optional
// +kubebuilder:validation:Minimum=1
MaxLength int64 `json:"maxLength,omitempty"`
}
// FillingType represents type of data is filled for incorrectness
type FillingType string
const (
// All zero
Zero FillingType = "zero"
// Random octets
Random FillingType = "random"
)
// Timespec represents a time
type Timespec struct {
Sec int64 `json:"sec"`
Nsec int64 `json:"nsec"`
}
// FileType represents type of a file
type FileType string
const (
NamedPipe FileType = "namedPipe"
CharDevice FileType = "charDevice"
BlockDevice FileType = "blockDevice"
Directory FileType = "directory"
RegularFile FileType = "regularFile"
TSymlink FileType = "symlink"
Socket FileType = "socket"
)
type IoMethod string
const (
LookUp IoMethod = "lookup"
Forget IoMethod = "forget"
GetAttr IoMethod = "getattr"
SetAttr IoMethod = "setattr"
ReadLink IoMethod = "readlink"
Mknod IoMethod = "mknod"
Mkdir IoMethod = "mkdir"
UnLink IoMethod = "unlink"
Rmdir IoMethod = "rmdir"
MSymlink IoMethod = "symlink"
Rename IoMethod = "rename"
Link IoMethod = "link"
Open IoMethod = "open"
Read IoMethod = "read"
Write IoMethod = "write"
Flush IoMethod = "flush"
Release IoMethod = "release"
Fsync IoMethod = "fsync"
Opendir IoMethod = "opendir"
Readdir IoMethod = "readdir"
Releasedir IoMethod = "releasedir"
Fsyncdir IoMethod = "fsyncdir"
Statfs IoMethod = "statfs"
SetXAttr IoMethod = "setxattr"
GetXAttr IoMethod = "getxattr"
ListXAttr IoMethod = "listxattr"
RemoveXAttr IoMethod = "removexattr"
Access IoMethod = "access"
Create IoMethod = "create"
GetLk IoMethod = "getlk"
SetLk IoMethod = "setlk"
Bmap IoMethod = "bmap"
)
// +chaos-mesh:base
// +chaos-mesh:webhook:enableUpdate
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// PodIOChaos is the Schema for the podiochaos API
type PodIOChaos struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec PodIOChaosSpec `json:"spec,omitempty"`
//+optional
Status PodIOChaosStatus `json:"status,omitempty"`
}
type PodIOChaosStatus struct {
// Pid represents a running toda process id
// +optional
Pid int64 `json:"pid,omitempty"`
// StartTime represents the start time of a toda process
// +optional
StartTime int64 `json:"startTime,omitempty"`
// +optional
FailedMessage string `json:"failedMessage,omitempty"`
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}
// +kubebuilder:object:root=true
// PodIOChaosList contains a list of PodIOChaos
type PodIOChaosList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []PodIOChaos `json:"items"`
}
| 1 | 25,454 | ditto, We do not need add this feature for PodXXXChaos | chaos-mesh-chaos-mesh | go |
@@ -38,8 +38,9 @@ public class SampleNamer extends NameFormatterDelegator {
/**
* Returns the variable name of the service.
*/
- public String getServiceVarName(String apiTypeName) {
- return localVarName(Name.lowerCamel(Name.upperCamel(apiTypeName).toLowerCamel(), "service"));
+ public String getServiceVarName(String lowerCamelApiTypeName) {
+ return localVarName(
+ Name.lowerCamel(Name.lowerCamel(lowerCamelApiTypeName).toLowerCamel(), "service"));
}
/** | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.discovery.transformer;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.NameFormatter;
import com.google.api.codegen.util.NameFormatterDelegator;
import com.google.common.base.Strings;
/**
* Provides language-specific names for variables and classes.
*/
public class SampleNamer extends NameFormatterDelegator {
public SampleNamer(NameFormatter nameFormatter) {
super(nameFormatter);
}
/**
* Returns the class name of the sample.
*/
public String getSampleClassName(String apiTypeName) {
return className(Name.upperCamel(apiTypeName, "Example"));
}
/**
* Returns the variable name of the service.
*/
public String getServiceVarName(String apiTypeName) {
return localVarName(Name.lowerCamel(Name.upperCamel(apiTypeName).toLowerCamel(), "service"));
}
/**
* Returns the variable name for a field.
*/
public String getFieldVarName(String fieldName) {
return localVarName(Name.lowerCamel(fieldName));
}
/**
* Returns the resource getter method name for a resource field.
*/
public String getResourceGetterName(String fieldName) {
return publicMethodName(Name.lowerCamel("get", fieldName));
}
/**
* Returns the variable name for a resource field.
*
* If resourceTypeName is an empty string, "item" is returned.
*/
public String getResourceVarName(String resourceTypeName) {
if (Strings.isNullOrEmpty(resourceTypeName)) {
return localVarName(Name.lowerCamel("item"));
}
return localVarName(Name.upperCamel(resourceTypeName));
}
/**
* Returns the variable name of the request.
*/
public String getRequestVarName() {
return localVarName(Name.lowerCamel("request"));
}
/**
* Returns the variable name of the request body.
*/
public String getRequestBodyVarName() {
return localVarName(Name.lowerCamel("requestBody"));
}
/**
* Returns the variable name of the response.
*/
public String getResponseVarName() {
return localVarName(Name.lowerCamel("response"));
}
}
| 1 | 18,332 | There is a simpler way to do this: 1. Don't bother with the whole lowerCamelApiTypeName in SampleConfig 2. Use this code: `return localVarName(Name.upperCamel(apiTypeName, "Service"));` Basically, each of the static creation methods of Name just need to have their arguments all be in the same casing, so you would just pass the upper camel string "Service" here. | googleapis-gapic-generator | java |
@@ -28,7 +28,7 @@ using EdgeData = util::NodeBasedDynamicGraph::EdgeData;
bool requiresAnnouncement(const EdgeData &from, const EdgeData &to)
{
- return !from.IsCompatibleTo(to);
+ return !from.CanCombineWith(to);
}
TurnAnalysis::TurnAnalysis(const util::NodeBasedDynamicGraph &node_based_graph, | 1 | #include "extractor/guidance/turn_analysis.hpp"
#include "extractor/guidance/constants.hpp"
#include "extractor/guidance/road_classification.hpp"
#include "util/coordinate.hpp"
#include "util/coordinate_calculation.hpp"
#include "util/guidance/toolkit.hpp"
#include "util/simple_logger.hpp"
#include <cstddef>
#include <iomanip>
#include <limits>
#include <map>
#include <set>
#include <unordered_map>
#include <unordered_set>
using osrm::util::guidance::getTurnDirection;
namespace osrm
{
namespace extractor
{
namespace guidance
{
using EdgeData = util::NodeBasedDynamicGraph::EdgeData;
bool requiresAnnouncement(const EdgeData &from, const EdgeData &to)
{
return !from.IsCompatibleTo(to);
}
TurnAnalysis::TurnAnalysis(const util::NodeBasedDynamicGraph &node_based_graph,
const std::vector<QueryNode> &node_info_list,
const RestrictionMap &restriction_map,
const std::unordered_set<NodeID> &barrier_nodes,
const CompressedEdgeContainer &compressed_edge_container,
const util::NameTable &name_table,
const SuffixTable &street_name_suffix_table,
const ProfileProperties &profile_properties)
: node_based_graph(node_based_graph), intersection_generator(node_based_graph,
restriction_map,
barrier_nodes,
node_info_list,
compressed_edge_container),
roundabout_handler(node_based_graph,
node_info_list,
compressed_edge_container,
name_table,
street_name_suffix_table,
profile_properties,
intersection_generator),
motorway_handler(node_based_graph,
node_info_list,
name_table,
street_name_suffix_table,
intersection_generator),
turn_handler(node_based_graph,
node_info_list,
name_table,
street_name_suffix_table,
intersection_generator),
sliproad_handler(intersection_generator,
node_based_graph,
node_info_list,
name_table,
street_name_suffix_table)
{
}
Intersection TurnAnalysis::assignTurnTypes(const NodeID from_nid,
const EdgeID via_eid,
Intersection intersection) const
{
// Roundabouts are a main priority. If there is a roundabout instruction present, we process the
// turn as a roundabout
if (roundabout_handler.canProcess(from_nid, via_eid, intersection))
{
intersection = roundabout_handler(from_nid, via_eid, std::move(intersection));
}
else
{
// set initial defaults for normal turns and modifier based on angle
intersection = setTurnTypes(from_nid, via_eid, std::move(intersection));
if (motorway_handler.canProcess(from_nid, via_eid, intersection))
{
intersection = motorway_handler(from_nid, via_eid, std::move(intersection));
}
else
{
BOOST_ASSERT(turn_handler.canProcess(from_nid, via_eid, intersection));
intersection = turn_handler(from_nid, via_eid, std::move(intersection));
}
}
// Handle sliproads
if (sliproad_handler.canProcess(from_nid, via_eid, intersection))
intersection = sliproad_handler(from_nid, via_eid, std::move(intersection));
// Turn On Ramps Into Off Ramps, if we come from a motorway-like road
if (node_based_graph.GetEdgeData(via_eid).road_classification.IsMotorwayClass())
{
std::for_each(intersection.begin(), intersection.end(), [](ConnectedRoad &road) {
if (road.turn.instruction.type == TurnType::OnRamp)
road.turn.instruction.type = TurnType::OffRamp;
});
}
return intersection;
}
std::vector<TurnOperation>
TurnAnalysis::transformIntersectionIntoTurns(const Intersection &intersection) const
{
std::vector<TurnOperation> turns;
for (auto road : intersection)
if (road.entry_allowed)
turns.emplace_back(road.turn);
return turns;
}
Intersection TurnAnalysis::getIntersection(const NodeID from_nid, const EdgeID via_eid) const
{
return intersection_generator(from_nid, via_eid);
}
// Sets basic turn types as fallback for otherwise unhandled turns
Intersection
TurnAnalysis::setTurnTypes(const NodeID from_nid, const EdgeID, Intersection intersection) const
{
for (auto &road : intersection)
{
if (!road.entry_allowed)
continue;
const EdgeID onto_edge = road.turn.eid;
const NodeID to_nid = node_based_graph.GetTarget(onto_edge);
road.turn.instruction = {TurnType::Turn,
(from_nid == to_nid) ? DirectionModifier::UTurn
: getTurnDirection(road.turn.angle)};
}
return intersection;
}
const IntersectionGenerator &TurnAnalysis::getGenerator() const { return intersection_generator; }
} // namespace guidance
} // namespace extractor
} // namespace osrm
| 1 | 17,271 | Can you explain this change to me? | Project-OSRM-osrm-backend | cpp |
@@ -19,7 +19,7 @@ const (
OpenEBSArtifacts ArtifactSource = "../artifacts/openebs-ci.yaml"
CStorPVCArtifacts ArtifactSource = "../artifacts/cstor-pvc.yaml"
JivaPVCArtifacts ArtifactSource = "../artifacts/jiva-pvc.yaml"
- SingleReplicaSC ArtifactSource = "../artifacts/single-replica.yaml"
+ SingleReplicaSC ArtifactSource = "../artifacts/storageclass-1r.yaml"
CVRArtifact ArtifactSource = "../artifacts/cvr-schema.yaml"
CRArtifact ArtifactSource = "../artifacts/cr-schema.yaml"
) | 1 | package artifacts
import (
"errors"
"io/ioutil"
"strings"
k8s "github.com/openebs/maya/pkg/client/k8s/v1alpha1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/openebs/maya/pkg/artifact/v1alpha1"
)
// ArtifactSource holds the path to fetch artifacts
type ArtifactSource string
type Artifact string
const (
OpenEBSArtifacts ArtifactSource = "../artifacts/openebs-ci.yaml"
CStorPVCArtifacts ArtifactSource = "../artifacts/cstor-pvc.yaml"
JivaPVCArtifacts ArtifactSource = "../artifacts/jiva-pvc.yaml"
SingleReplicaSC ArtifactSource = "../artifacts/single-replica.yaml"
CVRArtifact ArtifactSource = "../artifacts/cvr-schema.yaml"
CRArtifact ArtifactSource = "../artifacts/cr-schema.yaml"
)
// PodName holds the name of the pod
type PodName string
const (
// MayaAPIServerPodName is the name of the maya api server pod
MayaAPIServerPodName PodName = "maya-apiserver"
)
// Namespace holds the name of the namespace
type Namespace string
const (
// OpenebsNamespace is the name of the openebs namespace
OpenebsNamespace Namespace = "openebs"
)
// LabelSelector holds the label got openebs components
type LabelSelector string
const (
MayaAPIServerLabelSelector LabelSelector = "name=maya-apiserver"
OpenEBSProvisionerLabelSelector LabelSelector = "name=openebs-provisioner"
OpenEBSSnapshotOperatorLabelSelector LabelSelector = "name=openebs-snapshot-operator"
OpenEBSAdmissionServerLabelSelector LabelSelector = "app=admission-webhook"
OpenEBSNDMLabelSelector LabelSelector = "name=openebs-ndm"
OpenEBSCStorPoolLabelSelector LabelSelector = "app=cstor-pool"
)
func parseK8sYaml(yamls string) (k8s.UnstructedList, []error) {
sepYamlfiles := strings.Split(yamls, "---")
artifacts := v1alpha1.ArtifactList{}
for _, f := range sepYamlfiles {
if f == "\n" || f == "" {
// ignore empty cases
continue
}
f = strings.TrimSpace(f)
artifacts.Items = append(artifacts.Items, &v1alpha1.Artifact{Doc: f})
}
return artifacts.ToUnstructuredList()
}
// parseK8sYamlFromFile parses the kubernetes yaml and returns the objects in a UnstructuredList
func parseK8sYamlFromFile(filename string) (k8s.UnstructedList, []error) {
fileBytes, err := ioutil.ReadFile(filename)
if err != nil {
return k8s.UnstructedList{}, []error{err}
}
fileAsString := string(fileBytes[:])
return parseK8sYaml(fileAsString)
}
// GetArtifactsListUnstructuredFromFile returns the unstructured list of openebs components
func GetArtifactsListUnstructuredFromFile(a ArtifactSource) ([]*unstructured.Unstructured, []error) {
ulist, err := parseK8sYamlFromFile(string(a))
if err != nil {
return nil, err
}
nList := ulist.MapAllIfAny([]k8s.UnstructuredMiddleware{})
return nList.Items, err
}
// GetArtifactUnstructuredFromFile returns the unstructured list of openebs components
func GetArtifactUnstructuredFromFile(a ArtifactSource) (*unstructured.Unstructured, error) {
ulist, err := parseK8sYamlFromFile(string(a))
if len(err) != 0 {
return nil, err[0]
}
if len(ulist.Items) != 1 {
		return nil, errors.New("more than one artifact found")
}
nList := ulist.MapAllIfAny([]k8s.UnstructuredMiddleware{})
return nList.Items[0], nil
}
// GetArtifactsListUnstructured returns the unstructured list of openebs components
func GetArtifactsListUnstructured(a Artifact) ([]*unstructured.Unstructured, []error) {
ulist, err := parseK8sYaml(strings.TrimSpace(string(a)))
if err != nil {
return nil, err
}
nList := ulist.MapAllIfAny([]k8s.UnstructuredMiddleware{})
return nList.Items, err
}
// GetArtifactUnstructured returns the unstructured list of openebs components
func GetArtifactUnstructured(a Artifact) (*unstructured.Unstructured, error) {
ulist, err := parseK8sYaml(string(a))
if len(err) != 0 {
return nil, err[0]
}
if len(ulist.Items) != 1 {
		return nil, errors.New("more than one artifact found")
}
nList := ulist.MapAllIfAny([]k8s.UnstructuredMiddleware{})
return nList.Items[0], nil
}
| 1 | 12,465 | let us rename the artifact to `storageclass-single-replica.yaml` | openebs-maya | go |
@@ -5,7 +5,7 @@ using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
-namespace Microsoft.AspNetCore.Server.Kestrel.Internal.Networking
+namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Libuv.Internal.Networking
{
public class LibuvFunctions
{ | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace Microsoft.AspNetCore.Server.Kestrel.Internal.Networking
{
public class LibuvFunctions
{
public LibuvFunctions()
{
IsWindows = PlatformApis.IsWindows;
_uv_loop_init = NativeMethods.uv_loop_init;
_uv_loop_close = NativeMethods.uv_loop_close;
_uv_run = NativeMethods.uv_run;
_uv_stop = NativeMethods.uv_stop;
_uv_ref = NativeMethods.uv_ref;
_uv_unref = NativeMethods.uv_unref;
_uv_fileno = NativeMethods.uv_fileno;
_uv_close = NativeMethods.uv_close;
_uv_async_init = NativeMethods.uv_async_init;
_uv_async_send = NativeMethods.uv_async_send;
_uv_unsafe_async_send = NativeMethods.uv_unsafe_async_send;
_uv_tcp_init = NativeMethods.uv_tcp_init;
_uv_tcp_bind = NativeMethods.uv_tcp_bind;
_uv_tcp_open = NativeMethods.uv_tcp_open;
_uv_tcp_nodelay = NativeMethods.uv_tcp_nodelay;
_uv_pipe_init = NativeMethods.uv_pipe_init;
_uv_pipe_bind = NativeMethods.uv_pipe_bind;
_uv_listen = NativeMethods.uv_listen;
_uv_accept = NativeMethods.uv_accept;
_uv_pipe_connect = NativeMethods.uv_pipe_connect;
_uv_pipe_pending_count = NativeMethods.uv_pipe_pending_count;
_uv_read_start = NativeMethods.uv_read_start;
_uv_read_stop = NativeMethods.uv_read_stop;
_uv_try_write = NativeMethods.uv_try_write;
unsafe
{
_uv_write = NativeMethods.uv_write;
_uv_write2 = NativeMethods.uv_write2;
}
_uv_shutdown = NativeMethods.uv_shutdown;
_uv_err_name = NativeMethods.uv_err_name;
_uv_strerror = NativeMethods.uv_strerror;
_uv_loop_size = NativeMethods.uv_loop_size;
_uv_handle_size = NativeMethods.uv_handle_size;
_uv_req_size = NativeMethods.uv_req_size;
_uv_ip4_addr = NativeMethods.uv_ip4_addr;
_uv_ip6_addr = NativeMethods.uv_ip6_addr;
_uv_tcp_getpeername = NativeMethods.uv_tcp_getpeername;
_uv_tcp_getsockname = NativeMethods.uv_tcp_getsockname;
_uv_walk = NativeMethods.uv_walk;
_uv_timer_init = NativeMethods.uv_timer_init;
_uv_timer_start = NativeMethods.uv_timer_start;
_uv_timer_stop = NativeMethods.uv_timer_stop;
_uv_now = NativeMethods.uv_now;
}
// Second ctor that doesn't set any fields only to be used by MockLibuv
public LibuvFunctions(bool onlyForTesting)
{
}
public readonly bool IsWindows;
public void ThrowIfErrored(int statusCode)
{
// Note: method is explicitly small so the success case is easily inlined
if (statusCode < 0)
{
ThrowError(statusCode);
}
}
private void ThrowError(int statusCode)
{
            // Note: only has one throw block so it will be marked as "Does not return" by the jit
// and not inlined into previous function, while also marking as a function
// that does not need cpu register prep to call (see: https://github.com/dotnet/coreclr/pull/6103)
throw GetError(statusCode);
}
public void Check(int statusCode, out Exception error)
{
// Note: method is explicitly small so the success case is easily inlined
error = statusCode < 0 ? GetError(statusCode) : null;
}
[MethodImpl(MethodImplOptions.NoInlining)]
private UvException GetError(int statusCode)
{
            // Note: method marked as NoInlining so it doesn't bloat either of the two preceding functions
            // (Check and ThrowError) or alter their jit heuristics.
var errorName = err_name(statusCode);
var errorDescription = strerror(statusCode);
return new UvException("Error " + statusCode + " " + errorName + " " + errorDescription, statusCode);
}
protected Func<UvLoopHandle, int> _uv_loop_init;
public void loop_init(UvLoopHandle handle)
{
ThrowIfErrored(_uv_loop_init(handle));
}
protected Func<IntPtr, int> _uv_loop_close;
public void loop_close(UvLoopHandle handle)
{
handle.Validate(closed: true);
ThrowIfErrored(_uv_loop_close(handle.InternalGetHandle()));
}
protected Func<UvLoopHandle, int, int> _uv_run;
public void run(UvLoopHandle handle, int mode)
{
handle.Validate();
ThrowIfErrored(_uv_run(handle, mode));
}
protected Action<UvLoopHandle> _uv_stop;
public void stop(UvLoopHandle handle)
{
handle.Validate();
_uv_stop(handle);
}
protected Action<UvHandle> _uv_ref;
public void @ref(UvHandle handle)
{
handle.Validate();
_uv_ref(handle);
}
protected Action<UvHandle> _uv_unref;
public void unref(UvHandle handle)
{
handle.Validate();
_uv_unref(handle);
}
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
protected delegate int uv_fileno_func(UvHandle handle, ref IntPtr socket);
protected uv_fileno_func _uv_fileno;
public void uv_fileno(UvHandle handle, ref IntPtr socket)
{
handle.Validate();
ThrowIfErrored(_uv_fileno(handle, ref socket));
}
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
public delegate void uv_close_cb(IntPtr handle);
protected Action<IntPtr, uv_close_cb> _uv_close;
public void close(UvHandle handle, uv_close_cb close_cb)
{
handle.Validate(closed: true);
_uv_close(handle.InternalGetHandle(), close_cb);
}
public void close(IntPtr handle, uv_close_cb close_cb)
{
_uv_close(handle, close_cb);
}
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
public delegate void uv_async_cb(IntPtr handle);
protected Func<UvLoopHandle, UvAsyncHandle, uv_async_cb, int> _uv_async_init;
public void async_init(UvLoopHandle loop, UvAsyncHandle handle, uv_async_cb cb)
{
loop.Validate();
handle.Validate();
ThrowIfErrored(_uv_async_init(loop, handle, cb));
}
protected Func<UvAsyncHandle, int> _uv_async_send;
public void async_send(UvAsyncHandle handle)
{
ThrowIfErrored(_uv_async_send(handle));
}
protected Func<IntPtr, int> _uv_unsafe_async_send;
public void unsafe_async_send(IntPtr handle)
{
ThrowIfErrored(_uv_unsafe_async_send(handle));
}
protected Func<UvLoopHandle, UvTcpHandle, int> _uv_tcp_init;
public void tcp_init(UvLoopHandle loop, UvTcpHandle handle)
{
loop.Validate();
handle.Validate();
ThrowIfErrored(_uv_tcp_init(loop, handle));
}
protected delegate int uv_tcp_bind_func(UvTcpHandle handle, ref SockAddr addr, int flags);
protected uv_tcp_bind_func _uv_tcp_bind;
public void tcp_bind(UvTcpHandle handle, ref SockAddr addr, int flags)
{
handle.Validate();
ThrowIfErrored(_uv_tcp_bind(handle, ref addr, flags));
}
protected Func<UvTcpHandle, IntPtr, int> _uv_tcp_open;
public void tcp_open(UvTcpHandle handle, IntPtr hSocket)
{
handle.Validate();
ThrowIfErrored(_uv_tcp_open(handle, hSocket));
}
protected Func<UvTcpHandle, int, int> _uv_tcp_nodelay;
public void tcp_nodelay(UvTcpHandle handle, bool enable)
{
handle.Validate();
ThrowIfErrored(_uv_tcp_nodelay(handle, enable ? 1 : 0));
}
protected Func<UvLoopHandle, UvPipeHandle, int, int> _uv_pipe_init;
public void pipe_init(UvLoopHandle loop, UvPipeHandle handle, bool ipc)
{
loop.Validate();
handle.Validate();
ThrowIfErrored(_uv_pipe_init(loop, handle, ipc ? -1 : 0));
}
protected Func<UvPipeHandle, string, int> _uv_pipe_bind;
public void pipe_bind(UvPipeHandle handle, string name)
{
handle.Validate();
ThrowIfErrored(_uv_pipe_bind(handle, name));
}
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
public delegate void uv_connection_cb(IntPtr server, int status);
protected Func<UvStreamHandle, int, uv_connection_cb, int> _uv_listen;
public void listen(UvStreamHandle handle, int backlog, uv_connection_cb cb)
{
handle.Validate();
ThrowIfErrored(_uv_listen(handle, backlog, cb));
}
protected Func<UvStreamHandle, UvStreamHandle, int> _uv_accept;
public void accept(UvStreamHandle server, UvStreamHandle client)
{
server.Validate();
client.Validate();
ThrowIfErrored(_uv_accept(server, client));
}
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
public delegate void uv_connect_cb(IntPtr req, int status);
protected Action<UvConnectRequest, UvPipeHandle, string, uv_connect_cb> _uv_pipe_connect;
public void pipe_connect(UvConnectRequest req, UvPipeHandle handle, string name, uv_connect_cb cb)
{
req.Validate();
handle.Validate();
_uv_pipe_connect(req, handle, name, cb);
}
protected Func<UvPipeHandle, int> _uv_pipe_pending_count;
public int pipe_pending_count(UvPipeHandle handle)
{
handle.Validate();
return _uv_pipe_pending_count(handle);
}
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
public delegate void uv_alloc_cb(IntPtr server, int suggested_size, out uv_buf_t buf);
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
public delegate void uv_read_cb(IntPtr server, int nread, ref uv_buf_t buf);
protected Func<UvStreamHandle, uv_alloc_cb, uv_read_cb, int> _uv_read_start;
public void read_start(UvStreamHandle handle, uv_alloc_cb alloc_cb, uv_read_cb read_cb)
{
handle.Validate();
ThrowIfErrored(_uv_read_start(handle, alloc_cb, read_cb));
}
protected Func<UvStreamHandle, int> _uv_read_stop;
public void read_stop(UvStreamHandle handle)
{
handle.Validate();
ThrowIfErrored(_uv_read_stop(handle));
}
protected Func<UvStreamHandle, uv_buf_t[], int, int> _uv_try_write;
public int try_write(UvStreamHandle handle, uv_buf_t[] bufs, int nbufs)
{
handle.Validate();
var count = _uv_try_write(handle, bufs, nbufs);
ThrowIfErrored(count);
return count;
}
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
public delegate void uv_write_cb(IntPtr req, int status);
unsafe protected delegate int uv_write_func(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, uv_write_cb cb);
protected uv_write_func _uv_write;
unsafe public void write(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, uv_write_cb cb)
{
req.Validate();
handle.Validate();
ThrowIfErrored(_uv_write(req, handle, bufs, nbufs, cb));
}
unsafe protected delegate int uv_write2_func(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, UvStreamHandle sendHandle, uv_write_cb cb);
protected uv_write2_func _uv_write2;
unsafe public void write2(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, UvStreamHandle sendHandle, uv_write_cb cb)
{
req.Validate();
handle.Validate();
ThrowIfErrored(_uv_write2(req, handle, bufs, nbufs, sendHandle, cb));
}
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
public delegate void uv_shutdown_cb(IntPtr req, int status);
protected Func<UvShutdownReq, UvStreamHandle, uv_shutdown_cb, int> _uv_shutdown;
public void shutdown(UvShutdownReq req, UvStreamHandle handle, uv_shutdown_cb cb)
{
req.Validate();
handle.Validate();
ThrowIfErrored(_uv_shutdown(req, handle, cb));
}
protected Func<int, IntPtr> _uv_err_name;
public string err_name(int err)
{
IntPtr ptr = _uv_err_name(err);
return ptr == IntPtr.Zero ? null : Marshal.PtrToStringAnsi(ptr);
}
protected Func<int, IntPtr> _uv_strerror;
public string strerror(int err)
{
IntPtr ptr = _uv_strerror(err);
return ptr == IntPtr.Zero ? null : Marshal.PtrToStringAnsi(ptr);
}
protected Func<int> _uv_loop_size;
public int loop_size()
{
return _uv_loop_size();
}
protected Func<HandleType, int> _uv_handle_size;
public int handle_size(HandleType handleType)
{
return _uv_handle_size(handleType);
}
protected Func<RequestType, int> _uv_req_size;
public int req_size(RequestType reqType)
{
return _uv_req_size(reqType);
}
protected delegate int uv_ip4_addr_func(string ip, int port, out SockAddr addr);
protected uv_ip4_addr_func _uv_ip4_addr;
public void ip4_addr(string ip, int port, out SockAddr addr, out Exception error)
{
Check(_uv_ip4_addr(ip, port, out addr), out error);
}
protected delegate int uv_ip6_addr_func(string ip, int port, out SockAddr addr);
protected uv_ip6_addr_func _uv_ip6_addr;
public void ip6_addr(string ip, int port, out SockAddr addr, out Exception error)
{
Check(_uv_ip6_addr(ip, port, out addr), out error);
}
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
public delegate void uv_walk_cb(IntPtr handle, IntPtr arg);
protected Func<UvLoopHandle, uv_walk_cb, IntPtr, int> _uv_walk;
public void walk(UvLoopHandle loop, uv_walk_cb walk_cb, IntPtr arg)
{
loop.Validate();
_uv_walk(loop, walk_cb, arg);
}
protected Func<UvLoopHandle, UvTimerHandle, int> _uv_timer_init;
unsafe public void timer_init(UvLoopHandle loop, UvTimerHandle handle)
{
loop.Validate();
handle.Validate();
ThrowIfErrored(_uv_timer_init(loop, handle));
}
[UnmanagedFunctionPointer(CallingConvention.Cdecl)]
public delegate void uv_timer_cb(IntPtr handle);
protected Func<UvTimerHandle, uv_timer_cb, long, long, int> _uv_timer_start;
unsafe public void timer_start(UvTimerHandle handle, uv_timer_cb cb, long timeout, long repeat)
{
handle.Validate();
ThrowIfErrored(_uv_timer_start(handle, cb, timeout, repeat));
}
protected Func<UvTimerHandle, int> _uv_timer_stop;
unsafe public void timer_stop(UvTimerHandle handle)
{
handle.Validate();
ThrowIfErrored(_uv_timer_stop(handle));
}
protected Func<UvLoopHandle, long> _uv_now;
unsafe public long now(UvLoopHandle loop)
{
loop.Validate();
return _uv_now(loop);
}
public delegate int uv_tcp_getsockname_func(UvTcpHandle handle, out SockAddr addr, ref int namelen);
protected uv_tcp_getsockname_func _uv_tcp_getsockname;
public void tcp_getsockname(UvTcpHandle handle, out SockAddr addr, ref int namelen)
{
handle.Validate();
ThrowIfErrored(_uv_tcp_getsockname(handle, out addr, ref namelen));
}
public delegate int uv_tcp_getpeername_func(UvTcpHandle handle, out SockAddr addr, ref int namelen);
protected uv_tcp_getpeername_func _uv_tcp_getpeername;
public void tcp_getpeername(UvTcpHandle handle, out SockAddr addr, ref int namelen)
{
handle.Validate();
ThrowIfErrored(_uv_tcp_getpeername(handle, out addr, ref namelen));
}
public uv_buf_t buf_init(IntPtr memory, int len)
{
return new uv_buf_t(memory, len, IsWindows);
}
public struct uv_buf_t
{
// this type represents a WSABUF struct on Windows
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms741542(v=vs.85).aspx
// and an iovec struct on *nix
// http://man7.org/linux/man-pages/man2/readv.2.html
// because the order of the fields in these structs is different, the field
// names in this type don't have meaningful symbolic names. instead, they are
// assigned in the correct order by the constructor at runtime
private readonly IntPtr _field0;
private readonly IntPtr _field1;
public uv_buf_t(IntPtr memory, int len, bool IsWindows)
{
if (IsWindows)
{
_field0 = (IntPtr)len;
_field1 = memory;
}
else
{
_field0 = memory;
_field1 = (IntPtr)len;
}
}
}
public enum HandleType
{
Unknown = 0,
ASYNC,
CHECK,
FS_EVENT,
FS_POLL,
HANDLE,
IDLE,
NAMED_PIPE,
POLL,
PREPARE,
PROCESS,
STREAM,
TCP,
TIMER,
TTY,
UDP,
SIGNAL,
}
public enum RequestType
{
Unknown = 0,
REQ,
CONNECT,
WRITE,
SHUTDOWN,
UDP_SEND,
FS,
WORK,
GETADDRINFO,
GETNAMEINFO,
}
private static class NativeMethods
{
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_loop_init(UvLoopHandle handle);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_loop_close(IntPtr a0);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_run(UvLoopHandle handle, int mode);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern void uv_stop(UvLoopHandle handle);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern void uv_ref(UvHandle handle);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern void uv_unref(UvHandle handle);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_fileno(UvHandle handle, ref IntPtr socket);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern void uv_close(IntPtr handle, uv_close_cb close_cb);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_async_init(UvLoopHandle loop, UvAsyncHandle handle, uv_async_cb cb);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public extern static int uv_async_send(UvAsyncHandle handle);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl, EntryPoint = "uv_async_send")]
public extern static int uv_unsafe_async_send(IntPtr handle);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_tcp_init(UvLoopHandle loop, UvTcpHandle handle);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_tcp_bind(UvTcpHandle handle, ref SockAddr addr, int flags);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_tcp_open(UvTcpHandle handle, IntPtr hSocket);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_tcp_nodelay(UvTcpHandle handle, int enable);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_pipe_init(UvLoopHandle loop, UvPipeHandle handle, int ipc);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_pipe_bind(UvPipeHandle loop, string name);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_listen(UvStreamHandle handle, int backlog, uv_connection_cb cb);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_accept(UvStreamHandle server, UvStreamHandle client);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
public static extern void uv_pipe_connect(UvConnectRequest req, UvPipeHandle handle, string name, uv_connect_cb cb);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public extern static int uv_pipe_pending_count(UvPipeHandle handle);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public extern static int uv_read_start(UvStreamHandle handle, uv_alloc_cb alloc_cb, uv_read_cb read_cb);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_read_stop(UvStreamHandle handle);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_try_write(UvStreamHandle handle, uv_buf_t[] bufs, int nbufs);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
unsafe public static extern int uv_write(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, uv_write_cb cb);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
unsafe public static extern int uv_write2(UvRequest req, UvStreamHandle handle, uv_buf_t* bufs, int nbufs, UvStreamHandle sendHandle, uv_write_cb cb);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_shutdown(UvShutdownReq req, UvStreamHandle handle, uv_shutdown_cb cb);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public extern static IntPtr uv_err_name(int err);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern IntPtr uv_strerror(int err);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_loop_size();
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_handle_size(HandleType handleType);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_req_size(RequestType reqType);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_ip4_addr(string ip, int port, out SockAddr addr);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_ip6_addr(string ip, int port, out SockAddr addr);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_tcp_getsockname(UvTcpHandle handle, out SockAddr name, ref int namelen);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_tcp_getpeername(UvTcpHandle handle, out SockAddr name, ref int namelen);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
public static extern int uv_walk(UvLoopHandle loop, uv_walk_cb walk_cb, IntPtr arg);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
unsafe public static extern int uv_timer_init(UvLoopHandle loop, UvTimerHandle handle);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
unsafe public static extern int uv_timer_start(UvTimerHandle handle, uv_timer_cb cb, long timeout, long repeat);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
unsafe public static extern int uv_timer_stop(UvTimerHandle handle);
[DllImport("libuv", CallingConvention = CallingConvention.Cdecl)]
unsafe public static extern long uv_now(UvLoopHandle loop);
[DllImport("WS2_32.dll", CallingConvention = CallingConvention.Winapi)]
unsafe public static extern int WSAIoctl(
IntPtr socket,
int dwIoControlCode,
int* lpvInBuffer,
uint cbInBuffer,
int* lpvOutBuffer,
int cbOutBuffer,
out uint lpcbBytesReturned,
IntPtr lpOverlapped,
IntPtr lpCompletionRoutine
);
[DllImport("WS2_32.dll", CallingConvention = CallingConvention.Winapi)]
public static extern int WSAGetLastError();
}
}
} | 1 | 12,533 | Nit: I should have done this before, but rename the file to LibuvFunctions.cs. Make sure no other files are misnamed. | aspnet-KestrelHttpServer | .cs |
@@ -100,6 +100,7 @@ public class JavaBinCodec implements PushWriter {
MAP_ENTRY_ITER = 17,
ENUM_FIELD_VALUE = 18,
MAP_ENTRY = 19,
+ UUID = 20, // This is reserved to be used only in LogCodec
// types that combine tag + length (or other info) in a single byte
TAG_AND_LEN = (byte) (1 << 5),
STR = (byte) (1 << 5), | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.common.util;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.invoke.MethodHandles;
import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BiConsumer;
import java.util.function.Function;
import org.apache.solr.common.ConditionalMapWriter;
import org.apache.solr.common.EnumFieldValue;
import org.apache.solr.common.IteratorWriter;
import org.apache.solr.common.IteratorWriter.ItemWriter;
import org.apache.solr.common.MapSerializable;
import org.apache.solr.common.MapWriter;
import org.apache.solr.common.PushWriter;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.SolrInputField;
import org.noggit.CharArr;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.util.ByteArrayUtf8CharSequence.convertCharSeq;
/**
* Defines a space-efficient serialization/deserialization format for transferring data.
* <p>
 * JavaBinCodec has built-in support for many commonly used types. This includes primitive types (boolean, byte,
* short, double, int, long, float), common Java containers/utilities (Date, Map, Collection, Iterator, String,
* Object[], byte[]), and frequently used Solr types ({@link NamedList}, {@link SolrDocument},
* {@link SolrDocumentList}). Each of the above types has a pair of associated methods which read and write
* that type to a stream.
* <p>
* Classes that aren't supported natively can still be serialized/deserialized by providing
* an {@link JavaBinCodec.ObjectResolver} object that knows how to work with the unsupported class.
* This allows {@link JavaBinCodec} to be used to marshall/unmarshall arbitrary content.
* <p>
* NOTE -- {@link JavaBinCodec} instances cannot be reused for more than one marshall or unmarshall operation.
*/
public class JavaBinCodec implements PushWriter {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final AtomicBoolean WARNED_ABOUT_INDEX_TIME_BOOSTS = new AtomicBoolean();
public static final byte
NULL = 0,
BOOL_TRUE = 1,
BOOL_FALSE = 2,
BYTE = 3,
SHORT = 4,
DOUBLE = 5,
INT = 6,
LONG = 7,
FLOAT = 8,
DATE = 9,
MAP = 10,
SOLRDOC = 11,
SOLRDOCLST = 12,
BYTEARR = 13,
ITERATOR = 14,
/**
* this is a special tag signals an end. No value is associated with it
*/
END = 15,
SOLRINPUTDOC = 16,
MAP_ENTRY_ITER = 17,
ENUM_FIELD_VALUE = 18,
MAP_ENTRY = 19,
// types that combine tag + length (or other info) in a single byte
TAG_AND_LEN = (byte) (1 << 5),
STR = (byte) (1 << 5),
SINT = (byte) (2 << 5),
SLONG = (byte) (3 << 5),
ARR = (byte) (4 << 5), //
ORDERED_MAP = (byte) (5 << 5), // SimpleOrderedMap (a NamedList subclass, and more common)
NAMED_LST = (byte) (6 << 5), // NamedList
EXTERN_STRING = (byte) (7 << 5);
private static final int MAX_UTF8_SIZE_FOR_ARRAY_GROW_STRATEGY = 65536;
private static byte VERSION = 2;
private final ObjectResolver resolver;
protected FastOutputStream daos;
private StringCache stringCache;
private WritableDocFields writableDocFields;
private boolean alreadyMarshalled;
private boolean alreadyUnmarshalled;
protected boolean readStringAsCharSeq = false;
public JavaBinCodec() {
resolver =null;
writableDocFields =null;
}
public JavaBinCodec setReadStringAsCharSeq(boolean flag) {
readStringAsCharSeq = flag;
return this;
}
/**
* Use this to use this as a PushWriter. ensure that close() is called explicitly after use
*
* @param os The output stream
*/
public JavaBinCodec(OutputStream os, ObjectResolver resolver) throws IOException {
this.resolver = resolver;
initWrite(os);
}
public JavaBinCodec(ObjectResolver resolver) {
this(resolver, null);
}
public JavaBinCodec setWritableDocFields(WritableDocFields writableDocFields){
this.writableDocFields = writableDocFields;
return this;
}
public JavaBinCodec(ObjectResolver resolver, StringCache stringCache) {
this.resolver = resolver;
this.stringCache = stringCache;
}
public ObjectResolver getResolver() {
return resolver;
}
public void marshal(Object nl, OutputStream os) throws IOException {
try {
initWrite(os);
writeVal(nl);
} finally {
alreadyMarshalled = true;
daos.flushBuffer();
}
}
protected void initWrite(OutputStream os) throws IOException {
assert !alreadyMarshalled;
init(FastOutputStream.wrap(os));
daos.writeByte(VERSION);
}
/** expert: sets a new output stream */
public void init(FastOutputStream os) {
daos = os;
}
byte version;
public Object unmarshal(byte[] buf) throws IOException {
FastInputStream dis = initRead(buf);
return readVal(dis);
}
public Object unmarshal(InputStream is) throws IOException {
FastInputStream dis = initRead(is);
return readVal(dis);
}
protected FastInputStream initRead(InputStream is) throws IOException {
assert !alreadyUnmarshalled;
FastInputStream dis = FastInputStream.wrap(is);
return _init(dis);
}
protected FastInputStream initRead(byte[] buf) throws IOException {
assert !alreadyUnmarshalled;
FastInputStream dis = new FastInputStream(null, buf, 0, buf.length);
return _init(dis);
}
protected FastInputStream _init(FastInputStream dis) throws IOException {
version = dis.readByte();
if (version != VERSION) {
      throw new RuntimeException("Invalid version (expected " + VERSION +
          ", but " + version + ") or the data is not in 'javabin' format");
}
alreadyUnmarshalled = true;
return dis;
}
public SimpleOrderedMap<Object> readOrderedMap(DataInputInputStream dis) throws IOException {
int sz = readSize(dis);
SimpleOrderedMap<Object> nl = new SimpleOrderedMap<>(sz);
for (int i = 0; i < sz; i++) {
String name = (String) readVal(dis);
Object val = readVal(dis);
nl.add(name, val);
}
return nl;
}
public NamedList<Object> readNamedList(DataInputInputStream dis) throws IOException {
int sz = readSize(dis);
NamedList<Object> nl = new NamedList<>(sz);
for (int i = 0; i < sz; i++) {
String name = (String) readVal(dis);
Object val = readVal(dis);
nl.add(name, val);
}
return nl;
}
public void writeNamedList(NamedList<?> nl) throws IOException {
writeTag(nl instanceof SimpleOrderedMap ? ORDERED_MAP : NAMED_LST, nl.size());
for (int i = 0; i < nl.size(); i++) {
String name = nl.getName(i);
writeExternString(name);
Object val = nl.getVal(i);
writeVal(val);
}
}
public void writeVal(Object val) throws IOException {
if (writeKnownType(val)) {
return;
} else {
ObjectResolver resolver = null;
if(val instanceof ObjectResolver) {
resolver = (ObjectResolver)val;
}
else {
resolver = this.resolver;
}
if (resolver != null) {
Object tmpVal = resolver.resolve(val, this);
if (tmpVal == null) return; // null means the resolver took care of it fully
if (writeKnownType(tmpVal)) return;
}
}
// Fallback to do *something*.
// note: if the user of this codec doesn't want this (e.g. UpdateLog) it can supply an ObjectResolver that does
// something else like throw an exception.
writeVal(val.getClass().getName() + ':' + val.toString());
}
protected static final Object END_OBJ = new Object();
protected byte tagByte;
public Object readVal(DataInputInputStream dis) throws IOException {
tagByte = dis.readByte();
return readObject(dis);
}
protected Object readObject(DataInputInputStream dis) throws IOException {
// if ((tagByte & 0xe0) == 0) {
// if top 3 bits are clear, this is a normal tag
// OK, try type + size in single byte
switch (tagByte >>> 5) {
case STR >>> 5:
return readStr(dis, stringCache, readStringAsCharSeq);
case SINT >>> 5:
return readSmallInt(dis);
case SLONG >>> 5:
return readSmallLong(dis);
case ARR >>> 5:
return readArray(dis);
case ORDERED_MAP >>> 5:
return readOrderedMap(dis);
case NAMED_LST >>> 5:
return readNamedList(dis);
case EXTERN_STRING >>> 5:
return readExternString(dis);
}
switch (tagByte) {
case NULL:
return null;
case DATE:
return new Date(dis.readLong());
case INT:
return dis.readInt();
case BOOL_TRUE:
return Boolean.TRUE;
case BOOL_FALSE:
return Boolean.FALSE;
case FLOAT:
return dis.readFloat();
case DOUBLE:
return dis.readDouble();
case LONG:
return dis.readLong();
case BYTE:
return dis.readByte();
case SHORT:
return dis.readShort();
case MAP:
return readMap(dis);
case SOLRDOC:
return readSolrDocument(dis);
case SOLRDOCLST:
return readSolrDocumentList(dis);
case BYTEARR:
return readByteArray(dis);
case ITERATOR:
return readIterator(dis);
case END:
return END_OBJ;
case SOLRINPUTDOC:
return readSolrInputDocument(dis);
case ENUM_FIELD_VALUE:
return readEnumFieldValue(dis);
case MAP_ENTRY:
return readMapEntry(dis);
case MAP_ENTRY_ITER:
return readMapIter(dis);
}
throw new RuntimeException("Unknown type " + tagByte);
}
public boolean writeKnownType(Object val) throws IOException {
if (writePrimitive(val)) return true;
if (val instanceof NamedList) {
writeNamedList((NamedList<?>) val);
return true;
}
if (val instanceof SolrDocumentList) { // SolrDocumentList is a List, so must come before List check
writeSolrDocumentList((SolrDocumentList) val);
return true;
}
if (val instanceof SolrInputField) {
return writeKnownType(((SolrInputField) val).getRawValue());
}
if (val instanceof IteratorWriter) {
writeIterator((IteratorWriter) val);
return true;
}
if (val instanceof Collection) {
writeArray((Collection) val);
return true;
}
if (val instanceof Object[]) {
writeArray((Object[]) val);
return true;
}
if (val instanceof SolrDocument) {
//this needs special treatment to know which fields are to be written
writeSolrDocument((SolrDocument) val);
return true;
}
if (val instanceof SolrInputDocument) {
writeSolrInputDocument((SolrInputDocument)val);
return true;
}
if (val instanceof MapWriter) {
writeMap((MapWriter) val);
return true;
}
if (val instanceof Map) {
writeMap((Map) val);
return true;
}
if (val instanceof Iterator) {
writeIterator((Iterator) val);
return true;
}
if (val instanceof Path) {
writeStr(((Path) val).toAbsolutePath().toString());
return true;
}
if (val instanceof Iterable) {
writeIterator(((Iterable) val).iterator());
return true;
}
if (val instanceof EnumFieldValue) {
writeEnumFieldValue((EnumFieldValue) val);
return true;
}
if (val instanceof Map.Entry) {
writeMapEntry((Map.Entry)val);
return true;
}
if (val instanceof MapSerializable) {
//todo find a better way to reuse the map more efficiently
writeMap(((MapSerializable) val).toMap(new NamedList().asShallowMap()));
return true;
}
if (val instanceof AtomicInteger) {
writeInt(((AtomicInteger) val).get());
return true;
}
if (val instanceof AtomicLong) {
writeLong(((AtomicLong) val).get());
return true;
}
if (val instanceof AtomicBoolean) {
writeBoolean(((AtomicBoolean) val).get());
return true;
}
return false;
}
public class BinEntryWriter implements MapWriter.EntryWriter {
@Override
public MapWriter.EntryWriter put(CharSequence k, Object v) throws IOException {
writeExternString(k);
JavaBinCodec.this.writeVal(v);
return this;
}
@Override
public MapWriter.EntryWriter put(CharSequence k, int v) throws IOException {
writeExternString(k);
JavaBinCodec.this.writeInt(v);
return this;
}
@Override
public MapWriter.EntryWriter put(CharSequence k, long v) throws IOException {
writeExternString(k);
JavaBinCodec.this.writeLong(v);
return this;
}
@Override
public MapWriter.EntryWriter put(CharSequence k, float v) throws IOException {
writeExternString(k);
JavaBinCodec.this.writeFloat(v);
return this;
}
@Override
public MapWriter.EntryWriter put(CharSequence k, double v) throws IOException {
writeExternString(k);
JavaBinCodec.this.writeDouble(v);
return this;
}
@Override
public MapWriter.EntryWriter put(CharSequence k, boolean v) throws IOException {
writeExternString(k);
writeBoolean(v);
return this;
}
@Override
public MapWriter.EntryWriter put(CharSequence k, CharSequence v) throws IOException {
writeExternString(k);
writeStr(v);
return this;
}
private BiConsumer<CharSequence, Object> biConsumer;
@Override
public BiConsumer<CharSequence, Object> getBiConsumer() {
if (biConsumer == null) biConsumer = MapWriter.EntryWriter.super.getBiConsumer();
return biConsumer;
}
}
public final BinEntryWriter ew = new BinEntryWriter();
public void writeMap(MapWriter val) throws IOException {
writeTag(MAP_ENTRY_ITER);
val.writeMap(ew);
writeTag(END);
}
public void writeTag(byte tag) throws IOException {
daos.writeByte(tag);
}
public void writeTag(byte tag, int size) throws IOException {
if ((tag & 0xe0) != 0) {
if (size < 0x1f) {
daos.writeByte(tag | size);
} else {
daos.writeByte(tag | 0x1f);
writeVInt(size - 0x1f, daos);
}
} else {
daos.writeByte(tag);
writeVInt(size, daos);
}
}
public void writeByteArray(byte[] arr, int offset, int len) throws IOException {
writeTag(BYTEARR, len);
daos.write(arr, offset, len);
}
public byte[] readByteArray(DataInputInputStream dis) throws IOException {
byte[] arr = new byte[readVInt(dis)];
dis.readFully(arr);
return arr;
}
  // use this to ignore the writable interface because child docs will ignore the fl flag
// is it a good design?
private boolean ignoreWritable =false;
private ConditionalMapWriter.EntryWriterWrapper cew;
public void writeSolrDocument(SolrDocument doc) throws IOException {
List<SolrDocument> children = doc.getChildDocuments();
int fieldsCount = 0;
if(writableDocFields == null || writableDocFields.wantsAllFields() || ignoreWritable){
fieldsCount = doc.size();
} else {
for (Entry<String, Object> e : doc) {
if(toWrite(e.getKey())) fieldsCount++;
}
}
int sz = fieldsCount + (children==null ? 0 : children.size());
writeTag(SOLRDOC);
writeTag(ORDERED_MAP, sz);
if (cew == null) cew = new ConditionalMapWriter.EntryWriterWrapper(ew, (k, o) -> toWrite(k.toString()));
doc.writeMap(cew);
if (children != null) {
try {
ignoreWritable = true;
for (SolrDocument child : children) {
writeSolrDocument(child);
}
} finally {
ignoreWritable = false;
}
}
}
protected boolean toWrite(String key) {
return writableDocFields == null || ignoreWritable || writableDocFields.isWritable(key);
}
public SolrDocument readSolrDocument(DataInputInputStream dis) throws IOException {
tagByte = dis.readByte();
int size = readSize(dis);
SolrDocument doc = new SolrDocument(new LinkedHashMap<>(size));
for (int i = 0; i < size; i++) {
String fieldName;
Object obj = readVal(dis); // could be a field name, or a child document
if (obj instanceof SolrDocument) {
doc.addChildDocument((SolrDocument)obj);
continue;
} else {
fieldName = (String)obj;
}
Object fieldVal = readVal(dis);
doc.setField(fieldName, fieldVal);
}
return doc;
}
public SolrDocumentList readSolrDocumentList(DataInputInputStream dis) throws IOException {
SolrDocumentList solrDocs = new SolrDocumentList();
List list = (List) readVal(dis);
solrDocs.setNumFound((Long) list.get(0));
solrDocs.setStart((Long) list.get(1));
solrDocs.setMaxScore((Float) list.get(2));
@SuppressWarnings("unchecked")
List<SolrDocument> l = (List<SolrDocument>) readVal(dis);
solrDocs.addAll(l);
return solrDocs;
}
public void writeSolrDocumentList(SolrDocumentList docs)
throws IOException {
writeTag(SOLRDOCLST);
List<Number> l = new ArrayList<>(3);
l.add(docs.getNumFound());
l.add(docs.getStart());
l.add(docs.getMaxScore());
writeArray(l);
writeArray(docs);
}
public SolrInputDocument readSolrInputDocument(DataInputInputStream dis) throws IOException {
int sz = readVInt(dis);
float docBoost = (Float)readVal(dis);
if (docBoost != 1f) {
String message = "Ignoring document boost: " + docBoost + " as index-time boosts are not supported anymore";
if (WARNED_ABOUT_INDEX_TIME_BOOSTS.compareAndSet(false, true)) {
log.warn(message);
} else {
log.debug(message);
}
}
SolrInputDocument sdoc = createSolrInputDocument(sz);
for (int i = 0; i < sz; i++) {
String fieldName;
Object obj = readVal(dis); // could be a boost, a field name, or a child document
if (obj instanceof Float) {
float boost = (Float)obj;
if (boost != 1f) {
String message = "Ignoring field boost: " + boost + " as index-time boosts are not supported anymore";
if (WARNED_ABOUT_INDEX_TIME_BOOSTS.compareAndSet(false, true)) {
log.warn(message);
} else {
log.debug(message);
}
}
fieldName = (String)readVal(dis);
} else if (obj instanceof SolrInputDocument) {
sdoc.addChildDocument((SolrInputDocument)obj);
continue;
} else {
fieldName = (String)obj;
}
Object fieldVal = readVal(dis);
sdoc.setField(fieldName, fieldVal);
}
return sdoc;
}
protected SolrInputDocument createSolrInputDocument(int sz) {
return new SolrInputDocument(new LinkedHashMap<>(sz));
}
public void writeSolrInputDocument(SolrInputDocument sdoc) throws IOException {
List<SolrInputDocument> children = sdoc.getChildDocuments();
int sz = sdoc.size() + (children==null ? 0 : children.size());
writeTag(SOLRINPUTDOC, sz);
writeFloat(1f); // document boost
sdoc.writeMap(ew);
if (children != null) {
for (SolrInputDocument child : children) {
writeSolrInputDocument(child);
}
}
}
public Map<Object, Object> readMapIter(DataInputInputStream dis) throws IOException {
Map<Object, Object> m = newMap(-1);
for (; ; ) {
Object key = readVal(dis);
if (key == END_OBJ) break;
Object val = readVal(dis);
m.put(key, val);
}
return m;
}
/**
* create a new Map object
* @param size expected size, -1 means unknown size
*/
protected Map<Object, Object> newMap(int size) {
return size < 0 ? new LinkedHashMap<>() : new LinkedHashMap<>(size);
}
public Map<Object,Object> readMap(DataInputInputStream dis)
throws IOException {
int sz = readVInt(dis);
return readMap(dis, sz);
}
protected Map<Object, Object> readMap(DataInputInputStream dis, int sz) throws IOException {
Map<Object, Object> m = newMap(sz);
for (int i = 0; i < sz; i++) {
Object key = readVal(dis);
Object val = readVal(dis);
m.put(key, val);
}
return m;
}
public final ItemWriter itemWriter = new ItemWriter() {
@Override
public ItemWriter add(Object o) throws IOException {
writeVal(o);
return this;
}
@Override
public ItemWriter add(int v) throws IOException {
writeInt(v);
return this;
}
@Override
public ItemWriter add(long v) throws IOException {
writeLong(v);
return this;
}
@Override
public ItemWriter add(float v) throws IOException {
writeFloat(v);
return this;
}
@Override
public ItemWriter add(double v) throws IOException {
writeDouble(v);
return this;
}
@Override
public ItemWriter add(boolean v) throws IOException {
writeBoolean(v);
return this;
}
};
@Override
public void writeIterator(IteratorWriter val) throws IOException {
writeTag(ITERATOR);
val.writeIter(itemWriter);
writeTag(END);
}
public void writeIterator(Iterator iter) throws IOException {
writeTag(ITERATOR);
while (iter.hasNext()) {
writeVal(iter.next());
}
writeTag(END);
}
public List<Object> readIterator(DataInputInputStream fis) throws IOException {
ArrayList<Object> l = new ArrayList<>();
while (true) {
Object o = readVal(fis);
if (o == END_OBJ) break;
l.add(o);
}
return l;
}
public void writeArray(List l) throws IOException {
writeTag(ARR, l.size());
for (int i = 0; i < l.size(); i++) {
writeVal(l.get(i));
}
}
public void writeArray(Collection coll) throws IOException {
writeTag(ARR, coll.size());
for (Object o : coll) {
writeVal(o);
}
}
public void writeArray(Object[] arr) throws IOException {
writeTag(ARR, arr.length);
for (int i = 0; i < arr.length; i++) {
Object o = arr[i];
writeVal(o);
}
}
public List<Object> readArray(DataInputInputStream dis) throws IOException {
int sz = readSize(dis);
return readArray(dis, sz);
}
protected List readArray(DataInputInputStream dis, int sz) throws IOException {
ArrayList<Object> l = new ArrayList<>(sz);
for (int i = 0; i < sz; i++) {
l.add(readVal(dis));
}
return l;
}
/**
* write {@link EnumFieldValue} as tag+int value+string value
* @param enumFieldValue to write
*/
public void writeEnumFieldValue(EnumFieldValue enumFieldValue) throws IOException {
writeTag(ENUM_FIELD_VALUE);
writeInt(enumFieldValue.toInt());
writeStr(enumFieldValue.toString());
}
public void writeMapEntry(Map.Entry val) throws IOException {
writeTag(MAP_ENTRY);
writeVal(val.getKey());
writeVal(val.getValue());
}
/**
* read {@link EnumFieldValue} (int+string) from input stream
* @param dis data input stream
* @return {@link EnumFieldValue}
*/
public EnumFieldValue readEnumFieldValue(DataInputInputStream dis) throws IOException {
Integer intValue = (Integer) readVal(dis);
String stringValue = (String) convertCharSeq (readVal(dis));
return new EnumFieldValue(intValue, stringValue);
}
public Map.Entry<Object,Object> readMapEntry(DataInputInputStream dis) throws IOException {
final Object key = readVal(dis);
final Object value = readVal(dis);
return new Map.Entry<Object,Object>() {
@Override
public Object getKey() {
return key;
}
@Override
public Object getValue() {
return value;
}
@Override
public String toString() {
return "MapEntry[" + key + ":" + value + "]";
}
@Override
public Object setValue(Object value) {
throw new UnsupportedOperationException();
}
@Override
public int hashCode() {
int result = 31;
result *=31 + getKey().hashCode();
result *=31 + getValue().hashCode();
return result;
}
@Override
public boolean equals(Object obj) {
if(this == obj) {
return true;
}
if(!(obj instanceof Entry)) {
return false;
}
Map.Entry<Object, Object> entry = (Entry<Object, Object>) obj;
return (this.getKey().equals(entry.getKey()) && this.getValue().equals(entry.getValue()));
}
};
}
/**
* write the string as tag+length, with length being the number of UTF-8 bytes
*/
public void writeStr(CharSequence s) throws IOException {
if (s == null) {
writeTag(NULL);
return;
}
if (s instanceof Utf8CharSequence) {
writeUTF8Str((Utf8CharSequence) s);
return;
}
int end = s.length();
int maxSize = end * ByteUtils.MAX_UTF8_BYTES_PER_CHAR;
if (maxSize <= MAX_UTF8_SIZE_FOR_ARRAY_GROW_STRATEGY) {
if (bytes == null || bytes.length < maxSize) bytes = new byte[maxSize];
int sz = ByteUtils.UTF16toUTF8(s, 0, end, bytes, 0);
writeTag(STR, sz);
daos.write(bytes, 0, sz);
} else {
// double pass logic for large strings, see SOLR-7971
int sz = ByteUtils.calcUTF16toUTF8Length(s, 0, end);
writeTag(STR, sz);
if (bytes == null || bytes.length < 8192) bytes = new byte[8192];
ByteUtils.writeUTF16toUTF8(s, 0, end, daos, bytes);
}
}
byte[] bytes;
CharArr arr = new CharArr();
private StringBytes bytesRef = new StringBytes(bytes,0,0);
public CharSequence readStr(DataInputInputStream dis) throws IOException {
return readStr(dis, null, readStringAsCharSeq);
}
public CharSequence readStr(DataInputInputStream dis, StringCache stringCache, boolean readStringAsCharSeq) throws IOException {
if (readStringAsCharSeq) {
return readUtf8(dis);
}
int sz = readSize(dis);
return _readStr(dis, stringCache, sz);
}
private CharSequence _readStr(DataInputInputStream dis, StringCache stringCache, int sz) throws IOException {
if (bytes == null || bytes.length < sz) bytes = new byte[sz];
dis.readFully(bytes, 0, sz);
if (stringCache != null) {
return stringCache.get(bytesRef.reset(bytes, 0, sz));
} else {
arr.reset();
ByteUtils.UTF8toUTF16(bytes, 0, sz, arr);
return arr.toString();
}
}
/////////// code to optimize reading UTF8
static final int MAX_UTF8_SZ = 1024 * 64;//too big strings can cause too much memory allocation
private Function<ByteArrayUtf8CharSequence, String> stringProvider;
private BytesBlock bytesBlock;
protected CharSequence readUtf8(DataInputInputStream dis) throws IOException {
int sz = readSize(dis);
return readUtf8(dis, sz);
}
protected CharSequence readUtf8(DataInputInputStream dis, int sz) throws IOException {
ByteArrayUtf8CharSequence result = new ByteArrayUtf8CharSequence(null,0,0);
if(dis.readDirectUtf8(result, sz)){
result.stringProvider= getStringProvider();
return result;
}
if (sz > MAX_UTF8_SZ) return _readStr(dis, null, sz);
if (bytesBlock == null) bytesBlock = new BytesBlock(1024 * 4);
BytesBlock block = this.bytesBlock.expand(sz);
dis.readFully(block.getBuf(), block.getStartPos(), sz);
result.reset(block.getBuf(), block.getStartPos(), sz,null);
result.stringProvider = getStringProvider();
return result;
}
private Function<ByteArrayUtf8CharSequence, String> getStringProvider() {
if (stringProvider == null) {
stringProvider = butf8cs -> {
synchronized (JavaBinCodec.this) {
arr.reset();
ByteUtils.UTF8toUTF16(butf8cs.buf, butf8cs.offset(), butf8cs.size(), arr);
return arr.toString();
}
};
}
return this.stringProvider;
}
public void writeInt(int val) throws IOException {
if (val > 0) {
int b = SINT | (val & 0x0f);
if (val >= 0x0f) {
b |= 0x10;
daos.writeByte(b);
writeVInt(val >>> 4, daos);
} else {
daos.writeByte(b);
}
} else {
daos.writeByte(INT);
daos.writeInt(val);
}
}
public int readSmallInt(DataInputInputStream dis) throws IOException {
int v = tagByte & 0x0F;
if ((tagByte & 0x10) != 0)
v = (readVInt(dis) << 4) | v;
return v;
}
public void writeLong(long val) throws IOException {
if ((val & 0xff00000000000000L) == 0) {
int b = SLONG | ((int) val & 0x0f);
if (val >= 0x0f) {
b |= 0x10;
daos.writeByte(b);
writeVLong(val >>> 4, daos);
} else {
daos.writeByte(b);
}
} else {
daos.writeByte(LONG);
daos.writeLong(val);
}
}
public long readSmallLong(DataInputInputStream dis) throws IOException {
long v = tagByte & 0x0F;
if ((tagByte & 0x10) != 0)
v = (readVLong(dis) << 4) | v;
return v;
}
public void writeFloat(float val) throws IOException {
daos.writeByte(FLOAT);
daos.writeFloat(val);
}
public boolean writePrimitive(Object val) throws IOException {
if (val == null) {
daos.writeByte(NULL);
return true;
} else if (val instanceof Utf8CharSequence) {
writeUTF8Str((Utf8CharSequence) val);
return true;
} else if (val instanceof CharSequence) {
writeStr((CharSequence) val);
return true;
} else if (val instanceof Number) {
if (val instanceof Integer) {
writeInt(((Integer) val).intValue());
return true;
} else if (val instanceof Long) {
writeLong(((Long) val).longValue());
return true;
} else if (val instanceof Float) {
writeFloat(((Float) val).floatValue());
return true;
} else if (val instanceof Double) {
writeDouble(((Double) val).doubleValue());
return true;
} else if (val instanceof Byte) {
daos.writeByte(BYTE);
daos.writeByte(((Byte) val).intValue());
return true;
} else if (val instanceof Short) {
daos.writeByte(SHORT);
daos.writeShort(((Short) val).intValue());
return true;
}
return false;
} else if (val instanceof Date) {
daos.writeByte(DATE);
daos.writeLong(((Date) val).getTime());
return true;
} else if (val instanceof Boolean) {
writeBoolean((Boolean) val);
return true;
} else if (val instanceof byte[]) {
writeByteArray((byte[]) val, 0, ((byte[]) val).length);
return true;
} else if (val instanceof ByteBuffer) {
ByteBuffer buf = (ByteBuffer) val;
writeByteArray(buf.array(),buf.position(),buf.limit() - buf.position());
return true;
} else if (val == END_OBJ) {
writeTag(END);
return true;
}
return false;
}
protected void writeBoolean(boolean val) throws IOException {
if (val) daos.writeByte(BOOL_TRUE);
else daos.writeByte(BOOL_FALSE);
}
protected void writeDouble(double val) throws IOException {
daos.writeByte(DOUBLE);
daos.writeDouble(val);
}
public void writeMap(Map<?,?> val) throws IOException {
writeTag(MAP, val.size());
if (val instanceof MapWriter) {
((MapWriter) val).writeMap(ew);
return;
}
for (Map.Entry<?,?> entry : val.entrySet()) {
Object key = entry.getKey();
if (key instanceof String) {
writeExternString((String) key);
} else {
writeVal(key);
}
writeVal(entry.getValue());
}
}
public int readSize(DataInputInputStream in) throws IOException {
int sz = tagByte & 0x1f;
if (sz == 0x1f) sz += readVInt(in);
return sz;
}
/**
* Special method for variable length int (copied from lucene). Usually used for writing the length of a
   * collection/array/map. In most of the cases the length can be represented in one byte (length < 127) so it saves 3
* bytes/object
*
* @throws IOException If there is a low-level I/O error.
*/
public static void writeVInt(int i, FastOutputStream out) throws IOException {
while ((i & ~0x7F) != 0) {
out.writeByte((byte) ((i & 0x7f) | 0x80));
i >>>= 7;
}
out.writeByte((byte) i);
}
/**
* The counterpart for {@link #writeVInt(int, FastOutputStream)}
*
* @throws IOException If there is a low-level I/O error.
*/
public static int readVInt(DataInputInputStream in) throws IOException {
byte b = in.readByte();
int i = b & 0x7F;
for (int shift = 7; (b & 0x80) != 0; shift += 7) {
b = in.readByte();
i |= (b & 0x7F) << shift;
}
return i;
}
public static void writeVLong(long i, FastOutputStream out) throws IOException {
while ((i & ~0x7F) != 0) {
out.writeByte((byte) ((i & 0x7f) | 0x80));
i >>>= 7;
}
out.writeByte((byte) i);
}
public static long readVLong(DataInputInputStream in) throws IOException {
byte b = in.readByte();
long i = b & 0x7F;
for (int shift = 7; (b & 0x80) != 0; shift += 7) {
b = in.readByte();
i |= (long) (b & 0x7F) << shift;
}
return i;
}
private int stringsCount = 0;
private Map<String, Integer> stringsMap;
private List<CharSequence> stringsList;
public void writeExternString(CharSequence s) throws IOException {
if (s == null) {
writeTag(NULL);
return;
}
Integer idx = stringsMap == null ? null : stringsMap.get(s);
if (idx == null) idx = 0;
writeTag(EXTERN_STRING, idx);
if (idx == 0) {
writeStr(s);
if (stringsMap == null) stringsMap = new HashMap<>();
stringsMap.put(s.toString(), ++stringsCount);
}
}
public CharSequence readExternString(DataInputInputStream fis) throws IOException {
int idx = readSize(fis);
if (idx != 0) {// idx != 0 is the index of the extern string
return stringsList.get(idx - 1);
} else {// idx == 0 means it has a string value
tagByte = fis.readByte();
CharSequence s = readStr(fis, stringCache, false);
if (s != null) s = s.toString();
if (stringsList == null) stringsList = new ArrayList<>();
stringsList.add(s);
return s;
}
}
public void writeUTF8Str(Utf8CharSequence utf8) throws IOException {
writeTag(STR, utf8.size());
daos.writeUtf8CharSeq(utf8);
}
public long getTotalBytesWritten() {
if (daos != null) {
return daos.written;
}
return 0;
}
/**
* Allows extension of {@link JavaBinCodec} to support serialization of arbitrary data types.
* <p>
* Implementors of this interface write a method to serialize a given object using an existing {@link JavaBinCodec}
*/
public interface ObjectResolver {
/**
* Examine and attempt to serialize the given object, using a {@link JavaBinCodec} to write it to a stream.
*
* @param o the object that the caller wants serialized.
* @param codec used to actually serialize {@code o}.
* @return the object {@code o} itself if it could not be serialized, or {@code null} if the whole object was successfully serialized.
* @see JavaBinCodec
*/
Object resolve(Object o, JavaBinCodec codec) throws IOException;
}
public interface WritableDocFields {
boolean isWritable(String name);
boolean wantsAllFields();
}
public static class StringCache {
private final Cache<StringBytes, String> cache;
public StringCache(Cache<StringBytes, String> cache) {
this.cache = cache;
}
public String get(StringBytes b) {
String result = cache.get(b);
if (result == null) {
//make a copy because the buffer received may be changed later by the caller
StringBytes copy = new StringBytes(Arrays.copyOfRange(b.bytes, b.offset, b.offset + b.length), 0, b.length);
CharArr arr = new CharArr();
ByteUtils.UTF8toUTF16(b.bytes, b.offset, b.length, arr);
result = arr.toString();
cache.put(copy, result);
}
return result;
}
}
@Override
public void close() throws IOException {
if (daos != null) {
daos.flushBuffer();
}
}
}
 | 1 | 29,193 | We should not add new types unless absolutely required. I'm not sure `UUID` requires a new type | apache-lucene-solr | java
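The reviewer's point above is that JavaBin tag bytes are a scarce, compatibility-sensitive resource. For illustration only, here is a minimal, hypothetical sketch of how a UUID could ride on the existing ObjectResolver extension point (shown near the end of the file above) instead of claiming a new tag; the class name and the string round-trip are assumptions, not the actual LogCodec change.

import java.io.IOException;
import java.util.UUID;

import org.apache.solr.common.util.JavaBinCodec;

// Hypothetical resolver: sends UUIDs over the wire as plain strings,
// so no new tag byte has to be reserved in the javabin format.
public class UuidAsStringResolver implements JavaBinCodec.ObjectResolver {
    @Override
    public Object resolve(Object o, JavaBinCodec codec) throws IOException {
        if (o instanceof UUID) {
            codec.writeStr(o.toString()); // reuses the existing STR tag
            return null;                  // null tells the codec the value was fully handled
        }
        return o;                         // anything else falls back to the codec's defaults
    }
}

A codec constructed with new JavaBinCodec(new UuidAsStringResolver()) would then marshal UUIDs without touching the tag table; whether that indirection is acceptable for LogCodec is exactly the trade-off the comment questions.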
@@ -461,3 +461,14 @@ class JUnitMirrorsManager(MirrorsManager):
links.append(default_link)
self.log.debug('Total mirrors: %d', len(links))
return links
+
+
+class JavaEnv(object):
+ def get_additional_classpath(self):
+ pass
+
+ def get_cp_from_files(self):
+ pass
+
+ def get_files_from_cp(self):
+ pass | 1 | """
Copyright 2017 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import shutil
import subprocess
import time
from os import listdir
from os.path import join
from bzt import ToolError, TaurusConfigError
from bzt.engine import HavingInstallableTools, Scenario
from bzt.modules import SubprocessedExecutor
from bzt.utils import get_full_path, shell_exec, TclLibrary, JavaVM, RequiredTool, MirrorsManager
SELENIUM_DOWNLOAD_LINK = "http://selenium-release.storage.googleapis.com/3.6/" \
"selenium-server-standalone-3.6.0.jar"
SELENIUM_VERSION = "3.6" # FIXME: unused, remove it
JUNIT_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=junit/junit/" \
"{version}/junit-{version}.jar"
JUNIT_VERSION = "4.12"
JUNIT_MIRRORS_SOURCE = "http://search.maven.org/solrsearch/select?q=g%3A%22junit%22%20AND%20a%3A%22" \
"junit%22%20AND%20v%3A%22{version}%22&rows=20&wt=json".format(version=JUNIT_VERSION)
TESTNG_VERSION = "6.8.5"
TESTNG_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/testng/testng/" \
"{version}/testng-{version}.jar".format(version=TESTNG_VERSION)
HAMCREST_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core" \
"/1.3/hamcrest-core-1.3.jar"
JSON_JAR_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/json/json/20160810/json-20160810.jar"
class JavaTestRunner(SubprocessedExecutor, HavingInstallableTools):
"""
    Allows testing java and jar files
:type script: str
"""
def __init__(self):
super(JavaTestRunner, self).__init__()
self.working_dir = os.getcwd()
self.target_java = "1.8"
self.props_file = None
self.base_class_path = []
def path_lambda(self, x):
return os.path.abspath(self.engine.find_file(x))
def install_required_tools(self):
self.hamcrest_path = self.path_lambda(self.settings.get("hamcrest-core",
"~/.bzt/selenium-taurus/tools/junit/hamcrest-core.jar"))
self.json_jar_path = self.path_lambda(
self.settings.get("json-jar", "~/.bzt/selenium-taurus/tools/junit/json.jar"))
self.selenium_server_jar_path = self.path_lambda(self.settings.get("selenium-server",
"~/.bzt/selenium-taurus/selenium-server.jar"))
def prepare(self):
"""
make jar.
"""
self.script = self.get_scenario().get(Scenario.SCRIPT,
TaurusConfigError("Script not passed to runner %s" % self))
self.script = self.engine.find_file(self.script)
self.install_required_tools()
self.working_dir = self.engine.create_artifact(self.settings.get("working-dir", "classes"), "")
self.target_java = str(self.settings.get("compile-target-java", self.target_java))
self.base_class_path.extend(self.settings.get("additional-classpath", []))
self.base_class_path.extend(self.get_scenario().get("additional-classpath", []))
self.base_class_path.extend([self.hamcrest_path, self.json_jar_path, self.selenium_server_jar_path])
self.props_file = self.engine.create_artifact("runner", ".properties")
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
self.reporting_setup(suffix=".ldjson")
def resource_files(self):
resources = super(JavaTestRunner, self).resource_files()
resources.extend(self.get_scenario().get("additional-classpath", []))
global_additional_classpath = self.settings.get("additional-classpath", [])
execution_files = self.execution.get('files', []) # later we need to fix path for sending into cloud
execution_files.extend(global_additional_classpath)
return resources
def _collect_script_files(self, extensions):
file_list = []
if self.script is not None and os.path.isdir(self.script):
for root, _, files in os.walk(self.script):
for test_file in files:
if os.path.splitext(test_file)[1].lower() in extensions:
path = get_full_path(join(root, test_file))
file_list.append(path)
else:
if os.path.splitext(self.script)[1].lower() in extensions:
file_list.append(get_full_path(self.script))
return file_list
def compile_scripts(self):
"""
Compile .java files
"""
self.log.debug("Compiling .java files started")
jar_path = join(self.engine.artifacts_dir, self.working_dir, self.settings.get("jar-name", "compiled.jar"))
if os.path.exists(jar_path):
self.log.debug(".java files are already compiled, skipping")
return
compile_cl = ["javac",
"-source", self.target_java,
"-target", self.target_java,
"-d", self.working_dir,
]
compile_cl.extend(["-cp", os.pathsep.join(self.base_class_path)])
compile_cl.extend(self._collect_script_files({".java"}))
with open(self.engine.create_artifact("javac", ".out"), 'ab') as javac_out:
with open(self.engine.create_artifact("javac", ".err"), 'ab') as javac_err:
self.log.debug("running javac: %s", compile_cl)
self.process = shell_exec(compile_cl, stdout=javac_out, stderr=javac_err)
ret_code = self.process.poll()
while ret_code is None:
self.log.debug("Compiling .java files...")
time.sleep(1)
ret_code = self.process.poll()
if ret_code != 0:
self.log.debug("javac exit code: %s", ret_code)
with open(javac_err.name) as err_file:
out = err_file.read()
raise ToolError("Javac exited with code: %s\n %s" % (ret_code, out.strip()))
self.log.info("Compiling .java files completed")
self.make_jar()
def make_jar(self):
"""
move all .class files to compiled.jar
"""
self.log.debug("Making .jar started")
with open(join(self.engine.artifacts_dir, "jar.out"), 'ab') as jar_out:
with open(join(self.engine.artifacts_dir, "jar.err"), 'ab') as jar_err:
class_files = [java_file for java_file in listdir(self.working_dir) if java_file.endswith(".class")]
jar_name = self.settings.get("jar-name", "compiled.jar")
if class_files:
compile_jar_cl = ["jar", "-cf", jar_name]
compile_jar_cl.extend(class_files)
else:
compile_jar_cl = ["jar", "-cf", jar_name, "."]
self.process = shell_exec(compile_jar_cl, cwd=self.working_dir, stdout=jar_out, stderr=jar_err)
ret_code = self.process.poll()
while ret_code is None:
self.log.debug("Making jar file...")
time.sleep(1)
ret_code = self.process.poll()
if ret_code != 0:
with open(jar_err.name) as err_file:
out = err_file.read()
raise ToolError("Jar exited with code %s\n%s" % (ret_code, out.strip()))
self.log.info("Making .jar file completed")
class JUnitTester(JavaTestRunner, HavingInstallableTools):
"""
    Allows testing java and jar files
"""
def __init__(self):
super(JUnitTester, self).__init__()
self.junit_path = None
self.junit_listener_path = None
def prepare(self):
super(JUnitTester, self).prepare()
self.install_required_tools()
self.base_class_path += [self.junit_path, self.junit_listener_path]
self.base_class_path = [self.path_lambda(x) for x in self.base_class_path]
if any(self._collect_script_files({'.java'})):
self.compile_scripts()
def install_required_tools(self):
super(JUnitTester, self).install_required_tools()
self.junit_path = self.path_lambda(self.settings.get("path", "~/.bzt/selenium-taurus/tools/junit/junit.jar"))
self.junit_listener_path = join(get_full_path(__file__, step_up=2), "resources", "taurus-junit-1.0.jar")
tools = []
# only check javac if we need to compile. if we have JAR as script - we don't need javac
if self.script and any(self._collect_script_files({'.java'})):
tools.append(JavaC(self.log))
tools.append(TclLibrary(self.log))
tools.append(JavaVM(self.log))
link = SELENIUM_DOWNLOAD_LINK.format(version=SELENIUM_VERSION)
tools.append(SeleniumServerJar(self.selenium_server_jar_path, link, self.log))
tools.append(JUnitJar(self.junit_path, self.log, JUNIT_VERSION))
tools.append(HamcrestJar(self.hamcrest_path, HAMCREST_DOWNLOAD_LINK))
tools.append(JsonJar(self.json_jar_path, JSON_JAR_DOWNLOAD_LINK))
tools.append(JUnitListenerJar(self.junit_listener_path, ""))
self._check_tools(tools)
def startup(self):
# java -cp junit.jar:selenium-test-small.jar:
# selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar
# taurusjunit.CustomRunner runner.properties
jar_list = [join(self.working_dir, jar) for jar in listdir(self.working_dir) if jar.endswith(".jar")]
jar_list.extend(self._collect_script_files({".jar"}))
self.base_class_path.extend(jar_list)
with open(self.props_file, 'wt') as props:
props.write("report_file=%s\n" % self.report_file)
load = self.get_load()
if load.iterations:
props.write("iterations=%s\n" % load.iterations)
if load.hold:
props.write("hold_for=%s\n" % load.hold)
for index, item in enumerate(jar_list):
props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/')))
class_path = os.pathsep.join(self.base_class_path)
junit_cmd_line = ["java", "-cp", class_path, "-Djna.nosys=true", "taurusjunit.CustomRunner", self.props_file]
self._start_subprocess(junit_cmd_line)
class TestNGTester(JavaTestRunner, HavingInstallableTools):
"""
    Allows testing java and jar files with TestNG
"""
__test__ = False # Hello, nosetests discovery mechanism
def __init__(self):
super(TestNGTester, self).__init__()
self.testng_path = None
self.testng_plugin_path = None
def prepare(self):
super(TestNGTester, self).prepare()
self.install_required_tools()
self.base_class_path += [self.testng_path, self.testng_plugin_path]
if any(self._collect_script_files({'.java'})):
self.compile_scripts()
def detected_testng_xml(self):
script_path = self.get_script_path()
if script_path and self.settings.get("autodetect-xml", True):
script_dir = get_full_path(script_path, step_up=1)
testng_xml = os.path.join(script_dir, 'testng.xml')
if os.path.exists(testng_xml):
return testng_xml
return None
def resource_files(self):
resources = super(TestNGTester, self).resource_files()
testng_xml = self.execution.get('testng-xml', None)
if not testng_xml:
testng_xml = self.detected_testng_xml()
if testng_xml:
self.log.info("Detected testng.xml file at %s", testng_xml)
self.execution['testng-xml'] = testng_xml
if testng_xml:
resources.append(testng_xml)
return resources
def install_required_tools(self):
super(TestNGTester, self).install_required_tools()
self.testng_path = self.path_lambda(self.settings.get("path", "~/.bzt/selenium-taurus/tools/testng/testng.jar"))
self.testng_plugin_path = join(get_full_path(__file__, step_up=2), "resources", "taurus-testng-1.0.jar")
tools = []
if self.script and any(self._collect_script_files({'.java'})):
tools.append(JavaC(self.log))
tools.append(TclLibrary(self.log))
tools.append(JavaVM(self.log))
link = SELENIUM_DOWNLOAD_LINK.format(version=SELENIUM_VERSION)
tools.append(SeleniumServerJar(self.selenium_server_jar_path, link, self.log))
tools.append(TestNGJar(self.testng_path, TESTNG_DOWNLOAD_LINK))
tools.append(HamcrestJar(self.hamcrest_path, HAMCREST_DOWNLOAD_LINK))
tools.append(JsonJar(self.json_jar_path, JSON_JAR_DOWNLOAD_LINK))
tools.append(TestNGPluginJar(self.testng_plugin_path, ""))
self._check_tools(tools)
def startup(self):
# java -classpath
# testng.jar:selenium-server.jar:taurus-testng-1.0.jar:json.jar:compiled.jar
# taurustestng.TestNGRunner runner.properties
jar_list = [join(self.working_dir, jar) for jar in listdir(self.working_dir) if jar.endswith(".jar")]
jar_list.extend(self._collect_script_files({".jar"}))
self.base_class_path.extend(jar_list)
with open(self.props_file, 'wt') as props:
props.write("report_file=%s\n" % self.report_file)
load = self.get_load()
if load.iterations:
props.write("iterations=%s\n" % load.iterations)
if load.hold:
props.write("hold_for=%s\n" % load.hold)
for index, item in enumerate(jar_list):
props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/')))
testng_xml = self.execution.get('testng-xml', None) or self.detected_testng_xml()
if testng_xml:
props.write('testng_config=%s\n' % testng_xml.replace(os.path.sep, '/'))
cmdline = ["java", "-cp", os.pathsep.join(self.base_class_path), "taurustestng.TestNGRunner", self.props_file]
self._start_subprocess(cmdline)
class TestNGJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(TestNGJar, self).__init__("TestNG", tool_path, download_link)
class HamcrestJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(HamcrestJar, self).__init__("HamcrestJar", tool_path, download_link)
class JsonJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(JsonJar, self).__init__("JsonJar", tool_path, download_link)
class JavaC(RequiredTool):
def __init__(self, parent_logger, tool_path='javac', download_link=''):
super(JavaC, self).__init__("JavaC", tool_path, download_link)
self.log = parent_logger.getChild(self.__class__.__name__)
def check_if_installed(self):
try:
output = subprocess.check_output([self.tool_path, '-version'], stderr=subprocess.STDOUT)
self.log.debug("%s output: %s", self.tool_name, output)
return True
except (subprocess.CalledProcessError, OSError):
return False
def install(self):
raise ToolError("The %s is not operable or not available. Consider installing it" % self.tool_name)
class SeleniumServerJar(RequiredTool):
def __init__(self, tool_path, download_link, parent_logger):
super(SeleniumServerJar, self).__init__("Selenium server", tool_path, download_link)
self.log = parent_logger.getChild(self.__class__.__name__)
def check_if_installed(self):
self.log.debug("%s path: %s", self.tool_name, self.tool_path)
selenium_launch_command = ["java", "-jar", self.tool_path, "-help"]
selenium_subproc = shell_exec(selenium_launch_command, stderr=subprocess.STDOUT)
output = selenium_subproc.communicate()
self.log.debug("%s output: %s", self.tool_name, output)
if selenium_subproc.returncode == 0:
self.already_installed = True
return True
else:
return False
class JUnitJar(RequiredTool):
def __init__(self, tool_path, parent_logger, junit_version):
super(JUnitJar, self).__init__("JUnit", tool_path)
self.log = parent_logger.getChild(self.__class__.__name__)
self.version = junit_version
self.mirror_manager = JUnitMirrorsManager(self.log, self.version)
def install(self):
dest = get_full_path(self.tool_path, step_up=1)
self.log.info("Will install %s into %s", self.tool_name, dest)
junit_dist = self._download(suffix=".jar")
if not os.path.exists(dest):
os.makedirs(dest)
shutil.move(junit_dist, self.tool_path)
self.log.info("Installed JUnit successfully")
if not self.check_if_installed():
raise ToolError("Unable to run %s after installation!" % self.tool_name)
class JUnitListenerJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(JUnitListenerJar, self).__init__("JUnitListener", tool_path, download_link)
def install(self):
raise ToolError("Automatic installation of JUnitListener isn't implemented")
class TestNGPluginJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(TestNGPluginJar, self).__init__("TestNGPlugin", tool_path, download_link)
def install(self):
raise ToolError("TestNG plugin should be bundled with Taurus distribution")
class JUnitMirrorsManager(MirrorsManager):
def __init__(self, parent_logger, junit_version):
self.junit_version = junit_version
super(JUnitMirrorsManager, self).__init__(JUNIT_MIRRORS_SOURCE, parent_logger)
def _parse_mirrors(self):
links = []
if self.page_source is not None:
self.log.debug('Parsing mirrors...')
try:
resp = json.loads(self.page_source)
objects = resp.get("response", {}).get("docs", [])
if objects:
obj = objects[0]
group = obj.get("g")
artifact = obj.get("a")
version = obj.get("v")
ext = obj.get("p")
link_template = "http://search.maven.org/remotecontent?filepath={group}/{artifact}/" \
"{version}/{artifact}-{version}.{ext}"
link = link_template.format(group=group, artifact=artifact, version=version, ext=ext)
links.append(link)
except BaseException as exc:
self.log.error("Error while parsing mirrors %s", exc)
default_link = JUNIT_DOWNLOAD_LINK.format(version=self.junit_version)
if default_link not in links:
links.append(default_link)
self.log.debug('Total mirrors: %d', len(links))
return links
 | 1 | 14,748 | If only Gatling uses it, can we skip introducing a new entity for now? Looks like overkill... | Blazemeter-taurus | py
@@ -54,6 +54,14 @@ type VersionsResponse struct {
Versions []string `json:"versions"`
}
+// Response to `GET /versions`
+// swagger:response VersionsResponse
+type versionsResponse struct {
+ //Versions Response
+ //in:body
+ Body *VersionsResponse
+}
+
// APIV1GETWalletsResponse is the response to `GET /v1/wallets`
// friendly:ListWalletsResponse
type APIV1GETWalletsResponse struct { | 1 | // Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package kmdapi
import (
"errors"
)
// APIV1Response is the interface that all API V1 responses must satisfy
type APIV1Response interface {
GetError() error
}
// APIV1ResponseEnvelope is a common envelope that all API V1 responses must embed
type APIV1ResponseEnvelope struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Error bool `json:"error"`
Message string `json:"message"`
}
// GetError allows VersionsResponse to satisfy the APIV1Response interface, even
// though it can never return an error and is not versioned
func (r VersionsResponse) GetError() error {
return nil
}
// GetError allows responses that embed an APIV1ResponseEnvelope to satisfy the
// APIV1Response interface
func (r APIV1ResponseEnvelope) GetError() error {
if r.Error {
return errors.New(r.Message)
}
return nil
}
// VersionsResponse is the response to `GET /versions`
// friendly:VersionsResponse
type VersionsResponse struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Versions []string `json:"versions"`
}
// APIV1GETWalletsResponse is the response to `GET /v1/wallets`
// friendly:ListWalletsResponse
type APIV1GETWalletsResponse struct {
APIV1ResponseEnvelope
Wallets []APIV1Wallet `json:"wallets"`
}
// APIV1POSTWalletResponse is the response to `POST /v1/wallet`
// friendly:CreateWalletResponse
type APIV1POSTWalletResponse struct {
APIV1ResponseEnvelope
Wallet APIV1Wallet `json:"wallet"`
}
// APIV1POSTWalletInitResponse is the response to `POST /v1/wallet/init`
// friendly:InitWalletHandleTokenResponse
type APIV1POSTWalletInitResponse struct {
APIV1ResponseEnvelope
WalletHandleToken string `json:"wallet_handle_token"`
}
// APIV1POSTWalletReleaseResponse is the response to `POST /v1/wallet/release`
// friendly:ReleaseWalletHandleTokenResponse
type APIV1POSTWalletReleaseResponse struct {
APIV1ResponseEnvelope
}
// APIV1POSTWalletRenewResponse is the response to `POST /v1/wallet/renew`
// friendly:RenewWalletHandleTokenResponse
type APIV1POSTWalletRenewResponse struct {
APIV1ResponseEnvelope
WalletHandle APIV1WalletHandle `json:"wallet_handle"`
}
// APIV1POSTWalletRenameResponse is the response to `POST /v1/wallet/rename`
// friendly:RenameWalletResponse
type APIV1POSTWalletRenameResponse struct {
APIV1ResponseEnvelope
Wallet APIV1Wallet `json:"wallet"`
}
// APIV1POSTWalletInfoResponse is the response to `POST /v1/wallet/info`
// friendly:WalletInfoResponse
type APIV1POSTWalletInfoResponse struct {
APIV1ResponseEnvelope
WalletHandle APIV1WalletHandle `json:"wallet_handle"`
}
// APIV1POSTMasterKeyExportResponse is the response to `POST /v1/master-key/export`
// friendly:ExportMasterKeyResponse
type APIV1POSTMasterKeyExportResponse struct {
APIV1ResponseEnvelope
MasterDerivationKey APIV1MasterDerivationKey `json:"master_derivation_key"`
}
// APIV1POSTKeyImportResponse is the response to `POST /v1/key/import`
// friendly:ImportKeyResponse
type APIV1POSTKeyImportResponse struct {
APIV1ResponseEnvelope
Address string `json:"address"`
}
// APIV1POSTKeyExportResponse is the response to `POST /v1/key/export`
// friendly:ExportKeyResponse
type APIV1POSTKeyExportResponse struct {
APIV1ResponseEnvelope
PrivateKey APIV1PrivateKey `json:"private_key"`
}
// APIV1POSTKeyResponse is the response to `POST /v1/key`
// friendly:GenerateKeyResponse
type APIV1POSTKeyResponse struct {
APIV1ResponseEnvelope
Address string `json:"address"`
}
// APIV1DELETEKeyResponse is the response to `DELETE /v1/key`
// friendly:DeleteKeyResponse
type APIV1DELETEKeyResponse struct {
APIV1ResponseEnvelope
}
// APIV1POSTKeyListResponse is the response to `POST /v1/key/list`
// friendly:ListKeysResponse
type APIV1POSTKeyListResponse struct {
APIV1ResponseEnvelope
Addresses []string `json:"addresses"`
}
// APIV1POSTTransactionSignResponse is the response to `POST /v1/transaction/sign`
// friendly:SignTransactionResponse
type APIV1POSTTransactionSignResponse struct {
APIV1ResponseEnvelope
// swagger:strfmt byte
SignedTransaction []byte `json:"signed_transaction"`
}
// APIV1POSTProgramSignResponse is the response to `POST /v1/data/sign`
// friendly:SignProgramResponse
type APIV1POSTProgramSignResponse struct {
APIV1ResponseEnvelope
// swagger:strfmt byte
Signature []byte `json:"sig"`
}
// APIV1POSTMultisigListResponse is the response to `POST /v1/multisig/list`
// friendly:ListMultisigResponse
type APIV1POSTMultisigListResponse struct {
APIV1ResponseEnvelope
Addresses []string `json:"addresses"`
}
// APIV1POSTMultisigImportResponse is the response to `POST /v1/multisig/import`
// friendly:ImportMultisigResponse
type APIV1POSTMultisigImportResponse struct {
APIV1ResponseEnvelope
Address string `json:"address"`
}
// APIV1POSTMultisigExportResponse is the response to `POST /v1/multisig/export`
// friendly:ExportMultisigResponse
type APIV1POSTMultisigExportResponse struct {
APIV1ResponseEnvelope
Version uint8 `json:"multisig_version"`
Threshold uint8 `json:"threshold"`
PKs []APIV1PublicKey `json:"pks"`
}
// APIV1DELETEMultisigResponse is the response to `POST /v1/multisig/delete`
// friendly:DeleteMultisigResponse
type APIV1DELETEMultisigResponse struct {
APIV1ResponseEnvelope
}
// APIV1POSTMultisigTransactionSignResponse is the response to `POST /v1/multisig/sign`
// friendly:SignMultisigResponse
type APIV1POSTMultisigTransactionSignResponse struct {
APIV1ResponseEnvelope
// swagger:strfmt byte
Multisig []byte `json:"multisig"`
}
// APIV1POSTMultisigProgramSignResponse is the response to `POST /v1/multisig/signdata`
// friendly:SignProgramMultisigResponse
type APIV1POSTMultisigProgramSignResponse struct {
APIV1ResponseEnvelope
// swagger:strfmt byte
Multisig []byte `json:"multisig"`
}
| 1 | 42,166 | rename -> info ? | algorand-go-algorand | go |
@@ -135,6 +135,10 @@ class scheduler(Config):
prune_on_get_work = parameter.BoolParameter(default=False)
+ upstream_status_when_all = parameter.BoolParameter(default=False,
+ config_path=dict(section='scheduler',
+ name='upstream-status-when-all'))
+
class Failures(object):
""" | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
import inspect
try:
import cPickle as pickle
except ImportError:
import pickle
import functools
import itertools
import logging
import os
import re
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config
logger = logging.getLogger(__name__)
class Scheduler(object):
"""
Abstract base class.
Note that the methods all take string arguments, not Task objects...
"""""
add_task = NotImplemented
get_work = NotImplemented
ping = NotImplemented
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
UPSTREAM_SEVERITY_ORDER = (
'',
UPSTREAM_RUNNING,
UPSTREAM_MISSING_INPUT,
UPSTREAM_FAILED,
UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
FAILED: UPSTREAM_FAILED,
RUNNING: UPSTREAM_RUNNING,
PENDING: UPSTREAM_MISSING_INPUT,
DISABLED: UPSTREAM_DISABLED,
}
TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]')
RPC_METHODS = {}
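# Registers a client-side proxy for each decorated scheduler method in RPC_METHODS.
# The proxy validates the call against the method's signature and forwards it to
# /api/<method name> via self._request; the original function is returned unchanged.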
def rpc_method(**request_args):
def _rpc_method(fn):
# If request args are passed, return this function again for use as
# the decorator function with the request args attached.
fn_args = inspect.getargspec(fn)
assert not fn_args.varargs
assert fn_args.args[0] == 'self'
all_args = fn_args.args[1:]
defaults = dict(zip(reversed(all_args), reversed(fn_args.defaults or ())))
required_args = frozenset(arg for arg in all_args if arg not in defaults)
fn_name = fn.__name__
@functools.wraps(fn)
def rpc_func(self, *args, **kwargs):
actual_args = defaults.copy()
actual_args.update(dict(zip(all_args, args)))
actual_args.update(kwargs)
if not all(arg in actual_args for arg in required_args):
raise TypeError('{} takes {} arguments ({} given)'.format(
fn_name, len(all_args), len(actual_args)))
return self._request('/api/{}'.format(fn_name), actual_args, **request_args)
RPC_METHODS[fn_name] = rpc_func
return fn
return _rpc_method
class scheduler(Config):
    # TODO(erikbern): the config_path is needed for backwards compatibility. We
# should drop the compatibility at some point
retry_delay = parameter.FloatParameter(default=900.0)
remove_delay = parameter.FloatParameter(default=600.0)
worker_disconnect_delay = parameter.FloatParameter(default=60.0)
state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
# Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
# These disables last for disable_persist seconds.
disable_window = parameter.IntParameter(default=3600,
config_path=dict(section='scheduler', name='disable-window-seconds'))
disable_failures = parameter.IntParameter(default=999999999,
config_path=dict(section='scheduler', name='disable-num-failures'))
disable_hard_timeout = parameter.IntParameter(default=999999999,
config_path=dict(section='scheduler', name='disable-hard-timeout'))
disable_persist = parameter.IntParameter(default=86400,
config_path=dict(section='scheduler', name='disable-persist-seconds'))
max_shown_tasks = parameter.IntParameter(default=100000)
max_graph_nodes = parameter.IntParameter(default=100000)
record_task_history = parameter.BoolParameter(default=False)
prune_on_get_work = parameter.BoolParameter(default=False)
class Failures(object):
"""
This class tracks the number of failures in a given time window.
Failures added are marked with the current timestamp, and this class counts
the number of failures in a sliding time window ending at the present.
"""
def __init__(self, window):
"""
Initialize with the given window.
:param window: how long to track failures for, as a float (number of seconds).
"""
self.window = window
self.failures = collections.deque()
self.first_failure_time = None
def add_failure(self):
"""
Add a failure event with the current timestamp.
"""
failure_time = time.time()
if not self.first_failure_time:
self.first_failure_time = failure_time
self.failures.append(failure_time)
def num_failures(self):
"""
Return the number of failures in the window.
"""
min_time = time.time() - self.window
while self.failures and self.failures[0] < min_time:
self.failures.popleft()
return len(self.failures)
def clear(self):
"""
Clear the failure queue.
"""
self.failures.clear()
def _get_default(x, default):
if x is not None:
return x
else:
return default
class Task(object):
def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None,
tracking_url=None, status_message=None):
self.id = task_id
        self.stakeholders = set()  # worker ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)
        self.workers = set()  # worker ids that can perform this task - the task is 'BROKEN' if none of these workers are active
if deps is None:
self.deps = set()
else:
self.deps = set(deps)
self.status = status # PENDING, RUNNING, FAILED or DONE
self.time = time.time() # Timestamp when task was first added
self.updated = self.time
self.retry = None
self.remove = None
self.worker_running = None # the worker id that is currently running the task or None
self.time_running = None # Timestamp when picked up by worker
self.expl = None
self.priority = priority
self.resources = _get_default(resources, {})
self.family = family
self.module = module
self.params = _get_default(params, {})
self.disable_failures = disable_failures
self.disable_hard_timeout = disable_hard_timeout
self.failures = Failures(disable_window)
self.tracking_url = tracking_url
self.status_message = status_message
self.scheduler_disable_time = None
self.runnable = False
def __repr__(self):
return "Task(%r)" % vars(self)
def add_failure(self):
self.failures.add_failure()
def has_excessive_failures(self):
if self.failures.first_failure_time is not None:
if (time.time() >= self.failures.first_failure_time +
self.disable_hard_timeout):
return True
if self.failures.num_failures() >= self.disable_failures:
return True
return False
@property
def pretty_id(self):
param_str = ', '.join('{}={}'.format(key, value) for key, value in self.params.items())
return '{}({})'.format(self.family, param_str)
class Worker(object):
"""
Structure for tracking worker activity and keeping their references.
"""
def __init__(self, worker_id, last_active=None):
self.id = worker_id
self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host)
self.last_active = last_active or time.time() # seconds since epoch
self.last_get_work = None
self.started = time.time() # seconds since epoch
self.tasks = set() # task objects
self.info = {}
self.disabled = False
def add_info(self, info):
self.info.update(info)
def update(self, worker_reference, get_work=False):
if worker_reference:
self.reference = worker_reference
self.last_active = time.time()
if get_work:
self.last_get_work = time.time()
def prune(self, config):
# Delete workers that haven't said anything for a while (probably killed)
if self.last_active + config.worker_disconnect_delay < time.time():
return True
def get_pending_tasks(self, state):
"""
Get PENDING (and RUNNING) tasks for this worker.
You have to pass in the state for optimization reasons.
"""
if len(self.tasks) < state.num_pending_tasks():
return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
self.tasks)
else:
return state.get_pending_tasks()
def is_trivial_worker(self, state):
"""
        True if this worker is not an assistant and all of its pending tasks
        have no resource requirements.
We have to pass the state parameter for optimization reasons.
"""
if self.assistant:
return False
return all(not task.resources for task in self.get_pending_tasks(state))
@property
def assistant(self):
return self.info.get('assistant', False)
def __str__(self):
return self.id
class SimpleTaskState(object):
"""
    Keep track of the current state and handle persistence.
    The point of this class is to enable other ways to keep state, e.g. by using a database
These will be implemented by creating an abstract base class that this and other classes
inherit from.
"""
def __init__(self, state_path):
self._state_path = state_path
self._tasks = {} # map from id to a Task object
self._status_tasks = collections.defaultdict(dict)
self._active_workers = {} # map from id to a Worker object
def get_state(self):
return self._tasks, self._active_workers
def set_state(self, state):
self._tasks, self._active_workers = state
def dump(self):
try:
with open(self._state_path, 'wb') as fobj:
pickle.dump(self.get_state(), fobj)
except IOError:
logger.warning("Failed saving scheduler state", exc_info=1)
else:
logger.info("Saved state in %s", self._state_path)
# prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control?
def load(self):
if os.path.exists(self._state_path):
logger.info("Attempting to load state from %s", self._state_path)
try:
with open(self._state_path, 'rb') as fobj:
state = pickle.load(fobj)
except BaseException:
logger.exception("Error when loading state. Starting from empty state.")
return
self.set_state(state)
self._status_tasks = collections.defaultdict(dict)
for task in six.itervalues(self._tasks):
self._status_tasks[task.status][task.id] = task
else:
logger.info("No prior state file exists at %s. Starting with empty state", self._state_path)
def get_active_tasks(self, status=None):
if status:
for task in six.itervalues(self._status_tasks[status]):
yield task
else:
for task in six.itervalues(self._tasks):
yield task
def get_running_tasks(self):
return six.itervalues(self._status_tasks[RUNNING])
def get_pending_tasks(self):
return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
for status in [PENDING, RUNNING])
def num_pending_tasks(self):
"""
Return how many tasks are PENDING + RUNNING. O(1).
"""
return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])
def get_task(self, task_id, default=None, setdefault=None):
if setdefault:
task = self._tasks.setdefault(task_id, setdefault)
self._status_tasks[task.status][task.id] = task
return task
else:
return self._tasks.get(task_id, default)
def has_task(self, task_id):
return task_id in self._tasks
def re_enable(self, task, config=None):
task.scheduler_disable_time = None
task.failures.clear()
if config:
self.set_status(task, FAILED, config)
task.failures.clear()
def set_status(self, task, new_status, config=None):
if new_status == FAILED:
assert config is not None
if new_status == DISABLED and task.status == RUNNING:
return
if task.status == DISABLED:
if new_status == DONE:
self.re_enable(task)
# don't allow workers to override a scheduler disable
elif task.scheduler_disable_time is not None and new_status != DISABLED:
return
if new_status == FAILED and task.status != DISABLED:
task.add_failure()
if task.has_excessive_failures():
task.scheduler_disable_time = time.time()
new_status = DISABLED
notifications.send_error_email(
'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
'{task} failed {failures} times in the last {window} seconds, so it is being '
'disabled for {persist} seconds'.format(
failures=config.disable_failures,
task=task.id,
window=config.disable_window,
persist=config.disable_persist,
))
elif new_status == DISABLED:
task.scheduler_disable_time = None
if new_status != task.status:
self._status_tasks[task.status].pop(task.id)
self._status_tasks[new_status][task.id] = task
task.status = new_status
task.updated = time.time()
def fail_dead_worker_task(self, task, config, assistants):
# If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic
if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:
logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
"FAILED with retry delay of %rs", task.id, task.worker_running,
config.retry_delay)
task.worker_running = None
self.set_status(task, FAILED, config)
task.retry = time.time() + config.retry_delay
def update_status(self, task, config):
# Mark tasks with no remaining active stakeholders for deletion
if (not task.stakeholders) and (task.remove is None) and (task.status != RUNNING):
# We don't check for the RUNNING case, because that is already handled
# by the fail_dead_worker_task function.
logger.debug("Task %r has no stakeholders anymore -> might remove "
"task in %s seconds", task.id, config.remove_delay)
task.remove = time.time() + config.remove_delay
# Re-enable task after the disable time expires
if task.status == DISABLED and task.scheduler_disable_time is not None:
if time.time() - task.scheduler_disable_time > config.disable_persist:
self.re_enable(task, config)
# Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0
if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
self.set_status(task, PENDING, config)
def may_prune(self, task):
return task.remove and time.time() > task.remove
def inactivate_tasks(self, delete_tasks):
# The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
# but with a pluggable state storage, you might very well want to keep some history of
# older tasks as well. That's why we call it "inactivate" (as in the verb)
for task in delete_tasks:
task_obj = self._tasks.pop(task)
self._status_tasks[task_obj.status].pop(task)
def get_active_workers(self, last_active_lt=None, last_get_work_gt=None):
for worker in six.itervalues(self._active_workers):
if last_active_lt is not None and worker.last_active >= last_active_lt:
continue
last_get_work = getattr(worker, 'last_get_work', None)
if last_get_work_gt is not None and (
last_get_work is None or last_get_work <= last_get_work_gt):
continue
yield worker
def get_assistants(self, last_active_lt=None):
return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))
def get_worker_ids(self):
return self._active_workers.keys() # only used for unit tests
def get_worker(self, worker_id):
return self._active_workers.setdefault(worker_id, Worker(worker_id))
def inactivate_workers(self, delete_workers):
# Mark workers as inactive
for worker in delete_workers:
self._active_workers.pop(worker)
self._remove_workers_from_tasks(delete_workers)
def _remove_workers_from_tasks(self, workers, remove_stakeholders=True):
for task in self.get_active_tasks():
if remove_stakeholders:
task.stakeholders.difference_update(workers)
task.workers.difference_update(workers)
def disable_workers(self, workers):
self._remove_workers_from_tasks(workers, remove_stakeholders=False)
for worker in workers:
self.get_worker(worker).disabled = True
class CentralPlannerScheduler(Scheduler):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
"""
Keyword Arguments:
:param config: an object of class "scheduler" or None (in which the global instance will be used)
:param resources: a dict of str->int constraints
:param task_history_impl: ignore config and use this object as the task history
"""
self._config = config or scheduler(**kwargs)
self._state = SimpleTaskState(self._config.state_path)
if task_history_impl:
self._task_history = task_history_impl
elif self._config.record_task_history:
from luigi import db_task_history # Needs sqlalchemy, thus imported here
self._task_history = db_task_history.DbTaskHistory()
else:
self._task_history = history.NopHistory()
self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
self._make_task = functools.partial(
Task, disable_failures=self._config.disable_failures,
disable_hard_timeout=self._config.disable_hard_timeout,
disable_window=self._config.disable_window)
self._worker_requests = {}
def load(self):
self._state.load()
def dump(self):
self._state.dump()
@rpc_method()
def prune(self):
logger.info("Starting pruning of task graph")
self._prune_workers()
self._prune_tasks()
logger.info("Done pruning task graph")
def _prune_workers(self):
remove_workers = []
for worker in self._state.get_active_workers():
if worker.prune(self._config):
logger.debug("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
remove_workers.append(worker.id)
self._state.inactivate_workers(remove_workers)
def _prune_tasks(self):
assistant_ids = set(w.id for w in self._state.get_assistants())
remove_tasks = []
for task in self._state.get_active_tasks():
self._state.fail_dead_worker_task(task, self._config, assistant_ids)
self._state.update_status(task, self._config)
if self._state.may_prune(task):
logger.info("Removing task %r", task.id)
remove_tasks.append(task.id)
self._state.inactivate_tasks(remove_tasks)
def update(self, worker_id, worker_reference=None, get_work=False):
"""
Keep track of whenever the worker was last active.
"""
worker = self._state.get_worker(worker_id)
worker.update(worker_reference, get_work=get_work)
return not getattr(worker, 'disabled', False)
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
@rpc_method()
def add_task(self, task_id=None, status=PENDING, runnable=True,
deps=None, new_deps=None, expl=None, resources=None,
priority=0, family='', module=None, params=None,
assistant=False, tracking_url=None, worker=None, **kwargs):
"""
* add task identified by task_id if it doesn't exist
* if deps is not None, update dependency list
* update status of task
* add additional workers/stakeholders
* update priority when needed
"""
assert worker is not None
worker_id = worker
worker_enabled = self.update(worker_id)
if worker_enabled:
_default_task = self._make_task(
task_id=task_id, status=PENDING, deps=deps, resources=resources,
priority=priority, family=family, module=module, params=params,
)
else:
_default_task = None
task = self._state.get_task(task_id, setdefault=_default_task)
if task is None or (task.status != RUNNING and not worker_enabled):
return
# for setting priority, we'll sometimes create tasks with unset family and params
if not task.family:
task.family = family
if not getattr(task, 'module', None):
task.module = module
if not task.params:
task.params = _get_default(params, {})
if tracking_url is not None or task.status != RUNNING:
task.tracking_url = tracking_url
if task.remove is not None:
task.remove = None # unmark task for removal so it isn't removed after being added
if expl is not None:
task.expl = expl
if not (task.status == RUNNING and status == PENDING) or new_deps:
# don't allow re-scheduling of task while it is running, it must either fail or succeed first
if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
self._update_task_history(task, status)
self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
if status == FAILED:
task.retry = self._retry_time(task, self._config)
if deps is not None:
task.deps = set(deps)
if new_deps is not None:
task.deps.update(new_deps)
if resources is not None:
task.resources = resources
if worker_enabled and not assistant:
task.stakeholders.add(worker_id)
# Task dependencies might not exist yet. Let's create dummy tasks for them for now.
# Otherwise the task dependencies might end up being pruned if scheduling takes a long time
for dep in task.deps or []:
t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
t.stakeholders.add(worker_id)
self._update_priority(task, priority, worker_id)
if runnable and status != FAILED and worker_enabled:
task.workers.add(worker_id)
self._state.get_worker(worker_id).tasks.add(task)
task.runnable = runnable
@rpc_method()
def add_worker(self, worker, info, **kwargs):
self._state.get_worker(worker).add_info(info)
@rpc_method()
def disable_worker(self, worker):
self._state.disable_workers({worker})
@rpc_method()
def update_resources(self, **resources):
if self._resources is None:
self._resources = {}
self._resources.update(resources)
def _has_resources(self, needed_resources, used_resources):
if needed_resources is None:
return True
available_resources = self._resources or {}
for resource, amount in six.iteritems(needed_resources):
if amount + used_resources[resource] > available_resources.get(resource, 1):
return False
return True
def _used_resources(self):
used_resources = collections.defaultdict(int)
if self._resources is not None:
for task in self._state.get_active_tasks(status=RUNNING):
if task.resources:
for resource, amount in six.iteritems(task.resources):
used_resources[resource] += amount
return used_resources
def _rank(self, task):
"""
Return worker's rank function for task scheduling.
:return:
"""
return task.priority, -task.time
def _schedulable(self, task):
if task.status != PENDING:
return False
for dep in task.deps:
dep_task = self._state.get_task(dep, default=None)
if dep_task is None or dep_task.status != DONE:
return False
return True
def _retry_time(self, task, config):
return time.time() + config.retry_delay
@rpc_method(allow_null=False)
def get_work(self, host=None, assistant=False, current_tasks=None, worker=None, **kwargs):
# TODO: remove any expired nodes
        # Algo: iterate over all nodes, find the highest priority node with no dependencies and available
# resources.
# Resource checking looks both at currently available resources and at which resources would
# be available if all running tasks died and we rescheduled all workers greedily. We do both
# checks in order to prevent a worker with many low-priority tasks from starving other
# workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely
# nothing it can wait for
if self._config.prune_on_get_work:
self.prune()
assert worker is not None
worker_id = worker
# Return remaining tasks that have no FAILED descendants
self.update(worker_id, {'host': host}, get_work=True)
if assistant:
self.add_worker(worker_id, [('assistant', assistant)])
best_task = None
if current_tasks is not None:
ct_set = set(current_tasks)
for task in sorted(self._state.get_running_tasks(), key=self._rank):
if task.worker_running == worker_id and task.id not in ct_set:
best_task = task
locally_pending_tasks = 0
running_tasks = []
upstream_table = {}
greedy_resources = collections.defaultdict(int)
n_unique_pending = 0
worker = self._state.get_worker(worker_id)
if worker.is_trivial_worker(self._state):
relevant_tasks = worker.get_pending_tasks(self._state)
used_resources = collections.defaultdict(int)
greedy_workers = dict() # If there's no resources, then they can grab any task
else:
relevant_tasks = self._state.get_pending_tasks()
used_resources = self._used_resources()
activity_limit = time.time() - self._config.worker_disconnect_delay
active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit)
greedy_workers = dict((worker.id, worker.info.get('workers', 1))
for worker in active_workers)
tasks = list(relevant_tasks)
tasks.sort(key=self._rank, reverse=True)
for task in tasks:
in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers
if task.status == RUNNING and in_workers:
# Return a list of currently running tasks to the client,
# makes it easier to troubleshoot
other_worker = self._state.get_worker(task.worker_running)
more_info = {'task_id': task.id, 'worker': str(other_worker)}
if other_worker is not None:
more_info.update(other_worker.info)
running_tasks.append(more_info)
if task.status == PENDING and in_workers:
upstream_status = self._upstream_status(task.id, upstream_table)
if upstream_status != UPSTREAM_DISABLED:
locally_pending_tasks += 1
if len(task.workers) == 1 and not assistant:
n_unique_pending += 1
if best_task:
continue
if task.status == RUNNING and (task.worker_running in greedy_workers):
greedy_workers[task.worker_running] -= 1
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
if self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
if in_workers and self._has_resources(task.resources, used_resources):
best_task = task
else:
workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
for task_worker in workers:
if greedy_workers.get(task_worker, 0) > 0:
# use up a worker
greedy_workers[task_worker] -= 1
# keep track of the resources used in greedy scheduling
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
break
reply = {'n_pending_tasks': locally_pending_tasks,
'running_tasks': running_tasks,
'task_id': None,
'n_unique_pending': n_unique_pending}
if best_task:
self._state.set_status(best_task, RUNNING, self._config)
best_task.worker_running = worker_id
best_task.time_running = time.time()
self._update_task_history(best_task, RUNNING, host=host)
reply['task_id'] = best_task.id
reply['task_family'] = best_task.family
reply['task_module'] = getattr(best_task, 'module', None)
reply['task_params'] = best_task.params
return reply
@rpc_method(attempts=1)
def ping(self, **kwargs):
worker_id = kwargs['worker']
self.update(worker_id)
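    # Computes the most severe upstream status of a task using an explicit stack
    # (iterative DFS). Pending tasks with deps get a placeholder entry that is filled
    # in postorder with the highest-severity status of their dependencies; results
    # are memoized in upstream_status_table.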
def _upstream_status(self, task_id, upstream_status_table):
if task_id in upstream_status_table:
return upstream_status_table[task_id]
elif self._state.has_task(task_id):
task_stack = [task_id]
while task_stack:
dep_id = task_stack.pop()
dep = self._state.get_task(dep_id)
if dep:
if dep.status == DONE:
continue
if dep_id not in upstream_status_table:
if dep.status == PENDING and dep.deps:
task_stack += [dep_id] + list(dep.deps)
upstream_status_table[dep_id] = '' # will be updated postorder
else:
dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
upstream_status_table[dep_id] = dep_status
elif upstream_status_table[dep_id] == '' and dep.deps:
# This is the postorder update step when we set the
# status based on the previously calculated child elements
status = max((upstream_status_table.get(a_task_id, '')
for a_task_id in dep.deps),
key=UPSTREAM_SEVERITY_KEY)
upstream_status_table[dep_id] = status
return upstream_status_table[dep_id]
def _serialize_task(self, task_id, include_deps=True, deps=None):
task = self._state.get_task(task_id)
ret = {
'display_name': task.pretty_id,
'status': task.status,
'workers': list(task.workers),
'worker_running': task.worker_running,
'time_running': getattr(task, "time_running", None),
'start_time': task.time,
'last_updated': getattr(task, "updated", task.time),
'params': task.params,
'name': task.family,
'priority': task.priority,
'resources': task.resources,
'tracking_url': getattr(task, "tracking_url", None),
'status_message': getattr(task, "status_message", None)
}
if task.status == DISABLED:
ret['re_enable_able'] = task.scheduler_disable_time is not None
if include_deps:
ret['deps'] = list(task.deps if deps is None else deps)
return ret
@rpc_method()
def graph(self, **kwargs):
self.prune()
serialized = {}
seen = set()
for task in self._state.get_active_tasks():
serialized.update(self._traverse_graph(task.id, seen))
return serialized
def _filter_done(self, task_ids):
for task_id in task_ids:
task = self._state.get_task(task_id)
if task is None or task.status != DONE:
yield task_id
def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True):
""" Returns the dependency graph rooted at task_id
This does a breadth-first traversal to find the nodes closest to the
root before hitting the scheduler.max_graph_nodes limit.
:param root_task_id: the id of the graph's root
:return: A map of task id to serialized node
"""
if seen is None:
seen = set()
elif root_task_id in seen:
return {}
if dep_func is None:
def dep_func(t):
return t.deps
seen.add(root_task_id)
serialized = {}
queue = collections.deque([root_task_id])
while queue:
task_id = queue.popleft()
task = self._state.get_task(task_id)
if task is None or not task.family:
logger.debug('Missing task for id [%s]', task_id)
# NOTE : If a dependency is missing from self._state there is no way to deduce the
# task family and parameters.
family_match = TASK_FAMILY_RE.match(task_id)
family = family_match.group(1) if family_match else UNKNOWN
params = {'task_id': task_id}
serialized[task_id] = {
'deps': [],
'status': UNKNOWN,
'workers': [],
'start_time': UNKNOWN,
'params': params,
'name': family,
'display_name': task_id,
'priority': 0,
}
else:
deps = dep_func(task)
if not include_done:
deps = list(self._filter_done(deps))
serialized[task_id] = self._serialize_task(task_id, deps=deps)
for dep in sorted(deps):
if dep not in seen:
seen.add(dep)
queue.append(dep)
if task_id != root_task_id:
del serialized[task_id]['display_name']
if len(serialized) >= self._config.max_graph_nodes:
break
return serialized
@rpc_method()
def dep_graph(self, task_id, include_done=True, **kwargs):
self.prune()
if not self._state.has_task(task_id):
return {}
return self._traverse_graph(task_id, include_done=include_done)
@rpc_method()
def inverse_dep_graph(self, task_id, include_done=True, **kwargs):
self.prune()
if not self._state.has_task(task_id):
return {}
inverse_graph = collections.defaultdict(set)
for task in self._state.get_active_tasks():
for dep in task.deps:
inverse_graph[dep].add(task.id)
return self._traverse_graph(
task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done)
@rpc_method()
def task_list(self, status='', upstream_status='', limit=True, search=None, **kwargs):
"""
Query for a subset of tasks by status.
"""
self.prune()
result = {}
upstream_status_table = {} # used to memoize upstream status
if search is None:
def filter_func(_):
return True
else:
terms = search.split()
def filter_func(t):
return all(term in t.pretty_id for term in terms)
for task in filter(filter_func, self._state.get_active_tasks(status)):
if (task.status != PENDING or not upstream_status or
upstream_status == self._upstream_status(task.id, upstream_status_table)):
serialized = self._serialize_task(task.id, False)
result[task.id] = serialized
if limit and len(result) > self._config.max_shown_tasks:
return {'num_tasks': len(result)}
return result
def _first_task_display_name(self, worker):
task_id = worker.info.get('first_task', '')
if self._state.has_task(task_id):
return self._state.get_task(task_id).pretty_id
else:
return task_id
@rpc_method()
def worker_list(self, include_running=True, **kwargs):
self.prune()
workers = [
dict(
name=worker.id,
last_active=worker.last_active,
started=getattr(worker, 'started', None),
first_task_display_name=self._first_task_display_name(worker),
**worker.info
) for worker in self._state.get_active_workers()]
workers.sort(key=lambda worker: worker['started'], reverse=True)
if include_running:
running = collections.defaultdict(dict)
num_pending = collections.defaultdict(int)
num_uniques = collections.defaultdict(int)
for task in self._state.get_pending_tasks():
if task.status == RUNNING and task.worker_running:
running[task.worker_running][task.id] = self._serialize_task(task.id, False)
elif task.status == PENDING:
for worker in task.workers:
num_pending[worker] += 1
if len(task.workers) == 1:
num_uniques[list(task.workers)[0]] += 1
for worker in workers:
tasks = running[worker['name']]
worker['num_running'] = len(tasks)
worker['num_pending'] = num_pending[worker['name']]
worker['num_uniques'] = num_uniques[worker['name']]
worker['running'] = tasks
return workers
@rpc_method()
def resource_list(self):
"""
        Resource usage info and their consumers (tasks).
"""
self.prune()
resources = [
dict(
name=resource,
num_total=r_dict['total'],
num_used=r_dict['used']
) for resource, r_dict in six.iteritems(self.resources())]
if self._resources is not None:
consumers = collections.defaultdict(dict)
for task in self._state.get_running_tasks():
if task.status == RUNNING and task.resources:
for resource, amount in six.iteritems(task.resources):
consumers[resource][task.id] = self._serialize_task(task.id, False)
for resource in resources:
tasks = consumers[resource['name']]
resource['num_consumer'] = len(tasks)
resource['running'] = tasks
return resources
def resources(self):
''' get total resources and available ones '''
used_resources = self._used_resources()
ret = collections.defaultdict(dict)
for resource, total in six.iteritems(self._resources):
ret[resource]['total'] = total
if resource in used_resources:
ret[resource]['used'] = used_resources[resource]
else:
ret[resource]['used'] = 0
return ret
@rpc_method()
def task_search(self, task_str, **kwargs):
"""
Query for a subset of tasks by task_id.
:param task_str:
:return:
"""
self.prune()
result = collections.defaultdict(dict)
for task in self._state.get_active_tasks():
if task.id.find(task_str) != -1:
serialized = self._serialize_task(task.id, False)
result[task.status][task.id] = serialized
return result
@rpc_method()
def re_enable_task(self, task_id):
serialized = {}
task = self._state.get_task(task_id)
if task and task.status == DISABLED and task.scheduler_disable_time:
self._state.re_enable(task, self._config)
serialized = self._serialize_task(task_id)
return serialized
@rpc_method()
def fetch_error(self, task_id, **kwargs):
if self._state.has_task(task_id):
task = self._state.get_task(task_id)
return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id}
else:
return {"taskId": task_id, "error": ""}
@rpc_method()
def set_task_status_message(self, task_id, status_message):
if self._state.has_task(task_id):
task = self._state.get_task(task_id)
task.status_message = status_message
@rpc_method()
def get_task_status_message(self, task_id):
if self._state.has_task(task_id):
task = self._state.get_task(task_id)
return {"taskId": task_id, "statusMessage": task.status_message}
else:
return {"taskId": task_id, "statusMessage": ""}
def _update_task_history(self, task, status, host=None):
try:
if status == DONE or status == FAILED:
successful = (status == DONE)
self._task_history.task_finished(task, successful)
elif status == PENDING:
self._task_history.task_scheduled(task)
elif status == RUNNING:
self._task_history.task_started(task, host)
except BaseException:
logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
# Used by server.py to expose the calls
return self._task_history
| 1 | 15,360 | Remove the `config_path=dict(...)`, that is automatic! | spotify-luigi | py |
@@ -23,9 +23,11 @@ function cleanupPlugins(resolve, reject) {
});
});
- axe.utils.toArray(document.querySelectorAll('frame, iframe')).forEach(function (frame) {
+ var flattenedTree = axe.utils.getFlattenedTree(document.body)
+
+ axe.utils.querySelectorAll(flattenedTree, 'iframe, frame').forEach(function (node) {
q.defer(function (res, rej) {
- return axe.utils.sendCommandToFrame(frame, {
+ return axe.utils.sendCommandToFrame(node.actualNode, {
command: 'cleanup-plugin'
}, res, rej);
}); | 1 |
function cleanupPlugins(resolve, reject) {
'use strict';
if (!axe._audit) {
throw new Error('No audit configured');
}
var q = axe.utils.queue();
	// If a plugin fails its cleanup, we still want the others to run
var cleanupErrors = [];
Object.keys(axe.plugins).forEach(function (key) {
q.defer(function (res) {
var rej = function (err) {
cleanupErrors.push(err);
res();
};
try {
axe.plugins[key].cleanup(res, rej);
} catch(err) {
rej(err);
}
});
});
axe.utils.toArray(document.querySelectorAll('frame, iframe')).forEach(function (frame) {
q.defer(function (res, rej) {
return axe.utils.sendCommandToFrame(frame, {
command: 'cleanup-plugin'
}, res, rej);
});
});
q.then(function (results) {
if (cleanupErrors.length === 0) {
resolve(results);
} else {
reject(cleanupErrors);
}
})
.catch(reject);
}
axe.cleanup = cleanupPlugins;
| 1 | 11,508 | I had to create a new flattened tree to support this functionality. The only other place in which a flattened tree is created is during the audit itself. Since `axe.cleanup` can be called at any arbitrary time, we can't reuse the same tree. | dequelabs-axe-core | js |
@@ -52,9 +52,15 @@ public final class ConfigUtil {
private static final String MICROSERVICE_CONFIG_LOADER_KEY = "cse-microservice-config-loader";
+ private static ConfigModel model = new ConfigModel();
+
private ConfigUtil() {
}
+ public static void addConfigs(Map<String, Object> config) {
+ model.setConfig(config);
+ }
+
public static Object getProperty(String key) {
Object config = DynamicPropertyFactory.getBackingConfigurationSource();
return getProperty(config, key); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.config;
import static org.apache.servicecomb.foundation.common.base.ServiceCombConstants.CONFIG_CSE_PREFIX;
import static org.apache.servicecomb.foundation.common.base.ServiceCombConstants.CONFIG_KEY_SPLITER;
import static org.apache.servicecomb.foundation.common.base.ServiceCombConstants.CONFIG_SERVICECOMB_PREFIX;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.commons.configuration.AbstractConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.EnvironmentConfiguration;
import org.apache.commons.configuration.SystemConfiguration;
import org.apache.servicecomb.config.archaius.scheduler.NeverStartPollingScheduler;
import org.apache.servicecomb.config.archaius.sources.ConfigModel;
import org.apache.servicecomb.config.archaius.sources.MicroserviceConfigLoader;
import org.apache.servicecomb.config.archaius.sources.MicroserviceConfigurationSource;
import org.apache.servicecomb.config.spi.ConfigCenterConfigurationSource;
import org.apache.servicecomb.foundation.common.utils.SPIServiceUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.config.ConcurrentCompositeConfiguration;
import com.netflix.config.ConcurrentMapConfiguration;
import com.netflix.config.ConfigurationManager;
import com.netflix.config.DynamicConfiguration;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.config.DynamicWatchedConfiguration;
import com.netflix.config.WatchedUpdateListener;
import com.netflix.config.WatchedUpdateResult;
public final class ConfigUtil {
private static final Logger LOGGER = LoggerFactory.getLogger(ConfigUtil.class);
private static final String MICROSERVICE_CONFIG_LOADER_KEY = "cse-microservice-config-loader";
private ConfigUtil() {
}
public static Object getProperty(String key) {
Object config = DynamicPropertyFactory.getBackingConfigurationSource();
return getProperty(config, key);
}
public static Object getProperty(Object config, String key) {
if (null != config && Configuration.class.isInstance(config)) {
Configuration configuration = (Configuration) config;
return configuration.getProperty(key);
}
return null;
}
private static void setMicroserviceConfigLoader(Configuration config, MicroserviceConfigLoader loader) {
config.setProperty(MICROSERVICE_CONFIG_LOADER_KEY, loader);
}
public static MicroserviceConfigLoader getMicroserviceConfigLoader() {
return (MicroserviceConfigLoader) getProperty(MICROSERVICE_CONFIG_LOADER_KEY);
}
public static MicroserviceConfigLoader getMicroserviceConfigLoader(Configuration config) {
return (MicroserviceConfigLoader) getProperty(config, MICROSERVICE_CONFIG_LOADER_KEY);
}
public static ConcurrentCompositeConfiguration createLocalConfig() {
MicroserviceConfigLoader loader = new MicroserviceConfigLoader();
loader.loadAndSort();
LOGGER.info("create local config:");
for (ConfigModel configModel : loader.getConfigModels()) {
LOGGER.info(" {}.", configModel.getUrl());
}
ConcurrentCompositeConfiguration config = ConfigUtil.createLocalConfig(loader.getConfigModels());
ConfigUtil.setMicroserviceConfigLoader(config, loader);
return config;
}
public static ConcurrentCompositeConfiguration createLocalConfig(List<ConfigModel> configModelList) {
ConcurrentCompositeConfiguration config = new ConcurrentCompositeConfiguration();
duplicateServiceCombConfigToCse(config,
new ConcurrentMapConfiguration(new SystemConfiguration()),
"configFromSystem");
duplicateServiceCombConfigToCse(config,
convertEnvVariable(new ConcurrentMapConfiguration(new EnvironmentConfiguration())),
"configFromEnvironment");
duplicateServiceCombConfigToCse(config,
new DynamicConfiguration(
new MicroserviceConfigurationSource(configModelList), new NeverStartPollingScheduler()),
"configFromYamlFile");
return config;
}
public static AbstractConfiguration convertEnvVariable(AbstractConfiguration source) {
Iterator<String> keys = source.getKeys();
while (keys.hasNext()) {
String key = keys.next();
String[] separatedKey = key.split(CONFIG_KEY_SPLITER);
if (separatedKey.length == 1) {
continue;
}
String newKey = String.join(".", separatedKey);
source.addProperty(newKey, source.getProperty(key));
}
return source;
}
  // Inject a copy of cse.xxx for each servicecomb.xxx property
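  // Illustrative example (the key is hypothetical): a property "servicecomb.rest.address"
  // gets a duplicate entry "cse.rest.address", so either prefix resolves to the same value.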
private static void duplicateServiceCombConfigToCse(AbstractConfiguration source) {
Iterator<String> keys = source.getKeys();
while (keys.hasNext()) {
String key = keys.next();
if (!key.startsWith(CONFIG_SERVICECOMB_PREFIX)) {
continue;
}
String cseKey = CONFIG_CSE_PREFIX + key.substring(key.indexOf(".") + 1);
source.addProperty(cseKey, source.getProperty(key));
}
}
private static void duplicateServiceCombConfigToCse(ConcurrentCompositeConfiguration compositeConfiguration,
AbstractConfiguration source,
String sourceName) {
duplicateServiceCombConfigToCse(source);
compositeConfiguration.addConfiguration(source, sourceName);
}
public static DynamicWatchedConfiguration createConfigFromConfigCenter(Configuration localConfiguration) {
ConfigCenterConfigurationSource configCenterConfigurationSource =
SPIServiceUtils.getTargetService(ConfigCenterConfigurationSource.class);
if (null == configCenterConfigurationSource) {
LOGGER.info(
"config center SPI service can not find, skip to load configuration from config center");
return null;
}
if (!configCenterConfigurationSource.isValidSource(localConfiguration)) {
LOGGER.info("Config Source serverUri is not correctly configured.");
return null;
}
configCenterConfigurationSource.init(localConfiguration);
return new DynamicWatchedConfiguration(configCenterConfigurationSource);
}
public static AbstractConfiguration createDynamicConfig() {
LOGGER.info("create dynamic config:");
ConcurrentCompositeConfiguration config = ConfigUtil.createLocalConfig();
DynamicWatchedConfiguration configFromConfigCenter = createConfigFromConfigCenter(config);
if (configFromConfigCenter != null) {
ConcurrentMapConfiguration injectConfig = new ConcurrentMapConfiguration();
config.addConfigurationAtFront(injectConfig, "extraInjectConfig");
duplicateServiceCombConfigToCse(configFromConfigCenter);
config.addConfigurationAtFront(configFromConfigCenter, "configCenterConfig");
configFromConfigCenter.getSource().addUpdateListener(new ServiceCombPropertyUpdateListener(injectConfig));
}
return config;
}
public static void installDynamicConfig() {
if (ConfigurationManager.isConfigurationInstalled()) {
LOGGER.warn("Configuration installed by others, will ignore this configuration.");
return;
}
AbstractConfiguration dynamicConfig = ConfigUtil.createDynamicConfig();
ConfigurationManager.install(dynamicConfig);
}
private static class ServiceCombPropertyUpdateListener implements WatchedUpdateListener {
private final ConcurrentMapConfiguration injectConfig;
ServiceCombPropertyUpdateListener(ConcurrentMapConfiguration injectConfig) {
this.injectConfig = injectConfig;
}
@Override
public void updateConfiguration(WatchedUpdateResult watchedUpdateResult) {
Map<String, Object> adds = watchedUpdateResult.getAdded();
if (adds != null) {
for (String add : adds.keySet()) {
if (add.startsWith(CONFIG_SERVICECOMB_PREFIX)) {
String key = CONFIG_CSE_PREFIX + add.substring(add.indexOf(".") + 1);
injectConfig.addProperty(key, adds.get(add));
}
}
}
Map<String, Object> deletes = watchedUpdateResult.getDeleted();
if (deletes != null) {
for (String delete : deletes.keySet()) {
if (delete.startsWith(CONFIG_SERVICECOMB_PREFIX)) {
injectConfig.clearProperty(CONFIG_CSE_PREFIX + delete.substring(delete.indexOf(".") + 1));
}
}
}
Map<String, Object> changes = watchedUpdateResult.getChanged();
if (changes != null) {
for (String change : changes.keySet()) {
if (change.startsWith(CONFIG_SERVICECOMB_PREFIX)) {
String key = CONFIG_CSE_PREFIX + change.substring(change.indexOf(".") + 1);
injectConfig.setProperty(key, changes.get(change));
}
}
}
}
}
}
| 1 | 9,192 | As this method is just set the internal cofigModel, it's better to use setConfigs instead of addConfigs. | apache-servicecomb-java-chassis | java |
@@ -44,7 +44,13 @@ const (
// Timelimits for docker operations enforced above docker
const (
// ListContainersTimeout is the timeout for the ListContainers API.
- ListContainersTimeout = 10 * time.Minute
+ ListContainersTimeout = 10 * time.Minute
+ // LoadImageTimeout is the timeout for the LoadImage API. It's set
+	// to a much lower value than pullImageTimeout as it involves loading
+	// an image from either a file or STDIN, so there are no network
+	// calls involved.
+ // TODO: Benchmark and re-evaluate this value
+ LoadImageTimeout = 10 * time.Minute
pullImageTimeout = 2 * time.Hour
createContainerTimeout = 4 * time.Minute
startContainerTimeout = 3 * time.Minute | 1 | // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package engine
import (
"archive/tar"
"bufio"
"encoding/json"
"io"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"github.com/aws/amazon-ecs-agent/agent/api"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/ecr"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerauth"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/engine/dockeriface"
"github.com/aws/amazon-ecs-agent/agent/engine/emptyvolume"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"github.com/cihub/seelog"
docker "github.com/fsouza/go-dockerclient"
)
const (
dockerDefaultTag = "latest"
)
// Timelimits for docker operations enforced above docker
const (
// ListContainersTimeout is the timeout for the ListContainers API.
ListContainersTimeout = 10 * time.Minute
pullImageTimeout = 2 * time.Hour
createContainerTimeout = 4 * time.Minute
startContainerTimeout = 3 * time.Minute
stopContainerTimeout = 30 * time.Second
removeContainerTimeout = 5 * time.Minute
inspectContainerTimeout = 30 * time.Second
removeImageTimeout = 3 * time.Minute
// dockerPullBeginTimeout is the timeout from when a 'pull' is called to when
// we expect to see output on the pull progress stream. This is to work
// around a docker bug which sometimes results in pulls not progressing.
dockerPullBeginTimeout = 5 * time.Minute
// pullStatusSuppressDelay controls the time where pull status progress bar
// output will be suppressed in debug mode
pullStatusSuppressDelay = 2 * time.Second
// StatsInactivityTimeout controls the amount of time we hold open a
// connection to the Docker daemon waiting for stats data
StatsInactivityTimeout = 5 * time.Second
)
// DockerClient interface to make testing it easier
type DockerClient interface {
// SupportedVersions returns a slice of the supported docker versions (or at least supposedly supported).
SupportedVersions() []dockerclient.DockerVersion
// WithVersion returns a new DockerClient for which all operations will use the given remote api version.
// A default version will be used for a client not produced via this method.
WithVersion(dockerclient.DockerVersion) DockerClient
ContainerEvents(ctx context.Context) (<-chan DockerContainerChangeEvent, error)
PullImage(image string, authData *api.RegistryAuthenticationData) DockerContainerMetadata
CreateContainer(*docker.Config, *docker.HostConfig, string, time.Duration) DockerContainerMetadata
StartContainer(string, time.Duration) DockerContainerMetadata
StopContainer(string, time.Duration) DockerContainerMetadata
DescribeContainer(string) (api.ContainerStatus, DockerContainerMetadata)
RemoveContainer(string, time.Duration) error
InspectContainer(string, time.Duration) (*docker.Container, error)
ListContainers(bool, time.Duration) ListContainersResponse
Stats(string, context.Context) (<-chan *docker.Stats, error)
Version() (string, error)
InspectImage(string) (*docker.Image, error)
RemoveImage(string, time.Duration) error
}
// DockerGoClient wraps the underlying go-dockerclient library.
// It exists primarily for the following three purposes:
// 1) Provide an abstraction over inputs and outputs,
// a) Inputs: Trims them down to what we actually need (largely unchanged tbh)
// b) Outputs: Unifies error handling and the common 'start->inspect'
// pattern by having a consistent error output. This error output
// contains error data with a given Name that aims to be presentable as a
// 'reason' in state changes. It also filters out the information about a
// container that is of interest, such as network bindings, while
// ignoring the rest.
// 2) Timeouts: It adds timeouts everywhere, mostly as a reaction to
// pull-related issues in the Docker daemon.
// 3) Versioning: It abstracts over multiple client versions to allow juggling
// appropriately there.
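//
// A minimal sketch of the timeout pattern referred to in (2); the helper name
// doSomeDockerCall and the "example-op" label are placeholders, not part of
// this file's API:
//
//	response := make(chan DockerContainerMetadata, 1)
//	go func() { response <- doSomeDockerCall() }()
//	select {
//	case resp := <-response:
//		return resp
//	case <-ctx.Done():
//		return DockerContainerMetadata{Error: &DockerTimeoutError{timeout, "example-op"}}
//	}
//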
// Implements DockerClient
type dockerGoClient struct {
clientFactory dockerclient.Factory
version dockerclient.DockerVersion
auth dockerauth.DockerAuthProvider
ecrClientFactory ecr.ECRFactory
config *config.Config
_time ttime.Time
_timeOnce sync.Once
}
func (dg *dockerGoClient) WithVersion(version dockerclient.DockerVersion) DockerClient {
return &dockerGoClient{
clientFactory: dg.clientFactory,
version: version,
auth: dg.auth,
config: dg.config,
}
}
// scratchCreateLock guards against multiple 'scratch' image creations at once
var scratchCreateLock sync.Mutex
// NewDockerGoClient creates a new DockerGoClient
func NewDockerGoClient(clientFactory dockerclient.Factory, cfg *config.Config) (DockerClient, error) {
client, err := clientFactory.GetDefaultClient()
if err != nil {
log.Error("Unable to connect to docker daemon. Ensure docker is running.", "err", err)
return nil, err
}
// Even if we have a dockerclient, the daemon might not be running. Ping it
// to ensure it's up.
err = client.Ping()
if err != nil {
log.Error("Unable to ping docker daemon. Ensure docker is running.", "err", err)
return nil, err
}
var dockerAuthData json.RawMessage
if cfg.EngineAuthData != nil {
dockerAuthData = cfg.EngineAuthData.Contents()
}
return &dockerGoClient{
clientFactory: clientFactory,
auth: dockerauth.NewDockerAuthProvider(cfg.EngineAuthType, dockerAuthData),
ecrClientFactory: ecr.NewECRFactory(cfg.AcceptInsecureCert),
config: cfg,
}, nil
}
func (dg *dockerGoClient) dockerClient() (dockeriface.Client, error) {
if dg.version == "" {
return dg.clientFactory.GetDefaultClient()
}
return dg.clientFactory.GetClient(dg.version)
}
func (dg *dockerGoClient) time() ttime.Time {
dg._timeOnce.Do(func() {
if dg._time == nil {
dg._time = &ttime.DefaultTime{}
}
})
return dg._time
}
func (dg *dockerGoClient) PullImage(image string, authData *api.RegistryAuthenticationData) DockerContainerMetadata {
timeout := dg.time().After(pullImageTimeout)
response := make(chan DockerContainerMetadata, 1)
go func() { response <- dg.pullImage(image, authData) }()
select {
case resp := <-response:
return resp
case <-timeout:
return DockerContainerMetadata{Error: &DockerTimeoutError{pullImageTimeout, "pulled"}}
}
}
func (dg *dockerGoClient) pullImage(image string, authData *api.RegistryAuthenticationData) DockerContainerMetadata {
log.Debug("Pulling image", "image", image)
client, err := dg.dockerClient()
if err != nil {
return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
}
// Special case; this image is not one that should be pulled, but rather
// should be created locally if necessary
if image == emptyvolume.Image+":"+emptyvolume.Tag {
scratchErr := dg.createScratchImageIfNotExists()
if scratchErr != nil {
return DockerContainerMetadata{Error: &api.DefaultNamedError{Name: "CreateEmptyVolumeError", Err: "Could not create empty volume " + scratchErr.Error()}}
}
return DockerContainerMetadata{}
}
authConfig, err := dg.getAuthdata(image, authData)
if err != nil {
return DockerContainerMetadata{Error: CannotPullContainerError{err}}
}
pullDebugOut, pullWriter := io.Pipe()
defer pullWriter.Close()
repository, tag := parseRepositoryTag(image)
if tag == "" {
repository = repository + ":" + dockerDefaultTag
} else {
repository = image
}
opts := docker.PullImageOptions{
Repository: repository,
OutputStream: pullWriter,
}
timeout := dg.time().After(dockerPullBeginTimeout)
// pullBegan is a channel indicating that we have seen at least one line of data on the 'OutputStream' above.
	// It is here to guard against a bug wherein docker never writes anything to that channel and hangs in pulling forever.
pullBegan := make(chan bool, 1)
// pullBeganOnce ensures we only indicate it began once (since our channel will only be read 0 or 1 times)
pullBeganOnce := sync.Once{}
go func() {
reader := bufio.NewReader(pullDebugOut)
var line string
var pullErr error
var statusDisplayed time.Time
for pullErr == nil {
line, pullErr = reader.ReadString('\n')
if pullErr != nil {
break
}
pullBeganOnce.Do(func() {
pullBegan <- true
})
now := time.Now()
if !strings.Contains(line, "[=") || now.After(statusDisplayed.Add(pullStatusSuppressDelay)) {
// skip most of the progress bar lines, but retain enough for debugging
log.Debug("Pulling image", "image", image, "status", line)
statusDisplayed = now
}
if strings.Contains(line, "already being pulled by another client. Waiting.") {
// This can mean the daemon is 'hung' in pulling status for this image, but we can't be sure.
log.Error("Image 'pull' status marked as already being pulled", "image", image, "status", line)
}
}
if pullErr != nil && pullErr != io.EOF {
log.Warn("Error reading pull image status", "image", image, "err", pullErr)
}
}()
pullFinished := make(chan error, 1)
go func() {
pullFinished <- client.PullImage(opts, authConfig)
log.Debug("Pulling image complete", "image", image)
}()
select {
case <-pullBegan:
break
case pullErr := <-pullFinished:
if pullErr != nil {
return DockerContainerMetadata{Error: CannotPullContainerError{pullErr}}
}
return DockerContainerMetadata{}
case <-timeout:
return DockerContainerMetadata{Error: &DockerTimeoutError{dockerPullBeginTimeout, "pullBegin"}}
}
log.Debug("Pull began for image", "image", image)
defer log.Debug("Pull completed for image", "image", image)
err = <-pullFinished
if err != nil {
return DockerContainerMetadata{Error: CannotPullContainerError{err}}
}
return DockerContainerMetadata{}
}
func (dg *dockerGoClient) createScratchImageIfNotExists() error {
client, err := dg.dockerClient()
if err != nil {
return err
}
scratchCreateLock.Lock()
defer scratchCreateLock.Unlock()
_, err = client.InspectImage(emptyvolume.Image + ":" + emptyvolume.Tag)
if err == nil {
// Already exists; assume that it's okay to use it
return nil
}
reader, writer := io.Pipe()
emptytarball := tar.NewWriter(writer)
go func() {
emptytarball.Close()
writer.Close()
}()
// Create it from an empty tarball
err = client.ImportImage(docker.ImportImageOptions{
Repository: emptyvolume.Image,
Tag: emptyvolume.Tag,
Source: "-",
InputStream: reader,
})
return err
}
func (dg *dockerGoClient) InspectImage(image string) (*docker.Image, error) {
client, err := dg.dockerClient()
if err != nil {
return nil, err
}
return client.InspectImage(image)
}
func (dg *dockerGoClient) getAuthdata(image string, authData *api.RegistryAuthenticationData) (docker.AuthConfiguration, error) {
if authData == nil || authData.Type != "ecr" {
return dg.auth.GetAuthconfig(image)
}
provider := dockerauth.NewECRAuthProvider(authData.ECRAuthData, dg.ecrClientFactory)
authConfig, err := provider.GetAuthconfig(image)
if err != nil {
return authConfig, CannotPullECRContainerError{err}
}
return authConfig, nil
}
func (dg *dockerGoClient) CreateContainer(config *docker.Config, hostConfig *docker.HostConfig, name string, timeout time.Duration) DockerContainerMetadata {
// Create a context that times out after the 'timeout' duration
// This is defined by the const 'createContainerTimeout'. Injecting the 'timeout'
// makes it easier to write tests.
// Eventually, the context should be initialized from a parent root context
// instead of TODO.
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan DockerContainerMetadata, 1)
go func() { response <- dg.createContainer(ctx, config, hostConfig, name) }()
// Wait until we get a response or for the 'done' context channel
select {
case resp := <-response:
return resp
case <-ctx.Done():
// Context has either expired or canceled. If it has timed out,
// send back the DockerTimeoutError
err := ctx.Err()
if err == context.DeadlineExceeded {
return DockerContainerMetadata{Error: &DockerTimeoutError{timeout, "created"}}
}
// Context was canceled even though there was no timeout. Send
// back an error.
return DockerContainerMetadata{Error: &CannotCreateContainerError{err}}
}
}
func (dg *dockerGoClient) createContainer(ctx context.Context, config *docker.Config, hostConfig *docker.HostConfig, name string) DockerContainerMetadata {
client, err := dg.dockerClient()
if err != nil {
return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
}
containerOptions := docker.CreateContainerOptions{
Config: config,
HostConfig: hostConfig,
Name: name,
Context: ctx,
}
dockerContainer, err := client.CreateContainer(containerOptions)
if err != nil {
return DockerContainerMetadata{Error: CannotCreateContainerError{err}}
}
return dg.containerMetadata(dockerContainer.ID)
}
func (dg *dockerGoClient) StartContainer(id string, timeout time.Duration) DockerContainerMetadata {
// Create a context that times out after the 'timeout' duration
// This is defined by the const 'startContainerTimeout'. Injecting the 'timeout'
// makes it easier to write tests.
// Eventually, the context should be initialized from a parent root context
// instead of TODO.
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan DockerContainerMetadata, 1)
go func() { response <- dg.startContainer(ctx, id) }()
select {
case resp := <-response:
return resp
case <-ctx.Done():
// Context has either expired or canceled. If it has timed out,
// send back the DockerTimeoutError
err := ctx.Err()
if err == context.DeadlineExceeded {
return DockerContainerMetadata{Error: &DockerTimeoutError{timeout, "started"}}
}
return DockerContainerMetadata{Error: CannotStartContainerError{err}}
}
}
func (dg *dockerGoClient) startContainer(ctx context.Context, id string) DockerContainerMetadata {
client, err := dg.dockerClient()
if err != nil {
return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
}
err = client.StartContainerWithContext(id, nil, ctx)
metadata := dg.containerMetadata(id)
if err != nil {
metadata.Error = CannotStartContainerError{err}
}
return metadata
}
func dockerStateToState(state docker.State) api.ContainerStatus {
if state.Running {
return api.ContainerRunning
}
return api.ContainerStopped
}
func (dg *dockerGoClient) DescribeContainer(dockerID string) (api.ContainerStatus, DockerContainerMetadata) {
dockerContainer, err := dg.InspectContainer(dockerID, inspectContainerTimeout)
if err != nil {
return api.ContainerStatusNone, DockerContainerMetadata{Error: CannotDescribeContainerError{err}}
}
return dockerStateToState(dockerContainer.State), metadataFromContainer(dockerContainer)
}
func (dg *dockerGoClient) InspectContainer(dockerID string, timeout time.Duration) (*docker.Container, error) {
type inspectResponse struct {
container *docker.Container
err error
}
// Create a context that times out after the 'timeout' duration
// This is defined by the const 'inspectContainerTimeout'. Injecting the 'timeout'
// makes it easier to write tests.
// Eventually, the context should be initialized from a parent root context
// instead of TODO.
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan inspectResponse, 1)
go func() {
container, err := dg.inspectContainer(dockerID, ctx)
response <- inspectResponse{container, err}
}()
// Wait until we get a response or for the 'done' context channel
select {
case resp := <-response:
return resp.container, resp.err
case <-ctx.Done():
err := ctx.Err()
if err == context.DeadlineExceeded {
return nil, &DockerTimeoutError{timeout, "inspecting"}
}
return nil, &CannotInspectContainerError{err}
}
}
func (dg *dockerGoClient) inspectContainer(dockerID string, ctx context.Context) (*docker.Container, error) {
client, err := dg.dockerClient()
if err != nil {
return nil, err
}
return client.InspectContainerWithContext(dockerID, ctx)
}
func (dg *dockerGoClient) StopContainer(dockerID string, timeout time.Duration) DockerContainerMetadata {
timeout = timeout + dg.config.DockerStopTimeout
// Create a context that times out after the 'timeout' duration
// This is defined by the const 'stopContainerTimeout' and the
// 'DockerStopTimeout' in the config. Injecting the 'timeout'
// makes it easier to write tests.
// Eventually, the context should be initialized from a parent root context
// instead of TODO.
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan DockerContainerMetadata, 1)
go func() { response <- dg.stopContainer(ctx, dockerID) }()
select {
case resp := <-response:
return resp
case <-ctx.Done():
// Context has either expired or canceled. If it has timed out,
// send back the DockerTimeoutError
err := ctx.Err()
if err == context.DeadlineExceeded {
return DockerContainerMetadata{Error: &DockerTimeoutError{timeout, "stopped"}}
}
return DockerContainerMetadata{Error: &CannotStopContainerError{err}}
}
}
func (dg *dockerGoClient) stopContainer(ctx context.Context, dockerID string) DockerContainerMetadata {
client, err := dg.dockerClient()
if err != nil {
return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
}
err = client.StopContainerWithContext(dockerID, uint(dg.config.DockerStopTimeout/time.Second), ctx)
metadata := dg.containerMetadata(dockerID)
if err != nil {
log.Debug("Error stopping container", "err", err, "id", dockerID)
if metadata.Error == nil {
metadata.Error = CannotStopContainerError{err}
}
}
return metadata
}
func (dg *dockerGoClient) RemoveContainer(dockerID string, timeout time.Duration) error {
	// Create a context that times out after the 'timeout' duration
// This is defined by 'removeContainerTimeout'. 'timeout' makes it
// easier to write tests
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan error, 1)
go func() { response <- dg.removeContainer(dockerID, ctx) }()
// Wait until we get a response or for the 'done' context channel
select {
case resp := <-response:
return resp
case <-ctx.Done():
err := ctx.Err()
// Context has either expired or canceled. If it has timed out,
// send back the DockerTimeoutError
if err == context.DeadlineExceeded {
return &DockerTimeoutError{removeContainerTimeout, "removing"}
}
return &CannotRemoveContainerError{err}
}
}
func (dg *dockerGoClient) removeContainer(dockerID string, ctx context.Context) error {
client, err := dg.dockerClient()
if err != nil {
return err
}
return client.RemoveContainer(docker.RemoveContainerOptions{
ID: dockerID,
RemoveVolumes: true,
Force: false,
Context: ctx,
})
}
func (dg *dockerGoClient) containerMetadata(id string) DockerContainerMetadata {
dockerContainer, err := dg.InspectContainer(id, inspectContainerTimeout)
if err != nil {
return DockerContainerMetadata{DockerID: id, Error: CannotInspectContainerError{err}}
}
return metadataFromContainer(dockerContainer)
}
func metadataFromContainer(dockerContainer *docker.Container) DockerContainerMetadata {
var bindings []api.PortBinding
var err api.NamedError
if dockerContainer.NetworkSettings != nil {
// Convert port bindings into the format our container expects
bindings, err = api.PortBindingFromDockerPortBinding(dockerContainer.NetworkSettings.Ports)
if err != nil {
log.Crit("Docker had network bindings we couldn't understand", "err", err)
return DockerContainerMetadata{Error: api.NamedError(err)}
}
}
metadata := DockerContainerMetadata{
DockerID: dockerContainer.ID,
PortBindings: bindings,
Volumes: dockerContainer.Volumes,
}
// Workaround for https://github.com/docker/docker/issues/27601
// See https://github.com/docker/docker/blob/v1.12.2/daemon/inspect_unix.go#L38-L43
// for how Docker handles API compatibility on Linux
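	// Illustrative example: a mount {Destination: "/data", Source: "/var/lib/docker/volumes/abc/_data"}
	// is surfaced below as metadata.Volumes["/data"] = "/var/lib/docker/volumes/abc/_data"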
if len(metadata.Volumes) == 0 {
metadata.Volumes = make(map[string]string)
for _, m := range dockerContainer.Mounts {
metadata.Volumes[m.Destination] = m.Source
}
}
if !dockerContainer.State.Running && !dockerContainer.State.FinishedAt.IsZero() {
// Only record an exitcode if it has exited
metadata.ExitCode = &dockerContainer.State.ExitCode
}
if dockerContainer.State.Error != "" {
metadata.Error = NewDockerStateError(dockerContainer.State.Error)
}
if dockerContainer.State.OOMKilled {
metadata.Error = OutOfMemoryError{}
}
return metadata
}
// Listen to the docker event stream for container changes and pass them up
func (dg *dockerGoClient) ContainerEvents(ctx context.Context) (<-chan DockerContainerChangeEvent, error) {
client, err := dg.dockerClient()
if err != nil {
return nil, err
}
events := make(chan *docker.APIEvents)
err = client.AddEventListener(events)
if err != nil {
log.Error("Unable to add a docker event listener", "err", err)
return nil, err
}
go func() {
<-ctx.Done()
client.RemoveEventListener(events)
}()
changedContainers := make(chan DockerContainerChangeEvent)
go func() {
for event := range events {
// currently only container events type needs to be handled
if event.Type != "container" || event.ID == "" {
continue
}
containerID := event.ID
log.Debug("Got event from docker daemon", "event", event)
var status api.ContainerStatus
switch event.Status {
case "create":
status = api.ContainerCreated
case "start":
status = api.ContainerRunning
case "stop":
fallthrough
case "die":
status = api.ContainerStopped
case "kill":
fallthrough
case "rename":
// TODO, ensure this wasn't one of our containers. This isn't critical
// because we typically have the docker id stored too and a wrong name
// won't be fatal once we do
continue
case "restart":
case "resize":
case "destroy":
case "unpause":
// These result in us falling through to inspect the container, some
// out of caution, some because it's a form of state change
case "oom":
seelog.Infof("process within container %v died due to OOM", event.ID)
// "oom" can either means any process got OOM'd, but doesn't always
// mean the container dies (non-init processes). If the container also
// dies, you see a "die" status as well; we'll update suitably there
fallthrough
case "pause":
// non image events that aren't of interest currently
fallthrough
case "exec_create":
fallthrough
case "exec_start":
fallthrough
case "top":
fallthrough
case "attach":
fallthrough
// image events
case "export":
fallthrough
case "pull":
fallthrough
case "push":
fallthrough
case "tag":
fallthrough
case "untag":
fallthrough
case "import":
fallthrough
case "delete":
// No interest in image events
continue
default:
if strings.HasPrefix(event.Status, "exec_create:") || strings.HasPrefix(event.Status, "exec_start:") {
continue
}
// Because docker emits new events even when you use an old event api
// version, it's not that big a deal
seelog.Debugf("Unknown status event from docker: %s", event.Status)
}
metadata := dg.containerMetadata(containerID)
changedContainers <- DockerContainerChangeEvent{
Status: status,
DockerContainerMetadata: metadata,
}
}
}()
return changedContainers, nil
}
// ListContainers returns a slice of container IDs.
func (dg *dockerGoClient) ListContainers(all bool, timeout time.Duration) ListContainersResponse {
// Create a context that times out after the 'timeout' duration
// This is defined by the const 'listContainersTimeout'. Injecting the 'timeout'
// makes it easier to write tests.
// Eventually, the context should be initialized from a parent root context
// instead of TODO.
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan ListContainersResponse, 1)
go func() { response <- dg.listContainers(all, ctx) }()
select {
case resp := <-response:
return resp
case <-ctx.Done():
// Context has either expired or canceled. If it has timed out,
// send back the DockerTimeoutError
err := ctx.Err()
if err == context.DeadlineExceeded {
return ListContainersResponse{Error: &DockerTimeoutError{timeout, "listing"}}
}
return ListContainersResponse{Error: &CannotListContainersError{err}}
}
}
func (dg *dockerGoClient) listContainers(all bool, ctx context.Context) ListContainersResponse {
client, err := dg.dockerClient()
if err != nil {
return ListContainersResponse{Error: err}
}
containers, err := client.ListContainers(docker.ListContainersOptions{
All: all,
Context: ctx,
})
if err != nil {
return ListContainersResponse{Error: err}
}
// We get an empty slice if there are no containers to be listed.
// Extract container IDs from this list.
containerIDs := make([]string, len(containers))
for i, container := range containers {
containerIDs[i] = container.ID
}
return ListContainersResponse{DockerIDs: containerIDs, Error: nil}
}
func (dg *dockerGoClient) SupportedVersions() []dockerclient.DockerVersion {
return dg.clientFactory.FindAvailableVersions()
}
func (dg *dockerGoClient) Version() (string, error) {
client, err := dg.dockerClient()
if err != nil {
return "", err
}
info, err := client.Version()
if err != nil {
return "", err
}
return info.Get("Version"), nil
}
// Stats returns a channel of *docker.Stats entries for the container.
func (dg *dockerGoClient) Stats(id string, ctx context.Context) (<-chan *docker.Stats, error) {
client, err := dg.dockerClient()
if err != nil {
return nil, err
}
stats := make(chan *docker.Stats)
options := docker.StatsOptions{
ID: id,
Stats: stats,
Stream: true,
Context: ctx,
InactivityTimeout: StatsInactivityTimeout,
}
go func() {
statsErr := client.Stats(options)
if statsErr != nil {
seelog.Infof("Error retrieving stats for container %s: %v", id, statsErr)
}
}()
return stats, nil
}
func (dg *dockerGoClient) RemoveImage(imageName string, imageRemovalTimeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.Background(), imageRemovalTimeout)
defer cancel()
response := make(chan error, 1)
go func() { response <- dg.removeImage(imageName) }()
select {
case resp := <-response:
return resp
case <-ctx.Done():
return &DockerTimeoutError{imageRemovalTimeout, "removing image"}
}
}
func (dg *dockerGoClient) removeImage(imageName string) error {
client, err := dg.dockerClient()
if err != nil {
return err
}
return client.RemoveImage(imageName)
}
| 1 | 15,539 | Is this line needed? | aws-amazon-ecs-agent | go |
@@ -20,6 +20,8 @@ import (
"context"
"encoding/json"
"fmt"
+ "github.com/google/knative-gcp/pkg/apis/events/v1alpha1"
+ "github.com/google/knative-gcp/pkg/apis/events/v1beta1"
"net/url"
"os"
"time" | 1 | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helpers
import (
"context"
"encoding/json"
"fmt"
"net/url"
"os"
"time"
"cloud.google.com/go/pubsub"
v1 "k8s.io/api/core/v1"
eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
eventingtestlib "knative.dev/eventing/test/lib"
eventingtestresources "knative.dev/eventing/test/lib/resources"
"knative.dev/pkg/apis"
"knative.dev/pkg/test/helpers"
// The following line to load the gcp plugin (only required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
kngcptesting "github.com/google/knative-gcp/pkg/reconciler/testing"
"github.com/google/knative-gcp/test/e2e/lib"
"github.com/google/knative-gcp/test/e2e/lib/resources"
)
/*
BrokerEventTransformationTestHelper provides the helper methods which test the following scenario:
5 4
------------------ --------------------
| | | |
1 v 2 | v 3 |
(Sender) ---> Broker ---> dummyTrigger -------> Knative Service(Receiver)
|
| 6 7
|-------> respTrigger -------> Service(Target)
Note: the number denotes the sequence of the event that flows in this test case.
*/
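// A rough sketch of how a test might drive this helper; the Setup/TearDown
// calls, broker address and broker name below are illustrative assumptions
// rather than part of this package's API:
//
//	func TestBrokerEventTransformation(t *testing.T) {
//		client := lib.Setup(t, true) // assumed e2e client constructor
//		defer lib.TearDown(client)
//		brokerURL := url.URL{Scheme: "http", Host: "example-broker-ingress.events-system.svc.cluster.local"}
//		BrokerEventTransformationTestHelper(client, brokerURL, "example-broker")
//	}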
func BrokerEventTransformationTestHelper(client *lib.Client, brokerURL url.URL, brokerName string) {
client.T.Helper()
senderName := helpers.AppendRandomString("sender")
targetName := helpers.AppendRandomString("target")
// Create a target Job to receive the events.
makeTargetJobOrDie(client, targetName)
createTriggersAndKService(client, brokerName, targetName)
// Just to make sure all resources are ready.
time.Sleep(5 * time.Second)
	// Create a sender Job to send the event.
senderJob := resources.SenderJob(senderName, []v1.EnvVar{{
Name: "BROKER_URL",
Value: brokerURL.String(),
}})
client.CreateJobOrFail(senderJob)
// Check if dummy CloudEvent is sent out.
if done := jobDone(client, senderName); !done {
client.T.Error("dummy event wasn't sent to broker")
client.T.Failed()
}
// Check if resp CloudEvent hits the target Service.
if done := jobDone(client, targetName); !done {
client.T.Error("resp event didn't hit the target pod")
client.T.Failed()
}
}
func BrokerEventTransformationTestWithPubSubSourceHelper(client *lib.Client, authConfig lib.AuthConfig, brokerURL url.URL, brokerName string) {
client.T.Helper()
topicName, deleteTopic := lib.MakeTopicOrDie(client.T)
defer deleteTopic()
psName := helpers.AppendRandomString(topicName + "-pubsub")
targetName := helpers.AppendRandomString(topicName + "-target")
// Create a target Job to receive the events.
makeTargetJobOrDie(client, targetName)
createTriggersAndKService(client, brokerName, targetName)
var url apis.URL = apis.URL(brokerURL)
// Just to make sure all resources are ready.
time.Sleep(5 * time.Second)
// Create the PubSub source.
lib.MakePubSubOrDie(client,
lib.ServiceGVK,
psName,
targetName,
topicName,
authConfig.PubsubServiceAccount,
kngcptesting.WithCloudPubSubSourceSinkURI(&url),
)
topic := lib.GetTopic(client.T, topicName)
r := topic.Publish(context.TODO(), &pubsub.Message{
Attributes: map[string]string{
"target": "falldown",
},
Data: []byte(`{"foo":bar}`),
})
_, err := r.Get(context.TODO())
if err != nil {
client.T.Logf("%s", err)
}
// Check if resp CloudEvent hits the target Service.
if done := jobDone(client, targetName); !done {
client.T.Error("resp event didn't hit the target pod")
client.T.Failed()
}
}
func BrokerEventTransformationTestWithStorageSourceHelper(client *lib.Client, authConfig lib.AuthConfig, brokerURL url.URL, brokerName string) {
client.T.Helper()
ctx := context.Background()
project := os.Getenv(lib.ProwProjectKey)
bucketName := lib.MakeBucket(ctx, client.T, project)
storageName := helpers.AppendRandomString(bucketName + "-storage")
targetName := helpers.AppendRandomString(bucketName + "-target")
fileName := helpers.AppendRandomString("test-file-for-storage")
// Create a target StorageJob to receive the events.
lib.MakeStorageJobOrDie(client, fileName, targetName)
createTriggersAndKService(client, brokerName, targetName)
var url apis.URL = apis.URL(brokerURL)
// Just to make sure all resources are ready.
time.Sleep(5 * time.Second)
// Create the Storage source.
lib.MakeStorageOrDie(
client,
bucketName,
storageName,
targetName,
authConfig.PubsubServiceAccount,
kngcptesting.WithCloudStorageSourceSinkURI(&url),
)
// Add a random name file in the bucket
lib.AddRandomFile(ctx, client.T, bucketName, fileName, project)
// Check if resp CloudEvent hits the target Service.
if done := jobDone(client, targetName); !done {
client.T.Error("resp event didn't hit the target pod")
}
}
func BrokerEventTransformationTestWithAuditLogsSourceHelper(client *lib.Client, authConfig lib.AuthConfig, brokerURL url.URL, brokerName string) {
client.T.Helper()
project := os.Getenv(lib.ProwProjectKey)
auditlogsName := helpers.AppendRandomString("auditlogs-e2e-test")
targetName := helpers.AppendRandomString(auditlogsName + "-target")
topicName := helpers.AppendRandomString(auditlogsName + "-topic")
resourceName := fmt.Sprintf("projects/%s/topics/%s", project, topicName)
// Create a target Job to receive the events.
lib.MakeAuditLogsJobOrDie(client, lib.PubSubCreateTopicMethodName, project, resourceName, lib.PubSubServiceName, targetName)
createTriggersAndKService(client, brokerName, targetName)
var url apis.URL = apis.URL(brokerURL)
// Just to make sure all resources are ready.
time.Sleep(5 * time.Second)
// Create the CloudAuditLogsSource.
lib.MakeAuditLogsOrDie(client,
auditlogsName,
lib.PubSubCreateTopicMethodName,
project,
resourceName,
lib.PubSubServiceName,
targetName,
authConfig.PubsubServiceAccount,
kngcptesting.WithCloudAuditLogsSourceSinkURI(&url),
)
client.Core.WaitForResourceReadyOrFail(auditlogsName, lib.CloudAuditLogsSourceTypeMeta)
// Audit logs source misses the topic which gets created shortly after the source becomes ready. Need to wait for a few seconds.
// Tried with 45 seconds but the test has been quite flaky.
time.Sleep(90 * time.Second)
topicName, deleteTopic := lib.MakeTopicWithNameOrDie(client.T, topicName)
defer deleteTopic()
// Check if resp CloudEvent hits the target Service.
if done := jobDone(client, targetName); !done {
client.T.Error("resp event didn't hit the target pod")
client.T.Failed()
}
}
func BrokerEventTransformationTestWithSchedulerSourceHelper(client *lib.Client, authConfig lib.AuthConfig, brokerURL url.URL, brokerName string) {
client.T.Helper()
data := "my test data"
targetName := "event-display"
sName := "scheduler-test"
// Create a target Job to receive the events.
lib.MakeSchedulerJobOrDie(client, data, targetName)
createTriggersAndKService(client, brokerName, targetName)
var url apis.URL = apis.URL(brokerURL)
// Just to make sure all resources are ready.
time.Sleep(5 * time.Second)
// Create the CloudSchedulerSource.
lib.MakeSchedulerOrDie(client, sName, data, targetName, authConfig.PubsubServiceAccount,
kngcptesting.WithCloudSchedulerSourceSinkURI(&url),
)
// Check if resp CloudEvent hits the target Service.
if done := jobDone(client, targetName); !done {
client.T.Error("resp event didn't hit the target pod")
client.T.Failed()
}
}
func CreateKService(client *lib.Client) string {
client.T.Helper()
kserviceName := helpers.AppendRandomString("kservice")
// Create the Knative Service.
kservice := resources.ReceiverKService(
kserviceName, client.Namespace)
client.CreateUnstructuredObjOrFail(kservice)
return kserviceName
}
func createTriggerWithKServiceSubscriber(client *lib.Client, brokerName, kserviceName string) {
client.T.Helper()
// Please refer to the graph in the file to check what dummy trigger is used for.
dummyTriggerName := "dummy-broker-" + brokerName
client.Core.CreateTriggerOrFail(
dummyTriggerName,
eventingtestresources.WithBroker(brokerName),
eventingtestresources.WithAttributesTriggerFilter(
eventingv1alpha1.TriggerAnyFilter, eventingv1alpha1.TriggerAnyFilter,
map[string]interface{}{"type": "e2e-testing-dummy"}),
eventingtestresources.WithSubscriberServiceRefForTrigger(kserviceName),
)
}
func createTriggerWithTargetServiceSubscriber(client *lib.Client, brokerName, targetName string) {
client.T.Helper()
respTriggerName := "resp-broker-" + brokerName
client.Core.CreateTriggerOrFail(
respTriggerName,
eventingtestresources.WithBroker(brokerName),
eventingtestresources.WithAttributesTriggerFilter(
eventingv1alpha1.TriggerAnyFilter, eventingv1alpha1.TriggerAnyFilter,
map[string]interface{}{"type": "e2e-testing-resp"}),
eventingtestresources.WithSubscriberServiceRefForTrigger(targetName),
)
}
func createTriggersAndKService(client *lib.Client, brokerName, targetName string) {
client.T.Helper()
// Create the Knative Service.
kserviceName := CreateKService(client)
// Create a Trigger with the Knative Service subscriber.
createTriggerWithKServiceSubscriber(client, brokerName, kserviceName)
// Create a Trigger with the target Service subscriber.
createTriggerWithTargetServiceSubscriber(client, brokerName, targetName)
// Wait for ksvc, trigger ready.
client.Core.WaitForResourceReadyOrFail(kserviceName, lib.KsvcTypeMeta)
client.Core.WaitForResourcesReadyOrFail(eventingtestlib.TriggerTypeMeta)
}
func makeTargetJobOrDie(client *lib.Client, targetName string) {
client.T.Helper()
job := resources.TargetJob(targetName, []v1.EnvVar{{
Name: "TARGET",
Value: "falldown",
}})
client.CreateJobOrFail(job, lib.WithServiceForJob(targetName))
}
func jobDone(client *lib.Client, podName string) bool {
client.T.Helper()
msg, err := client.WaitUntilJobDone(client.Namespace, podName)
if err != nil {
client.T.Error(err)
return false
}
if msg == "" {
client.T.Error("No terminating message from the pod")
return false
}
out := &lib.TargetOutput{}
if err := json.Unmarshal([]byte(msg), out); err != nil {
client.T.Error(err)
return false
}
if !out.Success {
if logs, err := client.LogsFor(client.Namespace, podName, lib.JobTypeMeta); err != nil {
client.T.Error(err)
} else {
client.T.Logf("job: %s\n", logs)
}
return false
}
return true
}
| 1 | 14,063 | nit: import formatter | google-knative-gcp | go |
@@ -118,9 +118,18 @@ namespace AutoRest.Swagger
headerType = null;
}
+
// Response format
List<Stack<IModelType>> typesList = BuildResponses(method, headerType);
+            // if each of the responses has explicitly set its nullable property to false, we will have to consider it
+ bool isNonNullableReturnType = true;
+ _operation.Responses.Keys.ForEach(key =>
+ {
+ isNonNullableReturnType = isNonNullableReturnType && !(_operation.Responses[key].Extensions?.Get<bool>("x-nullable") ?? true);
+ });
+ method.IsXNullableReturnType = !(isNonNullableReturnType);
+
method.ReturnType = BuildMethodReturnType(typesList, headerType);
if (method.Responses.Count == 0)
{ | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using System.Net;
using System.Text;
using AutoRest.Core.Model;
using AutoRest.Core.Logging;
using AutoRest.Core.Utilities;
using AutoRest.Swagger.Model;
using AutoRest.Swagger.Properties;
using ParameterLocation = AutoRest.Swagger.Model.ParameterLocation;
using static AutoRest.Core.Utilities.DependencyInjection;
namespace AutoRest.Swagger
{
/// <summary>
/// The builder for building swagger operations into client model methods.
/// </summary>
public class OperationBuilder
{
private IList<string> _effectiveProduces;
private IList<string> _effectiveConsumes;
private SwaggerModeler _swaggerModeler;
private Operation _operation;
private const string APP_JSON_MIME = "application/json";
public OperationBuilder(Operation operation, SwaggerModeler swaggerModeler)
{
if (operation == null)
{
throw new ArgumentNullException("operation");
}
if (swaggerModeler == null)
{
throw new ArgumentNullException("swaggerModeler");
}
this._operation = operation;
this._swaggerModeler = swaggerModeler;
this._effectiveProduces = operation.Produces.Any() ? operation.Produces : swaggerModeler.ServiceDefinition.Produces;
this._effectiveConsumes = operation.Consumes.Any() ? operation.Consumes : swaggerModeler.ServiceDefinition.Consumes;
}
public Method BuildMethod(HttpMethod httpMethod, string url, string methodName, string methodGroup)
{
EnsureUniqueMethodName(methodName, methodGroup);
var method = New<Method>(new
{
HttpMethod = httpMethod,
Url = url,
Name = methodName,
SerializedName = _operation.OperationId
});
method.RequestContentType = _effectiveConsumes.FirstOrDefault() ?? APP_JSON_MIME;
string produce = _effectiveConsumes.FirstOrDefault(s => s.StartsWith(APP_JSON_MIME, StringComparison.OrdinalIgnoreCase));
if (!string.IsNullOrEmpty(produce))
{
method.RequestContentType = produce;
}
if (method.RequestContentType.StartsWith(APP_JSON_MIME, StringComparison.OrdinalIgnoreCase) &&
method.RequestContentType.IndexOf("charset=", StringComparison.OrdinalIgnoreCase) == -1)
{
// Enable UTF-8 charset
method.RequestContentType += "; charset=utf-8";
}
method.Description = _operation.Description;
method.Summary = _operation.Summary;
method.ExternalDocsUrl = _operation.ExternalDocs?.Url;
method.Deprecated = _operation.Deprecated;
// Service parameters
if (_operation.Parameters != null)
{
BuildMethodParameters(method);
}
// Build header object
var responseHeaders = new Dictionary<string, Header>();
foreach (var response in _operation.Responses.Values)
{
if (response.Headers != null)
{
response.Headers.ForEach(h => responseHeaders[h.Key] = h.Value);
}
}
var headerTypeName = string.Format(CultureInfo.InvariantCulture,
"{0}-{1}-Headers", methodGroup, methodName).Trim('-');
var headerType = New<CompositeType>(headerTypeName,new
{
SerializedName = headerTypeName,
Documentation = string.Format(CultureInfo.InvariantCulture, "Defines headers for {0} operation.", methodName)
});
responseHeaders.ForEach(h =>
{
var property = New<Property>(new
{
Name = h.Key,
SerializedName = h.Key,
ModelType = h.Value.GetBuilder(this._swaggerModeler).BuildServiceType(h.Key),
Documentation = h.Value.Description
});
headerType.Add(property);
});
if (!headerType.Properties.Any())
{
headerType = null;
}
// Response format
List<Stack<IModelType>> typesList = BuildResponses(method, headerType);
method.ReturnType = BuildMethodReturnType(typesList, headerType);
if (method.Responses.Count == 0)
{
method.ReturnType = method.DefaultResponse;
}
if (method.ReturnType.Headers != null)
{
_swaggerModeler.CodeModel.AddHeader(method.ReturnType.Headers as CompositeType);
}
// Copy extensions
            _operation.Extensions.ForEach(extension => method.Extensions.Add(extension.Key, extension.Value));
return method;
}
private static IEnumerable<SwaggerParameter> DeduplicateParameters(IEnumerable<SwaggerParameter> parameters)
{
return parameters
.Select(s =>
{
// if parameter with the same name exists in Body and Path/Query then we need to give it a unique name
if (s.In == ParameterLocation.Body)
{
string newName = s.Name;
while (parameters.Any(t => t.In != ParameterLocation.Body &&
string.Equals(t.Name, newName,
StringComparison.OrdinalIgnoreCase)))
{
newName += "Body";
}
s.Name = newName;
}
// if parameter with same name exists in Query and Path, make Query one required
if (s.In == ParameterLocation.Query &&
parameters.Any(t => t.In == ParameterLocation.Path &&
t.Name.EqualsIgnoreCase(s.Name)))
{
s.IsRequired = true;
}
return s;
});
}
private static void BuildMethodReturnTypeStack(IModelType type, List<Stack<IModelType>> types)
{
var typeStack = new Stack<IModelType>();
typeStack.Push(type);
types.Add(typeStack);
}
private void BuildMethodParameters(Method method)
{
foreach (var swaggerParameter in DeduplicateParameters(_operation.Parameters))
{
var parameter = ((ParameterBuilder)swaggerParameter.GetBuilder(_swaggerModeler)).Build();
method.Add(parameter);
StringBuilder parameterName = new StringBuilder(parameter.Name);
parameterName = CollectionFormatBuilder.OnBuildMethodParameter(method, swaggerParameter,
parameterName);
if (swaggerParameter.In == ParameterLocation.Header)
{
method.RequestHeaders[swaggerParameter.Name] =
string.Format(CultureInfo.InvariantCulture, "{{{0}}}", parameterName);
}
}
}
private List<Stack<IModelType>> BuildResponses(Method method, CompositeType headerType)
{
string methodName = method.Name;
var typesList = new List<Stack<IModelType>>();
foreach (var response in _operation.Responses)
{
if (response.Key.EqualsIgnoreCase("default"))
{
TryBuildDefaultResponse(methodName, response.Value, method, headerType);
}
else
{
if (
!(TryBuildResponse(methodName, response.Key.ToHttpStatusCode(), response.Value, method,
typesList, headerType) ||
TryBuildStreamResponse(response.Key.ToHttpStatusCode(), response.Value, method, typesList, headerType) ||
TryBuildEmptyResponse(methodName, response.Key.ToHttpStatusCode(), response.Value, method,
typesList, headerType)))
{
throw new InvalidOperationException(
string.Format(CultureInfo.InvariantCulture,
Resources.UnsupportedMimeTypeForResponseBody,
methodName,
response.Key));
}
}
}
return typesList;
}
private Response BuildMethodReturnType(List<Stack<IModelType>> types, IModelType headerType)
{
IModelType baseType = New<PrimaryType>(KnownPrimaryType.Object);
// Return null if no response is specified
if (types.Count == 0)
{
return new Response(null, headerType);
}
// Return first if only one return type
if (types.Count == 1)
{
return new Response(types.First().Pop(), headerType);
}
// BuildParameter up type inheritance tree
types.ForEach(typeStack =>
{
IModelType type = typeStack.Peek();
while (!Equals(type, baseType))
{
if (type is CompositeType && _swaggerModeler.ExtendedTypes.ContainsKey(type.Name.RawValue))
{
type = _swaggerModeler.GeneratedTypes[_swaggerModeler.ExtendedTypes[type.Name.RawValue]];
}
else
{
type = baseType;
}
typeStack.Push(type);
}
});
// Eliminate commonly shared base classes
while (!types.First().IsNullOrEmpty())
{
IModelType currentType = types.First().Peek();
foreach (var typeStack in types)
{
IModelType t = typeStack.Pop();
if (!t.StructurallyEquals(currentType))
{
return new Response(baseType, headerType);
}
}
baseType = currentType;
}
return new Response(baseType, headerType);
}
private bool TryBuildStreamResponse(HttpStatusCode responseStatusCode, OperationResponse response,
Method method, List<Stack<IModelType>> types, IModelType headerType)
{
bool handled = false;
if (SwaggerOperationProducesNotEmpty())
{
if (response.Schema != null)
{
IModelType serviceType = response.Schema.GetBuilder(_swaggerModeler)
.BuildServiceType(response.Schema.Reference.StripDefinitionPath());
Debug.Assert(serviceType != null);
BuildMethodReturnTypeStack(serviceType, types);
var compositeType = serviceType as CompositeType;
if (compositeType != null)
{
VerifyFirstPropertyIsByteArray(compositeType);
}
method.Responses[responseStatusCode] = new Response(serviceType, headerType);
handled = true;
}
}
return handled;
}
private void VerifyFirstPropertyIsByteArray(CompositeType serviceType)
{
var referenceKey = serviceType.Name.RawValue;
var responseType = _swaggerModeler.GeneratedTypes[referenceKey];
var property = responseType.Properties.FirstOrDefault(p => p.ModelType is PrimaryType && ((PrimaryType)p.ModelType).KnownPrimaryType == KnownPrimaryType.ByteArray);
if (property == null)
{
throw new KeyNotFoundException(
"Please specify a field with type of System.Byte[] to deserialize the file contents to");
}
}
private bool TryBuildResponse(string methodName, HttpStatusCode responseStatusCode,
OperationResponse response, Method method, List<Stack<IModelType>> types, IModelType headerType)
{
bool handled = false;
IModelType serviceType;
if (SwaggerOperationProducesJson())
{
if (TryBuildResponseBody(methodName, response,
s => GenerateResponseObjectName(s, responseStatusCode), out serviceType))
{
method.Responses[responseStatusCode] = new Response(serviceType, headerType);
BuildMethodReturnTypeStack(serviceType, types);
handled = true;
}
}
return handled;
}
private bool TryBuildEmptyResponse(string methodName, HttpStatusCode responseStatusCode,
OperationResponse response, Method method, List<Stack<IModelType>> types, IModelType headerType)
{
bool handled = false;
if (response.Schema == null)
{
method.Responses[responseStatusCode] = new Response(null, headerType);
handled = true;
}
else
{
if (_operation.Produces.IsNullOrEmpty())
{
method.Responses[responseStatusCode] = new Response(New<PrimaryType>(KnownPrimaryType.Object), headerType);
BuildMethodReturnTypeStack(New<PrimaryType>(KnownPrimaryType.Object), types);
handled = true;
}
var unwrapedSchemaProperties =
_swaggerModeler.Resolver.Unwrap(response.Schema).Properties;
if (unwrapedSchemaProperties != null && unwrapedSchemaProperties.Any())
{
Logger.Instance.Log(Category.Warning, Resources.NoProduceOperationWithBody,
methodName);
}
}
return handled;
}
private void TryBuildDefaultResponse(string methodName, OperationResponse response, Method method, IModelType headerType)
{
IModelType errorModel = null;
if (SwaggerOperationProducesJson())
{
if (TryBuildResponseBody(methodName, response, s => GenerateErrorModelName(s), out errorModel))
{
method.DefaultResponse = new Response(errorModel, headerType);
}
}
}
private bool TryBuildResponseBody(string methodName, OperationResponse response,
Func<string, string> typeNamer, out IModelType responseType)
{
bool handled = false;
responseType = null;
if (SwaggerOperationProducesJson())
{
if (response.Schema != null)
{
string referenceKey;
if (response.Schema.Reference != null)
{
referenceKey = response.Schema.Reference.StripDefinitionPath();
response.Schema.Reference = referenceKey;
}
else
{
referenceKey = typeNamer(methodName);
}
responseType = response.Schema.GetBuilder(_swaggerModeler).BuildServiceType(referenceKey);
handled = true;
}
}
return handled;
}
private bool SwaggerOperationProducesJson()
{
return _effectiveProduces != null &&
_effectiveProduces.Any(s => s.StartsWith(APP_JSON_MIME, StringComparison.OrdinalIgnoreCase));
}
private bool SwaggerOperationProducesNotEmpty()
{
return _effectiveProduces != null
&& _effectiveProduces.Any();
}
private void EnsureUniqueMethodName(string methodName, string methodGroup)
{
string serviceOperationPrefix = "";
if (methodGroup != null)
{
serviceOperationPrefix = methodGroup + "_";
}
if (_swaggerModeler.CodeModel.Methods.Any(m => m.Group == methodGroup && m.Name == methodName))
{
throw new ArgumentException(string.Format(CultureInfo.InvariantCulture,
Resources.DuplicateOperationIdException,
serviceOperationPrefix + methodName));
}
}
private static string GenerateResponseObjectName(string methodName, HttpStatusCode responseStatusCode)
{
return string.Format(CultureInfo.InvariantCulture,
"{0}{1}Response", methodName, responseStatusCode);
}
private static string GenerateErrorModelName(string methodName)
{
return string.Format(CultureInfo.InvariantCulture,
"{0}ErrorModel", methodName);
}
}
}
| 1 | 23,647 | can you use LINQ's `All` instead? Should be a side-effect-free one-liner with that. | Azure-autorest | java
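A purely illustrative C# sketch of what the reviewer's `All` suggestion generally looks like; the exact loop under review is not shown in this row, and the collection and predicate names below are assumptions made only for the example.

using System;
using System.Collections.Generic;
using System.Linq;

class LinqAllSketch
{
    static void Main()
    {
        // Hypothetical data standing in for whatever collection the reviewed loop walks.
        var produces = new List<string> { "application/json", "application/json; charset=utf-8" };

        // Instead of a foreach that mutates a bool flag, All expresses the same check
        // as a single side-effect-free expression.
        bool allJson = produces.All(
            p => p.StartsWith("application/json", StringComparison.OrdinalIgnoreCase));

        Console.WriteLine(allJson); // True
    }
}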
@@ -24,7 +24,9 @@
#include <inttypes.h>
#include <netdb.h>
#include <netinet/in.h>
+#ifndef __ANDROID__
#include <spawn.h>
+#endif
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h> | 1 | /*
* Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <netdb.h>
#include <netinet/in.h>
#include <spawn.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/types.h>
#include "h2o.h"
#include "h2o/serverutil.h"
struct st_h2o_access_log_filehandle_t {
h2o_logconf_t *logconf;
int fd;
};
struct st_h2o_access_logger_t {
h2o_logger_t super;
h2o_access_log_filehandle_t *fh;
};
static void log_access(h2o_logger_t *_self, h2o_req_t *req)
{
struct st_h2o_access_logger_t *self = (struct st_h2o_access_logger_t *)_self;
h2o_access_log_filehandle_t *fh = self->fh;
char *logline, buf[4096];
size_t len;
/* stringify */
len = sizeof(buf);
logline = h2o_log_request(fh->logconf, req, &len, buf);
/* emit */
write(fh->fd, logline, len);
/* free memory */
if (logline != buf)
free(logline);
}
static void on_dispose_handle(void *_fh)
{
h2o_access_log_filehandle_t *fh = _fh;
h2o_logconf_dispose(fh->logconf);
close(fh->fd);
}
int h2o_access_log_open_log(const char *path)
{
int fd;
if (path[0] == '|') {
int pipefds[2];
pid_t pid;
char *argv[4] = {"/bin/sh", "-c", (char *)(path + 1), NULL};
/* create pipe */
if (pipe(pipefds) != 0) {
perror("pipe failed");
return -1;
}
if (fcntl(pipefds[1], F_SETFD, FD_CLOEXEC) == -1) {
perror("failed to set FD_CLOEXEC on pipefds[1]");
return -1;
}
/* spawn the logger */
int mapped_fds[] = {pipefds[0], 0, /* map pipefds[0] to stdin */
-1};
if ((pid = h2o_spawnp(argv[0], argv, mapped_fds, 0)) == -1) {
fprintf(stderr, "failed to open logger: %s:%s\n", path + 1, strerror(errno));
return -1;
}
/* close the read side of the pipefds and return the write side */
close(pipefds[0]);
fd = pipefds[1];
} else {
if ((fd = open(path, O_CREAT | O_WRONLY | O_APPEND | O_CLOEXEC, 0644)) == -1) {
fprintf(stderr, "failed to open log file:%s:%s\n", path, strerror(errno));
return -1;
}
}
return fd;
}
h2o_access_log_filehandle_t *h2o_access_log_open_handle(const char *path, const char *fmt, int escape)
{
h2o_logconf_t *logconf;
int fd;
h2o_access_log_filehandle_t *fh;
char errbuf[256];
/* default to combined log format */
if (fmt == NULL)
fmt = "%h %l %u %t \"%r\" %s %b \"%{Referer}i\" \"%{User-agent}i\"";
if ((logconf = h2o_logconf_compile(fmt, escape, errbuf)) == NULL) {
fprintf(stderr, "%s\n", errbuf);
return NULL;
}
/* open log file */
if ((fd = h2o_access_log_open_log(path)) == -1) {
h2o_logconf_dispose(logconf);
return NULL;
}
fh = h2o_mem_alloc_shared(NULL, sizeof(*fh), on_dispose_handle);
fh->logconf = logconf;
fh->fd = fd;
return fh;
}
static void dispose(h2o_logger_t *_self)
{
struct st_h2o_access_logger_t *self = (void *)_self;
h2o_mem_release_shared(self->fh);
}
h2o_logger_t *h2o_access_log_register(h2o_pathconf_t *pathconf, h2o_access_log_filehandle_t *fh)
{
struct st_h2o_access_logger_t *self = (void *)h2o_create_logger(pathconf, sizeof(*self));
self->super.dispose = dispose;
self->super.log_access = log_access;
self->fh = fh;
h2o_mem_addref_shared(fh);
return &self->super;
}
| 1 | 12,673 | Could you please remove `#include <spawn.h>` as a whole and see what happens? I believe that this is no longer needed; it was necessary when we called `posix_spawnp` directly, but now we use `h2o_spawnp`. | h2o-h2o | c |
@@ -94,10 +94,16 @@ func (in *fakeTwoPhaseChaos) GetStatus() *v1alpha1.ChaosStatus {
return &in.Status
}
+// IsDeleted returns whether this resource has been deleted
func (in *fakeTwoPhaseChaos) IsDeleted() bool {
return in.Deleted
}
+// IsPaused returns whether this resource is paused
+func (in *fakeTwoPhaseChaos) IsPaused() bool {
+ return false
+}
+
func (r fakeReconciler) Object() reconciler.InnerObject {
return &fakeTwoPhaseChaos{}
} | 1 | package twophase_test
import (
"context"
"errors"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/pingcap/chaos-mesh/api/v1alpha1"
"github.com/pingcap/chaos-mesh/controllers/reconciler"
"github.com/pingcap/chaos-mesh/controllers/twophase"
"github.com/pingcap/chaos-mesh/pkg/mock"
)
func TestTwoPhase(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"TwoPhase Suite",
[]Reporter{envtest.NewlineReporter{}})
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
Expect(addFakeToScheme(scheme.Scheme)).To(Succeed())
close(done)
}, 60)
var _ = AfterSuite(func() {
})
var _ reconciler.InnerReconciler = (*fakeReconciler)(nil)
type fakeReconciler struct{}
func (r fakeReconciler) Apply(ctx context.Context, req ctrl.Request, chaos reconciler.InnerObject) error {
if err := mock.On("MockApplyError"); err != nil {
return err.(error)
}
return nil
}
func (r fakeReconciler) Recover(ctx context.Context, req ctrl.Request, chaos reconciler.InnerObject) error {
if err := mock.On("MockRecoverError"); err != nil {
return err.(error)
}
return nil
}
var _ twophase.InnerSchedulerObject = (*fakeTwoPhaseChaos)(nil)
type fakeTwoPhaseChaos struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Status v1alpha1.ChaosStatus `json:"status,omitempty"`
// Selector is used to select pods that are used to inject chaos action.
Selector v1alpha1.SelectorSpec `json:"selector"`
Deleted bool `json:"deleted"`
// Duration represents the duration of the chaos action
Duration *string `json:"duration,omitempty"`
// Scheduler defines some schedule rules to control the running time of the chaos experiment about time.
Scheduler *v1alpha1.SchedulerSpec `json:"scheduler,omitempty"`
// Next time when this action will be applied again
// +optional
NextStart *metav1.Time `json:"nextStart,omitempty"`
// Next time when this action will be recovered
// +optional
NextRecover *metav1.Time `json:"nextRecover,omitempty"`
}
func (in *fakeTwoPhaseChaos) GetStatus() *v1alpha1.ChaosStatus {
return &in.Status
}
func (in *fakeTwoPhaseChaos) IsDeleted() bool {
return in.Deleted
}
func (r fakeReconciler) Object() reconciler.InnerObject {
return &fakeTwoPhaseChaos{}
}
func (in *fakeTwoPhaseChaos) GetDuration() (*time.Duration, error) {
if in.Duration == nil {
return nil, nil
}
duration, err := time.ParseDuration(*in.Duration)
if err != nil {
return nil, err
}
return &duration, nil
}
func (in *fakeTwoPhaseChaos) GetNextStart() time.Time {
if in.NextStart == nil {
return time.Time{}
}
return in.NextStart.Time
}
func (in *fakeTwoPhaseChaos) SetNextStart(t time.Time) {
if t.IsZero() {
in.NextStart = nil
return
}
if in.NextStart == nil {
in.NextStart = &metav1.Time{}
}
in.NextStart.Time = t
}
func (in *fakeTwoPhaseChaos) GetNextRecover() time.Time {
if in.NextRecover == nil {
return time.Time{}
}
return in.NextRecover.Time
}
func (in *fakeTwoPhaseChaos) SetNextRecover(t time.Time) {
if t.IsZero() {
in.NextRecover = nil
return
}
if in.NextRecover == nil {
in.NextRecover = &metav1.Time{}
}
in.NextRecover.Time = t
}
func (in *fakeTwoPhaseChaos) GetScheduler() *v1alpha1.SchedulerSpec {
return in.Scheduler
}
func (in *fakeTwoPhaseChaos) DeepCopyInto(out *fakeTwoPhaseChaos) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Status.DeepCopyInto(&out.Status)
in.Selector.DeepCopyInto(&out.Selector)
out.Deleted = in.Deleted
if in.Duration != nil {
in, out := &in.Duration, &out.Duration
*out = new(string)
**out = **in
}
if in.Scheduler != nil {
in, out := &in.Scheduler, &out.Scheduler
*out = new(v1alpha1.SchedulerSpec)
**out = **in
}
if in.NextRecover != nil {
in, out := &in.NextRecover, &out.NextRecover
*out = new(metav1.Time)
**out = **in
}
if in.NextStart != nil {
in, out := &in.NextStart, &out.NextStart
*out = new(metav1.Time)
**out = **in
}
}
func (in *fakeTwoPhaseChaos) DeepCopy() *fakeTwoPhaseChaos {
if in == nil {
return nil
}
out := new(fakeTwoPhaseChaos)
in.DeepCopyInto(out)
return out
}
func (in *fakeTwoPhaseChaos) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
var (
schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
addFakeToScheme = schemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(schema.GroupVersion{Group: "", Version: "v1"},
&fakeTwoPhaseChaos{},
)
return nil
}
var _ = Describe("TwoPhase", func() {
Context("TwoPhase", func() {
var err error
zeroTime := time.Time{}
var _ = zeroTime
pastTime := time.Now().Add(-10 * time.Hour)
futureTime := time.Now().Add(10 * time.Hour)
req := ctrl.Request{
NamespacedName: types.NamespacedName{
Name: "fakechaos-name",
Namespace: metav1.NamespaceDefault,
},
}
typeMeta := metav1.TypeMeta{
Kind: "PodChaos",
APIVersion: "v1",
}
objectMeta := metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
Name: "fakechaos-name",
}
It("TwoPhase Action", func() {
chaos := fakeTwoPhaseChaos{
TypeMeta: typeMeta,
ObjectMeta: objectMeta,
}
c := fake.NewFakeClientWithScheme(scheme.Scheme, &chaos)
r := twophase.Reconciler{
InnerReconciler: fakeReconciler{},
Client: c,
Log: ctrl.Log.WithName("controllers").WithName("TwoPhase"),
}
_, err = r.Reconcile(req)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("misdefined scheduler"))
})
It("TwoPhase Delete", func() {
chaos := fakeTwoPhaseChaos{
TypeMeta: typeMeta,
ObjectMeta: objectMeta,
Scheduler: &v1alpha1.SchedulerSpec{Cron: "@hourly"},
Deleted: true,
}
c := fake.NewFakeClientWithScheme(scheme.Scheme, &chaos)
r := twophase.Reconciler{
InnerReconciler: fakeReconciler{},
Client: c,
Log: ctrl.Log.WithName("controllers").WithName("TwoPhase"),
}
_, err = r.Reconcile(req)
Expect(err).ToNot(HaveOccurred())
_chaos := r.Object()
err = r.Get(context.TODO(), req.NamespacedName, _chaos)
Expect(err).ToNot(HaveOccurred())
Expect(_chaos.(twophase.InnerSchedulerObject).GetStatus().Experiment.Phase).To(Equal(v1alpha1.ExperimentPhaseFinished))
defer mock.With("MockRecoverError", errors.New("RecoverError"))()
_, err = r.Reconcile(req)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("RecoverError"))
})
It("TwoPhase ToRecover", func() {
chaos := fakeTwoPhaseChaos{
TypeMeta: typeMeta,
ObjectMeta: objectMeta,
Scheduler: &v1alpha1.SchedulerSpec{Cron: "@hourly"},
}
chaos.SetNextRecover(pastTime)
c := fake.NewFakeClientWithScheme(scheme.Scheme, &chaos)
r := twophase.Reconciler{
InnerReconciler: fakeReconciler{},
Client: c,
Log: ctrl.Log.WithName("controllers").WithName("TwoPhase"),
}
_, err = r.Reconcile(req)
Expect(err).ToNot(HaveOccurred())
_chaos := r.Object()
err = r.Get(context.TODO(), req.NamespacedName, _chaos)
Expect(err).ToNot(HaveOccurred())
Expect(_chaos.(twophase.InnerSchedulerObject).GetStatus().Experiment.Phase).To(Equal(v1alpha1.ExperimentPhaseFinished))
})
It("TwoPhase ToRecover Error", func() {
chaos := fakeTwoPhaseChaos{
TypeMeta: typeMeta,
ObjectMeta: objectMeta,
Scheduler: &v1alpha1.SchedulerSpec{Cron: "@hourly"},
}
defer mock.With("MockRecoverError", errors.New("RecoverError"))()
chaos.SetNextRecover(pastTime)
c := fake.NewFakeClientWithScheme(scheme.Scheme, &chaos)
r := twophase.Reconciler{
InnerReconciler: fakeReconciler{},
Client: c,
Log: ctrl.Log.WithName("controllers").WithName("TwoPhase"),
}
_, err = r.Reconcile(req)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("RecoverError"))
})
It("TwoPhase ToApply", func() {
chaos := fakeTwoPhaseChaos{
TypeMeta: typeMeta,
ObjectMeta: objectMeta,
Scheduler: &v1alpha1.SchedulerSpec{Cron: "@hourly"},
}
chaos.SetNextRecover(futureTime)
chaos.SetNextStart(pastTime)
c := fake.NewFakeClientWithScheme(scheme.Scheme, &chaos)
r := twophase.Reconciler{
InnerReconciler: fakeReconciler{},
Client: c,
Log: ctrl.Log.WithName("controllers").WithName("TwoPhase"),
}
_, err = r.Reconcile(req)
Expect(err).ToNot(HaveOccurred())
_chaos := r.Object()
err = r.Get(context.TODO(), req.NamespacedName, _chaos)
Expect(err).ToNot(HaveOccurred())
Expect(_chaos.(twophase.InnerSchedulerObject).GetStatus().Experiment.Phase).To(Equal(v1alpha1.ExperimentPhaseRunning))
})
It("TwoPhase ToApply Error", func() {
chaos := fakeTwoPhaseChaos{
TypeMeta: typeMeta,
ObjectMeta: objectMeta,
Scheduler: &v1alpha1.SchedulerSpec{Cron: "@hourly"},
}
chaos.SetNextRecover(futureTime)
chaos.SetNextStart(pastTime)
c := fake.NewFakeClientWithScheme(scheme.Scheme, &chaos)
r := twophase.Reconciler{
InnerReconciler: fakeReconciler{},
Client: c,
Log: ctrl.Log.WithName("controllers").WithName("TwoPhase"),
}
defer mock.With("MockApplyError", errors.New("ApplyError"))()
_, err = r.Reconcile(req)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("ApplyError"))
})
})
})
| 1 | 14,337 | `whether this resource is paused` to `whether this resource has been paused`? To keep the same doc style. | chaos-mesh-chaos-mesh | go
@@ -36,4 +36,11 @@ public interface PermissionManager {
public boolean hasPermission(final String imageTypeName, final String userId, final Type type)
throws ImageMgmtException;
+ /**
+ * Checks if the user has permission to manage high priority flows.
+ * @param userId
+ * @return
+ * @throws ImageMgmtException
+ */
+ public boolean hasPermission(final String userId) throws ImageMgmtException;
} | 1 | /*
* Copyright 2020 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.imagemgmt.permission;
import azkaban.imagemgmt.exception.ImageMgmtException;
import azkaban.user.Permission.Type;
/**
* Interface defines method to check the permission for accessing image management APIs.
*/
public interface PermissionManager {
/**
* Checks the permission based on image type name, user id and Permission type for accessing image
* management APIs.
*
* @param imageTypeName
* @param userId
* @param type
* @return boolean
*/
public boolean hasPermission(final String imageTypeName, final String userId, final Type type)
throws ImageMgmtException;
}
| 1 | 22,779 | This method is validating permission for high-priority flows. Can you please rename this method to convey that? | azkaban-azkaban | java
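A hedged Java sketch of the kind of rename the reviewer is asking for; the method name below is an illustrative assumption, not the name the project actually adopted.

// Illustrative only: same contract as the hasPermission(String) overload in the file
// above, but with a name that states the check concerns managing high priority flows.
import azkaban.imagemgmt.exception.ImageMgmtException;

public interface PermissionManager {

  /**
   * Checks if the user has permission to manage high priority flows.
   *
   * @param userId
   * @return boolean
   * @throws ImageMgmtException
   */
  boolean hasPermissionToManageHighPriorityFlows(final String userId) throws ImageMgmtException;
}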
@@ -102,9 +102,7 @@ class MongoNetworkError extends MongoError {
super(message);
this.name = 'MongoNetworkError';
- if (options && options.beforeHandshake === true) {
- this[kBeforeHandshake] = true;
- }
+ this[kBeforeHandshake] = !!(options && options.beforeHandshake === true);
}
}
| 1 | 'use strict';
const kErrorLabels = Symbol('errorLabels');
/**
* Creates a new MongoError
*
* @augments Error
* @param {Error|string|object} message The error message
* @property {string} message The error message
* @property {string} stack The error call stack
*/
class MongoError extends Error {
constructor(message) {
if (message instanceof Error) {
super(message.message);
this.stack = message.stack;
} else {
if (typeof message === 'string') {
super(message);
} else {
super(message.message || message.errmsg || message.$err || 'n/a');
if (message.errorLabels) {
this[kErrorLabels] = new Set(message.errorLabels);
}
for (var name in message) {
if (name === 'errorLabels' || name === 'errmsg') {
continue;
}
this[name] = message[name];
}
}
Error.captureStackTrace(this, this.constructor);
}
this.name = 'MongoError';
}
/**
* Legacy name for server error responses
*/
get errmsg() {
return this.message;
}
/**
* Creates a new MongoError object
*
* @param {Error|string|object} options The options used to create the error.
* @return {MongoError} A MongoError instance
* @deprecated Use `new MongoError()` instead.
*/
static create(options) {
return new MongoError(options);
}
/**
* Checks the error to see if it has an error label
* @param {string} label The error label to check for
* @returns {boolean} returns true if the error has the provided error label
*/
hasErrorLabel(label) {
if (this[kErrorLabels] == null) {
return false;
}
return this[kErrorLabels].has(label);
}
addErrorLabel(label) {
if (this[kErrorLabels] == null) {
this[kErrorLabels] = new Set();
}
this[kErrorLabels].add(label);
}
get errorLabels() {
return this[kErrorLabels] ? Array.from(this[kErrorLabels]) : [];
}
}
const kBeforeHandshake = Symbol('beforeHandshake');
function isNetworkErrorBeforeHandshake(err) {
return err[kBeforeHandshake] === true;
}
/**
* An error indicating an issue with the network, including TCP
* errors and timeouts.
*
* @param {Error|string|object} message The error message
* @property {string} message The error message
* @property {string} stack The error call stack
* @extends MongoError
*/
class MongoNetworkError extends MongoError {
constructor(message, options) {
super(message);
this.name = 'MongoNetworkError';
if (options && options.beforeHandshake === true) {
this[kBeforeHandshake] = true;
}
}
}
/**
* An error indicating a network timeout occurred
*
* @param {Error|string|object} message The error message
* @property {string} message The error message
* @property {object} [options.beforeHandshake] Indicates the timeout happened before a connection handshake completed
* @extends MongoError
*/
class MongoNetworkTimeoutError extends MongoNetworkError {
constructor(message, options) {
super(message, options);
this.name = 'MongoNetworkTimeoutError';
}
}
/**
* An error used when attempting to parse a value (like a connection string)
*
* @param {Error|string|object} message The error message
* @property {string} message The error message
* @extends MongoError
*/
class MongoParseError extends MongoError {
constructor(message) {
super(message);
this.name = 'MongoParseError';
}
}
/**
* An error signifying a client-side timeout event
*
* @param {Error|string|object} message The error message
* @param {string|object} [reason] The reason the timeout occured
* @property {string} message The error message
* @property {string} [reason] An optional reason context for the timeout, generally an error saved during flow of monitoring and selecting servers
* @extends MongoError
*/
class MongoTimeoutError extends MongoError {
constructor(message, reason) {
if (reason && reason.error) {
super(reason.error.message || reason.error);
} else {
super(message);
}
this.name = 'MongoTimeoutError';
if (reason) {
this.reason = reason;
}
}
}
/**
* An error signifying a client-side server selection error
*
* @param {Error|string|object} message The error message
* @param {string|object} [reason] The reason the timeout occured
* @property {string} message The error message
* @property {string} [reason] An optional reason context for the timeout, generally an error saved during flow of monitoring and selecting servers
* @extends MongoError
*/
class MongoServerSelectionError extends MongoTimeoutError {
constructor(message, reason) {
super(message, reason);
this.name = 'MongoServerSelectionError';
}
}
function makeWriteConcernResultObject(input) {
const output = Object.assign({}, input);
if (output.ok === 0) {
output.ok = 1;
delete output.errmsg;
delete output.code;
delete output.codeName;
}
return output;
}
/**
* An error thrown when the server reports a writeConcernError
*
* @param {Error|string|object} message The error message
* @param {object} result The result document (provided if ok: 1)
* @property {string} message The error message
* @property {object} [result] The result document (provided if ok: 1)
* @extends MongoError
*/
class MongoWriteConcernError extends MongoError {
constructor(message, result) {
super(message);
this.name = 'MongoWriteConcernError';
if (result && Array.isArray(result.errorLabels)) {
this[kErrorLabels] = new Set(result.errorLabels);
}
if (result != null) {
this.result = makeWriteConcernResultObject(result);
}
}
}
// see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms
const RETRYABLE_ERROR_CODES = new Set([
6, // HostUnreachable
7, // HostNotFound
89, // NetworkTimeout
91, // ShutdownInProgress
189, // PrimarySteppedDown
9001, // SocketException
10107, // NotMaster
11600, // InterruptedAtShutdown
11602, // InterruptedDueToReplStateChange
13435, // NotMasterNoSlaveOk
13436 // NotMasterOrSecondary
]);
const RETRYABLE_WRITE_ERROR_CODES = new Set([
11600, // InterruptedAtShutdown
11602, // InterruptedDueToReplStateChange
10107, // NotMaster
13435, // NotMasterNoSlaveOk
13436, // NotMasterOrSecondary
189, // PrimarySteppedDown
91, // ShutdownInProgress
7, // HostNotFound
6, // HostUnreachable
89, // NetworkTimeout
9001, // SocketException
262 // ExceededTimeLimit
]);
function isRetryableWriteError(error) {
if (error instanceof MongoWriteConcernError) {
return (
RETRYABLE_WRITE_ERROR_CODES.has(error.code) ||
RETRYABLE_WRITE_ERROR_CODES.has(error.result.code)
);
}
return RETRYABLE_WRITE_ERROR_CODES.has(error.code);
}
/**
* Determines whether an error is something the driver should attempt to retry
*
* @ignore
* @param {MongoError|Error} error
*/
function isRetryableError(error) {
return (
RETRYABLE_ERROR_CODES.has(error.code) ||
error instanceof MongoNetworkError ||
error.message.match(/not master/) ||
error.message.match(/node is recovering/)
);
}
const SDAM_RECOVERING_CODES = new Set([
91, // ShutdownInProgress
189, // PrimarySteppedDown
11600, // InterruptedAtShutdown
11602, // InterruptedDueToReplStateChange
13436 // NotMasterOrSecondary
]);
const SDAM_NOTMASTER_CODES = new Set([
10107, // NotMaster
13435 // NotMasterNoSlaveOk
]);
const SDAM_NODE_SHUTTING_DOWN_ERROR_CODES = new Set([
11600, // InterruptedAtShutdown
91 // ShutdownInProgress
]);
function isRecoveringError(err) {
if (err.code && SDAM_RECOVERING_CODES.has(err.code)) {
return true;
}
return err.message.match(/not master or secondary/) || err.message.match(/node is recovering/);
}
function isNotMasterError(err) {
if (err.code && SDAM_NOTMASTER_CODES.has(err.code)) {
return true;
}
if (isRecoveringError(err)) {
return false;
}
return err.message.match(/not master/);
}
function isNodeShuttingDownError(err) {
return err.code && SDAM_NODE_SHUTTING_DOWN_ERROR_CODES.has(err.code);
}
/**
* Determines whether SDAM can recover from a given error. If it cannot
* then the pool will be cleared, and server state will completely reset
* locally.
*
* @ignore
* @see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-master-and-node-is-recovering
* @param {MongoError|Error} error
*/
function isSDAMUnrecoverableError(error) {
// NOTE: null check is here for a strictly pre-CMAP world, a timeout or
// close event are considered unrecoverable
if (error instanceof MongoParseError || error == null) {
return true;
}
if (isRecoveringError(error) || isNotMasterError(error)) {
return true;
}
return false;
}
module.exports = {
MongoError,
MongoNetworkError,
MongoNetworkTimeoutError,
MongoParseError,
MongoTimeoutError,
MongoServerSelectionError,
MongoWriteConcernError,
isRetryableError,
isSDAMUnrecoverableError,
isNodeShuttingDownError,
isRetryableWriteError,
isNetworkErrorBeforeHandshake
};
| 1 | 19,839 | Just to confirm, do we actually want to explicitly set the `kBeforeHandshake` symbol on all network errors regardless of whether the options/property is passed in? | mongodb-node-mongodb-native | js |
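A small self-contained JavaScript sketch of the difference the reviewer is probing (it re-declares minimal stand-ins rather than using the driver's actual classes): the helper's result is unchanged, but with the new expression the symbol-keyed property is defined on every instance instead of only when the option is passed.

'use strict';

const kBeforeHandshake = Symbol('beforeHandshake');

class OldStyleError extends Error {
  constructor(message, options) {
    super(message);
    // Old behavior: property is only assigned when the option is explicitly true.
    if (options && options.beforeHandshake === true) {
      this[kBeforeHandshake] = true;
    }
  }
}

class NewStyleError extends Error {
  constructor(message, options) {
    super(message);
    // New behavior: property is always assigned, defaulting to false.
    this[kBeforeHandshake] = !!(options && options.beforeHandshake === true);
  }
}

const a = new OldStyleError('connection reset');
const b = new NewStyleError('connection reset');
console.log(a[kBeforeHandshake] === true, kBeforeHandshake in a); // false false
console.log(b[kBeforeHandshake] === true, kBeforeHandshake in b); // false true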
@@ -122,6 +122,11 @@ func (r *RouteTable) SetRoutes(ifaceName string, targets []Target) {
r.dirtyIfaces.Add(ifaceName)
}
+func (r *RouteTable) QueueResync() {
+ r.logCxt.Info("Queueing a resync.")
+ r.inSync = false
+}
+
func (r *RouteTable) Apply() error {
if !r.inSync {
links, err := r.dataplane.LinkList() | 1 | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package routetable
import (
"errors"
"net"
"regexp"
"strings"
"syscall"
log "github.com/Sirupsen/logrus"
"github.com/vishvananda/netlink"
"github.com/projectcalico/felix/conntrack"
"github.com/projectcalico/felix/ifacemonitor"
"github.com/projectcalico/felix/ip"
"github.com/projectcalico/felix/set"
calinet "github.com/projectcalico/libcalico-go/lib/net"
)
var (
GetFailed = errors.New("netlink get operation failed")
ListFailed = errors.New("netlink list operation failed")
UpdateFailed = errors.New("netlink update operation failed")
IfaceNotPresent = errors.New("interface not present")
IfaceDown = errors.New("interface down")
ipV6LinkLocalCIDR = ip.MustParseCIDR("fe80::/64")
)
type Target struct {
CIDR ip.CIDR
DestMAC net.HardwareAddr
}
type RouteTable struct {
logCxt *log.Entry
ipVersion uint8
netlinkFamily int
dirtyIfaces set.Set
ifacePrefixes set.Set
ifacePrefixRegexp *regexp.Regexp
ifaceNameToTargets map[string][]Target
pendingIfaceNameToTargets map[string][]Target
inSync bool
// dataplane is our shim for the netlink/arp interface. In production, it maps directly
// through to calls to the netlink package and the arp command.
dataplane dataplaneIface
}
func New(interfacePrefixes []string, ipVersion uint8) *RouteTable {
return NewWithShims(interfacePrefixes, ipVersion, realDataplane{conntrack: conntrack.New()})
}
// NewWithShims is a test constructor, which allows netlink to be replaced by a shim.
func NewWithShims(interfacePrefixes []string, ipVersion uint8, nl dataplaneIface) *RouteTable {
prefixSet := set.New()
regexpParts := []string{}
for _, prefix := range interfacePrefixes {
prefixSet.Add(prefix)
regexpParts = append(regexpParts, "^"+prefix+".*")
}
ifaceNamePattern := strings.Join(regexpParts, "|")
log.WithField("regex", ifaceNamePattern).Info("Calculated interface name regexp")
family := netlink.FAMILY_V4
if ipVersion == 6 {
family = netlink.FAMILY_V6
} else if ipVersion != 4 {
log.WithField("ipVersion", ipVersion).Panic("Unknown IP version")
}
return &RouteTable{
logCxt: log.WithFields(log.Fields{
"ipVersion": ipVersion,
}),
ipVersion: ipVersion,
netlinkFamily: family,
ifacePrefixes: prefixSet,
ifacePrefixRegexp: regexp.MustCompile(ifaceNamePattern),
ifaceNameToTargets: map[string][]Target{},
pendingIfaceNameToTargets: map[string][]Target{},
dirtyIfaces: set.New(),
dataplane: nl,
}
}
func (r *RouteTable) OnIfaceStateChanged(ifaceName string, state ifacemonitor.State) {
logCxt := r.logCxt.WithField("ifaceName", ifaceName)
if !r.ifacePrefixRegexp.MatchString(ifaceName) {
logCxt.Debug("Ignoring interface state change, not a Calico interface.")
return
}
if state == ifacemonitor.StateUp {
logCxt.Debug("Interface up, marking for route sync")
r.dirtyIfaces.Add(ifaceName)
}
}
func (r *RouteTable) SetRoutes(ifaceName string, targets []Target) {
r.pendingIfaceNameToTargets[ifaceName] = targets
r.dirtyIfaces.Add(ifaceName)
}
func (r *RouteTable) Apply() error {
if !r.inSync {
links, err := r.dataplane.LinkList()
if err != nil {
r.logCxt.WithError(err).Error("Failed to list interfaces, retrying...")
return ListFailed
}
// Clear the dirty set; there's no point trying to update non-existent interfaces.
r.dirtyIfaces = set.New()
for _, link := range links {
attrs := link.Attrs()
if attrs == nil {
continue
}
ifaceName := attrs.Name
if r.ifacePrefixRegexp.MatchString(ifaceName) {
r.logCxt.WithField("ifaceName", ifaceName).Debug(
"Resync: found calico-owned interface")
r.dirtyIfaces.Add(ifaceName)
}
}
r.inSync = true
}
r.dirtyIfaces.Iter(func(item interface{}) error {
retries := 2
ifaceName := item.(string)
logCxt := r.logCxt.WithField("ifaceName", ifaceName)
for retries > 0 {
err := r.syncRoutesForLink(ifaceName)
if err == IfaceNotPresent {
logCxt.Info("Interface missing, will retry if it appears.")
break
} else if err == IfaceDown {
logCxt.Info("Interface down, will retry if it goes up.")
break
} else if err != nil {
logCxt.WithError(err).Warn("Failed to syncronise routes.")
retries--
continue
}
logCxt.Info("Synchronised routes on interface")
break
}
if retries == 0 {
// The interface might be flapping or being deleted.
logCxt.Warn("Failed to sync routes to interface even after retries. " +
"Leaving it dirty.")
return nil
}
return set.RemoveItem
})
if r.dirtyIfaces.Len() > 0 {
r.logCxt.Warn("Some interfaces still out-of sync.")
r.inSync = false
return UpdateFailed
}
return nil
}
func (r *RouteTable) syncRoutesForLink(ifaceName string) error {
logCxt := r.logCxt.WithField("ifaceName", ifaceName)
logCxt.Debug("Syncing interface routes")
// If this is a modify or delete, grab a copy of the existing targets so we can clean up
// conntrack entries even if the routes have been removed. We'll remove any still-required
// CIDRs from this set below.
oldCIDRs := set.New()
if updatedTargets, ok := r.pendingIfaceNameToTargets[ifaceName]; ok {
logCxt.Debug("Have updated targets.")
oldTargets := r.ifaceNameToTargets[ifaceName]
if updatedTargets == nil {
delete(r.ifaceNameToTargets, ifaceName)
} else {
r.ifaceNameToTargets[ifaceName] = updatedTargets
}
for _, target := range oldTargets {
oldCIDRs.Add(target.CIDR)
}
delete(r.pendingIfaceNameToTargets, ifaceName)
}
expectedTargets := r.ifaceNameToTargets[ifaceName]
expectedCIDRs := set.New()
for _, t := range expectedTargets {
expectedCIDRs.Add(t.CIDR)
oldCIDRs.Discard(t.CIDR)
}
if r.ipVersion == 6 {
expectedCIDRs.Add(ipV6LinkLocalCIDR)
oldCIDRs.Discard(ipV6LinkLocalCIDR)
}
// The code below may add some more CIDRs to clean up before it is done, make sure we
// remove conntrack entries in any case.
defer oldCIDRs.Iter(func(item interface{}) error {
// Remove and conntrack entries that should no longer be there.
dest := item.(ip.CIDR)
r.dataplane.RemoveConntrackFlows(dest.Version(), dest.Addr().AsNetIP())
return nil
})
// Try to get the link. This may fail if it's been deleted out from under us.
link, err := r.dataplane.LinkByName(ifaceName)
if err != nil {
// Filter the error so that we don't spam errors if the interface is being torn
// down.
filteredErr := r.filterErrorByIfaceState(ifaceName, GetFailed)
if filteredErr == GetFailed {
logCxt.WithError(err).Error("Failed to get interface.")
} else {
logCxt.WithError(err).Info("Failed to get interface; it's down/gone.")
}
return filteredErr
}
// Got the link; try to sync its routes. Note: We used to check if the interface
// was oper down before we tried to do the sync but that prevented us from removing
// routes from an interface in some corner cases (such as being admin up but oper
// down).
linkAttrs := link.Attrs()
oldRoutes, err := r.dataplane.RouteList(link, r.netlinkFamily)
if err != nil {
// Filter the error so that we don't spam errors if the interface is being torn
// down.
filteredErr := r.filterErrorByIfaceState(ifaceName, ListFailed)
if filteredErr == ListFailed {
logCxt.WithError(err).Error("Error listing routes")
} else {
logCxt.WithError(err).Info("Failed to list routes; interface down/gone.")
}
return filteredErr
}
seenCIDRs := set.New()
updatesFailed := false
for _, route := range oldRoutes {
var dest ip.CIDR
if route.Dst != nil {
dest = ip.CIDRFromIPNet(calinet.IPNet{*route.Dst})
}
if !expectedCIDRs.Contains(dest) {
logCxt := logCxt.WithField("dest", dest)
logCxt.Info("Syncing routes: removing old route.")
if err := r.dataplane.RouteDel(&route); err != nil {
// Probably a race with the interface being deleted.
logCxt.WithError(err).Info(
"Route deletion failed, assuming someone got there first.")
updatesFailed = true
}
if dest != nil {
// Collect any old route CIDRs that we find in the dataplane so we
// can remove their conntrack entries later.
oldCIDRs.Add(dest)
}
}
seenCIDRs.Add(dest)
}
for _, target := range expectedTargets {
cidr := target.CIDR
if !seenCIDRs.Contains(cidr) {
logCxt := logCxt.WithField("targetCIDR", target.CIDR)
logCxt.Info("Syncing routes: adding new route.")
ipNet := cidr.ToIPNet()
route := netlink.Route{
LinkIndex: linkAttrs.Index,
Dst: &ipNet,
Type: syscall.RTN_UNICAST,
Protocol: syscall.RTPROT_BOOT,
Scope: netlink.SCOPE_LINK,
}
if err := r.dataplane.RouteAdd(&route); err != nil {
logCxt.WithError(err).Warn("Failed to add route")
updatesFailed = true
}
}
if r.ipVersion == 4 && target.DestMAC != nil {
// TODO(smc) clean up/sync old ARP entries
err := r.dataplane.AddStaticArpEntry(cidr, target.DestMAC, ifaceName)
if err != nil {
logCxt.WithError(err).Warn("Failed to set ARP entry")
updatesFailed = true
}
}
}
if updatesFailed {
// Recheck whether the interface exists so we don't produce spammy logs during
// interface removal.
return r.filterErrorByIfaceState(ifaceName, UpdateFailed)
}
return nil
}
// filterErrorByIfaceState checks the current state of the interface; it's down or gone, it returns
// IfaceDown or IfaceError, otherwise, it returns the given defaultErr.
func (r *RouteTable) filterErrorByIfaceState(ifaceName string, defaultErr error) error {
logCxt := r.logCxt.WithField("ifaceName", ifaceName)
if link, err := r.dataplane.LinkByName(ifaceName); err == nil {
// Link still exists. Check if it's up.
if link.Attrs().Flags&net.FlagUp != 0 {
// Link exists and it's up, no reason that we expect to fail.
return defaultErr
} else {
// Special case: Link exists and it's down. Assume that's the problem.
return IfaceDown
}
} else if strings.Contains(err.Error(), "not found") {
// Special case: Link no longer exists.
return IfaceNotPresent
} else {
// Failed to list routes, then failed to check if interface exists.
logCxt.WithError(err).Error("Failed to access interface after a failure")
return defaultErr
}
}
| 1 | 15,063 | How soon after this will Apply() be called? It would be a shame if there was still a significant delay before a missing or superfluous route was corrected. | projectcalico-felix | go |
@@ -172,9 +172,12 @@ func (g *gen) frame() []byte {
}
sort.Strings(imps)
for _, path := range imps {
- // TODO(light): Omit the local package identifier if it matches
- // the package name.
- fmt.Fprintf(&buf, "\t%s %q\n", g.imports[path], path)
+ // Omit the local package identifier if it matches the package name.
+ if g.imports[path] == path {
+ fmt.Fprintf(&buf, "\t%q\n", path)
+ } else {
+ fmt.Fprintf(&buf, "\t%s %q\n", g.imports[path], path)
+ }
}
buf.WriteString(")\n\n")
} | 1 | // Copyright 2018 The Go Cloud Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package wire provides compile-time dependency injection logic as a
// Go library.
package wire
import (
"bytes"
"fmt"
"go/ast"
"go/build"
"go/format"
"go/printer"
"go/token"
"go/types"
"path/filepath"
"sort"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/loader"
)
// Generate performs dependency injection for a single package,
// returning the gofmt'd Go source code.
func Generate(bctx *build.Context, wd string, pkg string) ([]byte, []error) {
prog, errs := load(bctx, wd, []string{pkg})
if len(errs) > 0 {
return nil, errs
}
if len(prog.InitialPackages()) != 1 {
// This is more of a violated precondition than anything else.
return nil, []error{fmt.Errorf("load: got %d packages", len(prog.InitialPackages()))}
}
pkgInfo := prog.InitialPackages()[0]
g := newGen(prog, pkgInfo.Pkg.Path())
injectorFiles, errs := generateInjectors(g, pkgInfo)
if len(errs) > 0 {
return nil, errs
}
copyNonInjectorDecls(g, injectorFiles, &pkgInfo.Info)
goSrc := g.frame()
fmtSrc, err := format.Source(goSrc)
if err != nil {
// This is likely a bug from a poorly generated source file.
// Return an error and the unformatted source.
return goSrc, []error{err}
}
return fmtSrc, nil
}
// generateInjectors generates the injectors for a given package.
func generateInjectors(g *gen, pkgInfo *loader.PackageInfo) (injectorFiles []*ast.File, _ []error) {
oc := newObjectCache(g.prog)
injectorFiles = make([]*ast.File, 0, len(pkgInfo.Files))
ec := new(errorCollector)
for _, f := range pkgInfo.Files {
for _, decl := range f.Decls {
fn, ok := decl.(*ast.FuncDecl)
if !ok {
continue
}
buildCall := isInjector(&pkgInfo.Info, fn)
if buildCall == nil {
continue
}
if len(injectorFiles) == 0 || injectorFiles[len(injectorFiles)-1] != f {
// This is the first injector generated for this file.
// Write a file header.
name := filepath.Base(g.prog.Fset.File(f.Pos()).Name())
g.p("// Injectors from %s:\n\n", name)
injectorFiles = append(injectorFiles, f)
}
set, errs := oc.processNewSet(pkgInfo, buildCall, "")
if len(errs) > 0 {
ec.add(notePositionAll(g.prog.Fset.Position(fn.Pos()), errs)...)
continue
}
sig := pkgInfo.ObjectOf(fn.Name).Type().(*types.Signature)
if errs := g.inject(fn.Pos(), fn.Name.Name, sig, set); len(errs) > 0 {
ec.add(errs...)
continue
}
}
}
if len(ec.errors) > 0 {
return nil, ec.errors
}
return injectorFiles, nil
}
// copyNonInjectorDecls copies any non-injector declarations from the
// given files into the generated output.
func copyNonInjectorDecls(g *gen, files []*ast.File, info *types.Info) {
for _, f := range files {
name := filepath.Base(g.prog.Fset.File(f.Pos()).Name())
first := true
for _, decl := range f.Decls {
switch decl := decl.(type) {
case *ast.FuncDecl:
if isInjector(info, decl) != nil {
continue
}
case *ast.GenDecl:
if decl.Tok == token.IMPORT {
continue
}
default:
continue
}
if first {
g.p("// %s:\n\n", name)
first = false
}
// TODO(light): Add line number at top of each declaration.
g.writeAST(info, decl)
g.p("\n\n")
}
}
}
// gen is the file-wide generator state.
type gen struct {
currPackage string
buf bytes.Buffer
imports map[string]string
values map[ast.Expr]string
prog *loader.Program // for positions and determining package names
}
func newGen(prog *loader.Program, pkg string) *gen {
return &gen{
currPackage: pkg,
imports: make(map[string]string),
values: make(map[ast.Expr]string),
prog: prog,
}
}
// frame bakes the built up source body into an unformatted Go source file.
func (g *gen) frame() []byte {
if g.buf.Len() == 0 {
return nil
}
var buf bytes.Buffer
buf.WriteString("// Code generated by Wire. DO NOT EDIT.\n\n")
buf.WriteString("//go:generate wire\n")
buf.WriteString("//+build !wireinject\n\n")
buf.WriteString("package ")
buf.WriteString(g.prog.Package(g.currPackage).Pkg.Name())
buf.WriteString("\n\n")
if len(g.imports) > 0 {
buf.WriteString("import (\n")
imps := make([]string, 0, len(g.imports))
for path := range g.imports {
imps = append(imps, path)
}
sort.Strings(imps)
for _, path := range imps {
// TODO(light): Omit the local package identifier if it matches
// the package name.
fmt.Fprintf(&buf, "\t%s %q\n", g.imports[path], path)
}
buf.WriteString(")\n\n")
}
buf.Write(g.buf.Bytes())
return buf.Bytes()
}
// inject emits the code for an injector.
func (g *gen) inject(pos token.Pos, name string, sig *types.Signature, set *ProviderSet) []error {
injectSig, err := funcOutput(sig)
if err != nil {
return []error{notePosition(g.prog.Fset.Position(pos),
fmt.Errorf("inject %s: %v", name, err))}
}
params := sig.Params()
given := make([]types.Type, params.Len())
for i := 0; i < params.Len(); i++ {
given[i] = params.At(i).Type()
}
calls, errs := solve(g.prog.Fset, injectSig.out, given, set)
if len(errs) > 0 {
return mapErrors(errs, func(e error) error {
if w, ok := e.(*wireErr); ok {
return notePosition(w.position, fmt.Errorf("inject %s: %v", name, w.error))
}
return notePosition(g.prog.Fset.Position(pos), fmt.Errorf("inject %s: %v", name, e))
})
}
type pendingVar struct {
name string
expr ast.Expr
typeInfo *types.Info
}
var pendingVars []pendingVar
ec := new(errorCollector)
for i := range calls {
c := &calls[i]
if c.hasCleanup && !injectSig.cleanup {
ts := types.TypeString(c.out, nil)
ec.add(notePosition(
g.prog.Fset.Position(pos),
fmt.Errorf("inject %s: provider for %s returns cleanup but injection does not return cleanup function", name, ts)))
}
if c.hasErr && !injectSig.err {
ts := types.TypeString(c.out, nil)
ec.add(notePosition(
g.prog.Fset.Position(pos),
fmt.Errorf("inject %s: provider for %s returns error but injection not allowed to fail", name, ts)))
}
if c.kind == valueExpr {
if err := accessibleFrom(c.valueTypeInfo, c.valueExpr, g.currPackage); err != nil {
// TODO(light): Display line number of value expression.
ts := types.TypeString(c.out, nil)
ec.add(notePosition(
g.prog.Fset.Position(pos),
fmt.Errorf("inject %s: value %s can't be used: %v", name, ts, err)))
}
if g.values[c.valueExpr] == "" {
t := c.valueTypeInfo.TypeOf(c.valueExpr)
name := disambiguate("_wire"+export(typeVariableName(t))+"Value", g.nameInFileScope)
g.values[c.valueExpr] = name
pendingVars = append(pendingVars, pendingVar{
name: name,
expr: c.valueExpr,
typeInfo: c.valueTypeInfo,
})
}
}
}
if len(ec.errors) > 0 {
return ec.errors
}
// Perform one pass to collect all imports, followed by the real pass.
injectPass(name, params, injectSig, calls, &injectorGen{
g: g,
errVar: disambiguate("err", g.nameInFileScope),
discard: true,
})
injectPass(name, params, injectSig, calls, &injectorGen{
g: g,
errVar: disambiguate("err", g.nameInFileScope),
discard: false,
})
if len(pendingVars) > 0 {
g.p("var (\n")
for _, pv := range pendingVars {
g.p("\t%s = ", pv.name)
g.writeAST(pv.typeInfo, pv.expr)
g.p("\n")
}
g.p(")\n\n")
}
return nil
}
// rewritePkgRefs rewrites any package references in an AST into references for the
// generated package.
func (g *gen) rewritePkgRefs(info *types.Info, node ast.Node) ast.Node {
start, end := node.Pos(), node.End()
node = copyAST(node)
// First, rewrite all package names. This lets us know all the
// potentially colliding identifiers.
node = astutil.Apply(node, func(c *astutil.Cursor) bool {
switch node := c.Node().(type) {
case *ast.Ident:
// This is an unqualified identifier (qualified identifiers are peeled off below).
obj := info.ObjectOf(node)
if obj == nil {
return false
}
if pkg := obj.Pkg(); pkg != nil && obj.Parent() == pkg.Scope() && pkg.Path() != g.currPackage {
// An identifier from either a dot import or read from a different package.
newPkgID := g.qualifyImport(pkg.Path())
c.Replace(&ast.SelectorExpr{
X: ast.NewIdent(newPkgID),
Sel: ast.NewIdent(node.Name),
})
return false
}
return true
case *ast.SelectorExpr:
pkgIdent, ok := node.X.(*ast.Ident)
if !ok {
return true
}
pkgName, ok := info.ObjectOf(pkgIdent).(*types.PkgName)
if !ok {
return true
}
// This is a qualified identifier. Rewrite and avoid visiting subexpressions.
newPkgID := g.qualifyImport(pkgName.Imported().Path())
c.Replace(&ast.SelectorExpr{
X: ast.NewIdent(newPkgID),
Sel: ast.NewIdent(node.Sel.Name),
})
return false
default:
return true
}
}, nil)
// Now that we have all the identifiers, rename any variables declared
// in this scope to not collide.
newNames := make(map[types.Object]string)
inNewNames := func(n string) bool {
for _, other := range newNames {
if other == n {
return true
}
}
return false
}
var scopeStack []*types.Scope
pkgScope := g.prog.Package(g.currPackage).Pkg.Scope()
node = astutil.Apply(node, func(c *astutil.Cursor) bool {
if scope := info.Scopes[c.Node()]; scope != nil {
scopeStack = append(scopeStack, scope)
}
id, ok := c.Node().(*ast.Ident)
if !ok {
return true
}
obj := info.ObjectOf(id)
if obj == nil {
// We rewrote this identifier earlier, so it does not need
// further rewriting.
return true
}
if n, ok := newNames[obj]; ok {
// We picked a new name for this symbol. Rewrite it.
c.Replace(ast.NewIdent(n))
return false
}
if par := obj.Parent(); par == nil || par == pkgScope {
// Don't rename methods, field names, or top-level identifiers.
return true
}
// Rename any symbols defined within rewritePkgRefs's node that conflict
// with any symbols in the generated file.
objName := obj.Name()
if pos := obj.Pos(); pos < start || end <= pos || !(g.nameInFileScope(objName) || inNewNames(objName)) {
return true
}
newName := disambiguate(objName, func(n string) bool {
if g.nameInFileScope(n) || inNewNames(n) {
return true
}
if len(scopeStack) > 0 {
// Avoid picking a name that conflicts with other names in the
// current scope.
_, obj := scopeStack[len(scopeStack)-1].LookupParent(n, token.NoPos)
if obj != nil {
return true
}
}
return false
})
newNames[obj] = newName
c.Replace(ast.NewIdent(newName))
return false
}, func(c *astutil.Cursor) bool {
if info.Scopes[c.Node()] != nil {
// Should be top of stack; pop it.
scopeStack = scopeStack[:len(scopeStack)-1]
}
return true
})
return node
}
// writeAST prints an AST node into the generated output, rewriting any
// package references it encounters.
func (g *gen) writeAST(info *types.Info, node ast.Node) {
node = g.rewritePkgRefs(info, node)
if err := printer.Fprint(&g.buf, g.prog.Fset, node); err != nil {
panic(err)
}
}
func (g *gen) qualifiedID(path, sym string) string {
name := g.qualifyImport(path)
if name == "" {
return sym
}
return name + "." + sym
}
func (g *gen) qualifyImport(path string) string {
if path == g.currPackage {
return ""
}
// TODO(light): This is depending on details of the current loader.
const vendorPart = "vendor/"
unvendored := path
if i := strings.LastIndex(path, vendorPart); i != -1 && (i == 0 || path[i-1] == '/') {
unvendored = path[i+len(vendorPart):]
}
if name := g.imports[unvendored]; name != "" {
return name
}
// TODO(light): Use parts of import path to disambiguate.
name := disambiguate(g.prog.Package(path).Pkg.Name(), func(n string) bool {
// Don't let an import take the "err" name. That's annoying.
return n == "err" || g.nameInFileScope(n)
})
g.imports[unvendored] = name
return name
}
func (g *gen) nameInFileScope(name string) bool {
for _, other := range g.imports {
if other == name {
return true
}
}
for _, other := range g.values {
if other == name {
return true
}
}
_, obj := g.prog.Package(g.currPackage).Pkg.Scope().LookupParent(name, token.NoPos)
return obj != nil
}
func (g *gen) qualifyPkg(pkg *types.Package) string {
return g.qualifyImport(pkg.Path())
}
func (g *gen) p(format string, args ...interface{}) {
fmt.Fprintf(&g.buf, format, args...)
}
// injectorGen is the per-injector pass generator state.
type injectorGen struct {
g *gen
paramNames []string
localNames []string
cleanupNames []string
errVar string
// discard causes ig.p and ig.writeAST to no-op. Useful to run
// generation for side-effects like filling in g.imports.
discard bool
}
// injectPass generates an injector given the output from analysis.
func injectPass(name string, params *types.Tuple, injectSig outputSignature, calls []call, ig *injectorGen) {
ig.p("func %s(", name)
for i := 0; i < params.Len(); i++ {
if i > 0 {
ig.p(", ")
}
pi := params.At(i)
a := pi.Name()
if a == "" || a == "_" {
a = unexport(typeVariableName(pi.Type()))
if a == "" {
a = "arg"
}
}
ig.paramNames = append(ig.paramNames, disambiguate(a, ig.nameInInjector))
ig.p("%s %s", ig.paramNames[i], types.TypeString(pi.Type(), ig.g.qualifyPkg))
}
outTypeString := types.TypeString(injectSig.out, ig.g.qualifyPkg)
switch {
case injectSig.cleanup && injectSig.err:
ig.p(") (%s, func(), error) {\n", outTypeString)
case injectSig.cleanup:
ig.p(") (%s, func()) {\n", outTypeString)
case injectSig.err:
ig.p(") (%s, error) {\n", outTypeString)
default:
ig.p(") %s {\n", outTypeString)
}
for i := range calls {
c := &calls[i]
lname := unexport(typeVariableName(c.out))
if lname == "" {
lname = "v"
}
lname = disambiguate(lname, ig.nameInInjector)
ig.localNames = append(ig.localNames, lname)
switch c.kind {
case structProvider:
ig.structProviderCall(lname, c)
case funcProviderCall:
ig.funcProviderCall(lname, c, injectSig)
case valueExpr:
ig.valueExpr(lname, c)
default:
panic("unknown kind")
}
}
if len(calls) == 0 {
for i := 0; i < params.Len(); i++ {
if types.Identical(injectSig.out, params.At(i).Type()) {
ig.p("\treturn %s", ig.paramNames[i])
break
}
}
} else {
ig.p("\treturn %s", ig.localNames[len(calls)-1])
}
if injectSig.cleanup {
ig.p(", func() {\n")
for i := len(ig.cleanupNames) - 1; i >= 0; i-- {
ig.p("\t\t%s()\n", ig.cleanupNames[i])
}
ig.p("\t}")
}
if injectSig.err {
ig.p(", nil")
}
ig.p("\n}\n\n")
}
func (ig *injectorGen) funcProviderCall(lname string, c *call, injectSig outputSignature) {
ig.p("\t%s", lname)
prevCleanup := len(ig.cleanupNames)
if c.hasCleanup {
cname := disambiguate("cleanup", ig.nameInInjector)
ig.cleanupNames = append(ig.cleanupNames, cname)
ig.p(", %s", cname)
}
if c.hasErr {
ig.p(", %s", ig.errVar)
}
ig.p(" := ")
ig.p("%s(", ig.g.qualifiedID(c.importPath, c.name))
for i, a := range c.args {
if i > 0 {
ig.p(", ")
}
if a < len(ig.paramNames) {
ig.p("%s", ig.paramNames[a])
} else {
ig.p("%s", ig.localNames[a-len(ig.paramNames)])
}
}
ig.p(")\n")
if c.hasErr {
ig.p("\tif %s != nil {\n", ig.errVar)
for i := prevCleanup - 1; i >= 0; i-- {
ig.p("\t\t%s()\n", ig.cleanupNames[i])
}
ig.p("\t\treturn %s", zeroValue(injectSig.out, ig.g.qualifyPkg))
if injectSig.cleanup {
ig.p(", nil")
}
// TODO(light): Give information about failing provider.
ig.p(", err\n")
ig.p("\t}\n")
}
}
func (ig *injectorGen) structProviderCall(lname string, c *call) {
ig.p("\t%s", lname)
ig.p(" := ")
if _, ok := c.out.(*types.Pointer); ok {
ig.p("&")
}
ig.p("%s{\n", ig.g.qualifiedID(c.importPath, c.name))
for i, a := range c.args {
ig.p("\t\t%s: ", c.fieldNames[i])
if a < len(ig.paramNames) {
ig.p("%s", ig.paramNames[a])
} else {
ig.p("%s", ig.localNames[a-len(ig.paramNames)])
}
ig.p(",\n")
}
ig.p("\t}\n")
}
func (ig *injectorGen) valueExpr(lname string, c *call) {
ig.p("\t%s := %s\n", lname, ig.g.values[c.valueExpr])
}
// nameInInjector reports whether name collides with any other identifier
// in the current injector.
func (ig *injectorGen) nameInInjector(name string) bool {
if name == ig.errVar {
return true
}
for _, a := range ig.paramNames {
if a == name {
return true
}
}
for _, l := range ig.localNames {
if l == name {
return true
}
}
for _, l := range ig.cleanupNames {
if l == name {
return true
}
}
return ig.g.nameInFileScope(name)
}
func (ig *injectorGen) p(format string, args ...interface{}) {
if ig.discard {
return
}
ig.g.p(format, args...)
}
func (ig *injectorGen) writeAST(info *types.Info, node ast.Node) {
node = ig.g.rewritePkgRefs(info, node)
if ig.discard {
return
}
if err := printer.Fprint(&ig.g.buf, ig.g.prog.Fset, node); err != nil {
panic(err)
}
}
// zeroValue returns the shortest expression that evaluates to the zero
// value for the given type.
func zeroValue(t types.Type, qf types.Qualifier) string {
switch u := t.Underlying().(type) {
case *types.Array, *types.Struct:
return types.TypeString(t, qf) + "{}"
case *types.Basic:
info := u.Info()
switch {
case info&types.IsBoolean != 0:
return "false"
case info&(types.IsInteger|types.IsFloat|types.IsComplex) != 0:
return "0"
case info&types.IsString != 0:
return `""`
default:
panic("unreachable")
}
case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice:
return "nil"
default:
panic("unreachable")
}
}
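// Illustrative examples of zeroValue (added for clarity; not part of the
// original generator source). Assuming a qualifier qf that preserves the
// package name:
//
//	int           -> "0"
//	string        -> `""`
//	*bytes.Buffer -> "nil"
//	a struct type -> e.g. "bytes.Buffer{}"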
// typeVariableName invents a variable name derived from the type name
// or returns the empty string if one could not be found. There are no
// guarantees about whether the name is exported or unexported: call
// export() or unexport() to convert.
func typeVariableName(t types.Type) string {
if p, ok := t.(*types.Pointer); ok {
t = p.Elem()
}
switch t := t.(type) {
case *types.Basic:
return t.Name()
case *types.Named:
// TODO(light): Include package name when appropriate.
return t.Obj().Name()
}
return ""
}
// unexport converts a name that is potentially exported to an unexported name.
func unexport(name string) string {
if name == "" {
return ""
}
r, sz := utf8.DecodeRuneInString(name)
if !unicode.IsUpper(r) {
// foo -> foo
return name
}
r2, sz2 := utf8.DecodeRuneInString(name[sz:])
if !unicode.IsUpper(r2) {
// Foo -> foo
return string(unicode.ToLower(r)) + name[sz:]
}
// UPPERWord -> upperWord
sbuf := new(strings.Builder)
sbuf.WriteRune(unicode.ToLower(r))
i := sz
r, sz = r2, sz2
for unicode.IsUpper(r) && sz > 0 {
r2, sz2 := utf8.DecodeRuneInString(name[i+sz:])
if sz2 > 0 && unicode.IsLower(r2) {
break
}
i += sz
sbuf.WriteRune(unicode.ToLower(r))
r, sz = r2, sz2
}
sbuf.WriteString(name[i:])
return sbuf.String()
}
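// Illustrative examples of unexport (added for clarity; not part of the
// original generator source):
//
//	unexport("Foo")        == "foo"
//	unexport("URL")        == "url"
//	unexport("HTTPServer") == "httpServer"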
// export converts a name that is potentially unexported to an exported name.
func export(name string) string {
if name == "" {
return ""
}
r, sz := utf8.DecodeRuneInString(name)
if unicode.IsUpper(r) {
// Foo -> Foo
return name
}
// fooBar -> FooBar
sbuf := new(strings.Builder)
sbuf.WriteRune(unicode.ToUpper(r))
sbuf.WriteString(name[sz:])
return sbuf.String()
}
// disambiguate picks a unique name, preferring name if it is already unique.
func disambiguate(name string, collides func(string) bool) string {
if !collides(name) {
return name
}
buf := []byte(name)
if len(buf) > 0 && buf[len(buf)-1] >= '0' && buf[len(buf)-1] <= '9' {
buf = append(buf, '_')
}
base := len(buf)
for n := 2; ; n++ {
buf = strconv.AppendInt(buf[:base], int64(n), 10)
sbuf := string(buf)
if !collides(sbuf) {
return sbuf
}
}
}
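// Illustrative examples of disambiguate (added for clarity; not part of the
// original generator source), assuming a collides function named "taken":
//
//	disambiguate("cleanup", taken) == "cleanup2" // when "cleanup" collides
//	disambiguate("v2", taken)      == "v2_2"     // trailing digit gets a "_" separator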
// accessibleFrom reports whether node can be copied to wantPkg without
// violating Go visibility rules.
func accessibleFrom(info *types.Info, node ast.Node, wantPkg string) error {
var unexportError error
ast.Inspect(node, func(node ast.Node) bool {
if unexportError != nil {
return false
}
ident, ok := node.(*ast.Ident)
if !ok {
return true
}
obj := info.ObjectOf(ident)
if _, ok := obj.(*types.PkgName); ok {
// Local package names are fine, since we can just reimport them.
return true
}
if pkg := obj.Pkg(); pkg != nil && !ast.IsExported(ident.Name) && pkg.Path() != wantPkg {
unexportError = fmt.Errorf("uses unexported identifier %s", obj.Name())
return false
}
return true
})
return unexportError
}
var (
errorType = types.Universe.Lookup("error").Type()
cleanupType = types.NewSignature(nil, nil, nil, false)
)
| 1 | 11,031 | While this is commonly correct, the more reliable check is to get the package's identifier from the type checker and check those for equality. The information should be there, just another matter of plumbing it. | google-go-cloud | go |
@@ -168,6 +168,9 @@ def to_full_resource_name(full_parent_name, resource_type_name):
Returns:
str: full_resource_name of the child
"""
+ # Strip out the fake composite root parent from the full resource name.
+ if full_parent_name == 'composite_root/root/':
+ return '{}/'.format(resource_type_name)
return '{}{}/'.format(full_parent_name, resource_type_name)
| 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forseti Server utilities."""
from itertools import izip
import logging
# pylint: disable=protected-access
def autoclose_stream(f):
"""Decorator to close gRPC stream.
Args:
f (func): The function to decorate
Returns:
wrapper: wrapper of the decorator
"""
def wrapper(*args):
"""Wrapper function, checks context state to close stream.
Args:
*args (list): All arguments provided to the wrapped function.
Yields:
object: Whatever the wrapped function yields to the stream.
"""
def closed(context):
"""Returns true iff the connection is closed.
Args:
context (object): the connection to check
Returns:
bool: whether the connection is closed
"""
return context._state.client == 'closed'
context = args[-1]
for result in f(*args):
yield result
if closed(context):
return
return wrapper
def logcall(f, level=logging.CRITICAL):
"""Call logging decorator.
Args:
f (func): The function to decorate
level (str): the level of logging
Returns:
wrapper: wrapper of the decorator
"""
def wrapper(*args, **kwargs):
"""Implements the log wrapper including parameters and result.
Args:
*args: All args provided to the wrapped function.
**kwargs: All kwargs provided to the wrapped function.
Returns:
object: the f execution result
"""
logging.log(level, 'enter %s(%s)', f.__name__, args)
result = f(*args, **kwargs)
logging.log(level, 'exit %s(%s) -> %s', f.__name__, args, result)
return result
return wrapper
def mutual_exclusive(lock):
""" Mutex decorator.
Args:
lock (object): The lock to lock out exclusive method
Returns:
object: decorator generator
"""
def wrap(f):
"""Decorator generator.
Args:
f (func): the function to decorate
Returns:
func: the decorated function
"""
def func(*args, **kw):
"""Decorated functionality, mutexing wrapped function.
Args:
*args: All args provided to the wrapped function
**kw: All kw provided to the wrapped function
Returns:
object: the execution results of f
"""
lock.acquire()
try:
return f(*args, **kw)
finally:
lock.release()
return func
return wrap
def oneof(*args):
"""Returns true iff one of the parameters is true.
Args:
*args: arguments to check
Returns:
bool: true iff one of the parameters is true.
"""
return len([x for x in args if x]) == 1
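# Illustrative examples (added for clarity; not part of the original module):
#   oneof(True, False, False)  # True  - exactly one truthy argument
#   oneof(True, True, False)   # False - more than one truthy argument
#   oneof(False, False)        # False - no truthy argument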
def full_to_type_name(full_resource_name):
"""Creates a type/name format from full resource name.
Args:
full_resource_name (str): the full_resource_name of the resource
Returns:
str: type_name of that resource
"""
return '/'.join(full_resource_name.split('/')[-2:])
def to_full_resource_name(full_parent_name, resource_type_name):
"""Creates a full resource name by parent full name and type name.
Args:
full_parent_name (str): the full_resource_name of the parent
        resource_type_name (str): the type_name of the child
Returns:
str: full_resource_name of the child
"""
return '{}{}/'.format(full_parent_name, resource_type_name)
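# Illustrative example (added for clarity; not part of the original module):
#   to_full_resource_name('organization/88888/', 'project/myproject')
#   # => 'organization/88888/project/myproject/'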
def to_type_name(resource_type, resource_name):
"""Creates a type/name from type and name.
Args:
resource_type (str): the resource type
resource_name (str): the resource name
Returns:
str: type_name of the resource
"""
return '{}/{}'.format(resource_type, resource_name)
def split_type_name(resource_type_name):
"""Split the type name of the resource
Args:
resource_type_name (str): the type_name of the resource
Returns:
        tuple: type and name of the resource
"""
return resource_type_name.split('/')
def resource_to_type_name(resource):
"""Creates a type/name format from a resource dbo.
Args:
        resource (object): the resource from which to get the type_name
Returns:
str: type_name of the resource
"""
return resource.type_name
def get_sql_dialect(session):
"""Return the active SqlAlchemy dialect.
Args:
session (object): the session to check for SqlAlchemy dialect
Returns:
str: name of the SqlAlchemy dialect
"""
return session.bind.dialect.name
def get_resources_from_full_name(full_name):
"""Parse resource info from full name.
Args:
full_name (str): Full name of the resource in hierarchical format.
Example of a full_name:
organization/88888/project/myproject/firewall/99999/
full_name has a trailing / that needs to be removed.
Yields:
iterator: strings of resource_type and resource_id
"""
full_name_parts = full_name.split('/')[:-1]
full_name_parts.reverse()
resource_iter = iter(full_name_parts)
for resource_id, resource_type in izip(resource_iter, resource_iter):
yield resource_type, resource_id
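# Illustrative example (added for clarity; not part of the original module):
#   list(get_resources_from_full_name(
#       'organization/88888/project/myproject/firewall/99999/'))
#   # => [('firewall', '99999'), ('project', 'myproject'),
#   #     ('organization', '88888')]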
def get_resource_id_from_type_name(type_name):
"""Returns the key from type_name.
Args:
type_name (str): Type name.
Returns:
str: Resource id.
"""
if '/' in type_name:
return type_name.split('/')[-1]
return type_name
| 1 | 33,527 | Just to clarify, should this be 'composite_root/root/' or 'composite_root/'? | forseti-security-forseti-security | py |
@@ -288,5 +288,6 @@ func (psb *PubSubBase) DeletePubSub(ctx context.Context, pubsubable duck.PubSuba
return fmt.Errorf("failed to delete PullSubscription: %w", err)
}
status.SinkURI = nil
+ status.SubscriptionID = ""
return nil
} | 1 | /*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package intevents
import (
"context"
"fmt"
"github.com/google/knative-gcp/pkg/testing/testloggingutil"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
"knative.dev/pkg/logging"
pkgreconciler "knative.dev/pkg/reconciler"
duckv1 "github.com/google/knative-gcp/pkg/apis/duck/v1"
inteventsv1 "github.com/google/knative-gcp/pkg/apis/intevents/v1"
clientset "github.com/google/knative-gcp/pkg/client/clientset/versioned"
duck "github.com/google/knative-gcp/pkg/duck/v1"
"github.com/google/knative-gcp/pkg/reconciler"
"github.com/google/knative-gcp/pkg/reconciler/intevents/resources"
)
const (
nilPubsubableReason = "NilPubsubable"
pullSubscriptionGetFailedReason = "PullSubscriptionGetFailed"
pullSubscriptionCreateFailedReason = "PullSubscriptionCreateFailed"
PullSubscriptionStatusPropagateFailedReason = "PullSubscriptionStatusPropagateFailed"
)
var falseVal = false
type PubSubBase struct {
*reconciler.Base
// For dealing with Topics and Pullsubscriptions
pubsubClient clientset.Interface
// What do we tag receive adapter as.
receiveAdapterName string
// What type of receive adapter to use.
receiveAdapterType string
}
// ReconcilePubSub reconciles Topic / PullSubscription given a PubSubSpec.
// Sets the following Conditions in the Status field appropriately:
// "TopicReady", and "PullSubscriptionReady"
// Also sets the following fields in the pubsubable.Status upon success
// TopicID, ProjectID, and SinkURI
func (psb *PubSubBase) ReconcilePubSub(ctx context.Context, pubsubable duck.PubSubable, topic, resourceGroup string) (*inteventsv1.Topic, *inteventsv1.PullSubscription, error) {
t, err := psb.reconcileTopic(ctx, pubsubable, topic)
if err != nil {
return t, nil, err
}
ps, err := psb.ReconcilePullSubscription(ctx, pubsubable, topic, resourceGroup)
if err != nil {
return t, ps, err
}
return t, ps, nil
}
func (psb *PubSubBase) reconcileTopic(ctx context.Context, pubsubable duck.PubSubable, topic string) (*inteventsv1.Topic, pkgreconciler.Event) {
if pubsubable == nil {
return nil, fmt.Errorf("nil pubsubable passed in")
}
name := pubsubable.GetObjectMeta().GetName()
args := &resources.TopicArgs{
Namespace: pubsubable.GetObjectMeta().GetNamespace(),
Name: name,
Spec: pubsubable.PubSubSpec(),
EnablePublisher: &falseVal,
Owner: pubsubable,
Topic: topic,
Labels: resources.GetLabels(psb.receiveAdapterName, name),
Annotations: pubsubable.GetObjectMeta().GetAnnotations(),
}
newTopic := resources.MakeTopic(args)
topics := psb.pubsubClient.InternalV1().Topics(newTopic.Namespace)
t, err := topics.Get(ctx, newTopic.Name, v1.GetOptions{})
if apierrs.IsNotFound(err) {
logging.FromContext(ctx).Desugar().Debug("Creating Topic", zap.Any("topic", newTopic))
t, err = topics.Create(ctx, newTopic, metav1.CreateOptions{})
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create Topic", zap.Any("topic", newTopic), zap.Error(err))
return nil, fmt.Errorf("failed to create Topic: %w", err)
}
} else if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to get Topic", zap.Error(err))
return nil, fmt.Errorf("failed to get Topic: %w", err)
// Check whether the specs differ and update the Topic if so.
} else if !equality.Semantic.DeepDerivative(newTopic.Spec, t.Spec) {
// Don't modify the informers copy.
desired := t.DeepCopy()
desired.Spec = newTopic.Spec
logging.FromContext(ctx).Desugar().Debug("Updating Topic", zap.Any("topic", desired))
t, err = topics.Update(ctx, desired, v1.UpdateOptions{})
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to update Topic", zap.Any("topic", t), zap.Error(err))
return nil, fmt.Errorf("failed to update Topic: %w", err)
}
}
status := pubsubable.PubSubStatus()
cs := pubsubable.ConditionSet()
if err := propagateTopicStatus(t, status, cs, topic); err != nil {
return t, err
}
return t, nil
}
func (psb *PubSubBase) ReconcilePullSubscription(ctx context.Context, pubsubable duck.PubSubable, topic, resourceGroup string) (*inteventsv1.PullSubscription, pkgreconciler.Event) {
if pubsubable == nil {
logging.FromContext(ctx).Desugar().Error("Nil pubsubable passed in")
return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, nilPubsubableReason, "nil pubsubable passed in")
}
namespace := pubsubable.GetObjectMeta().GetNamespace()
name := pubsubable.GetObjectMeta().GetName()
annotations := pubsubable.GetObjectMeta().GetAnnotations()
spec := pubsubable.PubSubSpec()
status := pubsubable.PubSubStatus()
cs := pubsubable.ConditionSet()
args := &resources.PullSubscriptionArgs{
Namespace: namespace,
Name: name,
Spec: spec,
Owner: pubsubable,
Topic: topic,
AdapterType: psb.receiveAdapterType,
Labels: resources.GetLabels(psb.receiveAdapterName, name),
Annotations: resources.GetAnnotations(annotations, resourceGroup),
}
if v, present := pubsubable.GetObjectMeta().GetAnnotations()[testloggingutil.LoggingE2ETestAnnotation]; present {
// This is added purely for the TestCloudLogging E2E tests, which verify that the log line
// is written if this annotation is present.
args.Annotations[testloggingutil.LoggingE2ETestAnnotation] = v
}
newPS := resources.MakePullSubscription(args)
pullSubscriptions := psb.pubsubClient.InternalV1().PullSubscriptions(namespace)
ps, err := pullSubscriptions.Get(ctx, name, v1.GetOptions{})
if err != nil {
if !apierrs.IsNotFound(err) {
logging.FromContext(ctx).Desugar().Error("Failed to get PullSubscription", zap.Error(err))
return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, pullSubscriptionGetFailedReason, "Getting PullSubscription failed with: %s", err.Error())
}
logging.FromContext(ctx).Desugar().Debug("Creating PullSubscription", zap.Any("ps", newPS))
ps, err = pullSubscriptions.Create(ctx, newPS, v1.CreateOptions{})
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create PullSubscription", zap.Any("ps", newPS), zap.Error(err))
return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, pullSubscriptionCreateFailedReason, "Creating PullSubscription failed with: %s", err.Error())
}
// Check whether the specs differ and update the PS if so.
} else if !equality.Semantic.DeepDerivative(newPS.Spec, ps.Spec) {
// Don't modify the informers copy.
desired := ps.DeepCopy()
desired.Spec = newPS.Spec
logging.FromContext(ctx).Desugar().Debug("Updating PullSubscription", zap.Any("ps", desired))
ps, err = pullSubscriptions.Update(ctx, desired, v1.UpdateOptions{})
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to update PullSubscription", zap.Any("ps", ps), zap.Error(err))
return nil, err
}
}
if err := propagatePullSubscriptionStatus(ps, status, cs); err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to propagate PullSubscription status: %s", zap.Error(err))
return ps, pkgreconciler.NewEvent(corev1.EventTypeWarning, PullSubscriptionStatusPropagateFailedReason, "Failed to propagate PullSubscription status: %s", err.Error())
}
status.SubscriptionID = ps.Status.SubscriptionID
status.SinkURI = ps.Status.SinkURI
return ps, nil
}
func propagatePullSubscriptionStatus(ps *inteventsv1.PullSubscription, status *duckv1.PubSubStatus, cs *apis.ConditionSet) error {
pc := ps.Status.GetTopLevelCondition()
if pc == nil {
status.MarkPullSubscriptionNotConfigured(cs)
return fmt.Errorf("PullSubscription %q has not yet been reconciled", ps.Name)
}
switch {
case pc.Status == corev1.ConditionUnknown:
status.MarkPullSubscriptionUnknown(cs, pc.Reason, pc.Message)
return fmt.Errorf("the status of PullSubscription %q is Unknown", ps.Name)
case pc.Status == corev1.ConditionTrue:
status.MarkPullSubscriptionReady(cs)
case pc.Status == corev1.ConditionFalse:
status.MarkPullSubscriptionFailed(cs, pc.Reason, pc.Message)
return fmt.Errorf("the status of PullSubscription %q is False", ps.Name)
default:
status.MarkPullSubscriptionUnknown(cs, "PullSubscriptionUnknown", "The status of PullSubscription is invalid: %v", pc.Status)
return fmt.Errorf("the status of PullSubscription %q is invalid: %v", ps.Name, pc.Status)
}
return nil
}
func propagateTopicStatus(t *inteventsv1.Topic, status *duckv1.PubSubStatus, cs *apis.ConditionSet, topic string) error {
tc := t.Status.GetTopLevelCondition()
if tc == nil {
status.MarkTopicNotConfigured(cs)
return fmt.Errorf("Topic %q has not yet been reconciled", t.Name)
}
switch {
case tc.Status == corev1.ConditionUnknown:
status.MarkTopicUnknown(cs, tc.Reason, tc.Message)
return fmt.Errorf("the status of Topic %q is Unknown", t.Name)
case tc.Status == corev1.ConditionTrue:
		// When the status of Topic is ConditionTrue, break here since we also need to check the ProjectID and TopicID before we mark the Topic as Ready.
break
case tc.Status == corev1.ConditionFalse:
status.MarkTopicFailed(cs, tc.Reason, tc.Message)
return fmt.Errorf("the status of Topic %q is False", t.Name)
default:
status.MarkTopicUnknown(cs, "TopicUnknown", "The status of Topic is invalid: %v", tc.Status)
return fmt.Errorf("the status of Topic %q is invalid: %v", t.Name, tc.Status)
}
if t.Status.ProjectID == "" {
status.MarkTopicFailed(cs, "TopicNotReady", "Topic %q did not expose projectid", t.Name)
return fmt.Errorf("Topic %q did not expose projectid", t.Name)
}
if t.Status.TopicID == "" {
status.MarkTopicFailed(cs, "TopicNotReady", "Topic %q did not expose topicid", t.Name)
return fmt.Errorf("Topic %q did not expose topicid", t.Name)
}
if t.Status.TopicID != topic {
status.MarkTopicFailed(cs, "TopicNotReady", "Topic %q mismatch: expected %q got %q", t.Name, topic, t.Status.TopicID)
return fmt.Errorf("Topic %q mismatch: expected %q got %q", t.Name, topic, t.Status.TopicID)
}
status.TopicID = t.Status.TopicID
status.ProjectID = t.Status.ProjectID
status.MarkTopicReady(cs)
return nil
}
func (psb *PubSubBase) DeletePubSub(ctx context.Context, pubsubable duck.PubSubable) error {
if pubsubable == nil {
return fmt.Errorf("nil pubsubable passed in")
}
namespace := pubsubable.GetObjectMeta().GetNamespace()
name := pubsubable.GetObjectMeta().GetName()
status := pubsubable.PubSubStatus()
cs := pubsubable.ConditionSet()
// Delete the topic
err := psb.pubsubClient.InternalV1().Topics(namespace).Delete(ctx, name, v1.DeleteOptions{})
if err != nil && !apierrs.IsNotFound(err) {
logging.FromContext(ctx).Desugar().Error("Failed to delete Topic", zap.String("name", name), zap.Error(err))
status.MarkTopicUnknown(cs, "TopicDeleteFailed", "Failed to delete Topic: %s", err.Error())
return fmt.Errorf("failed to delete topic: %w", err)
}
status.TopicID = ""
status.ProjectID = ""
// Delete the pullsubscription
err = psb.pubsubClient.InternalV1().PullSubscriptions(namespace).Delete(ctx, name, v1.DeleteOptions{})
if err != nil && !apierrs.IsNotFound(err) {
logging.FromContext(ctx).Desugar().Error("Failed to delete PullSubscription", zap.String("name", name), zap.Error(err))
status.MarkPullSubscriptionUnknown(cs, "PullSubscriptionDeleteFailed", "Failed to delete PullSubscription: %s", err.Error())
return fmt.Errorf("failed to delete PullSubscription: %w", err)
}
status.SinkURI = nil
return nil
}
| 1 | 20,108 | Any reason not to move it to after line 281? These are all pubsub related fields IIUC. | google-knative-gcp | go |
@@ -3,7 +3,7 @@ import { both, gt } from 'ramda';
import isNumber from './isNumber';
/**
- * Checks if value is a negative `Number` primitive or object.
+ * Checks if value is a negative `Number` primitive or object. Zero is not considered negative.
*
* @func isNegative
* @memberOf RA | 1 | import { both, gt } from 'ramda';
import isNumber from './isNumber';
/**
* Checks if value is a negative `Number` primitive or object.
*
* @func isNegative
* @memberOf RA
* @since {@link https://char0n.github.io/ramda-adjunct/1.15.0|v1.15.0}
* @category Type
* @sig * -> Boolean
* @param {*} val The value to test
* @return {boolean}
* @see {@link RA.isPositive|isPositive}
* @example
*
* RA.isNegative(-1); // => true
* RA.isNegative(Number.MIN_VALUE); // => false
* RA.isNegative(+Infinity); // => false
* RA.isNegative(NaN); // => false
* RA.isNegative('5'); // => false
*/
const isNegative = both(isNumber, gt(0));
export default isNegative;
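// Note (added for clarity; not part of the original module): Ramda's `gt` is
// curried, so gt(0) acts as a predicate equivalent to (val) => 0 > val.
// Combined with isNumber via both(), isNegative(0) and isNegative(-0) are both
// false, which matches the "zero is not considered negative" wording added to
// the docs in this change.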
| 1 | 5,529 | Checks if value is a negative `Number` primitive or object. Zero is considered neither positive nor negative. | char0n-ramda-adjunct | js |
@@ -285,6 +285,11 @@ func (h Handler) buildEnv(r *http.Request, rule Rule, fpath string) (map[string]
// Retrieve name of remote user that was set by some downstream middleware such as basicauth.
remoteUser, _ := r.Context().Value(httpserver.RemoteUserCtxKey).(string)
+ requestScheme := "http"
+ if r.TLS != nil {
+ requestScheme = "https"
+ }
+
// Some variables are unused but cleared explicitly to prevent
// the parent environment from interfering.
env = map[string]string{ | 1 | // Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fastcgi has middleware that acts as a FastCGI client. Requests
// that get forwarded to FastCGI stop the middleware execution chain.
// The most common use for this package is to serve PHP websites via php-fpm.
package fastcgi
import (
"context"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync/atomic"
"time"
"crypto/tls"
"github.com/mholt/caddy"
"github.com/mholt/caddy/caddyhttp/httpserver"
"github.com/mholt/caddy/caddytls"
)
// Handler is a middleware type that can handle requests as a FastCGI client.
type Handler struct {
Next httpserver.Handler
Rules []Rule
Root string
FileSys http.FileSystem
// These are sent to CGI scripts in env variables
SoftwareName string
SoftwareVersion string
ServerName string
ServerPort string
}
// ServeHTTP satisfies the httpserver.Handler interface.
func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
for _, rule := range h.Rules {
// First requirement: Base path must match request path. If it doesn't,
// we check to make sure the leading slash is not missing, and if so,
// we check again with it prepended. This is in case people forget
// a leading slash when performing rewrites, and we don't want to expose
// the contents of the (likely PHP) script. See issue #1645.
hpath := httpserver.Path(r.URL.Path)
if !hpath.Matches(rule.Path) {
if strings.HasPrefix(string(hpath), "/") {
// this is a normal-looking path, and it doesn't match; try next rule
continue
}
hpath = httpserver.Path("/" + string(hpath)) // prepend leading slash
if !hpath.Matches(rule.Path) {
// even after fixing the request path, it still doesn't match; try next rule
continue
}
}
// The path must also be allowed (not ignored).
if !rule.AllowedPath(r.URL.Path) {
continue
}
// In addition to matching the path, a request must meet some
// other criteria before being proxied as FastCGI. For example,
// we probably want to exclude static assets (CSS, JS, images...)
// but we also want to be flexible for the script we proxy to.
fpath := r.URL.Path
if idx, ok := httpserver.IndexFile(h.FileSys, fpath, rule.IndexFiles); ok {
fpath = idx
// Index file present.
// If request path cannot be split, return error.
if !rule.canSplit(fpath) {
return http.StatusInternalServerError, ErrIndexMissingSplit
}
} else {
// No index file present.
// If request path cannot be split, ignore request.
if !rule.canSplit(fpath) {
continue
}
}
// These criteria work well in this order for PHP sites
if !h.exists(fpath) || fpath[len(fpath)-1] == '/' || strings.HasSuffix(fpath, rule.Ext) {
// Create environment for CGI script
env, err := h.buildEnv(r, rule, fpath)
if err != nil {
return http.StatusInternalServerError, err
}
// Connect to FastCGI gateway
address, err := rule.Address()
if err != nil {
return http.StatusBadGateway, err
}
network, address := parseAddress(address)
ctx := context.Background()
if rule.ConnectTimeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, rule.ConnectTimeout)
defer cancel()
}
fcgiBackend, err := DialContext(ctx, network, address)
if err != nil {
return http.StatusBadGateway, err
}
defer fcgiBackend.Close()
// read/write timeouts
if err := fcgiBackend.SetReadTimeout(rule.ReadTimeout); err != nil {
return http.StatusInternalServerError, err
}
if err := fcgiBackend.SetSendTimeout(rule.SendTimeout); err != nil {
return http.StatusInternalServerError, err
}
var resp *http.Response
var contentLength int64
// if ContentLength is already set
if r.ContentLength > 0 {
contentLength = r.ContentLength
} else {
contentLength, _ = strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64)
}
switch r.Method {
case "HEAD":
resp, err = fcgiBackend.Head(env)
case "GET":
resp, err = fcgiBackend.Get(env, r.Body, contentLength)
case "OPTIONS":
resp, err = fcgiBackend.Options(env)
default:
resp, err = fcgiBackend.Post(env, r.Method, r.Header.Get("Content-Type"), r.Body, contentLength)
}
if resp != nil && resp.Body != nil {
defer resp.Body.Close()
}
if err != nil {
if err, ok := err.(net.Error); ok && err.Timeout() {
return http.StatusGatewayTimeout, err
} else if err != io.EOF {
return http.StatusBadGateway, err
}
}
// Write response header
writeHeader(w, resp)
// Write the response body
_, err = io.Copy(w, resp.Body)
if err != nil {
return http.StatusBadGateway, err
}
// Log any stderr output from upstream
if fcgiBackend.stderr.Len() != 0 {
// Remove trailing newline, error logger already does this.
err = LogError(strings.TrimSuffix(fcgiBackend.stderr.String(), "\n"))
}
// Normally we would return the status code if it is an error status (>= 400),
// however, upstream FastCGI apps don't know about our contract and have
// probably already written an error page. So we just return 0, indicating
// that the response body is already written. However, we do return any
// error value so it can be logged.
// Note that the proxy middleware works the same way, returning status=0.
return 0, err
}
}
return h.Next.ServeHTTP(w, r)
}
// parseAddress returns the network and address of fcgiAddress.
// The first string is the network, "tcp" or "unix", implied from the scheme and address.
// The second string is fcgiAddress, with scheme prefixes removed.
// The two returned strings can be used as parameters to the Dial() function.
func parseAddress(fcgiAddress string) (string, string) {
// check if address has tcp scheme explicitly set
if strings.HasPrefix(fcgiAddress, "tcp://") {
return "tcp", fcgiAddress[len("tcp://"):]
}
// check if address has fastcgi scheme explicitly set
if strings.HasPrefix(fcgiAddress, "fastcgi://") {
return "tcp", fcgiAddress[len("fastcgi://"):]
}
// check if unix socket
if trim := strings.HasPrefix(fcgiAddress, "unix"); strings.HasPrefix(fcgiAddress, "/") || trim {
if trim {
return "unix", fcgiAddress[len("unix:"):]
}
return "unix", fcgiAddress
}
// default case, a plain tcp address with no scheme
return "tcp", fcgiAddress
}
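// Illustrative examples of parseAddress (added for clarity; not part of the
// original middleware source):
//
//	parseAddress("localhost:9000")          -> ("tcp", "localhost:9000")
//	parseAddress("tcp://127.0.0.1:9000")    -> ("tcp", "127.0.0.1:9000")
//	parseAddress("fastcgi://10.0.0.1:9000") -> ("tcp", "10.0.0.1:9000")
//	parseAddress("unix:/var/run/php.sock")  -> ("unix", "/var/run/php.sock")
//	parseAddress("/var/run/php.sock")       -> ("unix", "/var/run/php.sock")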
func writeHeader(w http.ResponseWriter, r *http.Response) {
for key, vals := range r.Header {
for _, val := range vals {
w.Header().Add(key, val)
}
}
w.WriteHeader(r.StatusCode)
}
func (h Handler) exists(path string) bool {
if _, err := os.Stat(h.Root + path); err == nil {
return true
}
return false
}
// buildEnv returns a set of CGI environment variables for the request.
func (h Handler) buildEnv(r *http.Request, rule Rule, fpath string) (map[string]string, error) {
var env map[string]string
// Separate remote IP and port; more lenient than net.SplitHostPort
var ip, port string
if idx := strings.LastIndex(r.RemoteAddr, ":"); idx > -1 {
ip = r.RemoteAddr[:idx]
port = r.RemoteAddr[idx+1:]
} else {
ip = r.RemoteAddr
}
// Remove [] from IPv6 addresses
ip = strings.Replace(ip, "[", "", 1)
ip = strings.Replace(ip, "]", "", 1)
// Split path in preparation for env variables.
// Previous rule.canSplit checks ensure this can never be -1.
splitPos := rule.splitPos(fpath)
// Request has the extension; path was split successfully
docURI := fpath[:splitPos+len(rule.SplitPath)]
pathInfo := fpath[splitPos+len(rule.SplitPath):]
scriptName := fpath
// Strip PATH_INFO from SCRIPT_NAME
scriptName = strings.TrimSuffix(scriptName, pathInfo)
// SCRIPT_FILENAME is the absolute path of SCRIPT_NAME
scriptFilename := filepath.Join(rule.Root, scriptName)
// Add vhost path prefix to scriptName. Otherwise, some PHP software will
// have difficulty discovering its URL.
pathPrefix, _ := r.Context().Value(caddy.CtxKey("path_prefix")).(string)
scriptName = path.Join(pathPrefix, scriptName)
// Get the request URI from context. The context stores the original URI in case
// it was changed by a middleware such as rewrite. By default, we pass the
// original URI in as the value of REQUEST_URI (the user can overwrite this
// if desired). Most PHP apps seem to want the original URI. Besides, this is
// how nginx defaults: http://stackoverflow.com/a/12485156/1048862
reqURL, _ := r.Context().Value(httpserver.OriginalURLCtxKey).(url.URL)
// Retrieve name of remote user that was set by some downstream middleware such as basicauth.
remoteUser, _ := r.Context().Value(httpserver.RemoteUserCtxKey).(string)
// Some variables are unused but cleared explicitly to prevent
// the parent environment from interfering.
env = map[string]string{
// Variables defined in CGI 1.1 spec
"AUTH_TYPE": "", // Not used
"CONTENT_LENGTH": r.Header.Get("Content-Length"),
"CONTENT_TYPE": r.Header.Get("Content-Type"),
"GATEWAY_INTERFACE": "CGI/1.1",
"PATH_INFO": pathInfo,
"QUERY_STRING": r.URL.RawQuery,
"REMOTE_ADDR": ip,
"REMOTE_HOST": ip, // For speed, remote host lookups disabled
"REMOTE_PORT": port,
"REMOTE_IDENT": "", // Not used
"REMOTE_USER": remoteUser,
"REQUEST_METHOD": r.Method,
"SERVER_NAME": h.ServerName,
"SERVER_PORT": h.ServerPort,
"SERVER_PROTOCOL": r.Proto,
"SERVER_SOFTWARE": h.SoftwareName + "/" + h.SoftwareVersion,
// Other variables
"DOCUMENT_ROOT": rule.Root,
"DOCUMENT_URI": docURI,
"HTTP_HOST": r.Host, // added here, since not always part of headers
"REQUEST_URI": reqURL.RequestURI(),
"SCRIPT_FILENAME": scriptFilename,
"SCRIPT_NAME": scriptName,
}
// compliance with the CGI specification requires that
// PATH_TRANSLATED should only exist if PATH_INFO is defined.
// Info: https://www.ietf.org/rfc/rfc3875 Page 14
if env["PATH_INFO"] != "" {
env["PATH_TRANSLATED"] = filepath.Join(rule.Root, pathInfo) // Info: http://www.oreilly.com/openbook/cgi/ch02_04.html
}
// Some web apps rely on knowing HTTPS or not
if r.TLS != nil {
env["HTTPS"] = "on"
// and pass the protocol details in a manner compatible with apache's mod_ssl
// (which is why they have a SSL_ prefix and not TLS_).
v, ok := tlsProtocolStringToMap[r.TLS.Version]
if ok {
env["SSL_PROTOCOL"] = v
}
// and pass the cipher suite in a manner compatible with apache's mod_ssl
for k, v := range caddytls.SupportedCiphersMap {
if v == r.TLS.CipherSuite {
env["SSL_CIPHER"] = k
break
}
}
}
// Add env variables from config (with support for placeholders in values)
replacer := httpserver.NewReplacer(r, nil, "")
for _, envVar := range rule.EnvVars {
env[envVar[0]] = replacer.Replace(envVar[1])
}
// Add all HTTP headers to env variables
for field, val := range r.Header {
header := strings.ToUpper(field)
header = headerNameReplacer.Replace(header)
env["HTTP_"+header] = strings.Join(val, ", ")
}
return env, nil
}
// Rule represents a FastCGI handling rule.
// It is parsed from the fastcgi directive in the Caddyfile, see setup.go.
type Rule struct {
// The base path to match. Required.
Path string
// upstream load balancer
balancer
// Always process files with this extension with fastcgi.
Ext string
// Use this directory as the fastcgi root directory. Defaults to the root
// directory of the parent virtual host.
Root string
// The path in the URL will be split into two, with the first piece ending
// with the value of SplitPath. The first piece will be assumed as the
// actual resource (CGI script) name, and the second piece will be set to
// PATH_INFO for the CGI script to use.
SplitPath string
// If the URL ends with '/' (which indicates a directory), these index
// files will be tried instead.
IndexFiles []string
// Environment Variables
EnvVars [][2]string
// Ignored paths
IgnoredSubPaths []string
// The duration used to set a deadline when connecting to an upstream.
ConnectTimeout time.Duration
// The duration used to set a deadline when reading from the FastCGI server.
ReadTimeout time.Duration
// The duration used to set a deadline when sending to the FastCGI server.
SendTimeout time.Duration
}
// balancer is a fastcgi upstream load balancer.
type balancer interface {
// Address picks an upstream address from the
// underlying load balancer.
Address() (string, error)
}
// roundRobin is a round robin balancer for fastcgi upstreams.
type roundRobin struct {
// Known Go bug: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
// must be first field for 64 bit alignment
// on x86 and arm.
index int64
addresses []string
}
func (r *roundRobin) Address() (string, error) {
index := atomic.AddInt64(&r.index, 1) % int64(len(r.addresses))
return r.addresses[index], nil
}
// srvResolver is a private interface used to abstract
// the DNS resolver. It is mainly used to facilitate testing.
type srvResolver interface {
LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error)
}
// srv is a service locator for fastcgi upstreams
type srv struct {
resolver srvResolver
service string
}
// Address looks up the service and returns the address:port
// from first result in resolved list.
// No explicit balancing is required because net.LookupSRV
// sorts the results by priority and randomizes within priority.
func (s *srv) Address() (string, error) {
_, addrs, err := s.resolver.LookupSRV(context.Background(), "", "", s.service)
if err != nil {
return "", err
}
return fmt.Sprintf("%s:%d", strings.TrimRight(addrs[0].Target, "."), addrs[0].Port), nil
}
// canSplit checks if path can split into two based on rule.SplitPath.
func (r Rule) canSplit(path string) bool {
return r.splitPos(path) >= 0
}
// splitPos returns the index where path should be split
// based on rule.SplitPath.
func (r Rule) splitPos(path string) int {
if httpserver.CaseSensitivePath {
return strings.Index(path, r.SplitPath)
}
return strings.Index(strings.ToLower(path), strings.ToLower(r.SplitPath))
}
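// Illustrative example (added for clarity; not part of the original source):
// with SplitPath = ".php", splitPos("/index.php/foo/bar") returns the index of
// ".php", so buildEnv above splits the request into SCRIPT_NAME "/index.php"
// and PATH_INFO "/foo/bar".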
// AllowedPath checks if requestPath is not an ignored path.
func (r Rule) AllowedPath(requestPath string) bool {
for _, ignoredSubPath := range r.IgnoredSubPaths {
if httpserver.Path(path.Clean(requestPath)).Matches(path.Join(r.Path, ignoredSubPath)) {
return false
}
}
return true
}
var (
headerNameReplacer = strings.NewReplacer(" ", "_", "-", "_")
// ErrIndexMissingSplit describes an index configuration error.
ErrIndexMissingSplit = errors.New("configured index file(s) must include split value")
)
// LogError is a non fatal error that allows requests to go through.
type LogError string
// Error satisfies error interface.
func (l LogError) Error() string {
return string(l)
}
// Map of supported protocols to Apache ssl_mod format
// Note that these are slightly different from SupportedProtocols in caddytls/config.go's
var tlsProtocolStringToMap = map[uint16]string{
tls.VersionTLS10: "TLSv1",
tls.VersionTLS11: "TLSv1.1",
tls.VersionTLS12: "TLSv1.2",
}
| 1 | 12,316 | Just a nit, but remove the empty line here. :) | caddyserver-caddy | go |
@@ -385,6 +385,16 @@ func (p *Package) addFiles(cu *apb.CompilationUnit, root, base string, names []s
Corpus: p.ext.DefaultCorpus,
Path: trimmed,
}
+
+ if p.ext.Rules != nil {
+ v2, ok := p.ext.Rules.Apply(trimmed)
+ if ok {
+ vn.Corpus = v2.Corpus
+ vn.Root = v2.Root
+ vn.Path = v2.Path
+ }
+ }
+
if vn.Corpus == "" {
// If no default corpus is specified, use the package's corpus for each of
// its files. The package corpus is based on the rules in | 1 | /*
* Copyright 2015 The Kythe Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package golang produces Kythe compilation units for each Go import path
// specified. Compilations are extracted incrementally, so that partial
// results are available to the caller.
//
// Usage:
// var c golang.Extractor
// if _, err := c.Locate("fmt"); err != nil {
// log.Fatalf(`Unable to locate package "fmt": %v`, err)
// }
// c.Extract()
// for _, pkg := range c.Packages {
// if pkg.Err != nil {
// log.Printf("Error extracting %q: %v", pkg.Path, pkg.Err)
// } else {
// writeOutput(pkg)
// }
// }
//
package golang // import "kythe.io/kythe/go/extractors/golang"
import (
"context"
"fmt"
"go/build"
"log"
"os"
"path/filepath"
"strings"
"kythe.io/kythe/go/extractors/govname"
"kythe.io/kythe/go/platform/analysis"
"kythe.io/kythe/go/platform/kindex"
"kythe.io/kythe/go/platform/vfs"
"kythe.io/kythe/go/util/ptypes"
"bitbucket.org/creachadair/stringset"
apb "kythe.io/kythe/proto/analysis_go_proto"
gopb "kythe.io/kythe/proto/go_go_proto"
spb "kythe.io/kythe/proto/storage_go_proto"
)
var workingDir string
func init() {
if wd, err := os.Getwd(); err == nil {
workingDir = wd
}
}
// PackageVNameOptions re-exports govname.PackageVNameOptions.
type PackageVNameOptions = govname.PackageVNameOptions
// An Extractor contains the state needed to extract Go compilations from build
// information. The zero value is ready for use with default settings.
type Extractor struct {
// The build configuration to use for extraction.
BuildContext build.Context
// The packages that have been extracted so far (initially empty).
Packages []*Package
// The configuration for constructing VNames for packages.
PackageVNameOptions
// An alternative installation path for compiled packages. If this is set,
// and a compiled package cannot be found in the normal location, the
// extractor will try in this location.
AltInstallPath string
// Extra file paths to include in each compilation record.
ExtraFiles []string
// A function to convert a directory path to an import path. If nil, the
// path is made relative to the first matching element of the build
// context's GOROOT or GOPATH or the current working directory.
DirToImport func(path string) (string, error)
pmap map[string]*build.Package // Map of import path to build package
}
// addPackage imports the specified package, if it has not already been
// imported, and returns its package value.
func (e *Extractor) addPackage(importPath, localPath string) (*build.Package, error) {
if bp := e.pmap[importPath]; bp != nil {
return bp, nil
}
bp, err := e.BuildContext.Import(importPath, localPath, build.AllowBinary)
if err != nil {
return nil, err
}
e.mapPackage(importPath, bp)
return bp, nil
}
func (e *Extractor) mapPackage(importPath string, bp *build.Package) {
if e.pmap == nil {
e.pmap = map[string]*build.Package{importPath: bp}
} else {
e.pmap[importPath] = bp
}
}
// findPackage returns the first *Package value in Packages having the given
// import path, or nil if none is found.
func (e *Extractor) findPackage(importPath string) *Package {
for _, pkg := range e.Packages {
if pkg.Path == importPath {
return pkg
}
}
return nil
}
// vnameFor returns a vname for the specified package.
func (e *Extractor) vnameFor(bp *build.Package) *spb.VName {
v := govname.ForPackage(bp, &e.PackageVNameOptions)
v.Signature = "" // not useful in this context
return v
}
// dirToImport converts a directory name to an import path, if possible.
func (e *Extractor) dirToImport(dir string) (string, error) {
if conv := e.DirToImport; conv != nil {
return conv(dir)
}
for _, path := range e.BuildContext.SrcDirs() {
if rel, err := filepath.Rel(path, dir); err == nil {
return rel, nil
}
}
if rel, err := filepath.Rel(workingDir, dir); err == nil {
return rel, nil
}
return dir, nil
}
// Locate attempts to resolve and locate the specified import path in the build
// context. If a package has already been located, its existing *Package is
// returned. Otherwise, a new *Package value is returned and appended to the
// Packages field.
//
// Note: multiple packages may be resolved for "/..." import paths
func (e *Extractor) Locate(importPath string) ([]*Package, error) {
listedPackages, listErr := e.listPackages(importPath)
var pkgs []*Package
for _, pkg := range listedPackages {
if pkg.ForTest != "" || strings.HasSuffix(pkg.ImportPath, ".test") {
// ignore constructed test packages
continue
} else if pkg.Error != nil {
return nil, pkg.Error
}
importPath := pkg.ImportPath
p := e.findPackage(importPath)
if p == nil {
p = &Package{
ext: e,
Path: importPath,
DepOnly: pkg.DepOnly,
BuildPackage: pkg.buildPackage(),
}
e.Packages = append(e.Packages, p)
e.mapPackage(importPath, p.BuildPackage)
}
if !pkg.DepOnly {
pkgs = append(pkgs, p)
}
}
return pkgs, listErr
}
// ImportDir attempts to import the Go package located in the given directory.
// An import path is inferred from the directory path.
func (e *Extractor) ImportDir(dir string) (*Package, error) {
clean := filepath.Clean(dir)
importPath, err := e.dirToImport(clean)
if err != nil {
return nil, err
}
if pkg := e.findPackage(importPath); pkg != nil {
return pkg, nil
}
bp, err := e.BuildContext.ImportDir(clean, build.AllowBinary)
if err != nil {
return nil, err
}
bp.ImportPath = importPath
e.mapPackage(importPath, bp)
pkg := &Package{
ext: e,
Path: importPath,
BuildPackage: bp,
}
e.Packages = append(e.Packages, pkg)
return pkg, nil
}
// Extract invokes the Extract method of each package in the Packages list, and
// updates its Err field with the result. If there were errors in extraction,
// one of them is returned.
func (e *Extractor) Extract() error {
var err error
for _, pkg := range e.Packages {
if pkg.DepOnly {
continue
}
pkg.Err = pkg.Extract()
if pkg.Err != nil && err == nil {
err = pkg.Err
}
}
return err
}
// Package represents a single Go package extracted from local files.
type Package struct {
ext *Extractor // pointer back to the extractor that generated this package
seen stringset.Set // input files already added to this package
CorpusRoot string // Corpus package root path
Path string // Import or directory path
DepOnly bool // Whether the package is only seen as a dependency
Err error // Error discovered during processing
BuildPackage *build.Package // Package info from the go/build library
VName *spb.VName // The package's Kythe vname
Units []*apb.CompilationUnit // Compilations generated from Package
}
// Extract populates the Units field of p, and reports an error if any occurred.
//
// After this method returns successfully, the require inputs for each of the
// Units are partially resolved, meaning we know their filesystem paths but not
// their contents. The filesystem paths are resolved to contents and digests
// by the Store method.
func (p *Package) Extract() error {
p.VName = p.ext.vnameFor(p.BuildPackage)
if r, err := govname.RepoRoot(p.Path); err == nil {
p.CorpusRoot = r.Root
} else {
p.CorpusRoot = p.VName.GetCorpus()
}
cu := &apb.CompilationUnit{
VName: p.VName,
Argument: []string{"go", "build"},
}
bc := p.ext.BuildContext
if info, err := ptypes.MarshalAny(&gopb.GoDetails{
Gopath: bc.GOPATH,
Goos: bc.GOOS,
Goarch: bc.GOARCH,
Compiler: bc.Compiler,
BuildTags: bc.BuildTags,
CgoEnabled: bc.CgoEnabled,
}); err == nil {
cu.Details = append(cu.Details, info)
}
if govname.ImportPath(cu.VName, bc.GOROOT) != p.Path {
// Add GoPackageInfo if constructed VName differs from actual ImportPath.
if info, err := ptypes.MarshalAny(&gopb.GoPackageInfo{
ImportPath: p.Path,
}); err == nil {
cu.Details = append(cu.Details, info)
} else {
log.Printf("WARNING: failed to marshal GoPackageInfo for CompilationUnit: %v", err)
}
}
// Add required inputs from this package (source files of various kinds).
bp := p.BuildPackage
srcBase := bp.Dir
p.addSource(cu, bp.Root, srcBase, bp.GoFiles)
p.addFiles(cu, bp.Root, srcBase, bp.CgoFiles)
p.addFiles(cu, bp.Root, srcBase, bp.CFiles)
p.addFiles(cu, bp.Root, srcBase, bp.CXXFiles)
p.addFiles(cu, bp.Root, srcBase, bp.HFiles)
p.addSource(cu, bp.Root, srcBase, bp.TestGoFiles)
// Add extra inputs that may be specified by the extractor.
p.addFiles(cu, filepath.Dir(bp.SrcRoot), "", p.ext.ExtraFiles)
// TODO(fromberger): Treat tests that are not in the same package as a
// separate compilation, e.g.,
// p.addSource(cu, bp.Root, srcBase, bp.XTestGoFiles)
// missing = append(missing, p.addDeps(cu, bp.XTestImports, bp.Dir)...)
// Add the outputs of all the dependencies as required inputs.
//
// TODO(fromberger): Consider making a transitive option, to flatten out
// the source requirements for tools like the oracle.
missing := p.addDeps(cu, bp.Imports, bp.Dir)
missing = append(missing, p.addDeps(cu, bp.TestImports, bp.Dir)...)
// Add command-line arguments.
// TODO(fromberger): Figure out whether we should emit separate
// compilations for cgo actions.
p.addFlag(cu, "-compiler", bc.Compiler)
if t := bp.AllTags; len(t) > 0 {
p.addFlag(cu, "-tags", strings.Join(t, " "))
}
cu.Argument = append(cu.Argument, bp.ImportPath)
p.Units = append(p.Units, cu)
if len(missing) != 0 {
cu.HasCompileErrors = true
return &MissingError{p.Path, missing}
}
return nil
}
// mapFetcher implements analysis.Fetcher by dispatching to a preloaded map
// from digests to contents.
type mapFetcher map[string][]byte
// Fetch implements the analysis.Fetcher interface. The path argument is ignored.
func (m mapFetcher) Fetch(_, digest string) ([]byte, error) {
if data, ok := m[digest]; ok {
return data, nil
}
return nil, os.ErrNotExist
}
// EachUnit calls f with a compilation record for each unit in p. If f reports
// an error, that error is returned by EachUnit.
func (p *Package) EachUnit(ctx context.Context, f func(cu *apb.CompilationUnit, fetcher analysis.Fetcher) error) error {
fetcher := make(mapFetcher)
for _, cu := range p.Units {
// Ensure all the file contents are loaded, and update the digests.
for _, ri := range cu.RequiredInput {
if !strings.Contains(ri.Info.Digest, "/") {
continue // skip those that are already complete
}
rc, err := vfs.Open(ctx, ri.Info.Digest)
if err != nil {
return fmt.Errorf("opening input: %v", err)
}
fd, err := kindex.FileData(ri.Info.Path, rc)
rc.Close()
if err != nil {
return fmt.Errorf("reading input: %v", err)
}
fetcher[fd.Info.Digest] = fd.Content
ri.Info.Digest = fd.Info.Digest
}
if err := f(cu, fetcher); err != nil {
return err
}
}
return nil
}
// addFiles adds a required input to cu for each file whose basename or path is
// given in names. If base != "", it is prejoined to each name.
// The path of the input will have root/ trimmed from the beginning.
// The digest will be the complete path as written -- this will be replaced
// with the content digest in the fetcher.
func (p *Package) addFiles(cu *apb.CompilationUnit, root, base string, names []string) {
for _, name := range names {
path := name
if base != "" {
path = filepath.Join(base, name)
}
trimmed := strings.TrimPrefix(path, root+"/")
vn := &spb.VName{
Corpus: p.ext.DefaultCorpus,
Path: trimmed,
}
if vn.Corpus == "" {
// If no default corpus is specified, use the package's corpus for each of
// its files. The package corpus is based on the rules in
// kythe/go/extractors/govname and is usually the package's
// repository root (e.g. github.com/golang/protobuf).
vn.Corpus = p.VName.Corpus
components := strings.SplitN(vn.Path, string(filepath.Separator), 2)
vn.Path = strings.TrimPrefix(components[1], p.CorpusRoot+"/")
if components[0] != "src" {
vn.Root = components[0]
}
}
cu.RequiredInput = append(cu.RequiredInput, &apb.CompilationUnit_FileInput{
VName: vn,
Info: &apb.FileInfo{
Path: trimmed,
Digest: path, // provisional, until the file is loaded
},
})
}
}
// addSource acts as addFiles, and in addition marks each trimmed path as a
// source input for the compilation.
func (p *Package) addSource(cu *apb.CompilationUnit, root, base string, names []string) {
p.addFiles(cu, root, base, names)
for _, in := range cu.RequiredInput[len(cu.RequiredInput)-len(names):] {
cu.SourceFile = append(cu.SourceFile, in.Info.Path)
}
}
// addInput acts as addFiles for the output of a package.
func (p *Package) addInput(cu *apb.CompilationUnit, bp *build.Package) {
obj := bp.PkgObj
if !p.seen.Contains(obj) {
p.seen.Add(obj)
p.addFiles(cu, bp.Root, "", []string{obj})
// Populate the vname for the input based on the corpus of the package.
fi := cu.RequiredInput[len(cu.RequiredInput)-1]
fi.VName = p.ext.vnameFor(bp)
if govname.ImportPath(fi.VName, p.ext.BuildContext.GOROOT) != bp.ImportPath {
// Add GoPackageInfo if constructed VName differs from actual ImportPath.
if info, err := ptypes.MarshalAny(&gopb.GoPackageInfo{
ImportPath: bp.ImportPath,
}); err == nil {
fi.Details = append(fi.Details, info)
} else {
log.Printf("WARNING: failed to marshal GoPackageInfo for input: %v", err)
}
}
}
}
// addFlag adds a flag and its arguments to the command line, if len(values) != 0.
func (*Package) addFlag(cu *apb.CompilationUnit, name string, values ...string) {
if len(values) != 0 {
cu.Argument = append(cu.Argument, name)
cu.Argument = append(cu.Argument, values...)
}
}
// addDeps adds required inputs for the import paths given, returning the paths
// of any packages that could not be imported successfully.
func (p *Package) addDeps(cu *apb.CompilationUnit, importPaths []string, localPath string) []string {
var missing []string
for _, ip := range importPaths {
if ip == "unsafe" {
// package unsafe is intrinsic; nothing to do
} else if dep, err := p.ext.addPackage(ip, localPath); err != nil || dep.PkgObj == "" {
// Package was either literally missing or could not be built properly.
// Note: Locate could have added a dependency package that could not be
// built as part of its earlier analysis.
missing = append(missing, ip)
} else {
p.addInput(cu, dep)
}
}
return missing
}
// MissingError is the concrete type of errors about missing dependencies.
type MissingError struct {
Path string // The import path of the incomplete package
Missing []string // The import paths of the missing dependencies
}
func (m *MissingError) Error() string {
return fmt.Sprintf("package %q is missing %d imports (%s)",
m.Path, len(m.Missing), strings.Join(m.Missing, ", "))
}
| 1 | 11,506 | A `GoPackageInfo` details message should be added to the `CompilationUnit_FileInput` to ensure the import path remains the same. | kythe-kythe | go |
@@ -1,10 +1,10 @@
package com.fsck.k9.activity;
-import android.app.ListActivity;
import android.os.Bundle;
import android.view.KeyEvent;
import android.view.MotionEvent;
import android.widget.AdapterView;
+import android.widget.ListAdapter;
import android.widget.ListView;
import com.fsck.k9.K9; | 1 | package com.fsck.k9.activity;
import android.app.ListActivity;
import android.os.Bundle;
import android.view.KeyEvent;
import android.view.MotionEvent;
import android.widget.AdapterView;
import android.widget.ListView;
import com.fsck.k9.K9;
import com.fsck.k9.activity.K9ActivityCommon.K9ActivityMagic;
import com.fsck.k9.activity.misc.SwipeGestureDetector.OnSwipeGestureListener;
public abstract class K9ListActivity extends ListActivity implements K9ActivityMagic {
private K9ActivityCommon base;
@Override
public void onCreate(Bundle savedInstanceState) {
base = K9ActivityCommon.newInstance(this);
super.onCreate(savedInstanceState);
}
@Override
public boolean dispatchTouchEvent(MotionEvent event) {
base.preDispatchTouchEvent(event);
return super.dispatchTouchEvent(event);
}
@Override
public void onResume() {
super.onResume();
}
@Override
public void setupGestureDetector(OnSwipeGestureListener listener) {
base.setupGestureDetector(listener);
}
@Override
public boolean onKeyDown(int keyCode, KeyEvent event) {
// Shortcuts that work no matter what is selected
if (K9.useVolumeKeysForListNavigationEnabled() &&
(keyCode == KeyEvent.KEYCODE_VOLUME_UP ||
keyCode == KeyEvent.KEYCODE_VOLUME_DOWN)) {
final ListView listView = getListView();
int currentPosition = listView.getSelectedItemPosition();
if (currentPosition == AdapterView.INVALID_POSITION || listView.isInTouchMode()) {
currentPosition = listView.getFirstVisiblePosition();
}
if (keyCode == KeyEvent.KEYCODE_VOLUME_UP && currentPosition > 0) {
listView.setSelection(currentPosition - 1);
} else if (keyCode == KeyEvent.KEYCODE_VOLUME_DOWN &&
currentPosition < listView.getCount()) {
listView.setSelection(currentPosition + 1);
}
return true;
}
return super.onKeyDown(keyCode, event);
}
@Override
public boolean onKeyUp(int keyCode, KeyEvent event) {
// Swallow these events too to avoid the audible notification of a volume change
if (K9.useVolumeKeysForListNavigationEnabled() &&
(keyCode == KeyEvent.KEYCODE_VOLUME_UP ||
keyCode == KeyEvent.KEYCODE_VOLUME_DOWN)) {
return true;
}
return super.onKeyUp(keyCode, event);
}
}
| 1 | 16,571 | Please don't create new fields with the `m` prefix. I'd rather mix naming styles than introduce more of these. | k9mail-k-9 | java
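For illustration only, a minimal sketch of the field-naming style this comment asks for; the class and field below are hypothetical and are not part of the actual patch:

import android.widget.ListAdapter;

// Hypothetical example: new member fields use plain camelCase names
// instead of the legacy Hungarian-style "m" prefix (e.g. mAdapter).
public class AdapterHolder {
    private ListAdapter adapter; // preferred: no "m" prefix on new fields

    public void setAdapter(ListAdapter adapter) {
        this.adapter = adapter;
    }

    public ListAdapter getAdapter() {
        return adapter;
    }
}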
@@ -229,9 +229,13 @@ public class SolrConfig extends XmlConfigFile implements MapSerializable {
enableLazyFieldLoading = getBool("query/enableLazyFieldLoading", false);
useCircuitBreakers = getBool("circuitBreaker/useCircuitBreakers", false);
+ cpuCircuitBreakerEnabled = getBool("circuitBreaker/cpuCircuitBreakerEnabled", false);
+ memoryCircuitBreakerEnabled = getBool("circuitBreaker/memoryCircuitBreakerEnabled", false);
memoryCircuitBreakerThresholdPct = getInt("circuitBreaker/memoryCircuitBreakerThresholdPct", 95);
- validateMemoryBreakerThreshold();
+ cpuCircuitBreakerThreshold = getInt("circuitBreaker/cpuCircuitBreakerThreshold", 95);
+
+ validateCircuitBreakerThresholds();
filterCacheConfig = CacheConfig.getConfig(this, "query/filterCache");
queryResultCacheConfig = CacheConfig.getConfig(this, "query/queryResultCache"); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.core;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPathConstants;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.invoke.MethodHandles;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.google.common.collect.ImmutableList;
import org.apache.commons.io.FileUtils;
import org.apache.lucene.index.IndexDeletionPolicy;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.Version;
import org.apache.solr.client.solrj.io.stream.expr.Expressible;
import org.apache.solr.cloud.RecoveryStrategy;
import org.apache.solr.cloud.ZkSolrResourceLoader;
import org.apache.solr.common.MapSerializable;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.handler.component.SearchComponent;
import org.apache.solr.pkg.PackageListeners;
import org.apache.solr.pkg.PackageLoader;
import org.apache.solr.request.SolrRequestHandler;
import org.apache.solr.response.QueryResponseWriter;
import org.apache.solr.response.transform.TransformerFactory;
import org.apache.solr.rest.RestManager;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.IndexSchemaFactory;
import org.apache.solr.search.CacheConfig;
import org.apache.solr.search.CaffeineCache;
import org.apache.solr.search.QParserPlugin;
import org.apache.solr.search.SolrCache;
import org.apache.solr.search.ValueSourceParser;
import org.apache.solr.search.stats.StatsCache;
import org.apache.solr.servlet.SolrRequestParsers;
import org.apache.solr.spelling.QueryConverter;
import org.apache.solr.update.SolrIndexConfig;
import org.apache.solr.update.UpdateLog;
import org.apache.solr.update.processor.UpdateRequestProcessorChain;
import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
import org.apache.solr.util.DOMUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
import static org.apache.solr.common.params.CommonParams.NAME;
import static org.apache.solr.common.params.CommonParams.PATH;
import static org.apache.solr.common.util.Utils.fromJSON;
import static org.apache.solr.common.util.Utils.makeMap;
import static org.apache.solr.core.ConfigOverlay.ZNODEVER;
import static org.apache.solr.core.SolrConfig.PluginOpts.LAZY;
import static org.apache.solr.core.SolrConfig.PluginOpts.MULTI_OK;
import static org.apache.solr.core.SolrConfig.PluginOpts.NOOP;
import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_CLASS;
import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_NAME;
import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_NAME_IN_OVERLAY;
/**
* Provides a static reference to a Config object modeling the main
* configuration data for a Solr instance -- typically found in
* "solrconfig.xml".
*/
public class SolrConfig extends XmlConfigFile implements MapSerializable {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String DEFAULT_CONF_FILE = "solrconfig.xml";
private RequestParams requestParams;
public enum PluginOpts {
MULTI_OK,
REQUIRE_NAME,
REQUIRE_NAME_IN_OVERLAY,
REQUIRE_CLASS,
LAZY,
// EnumSet.of and/or EnumSet.copyOf(Collection) are annoying
// because of type determination
NOOP
}
private int multipartUploadLimitKB;
private int formUploadLimitKB;
private boolean enableRemoteStreams;
private boolean enableStreamBody;
private boolean handleSelect;
private boolean addHttpRequestToContext;
private final SolrRequestParsers solrRequestParsers;
/**
* TEST-ONLY: Creates a configuration instance from an instance directory and file name
* @param instanceDir the directory used to create the resource loader
* @param name the configuration name used by the loader if the stream is null
*/
public SolrConfig(Path instanceDir, String name)
throws ParserConfigurationException, IOException, SAXException {
this(new SolrResourceLoader(instanceDir), name, true, null);
}
public static SolrConfig readFromResourceLoader(SolrResourceLoader loader, String name, boolean isConfigsetTrusted, Properties substitutableProperties) {
try {
return new SolrConfig(loader, name, isConfigsetTrusted, substitutableProperties);
} catch (Exception e) {
String resource;
if (loader instanceof ZkSolrResourceLoader) {
resource = name;
} else {
resource = Paths.get(loader.getConfigDir()).resolve(name).toString();
}
throw new SolrException(ErrorCode.SERVER_ERROR, "Error loading solr config from " + resource, e);
}
}
/**
* Creates a configuration instance from a resource loader, a configuration name and a stream.
* If the stream is null, the resource loader will open the configuration stream.
* If the stream is not null, no attempt to load the resource will occur (the name is not used).
* @param loader the resource loader
* @param name the configuration name
* @param isConfigsetTrusted false if configset was uploaded using unsecured configset upload API, true otherwise
* @param substitutableProperties optional properties to substitute into the XML
*/
private SolrConfig(SolrResourceLoader loader, String name, boolean isConfigsetTrusted, Properties substitutableProperties)
throws ParserConfigurationException, IOException, SAXException {
// insist we have non-null substituteProperties; it might get overlayed
super(loader, name, null, "/config/", substitutableProperties == null ? new Properties() : substitutableProperties);
getOverlay();//just in case it is not initialized
getRequestParams();
initLibs(loader, isConfigsetTrusted);
luceneMatchVersion = SolrConfig.parseLuceneVersionString(getVal(IndexSchema.LUCENE_MATCH_VERSION_PARAM, true));
log.info("Using Lucene MatchVersion: {}", luceneMatchVersion);
String indexConfigPrefix;
// Old indexDefaults and mainIndex sections are deprecated and fails fast for luceneMatchVersion=>LUCENE_4_0_0.
// For older solrconfig.xml's we allow the old sections, but never mixed with the new <indexConfig>
boolean hasDeprecatedIndexConfig = (getNode("indexDefaults", false) != null) || (getNode("mainIndex", false) != null);
if (hasDeprecatedIndexConfig) {
throw new SolrException(ErrorCode.FORBIDDEN, "<indexDefaults> and <mainIndex> configuration sections are discontinued. Use <indexConfig> instead.");
} else {
indexConfigPrefix = "indexConfig";
}
assertWarnOrFail("The <nrtMode> config has been discontinued and NRT mode is always used by Solr." +
" This config will be removed in future versions.", getNode(indexConfigPrefix + "/nrtMode", false) == null,
true
);
assertWarnOrFail("Solr no longer supports forceful unlocking via the 'unlockOnStartup' option. "+
"This is no longer necessary for the default lockType except in situations where "+
"it would be dangerous and should not be done. For other lockTypes and/or "+
"directoryFactory options it may also be dangerous and users must resolve "+
"problematic locks manually.",
null == getNode(indexConfigPrefix + "/unlockOnStartup", false),
true // 'fail' in trunk
);
// Parse indexConfig section, using mainIndex as backup in case old config is used
indexConfig = new SolrIndexConfig(this, "indexConfig", null);
booleanQueryMaxClauseCount = getInt("query/maxBooleanClauses", IndexSearcher.getMaxClauseCount());
if (IndexSearcher.getMaxClauseCount() < booleanQueryMaxClauseCount) {
log.warn("solrconfig.xml: <maxBooleanClauses> of {} is greater than global limit of {} {}"
, booleanQueryMaxClauseCount, IndexSearcher.getMaxClauseCount()
, "and will have no effect set 'maxBooleanClauses' in solr.xml to increase global limit");
}
// Warn about deprecated / discontinued parameters
// boolToFilterOptimizer has had no effect since 3.1
if (get("query/boolTofilterOptimizer", null) != null)
log.warn("solrconfig.xml: <boolTofilterOptimizer> is currently not implemented and has no effect.");
if (get("query/HashDocSet", null) != null)
log.warn("solrconfig.xml: <HashDocSet> is deprecated and no longer used.");
// TODO: Old code - in case somebody wants to re-enable. Also see SolrIndexSearcher#search()
// filtOptEnabled = getBool("query/boolTofilterOptimizer/@enabled", false);
// filtOptCacheSize = getInt("query/boolTofilterOptimizer/@cacheSize",32);
// filtOptThreshold = getFloat("query/boolTofilterOptimizer/@threshold",.05f);
useFilterForSortedQuery = getBool("query/useFilterForSortedQuery", false);
queryResultWindowSize = Math.max(1, getInt("query/queryResultWindowSize", 1));
queryResultMaxDocsCached = getInt("query/queryResultMaxDocsCached", Integer.MAX_VALUE);
enableLazyFieldLoading = getBool("query/enableLazyFieldLoading", false);
useCircuitBreakers = getBool("circuitBreaker/useCircuitBreakers", false);
memoryCircuitBreakerThresholdPct = getInt("circuitBreaker/memoryCircuitBreakerThresholdPct", 95);
validateMemoryBreakerThreshold();
filterCacheConfig = CacheConfig.getConfig(this, "query/filterCache");
queryResultCacheConfig = CacheConfig.getConfig(this, "query/queryResultCache");
documentCacheConfig = CacheConfig.getConfig(this, "query/documentCache");
CacheConfig conf = CacheConfig.getConfig(this, "query/fieldValueCache");
if (conf == null) {
Map<String, String> args = new HashMap<>();
args.put(NAME, "fieldValueCache");
args.put("size", "10000");
args.put("initialSize", "10");
args.put("showItems", "-1");
conf = new CacheConfig(CaffeineCache.class, args, null);
}
fieldValueCacheConfig = conf;
useColdSearcher = getBool("query/useColdSearcher", false);
dataDir = get("dataDir", null);
if (dataDir != null && dataDir.length() == 0) dataDir = null;
org.apache.solr.search.SolrIndexSearcher.initRegenerators(this);
if (get("jmx", null) != null) {
log.warn("solrconfig.xml: <jmx> is no longer supported, use solr.xml:/metrics/reporter section instead");
}
httpCachingConfig = new HttpCachingConfig(this);
maxWarmingSearchers = getInt("query/maxWarmingSearchers", 1);
slowQueryThresholdMillis = getInt("query/slowQueryThresholdMillis", -1);
for (SolrPluginInfo plugin : plugins) loadPluginInfo(plugin);
Map<String, CacheConfig> userCacheConfigs = CacheConfig.getMultipleConfigs(this, "query/cache");
List<PluginInfo> caches = getPluginInfos(SolrCache.class.getName());
if (!caches.isEmpty()) {
for (PluginInfo c : caches) {
userCacheConfigs.put(c.name, CacheConfig.getConfig(this, "cache", c.attributes, null));
}
}
this.userCacheConfigs = Collections.unmodifiableMap(userCacheConfigs);
updateHandlerInfo = loadUpdatehandlerInfo();
multipartUploadLimitKB = getInt(
"requestDispatcher/requestParsers/@multipartUploadLimitInKB", Integer.MAX_VALUE);
if (multipartUploadLimitKB == -1) multipartUploadLimitKB = Integer.MAX_VALUE;
formUploadLimitKB = getInt(
"requestDispatcher/requestParsers/@formdataUploadLimitInKB", Integer.MAX_VALUE);
if (formUploadLimitKB == -1) formUploadLimitKB = Integer.MAX_VALUE;
enableRemoteStreams = getBool(
"requestDispatcher/requestParsers/@enableRemoteStreaming", false);
enableStreamBody = getBool(
"requestDispatcher/requestParsers/@enableStreamBody", false);
handleSelect = getBool(
"requestDispatcher/@handleSelect", false);
addHttpRequestToContext = getBool(
"requestDispatcher/requestParsers/@addHttpRequestToContext", false);
List<PluginInfo> argsInfos = getPluginInfos(InitParams.class.getName());
if (argsInfos != null) {
Map<String, InitParams> argsMap = new HashMap<>();
for (PluginInfo p : argsInfos) {
InitParams args = new InitParams(p);
argsMap.put(args.name == null ? String.valueOf(args.hashCode()) : args.name, args);
}
this.initParams = Collections.unmodifiableMap(argsMap);
}
solrRequestParsers = new SolrRequestParsers(this);
log.debug("Loaded SolrConfig: {}", name);
}
private static final AtomicBoolean versionWarningAlreadyLogged = new AtomicBoolean(false);
public static final Version parseLuceneVersionString(final String matchVersion) {
final Version version;
try {
version = Version.parseLeniently(matchVersion);
} catch (ParseException pe) {
throw new SolrException(ErrorCode.SERVER_ERROR,
"Invalid luceneMatchVersion. Should be of the form 'V.V.V' (e.g. 4.8.0)", pe);
}
if (version == Version.LATEST && !versionWarningAlreadyLogged.getAndSet(true)) {
log.warn("You should not use LATEST as luceneMatchVersion property: "
+ "if you use this setting, and then Solr upgrades to a newer release of Lucene, "
+ "sizable changes may happen. If precise back compatibility is important "
+ "then you should instead explicitly specify an actual Lucene version.");
}
return version;
}
public static final List<SolrPluginInfo> plugins = ImmutableList.<SolrPluginInfo>builder()
.add(new SolrPluginInfo(SolrRequestHandler.class, SolrRequestHandler.TYPE, REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK, LAZY))
.add(new SolrPluginInfo(QParserPlugin.class, "queryParser", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
.add(new SolrPluginInfo(Expressible.class, "expressible", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
.add(new SolrPluginInfo(QueryResponseWriter.class, "queryResponseWriter", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK, LAZY))
.add(new SolrPluginInfo(ValueSourceParser.class, "valueSourceParser", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
.add(new SolrPluginInfo(TransformerFactory.class, "transformer", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
.add(new SolrPluginInfo(SearchComponent.class, "searchComponent", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
.add(new SolrPluginInfo(UpdateRequestProcessorFactory.class, "updateProcessor", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
.add(new SolrPluginInfo(SolrCache.class, "cache", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
// TODO: WTF is up with queryConverter???
// it apparently *only* works as a singleton? - SOLR-4304
// and even then -- only if there is a single SpellCheckComponent
// because of queryConverter.setIndexAnalyzer
.add(new SolrPluginInfo(QueryConverter.class, "queryConverter", REQUIRE_NAME, REQUIRE_CLASS))
// this is hackish, since it picks up all SolrEventListeners,
// regardless of when/how/why they are used (or even if they are
// declared outside of the appropriate context) but there's no nice
// way around that in the PluginInfo framework
.add(new SolrPluginInfo(InitParams.class, InitParams.TYPE, MULTI_OK, REQUIRE_NAME_IN_OVERLAY))
.add(new SolrPluginInfo(SolrEventListener.class, "//listener", REQUIRE_CLASS, MULTI_OK, REQUIRE_NAME_IN_OVERLAY))
.add(new SolrPluginInfo(DirectoryFactory.class, "directoryFactory", REQUIRE_CLASS))
.add(new SolrPluginInfo(RecoveryStrategy.Builder.class, "recoveryStrategy"))
.add(new SolrPluginInfo(IndexDeletionPolicy.class, "indexConfig/deletionPolicy", REQUIRE_CLASS))
.add(new SolrPluginInfo(CodecFactory.class, "codecFactory", REQUIRE_CLASS))
.add(new SolrPluginInfo(IndexReaderFactory.class, "indexReaderFactory", REQUIRE_CLASS))
.add(new SolrPluginInfo(UpdateRequestProcessorChain.class, "updateRequestProcessorChain", MULTI_OK))
.add(new SolrPluginInfo(UpdateLog.class, "updateHandler/updateLog"))
.add(new SolrPluginInfo(IndexSchemaFactory.class, "schemaFactory", REQUIRE_CLASS))
.add(new SolrPluginInfo(RestManager.class, "restManager"))
.add(new SolrPluginInfo(StatsCache.class, "statsCache", REQUIRE_CLASS))
.build();
public static final Map<String, SolrPluginInfo> classVsSolrPluginInfo;
static {
Map<String, SolrPluginInfo> map = new HashMap<>();
for (SolrPluginInfo plugin : plugins) map.put(plugin.clazz.getName(), plugin);
classVsSolrPluginInfo = Collections.unmodifiableMap(map);
}
public static class SolrPluginInfo {
@SuppressWarnings({"rawtypes"})
public final Class clazz;
public final String tag;
public final Set<PluginOpts> options;
@SuppressWarnings({"unchecked", "rawtypes"})
private SolrPluginInfo(Class clz, String tag, PluginOpts... opts) {
this.clazz = clz;
this.tag = tag;
this.options = opts == null ? Collections.EMPTY_SET : EnumSet.of(NOOP, opts);
}
public String getCleanTag() {
return tag.replaceAll("/", "");
}
public String getTagCleanLower() {
return getCleanTag().toLowerCase(Locale.ROOT);
}
}
@SuppressWarnings({"unchecked", "rawtypes"})
public static ConfigOverlay getConfigOverlay(SolrResourceLoader loader) {
InputStream in = null;
InputStreamReader isr = null;
try {
try {
in = loader.openResource(ConfigOverlay.RESOURCE_NAME);
} catch (IOException e) {
// TODO: we should be explicitly looking for file not found exceptions
// and logging if it's not the expected IOException
// hopefully no problem, assume no overlay.json file
return new ConfigOverlay(Collections.EMPTY_MAP, -1);
}
int version = 0; // will be always 0 for file based resourceLoader
if (in instanceof ZkSolrResourceLoader.ZkByteArrayInputStream) {
version = ((ZkSolrResourceLoader.ZkByteArrayInputStream) in).getStat().getVersion();
log.debug("Config overlay loaded. version : {} ", version);
}
Map m = (Map) fromJSON(in);
return new ConfigOverlay(m, version);
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading config overlay", e);
} finally {
IOUtils.closeQuietly(isr);
IOUtils.closeQuietly(in);
}
}
private Map<String, InitParams> initParams = Collections.emptyMap();
public Map<String, InitParams> getInitParams() {
return initParams;
}
protected UpdateHandlerInfo loadUpdatehandlerInfo() {
return new UpdateHandlerInfo(get("updateHandler/@class", null),
getInt("updateHandler/autoCommit/maxDocs", -1),
getInt("updateHandler/autoCommit/maxTime", -1),
convertHeapOptionStyleConfigStringToBytes(get("updateHandler/autoCommit/maxSize", "")),
getBool("updateHandler/indexWriter/closeWaitsForMerges", true),
getBool("updateHandler/autoCommit/openSearcher", true),
getInt("updateHandler/autoSoftCommit/maxDocs", -1),
getInt("updateHandler/autoSoftCommit/maxTime", -1),
getBool("updateHandler/commitWithin/softCommit", true));
}
/**
* Converts a Java heap option-like config string to bytes. Valid suffixes are: 'k', 'm', 'g'
* (case insensitive). If there is no suffix, the default unit is bytes.
* For example, 50k = 50KB, 20m = 20MB, 4g = 4GB, 300 = 300 bytes
* @param configStr the config setting to parse
* @return the size, in bytes. -1 if the given config string is empty
*/
protected static long convertHeapOptionStyleConfigStringToBytes(String configStr) {
if (configStr.isEmpty()) {
return -1;
}
long multiplier = 1;
String numericValueStr = configStr;
char suffix = Character.toLowerCase(configStr.charAt(configStr.length() - 1));
if (Character.isLetter(suffix)) {
if (suffix == 'k') {
multiplier = FileUtils.ONE_KB;
}
else if (suffix == 'm') {
multiplier = FileUtils.ONE_MB;
}
else if (suffix == 'g') {
multiplier = FileUtils.ONE_GB;
} else {
throw new RuntimeException("Invalid suffix. Valid suffixes are 'k' (KB), 'm' (MB), 'g' (G). "
+ "No suffix means the amount is in bytes. ");
}
numericValueStr = configStr.substring(0, configStr.length() - 1);
}
try {
return Long.parseLong(numericValueStr) * multiplier;
} catch (NumberFormatException e) {
throw new RuntimeException("Invalid format. The config setting should be a long with an "
+ "optional letter suffix. Valid suffixes are 'k' (KB), 'm' (MB), 'g' (G). "
+ "No suffix means the amount is in bytes.");
}
}
private void loadPluginInfo(SolrPluginInfo pluginInfo) {
boolean requireName = pluginInfo.options.contains(REQUIRE_NAME);
boolean requireClass = pluginInfo.options.contains(REQUIRE_CLASS);
List<PluginInfo> result = readPluginInfos(pluginInfo.tag, requireName, requireClass);
if (1 < result.size() && !pluginInfo.options.contains(MULTI_OK)) {
throw new SolrException
(SolrException.ErrorCode.SERVER_ERROR,
"Found " + result.size() + " configuration sections when at most "
+ "1 is allowed matching expression: " + pluginInfo.getCleanTag());
}
if (!result.isEmpty()) pluginStore.put(pluginInfo.clazz.getName(), result);
}
public List<PluginInfo> readPluginInfos(String tag, boolean requireName, boolean requireClass) {
ArrayList<PluginInfo> result = new ArrayList<>();
NodeList nodes = (NodeList) evaluate(tag, XPathConstants.NODESET);
for (int i = 0; i < nodes.getLength(); i++) {
PluginInfo pluginInfo = new PluginInfo(nodes.item(i), "[solrconfig.xml] " + tag, requireName, requireClass);
if (pluginInfo.isEnabled()) result.add(pluginInfo);
}
return result;
}
public SolrRequestParsers getRequestParsers() {
return solrRequestParsers;
}
/* The set of materialized parameters: */
public final int booleanQueryMaxClauseCount;
// SolrIndexSearcher - nutch optimizer -- Disabled since 3.1
// public final boolean filtOptEnabled;
// public final int filtOptCacheSize;
// public final float filtOptThreshold;
// SolrIndexSearcher - caches configurations
public final CacheConfig filterCacheConfig;
public final CacheConfig queryResultCacheConfig;
public final CacheConfig documentCacheConfig;
public final CacheConfig fieldValueCacheConfig;
public final Map<String, CacheConfig> userCacheConfigs;
// SolrIndexSearcher - more...
public final boolean useFilterForSortedQuery;
public final int queryResultWindowSize;
public final int queryResultMaxDocsCached;
public final boolean enableLazyFieldLoading;
// Circuit Breaker Configuration
public final boolean useCircuitBreakers;
public final int memoryCircuitBreakerThresholdPct;
// IndexConfig settings
public final SolrIndexConfig indexConfig;
protected UpdateHandlerInfo updateHandlerInfo;
private Map<String, List<PluginInfo>> pluginStore = new LinkedHashMap<>();
public final int maxWarmingSearchers;
public final boolean useColdSearcher;
public final Version luceneMatchVersion;
protected String dataDir;
public final int slowQueryThresholdMillis; // threshold above which a query is considered slow
private final HttpCachingConfig httpCachingConfig;
public HttpCachingConfig getHttpCachingConfig() {
return httpCachingConfig;
}
public static class HttpCachingConfig implements MapSerializable {
/**
* config xpath prefix for getting HTTP Caching options
*/
private final static String CACHE_PRE
= "requestDispatcher/httpCaching/";
/**
* For extracting Expires "ttl" from <cacheControl> config
*/
private final static Pattern MAX_AGE
= Pattern.compile("\\bmax-age=(\\d+)");
@Override
public Map<String, Object> toMap(Map<String, Object> map) {
return makeMap("never304", never304,
"etagSeed", etagSeed,
"lastModFrom", lastModFrom.name().toLowerCase(Locale.ROOT),
"cacheControl", cacheControlHeader);
}
public static enum LastModFrom {
OPENTIME, DIRLASTMOD, BOGUS;
/**
* Input must not be null
*/
public static LastModFrom parse(final String s) {
try {
return valueOf(s.toUpperCase(Locale.ROOT));
} catch (Exception e) {
log.warn("Unrecognized value for lastModFrom: {}", s, e);
return BOGUS;
}
}
}
private final boolean never304;
private final String etagSeed;
private final String cacheControlHeader;
private final Long maxAge;
private final LastModFrom lastModFrom;
private HttpCachingConfig(SolrConfig conf) {
never304 = conf.getBool(CACHE_PRE + "@never304", false);
etagSeed = conf.get(CACHE_PRE + "@etagSeed", "Solr");
lastModFrom = LastModFrom.parse(conf.get(CACHE_PRE + "@lastModFrom",
"openTime"));
cacheControlHeader = conf.get(CACHE_PRE + "cacheControl", null);
Long tmp = null; // maxAge
if (null != cacheControlHeader) {
try {
final Matcher ttlMatcher = MAX_AGE.matcher(cacheControlHeader);
final String ttlStr = ttlMatcher.find() ? ttlMatcher.group(1) : null;
tmp = (null != ttlStr && !"".equals(ttlStr))
? Long.valueOf(ttlStr)
: null;
} catch (Exception e) {
log.warn("Ignoring exception while attempting to extract max-age from cacheControl config: {}"
, cacheControlHeader, e);
}
}
maxAge = tmp;
}
public boolean isNever304() {
return never304;
}
public String getEtagSeed() {
return etagSeed;
}
/**
* null if no Cache-Control header
*/
public String getCacheControlHeader() {
return cacheControlHeader;
}
/**
* null if no max age limitation
*/
public Long getMaxAge() {
return maxAge;
}
public LastModFrom getLastModFrom() {
return lastModFrom;
}
}
public static class UpdateHandlerInfo implements MapSerializable {
public final String className;
public final int autoCommmitMaxDocs, autoCommmitMaxTime,
autoSoftCommmitMaxDocs, autoSoftCommmitMaxTime;
public final long autoCommitMaxSizeBytes;
public final boolean indexWriterCloseWaitsForMerges;
public final boolean openSearcher; // is opening a new searcher part of hard autocommit?
public final boolean commitWithinSoftCommit;
/**
* @param autoCommmitMaxDocs set -1 as default
* @param autoCommmitMaxTime set -1 as default
* @param autoCommitMaxSize set -1 as default
*/
public UpdateHandlerInfo(String className, int autoCommmitMaxDocs, int autoCommmitMaxTime, long autoCommitMaxSize, boolean indexWriterCloseWaitsForMerges, boolean openSearcher,
int autoSoftCommmitMaxDocs, int autoSoftCommmitMaxTime, boolean commitWithinSoftCommit) {
this.className = className;
this.autoCommmitMaxDocs = autoCommmitMaxDocs;
this.autoCommmitMaxTime = autoCommmitMaxTime;
this.autoCommitMaxSizeBytes = autoCommitMaxSize;
this.indexWriterCloseWaitsForMerges = indexWriterCloseWaitsForMerges;
this.openSearcher = openSearcher;
this.autoSoftCommmitMaxDocs = autoSoftCommmitMaxDocs;
this.autoSoftCommmitMaxTime = autoSoftCommmitMaxTime;
this.commitWithinSoftCommit = commitWithinSoftCommit;
}
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public Map<String, Object> toMap(Map<String, Object> map) {
LinkedHashMap result = new LinkedHashMap();
result.put("indexWriter", makeMap("closeWaitsForMerges", indexWriterCloseWaitsForMerges));
result.put("commitWithin", makeMap("softCommit", commitWithinSoftCommit));
result.put("autoCommit", makeMap(
"maxDocs", autoCommmitMaxDocs,
"maxTime", autoCommmitMaxTime,
"openSearcher", openSearcher
));
result.put("autoSoftCommit",
makeMap("maxDocs", autoSoftCommmitMaxDocs,
"maxTime", autoSoftCommmitMaxTime));
return result;
}
}
// public Map<String, List<PluginInfo>> getUpdateProcessorChainInfo() { return updateProcessorChainInfo; }
public UpdateHandlerInfo getUpdateHandlerInfo() {
return updateHandlerInfo;
}
public String getDataDir() {
return dataDir;
}
/**
* SolrConfig keeps a repository of plugins by the type. The known interfaces are the types.
*
* @param type The key is FQN of the plugin class there are a few known types : SolrFormatter, SolrFragmenter
* SolrRequestHandler,QParserPlugin, QueryResponseWriter,ValueSourceParser,
* SearchComponent, QueryConverter, SolrEventListener, DirectoryFactory,
* IndexDeletionPolicy, IndexReaderFactory, {@link TransformerFactory}
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public List<PluginInfo> getPluginInfos(String type) {
List<PluginInfo> result = pluginStore.get(type);
SolrPluginInfo info = classVsSolrPluginInfo.get(type);
if (info != null &&
(info.options.contains(REQUIRE_NAME) || info.options.contains(REQUIRE_NAME_IN_OVERLAY))) {
Map<String, Map> infos = overlay.getNamedPlugins(info.getCleanTag());
if (!infos.isEmpty()) {
LinkedHashMap<String, PluginInfo> map = new LinkedHashMap<>();
if (result != null) for (PluginInfo pluginInfo : result) {
//just create a UUID for the time being so that map key is not null
String name = pluginInfo.name == null ?
UUID.randomUUID().toString().toLowerCase(Locale.ROOT) :
pluginInfo.name;
map.put(name, pluginInfo);
}
for (Map.Entry<String, Map> e : infos.entrySet()) {
map.put(e.getKey(), new PluginInfo(info.getCleanTag(), e.getValue()));
}
result = new ArrayList<>(map.values());
}
}
return result == null ? Collections.<PluginInfo>emptyList() : result;
}
public PluginInfo getPluginInfo(String type) {
List<PluginInfo> result = pluginStore.get(type);
if (result == null || result.isEmpty()) {
return null;
}
if (1 == result.size()) {
return result.get(0);
}
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Multiple plugins configured for type: " + type);
}
private void initLibs(SolrResourceLoader loader, boolean isConfigsetTrusted) {
// TODO Want to remove SolrResourceLoader.getInstancePath; it can be on a Standalone subclass.
// For Zk subclass, it's needed for the time being as well. We could remove that one if we remove two things
// in SolrCloud: (1) instancePath/lib and (2) solrconfig lib directives with relative paths. Can wait till 9.0.
Path instancePath = loader.getInstancePath();
List<URL> urls = new ArrayList<>();
Path libPath = instancePath.resolve("lib");
if (Files.exists(libPath)) {
try {
urls.addAll(SolrResourceLoader.getURLs(libPath));
} catch (IOException e) {
log.warn("Couldn't add files from {} to classpath: {}", libPath, e);
}
}
NodeList nodes = (NodeList) evaluate("lib", XPathConstants.NODESET);
if (nodes == null || nodes.getLength() == 0) return;
if (!isConfigsetTrusted) {
throw new SolrException(ErrorCode.UNAUTHORIZED, "The configset for this collection was uploaded without any authentication in place,"
+ " and use of <lib> is not available for collections with untrusted configsets. To use this component, re-upload the configset"
+ " after enabling authentication and authorization.");
}
for (int i = 0; i < nodes.getLength(); i++) {
Node node = nodes.item(i);
String baseDir = DOMUtil.getAttr(node, "dir");
String path = DOMUtil.getAttr(node, PATH);
if (null != baseDir) {
// :TODO: add support for a simpler 'glob' mutually exclusive of regex
Path dir = instancePath.resolve(baseDir);
String regex = DOMUtil.getAttr(node, "regex");
try {
if (regex == null)
urls.addAll(SolrResourceLoader.getURLs(dir));
else
urls.addAll(SolrResourceLoader.getFilteredURLs(dir, regex));
} catch (IOException e) {
log.warn("Couldn't add files from {} filtered by {} to classpath: {}", dir, regex, e);
}
} else if (null != path) {
final Path dir = instancePath.resolve(path);
try {
urls.add(dir.toUri().toURL());
} catch (MalformedURLException e) {
log.warn("Couldn't add file {} to classpath: {}", dir, e);
}
} else {
throw new RuntimeException("lib: missing mandatory attributes: 'dir' or 'path'");
}
}
loader.addToClassLoader(urls);
loader.reloadLuceneSPI();
}
private void validateMemoryBreakerThreshold() {
if (useCircuitBreakers) {
if (memoryCircuitBreakerThresholdPct > 95 || memoryCircuitBreakerThresholdPct < 50) {
throw new IllegalArgumentException("Valid value range of memoryCircuitBreakerThresholdPct is 50 - 95");
}
}
}
public int getMultipartUploadLimitKB() {
return multipartUploadLimitKB;
}
public int getFormUploadLimitKB() {
return formUploadLimitKB;
}
public boolean isHandleSelect() {
return handleSelect;
}
public boolean isAddHttpRequestToContext() {
return addHttpRequestToContext;
}
public boolean isEnableRemoteStreams() {
return enableRemoteStreams;
}
public boolean isEnableStreamBody() {
return enableStreamBody;
}
@Override
public int getInt(String path) {
return getInt(path, 0);
}
@Override
public int getInt(String path, int def) {
Object val = overlay.getXPathProperty(path);
if (val != null) return Integer.parseInt(val.toString());
return super.getInt(path, def);
}
@Override
public boolean getBool(String path, boolean def) {
Object val = overlay.getXPathProperty(path);
if (val != null) return Boolean.parseBoolean(val.toString());
return super.getBool(path, def);
}
@Override
public String get(String path) {
Object val = overlay.getXPathProperty(path, true);
return val != null ? val.toString() : super.get(path);
}
@Override
public String get(String path, String def) {
Object val = overlay.getXPathProperty(path, true);
return val != null ? val.toString() : super.get(path, def);
}
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public Map<String, Object> toMap(Map<String, Object> result) {
if (getZnodeVersion() > -1) result.put(ZNODEVER, getZnodeVersion());
result.put(IndexSchema.LUCENE_MATCH_VERSION_PARAM, luceneMatchVersion);
result.put("updateHandler", getUpdateHandlerInfo());
Map m = new LinkedHashMap();
result.put("query", m);
m.put("useFilterForSortedQuery", useFilterForSortedQuery);
m.put("queryResultWindowSize", queryResultWindowSize);
m.put("queryResultMaxDocsCached", queryResultMaxDocsCached);
m.put("enableLazyFieldLoading", enableLazyFieldLoading);
m.put("maxBooleanClauses", booleanQueryMaxClauseCount);
m.put("useCircuitBreakers", useCircuitBreakers);
m.put("memoryCircuitBreakerThresholdPct", memoryCircuitBreakerThresholdPct);
for (SolrPluginInfo plugin : plugins) {
List<PluginInfo> infos = getPluginInfos(plugin.clazz.getName());
if (infos == null || infos.isEmpty()) continue;
String tag = plugin.getCleanTag();
tag = tag.replace("/", "");
if (plugin.options.contains(PluginOpts.REQUIRE_NAME)) {
LinkedHashMap items = new LinkedHashMap();
for (PluginInfo info : infos) {
//TODO remove after fixing https://issues.apache.org/jira/browse/SOLR-13706
if (info.type.equals("searchComponent") && info.name.equals("highlight")) continue;
items.put(info.name, info);
}
for (Map.Entry e : overlay.getNamedPlugins(plugin.tag).entrySet()) items.put(e.getKey(), e.getValue());
result.put(tag, items);
} else {
if (plugin.options.contains(MULTI_OK)) {
ArrayList<MapSerializable> l = new ArrayList<>();
for (PluginInfo info : infos) l.add(info);
result.put(tag, l);
} else {
result.put(tag, infos.get(0));
}
}
}
addCacheConfig(m, filterCacheConfig, queryResultCacheConfig, documentCacheConfig, fieldValueCacheConfig);
m = new LinkedHashMap();
result.put("requestDispatcher", m);
m.put("handleSelect", handleSelect);
if (httpCachingConfig != null) m.put("httpCaching", httpCachingConfig);
m.put("requestParsers", makeMap("multipartUploadLimitKB", multipartUploadLimitKB,
"formUploadLimitKB", formUploadLimitKB,
"addHttpRequestToContext", addHttpRequestToContext));
if (indexConfig != null) result.put("indexConfig", indexConfig);
//TODO there is more to add
return result;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private void addCacheConfig(Map queryMap, CacheConfig... cache) {
if (cache == null) return;
for (CacheConfig config : cache) if (config != null) queryMap.put(config.getNodeName(), config);
}
@Override
public Properties getSubstituteProperties() {
Map<String, Object> p = getOverlay().getUserProps();
if (p == null || p.isEmpty()) return super.getSubstituteProperties();
Properties result = new Properties(super.getSubstituteProperties());
result.putAll(p);
return result;
}
private ConfigOverlay overlay;
public ConfigOverlay getOverlay() {
if (overlay == null) {
overlay = getConfigOverlay(getResourceLoader());
}
return overlay;
}
public RequestParams getRequestParams() {
if (requestParams == null) {
return refreshRequestParams();
}
return requestParams;
}
/**
* The version of package that should be loaded for a given package name
* This information is stored in the params.json in the same configset
* If params.json is absent or there is no corresponding version specified for a given package,
* this returns a null and the latest is used by the caller
*/
public String maxPackageVersion(String pkg) {
RequestParams.ParamSet p = getRequestParams().getParams(PackageListeners.PACKAGE_VERSIONS);
if (p == null) {
return null;
}
Object o = p.get().get(pkg);
if (o == null || PackageLoader.LATEST.equals(o)) return null;
return o.toString();
}
public RequestParams refreshRequestParams() {
requestParams = RequestParams.getFreshRequestParams(getResourceLoader(), requestParams);
if (log.isDebugEnabled()) {
log.debug("current version of requestparams : {}", requestParams.getZnodeVersion());
}
return requestParams;
}
}
| 1 | 36,285 | Can we please simplify these names? They are awfully verbose and repeat parts that are already unique and obvious. | apache-lucene-solr | java
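One possible reading of this request, shown as a sketch only: the child config names could stop repeating the "circuitBreaker" fragment they already sit under (and the Java fields could be shortened the same way). The paths below are hypothetical and are not the names the project actually adopted:

// Hypothetical simplified config paths (sketch; actual solrconfig.xml element names may differ).
useCircuitBreakers = getBool("circuitBreaker/enabled", false);
memoryCircuitBreakerEnabled = getBool("circuitBreaker/memory/enabled", false);
memoryCircuitBreakerThresholdPct = getInt("circuitBreaker/memory/threshold", 95);
cpuCircuitBreakerEnabled = getBool("circuitBreaker/cpu/enabled", false);
cpuCircuitBreakerThreshold = getInt("circuitBreaker/cpu/threshold", 95);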
@@ -241,7 +241,7 @@ public class RecipientPresenter implements PermissionPingCallback {
boolean alreadyVisible = recipientMvpView.isBccVisible();
boolean singleBccRecipientFromAccount =
bccRecipients.length == 1 && bccRecipients[0].toString().equals(bccAddress);
- recipientMvpView.setBccVisibility(alreadyVisible || singleBccRecipientFromAccount);
+ recipientMvpView.setBccVisibility(alreadyVisible || !singleBccRecipientFromAccount);
updateRecipientExpanderVisibility();
}
} | 1 | package com.fsck.k9.activity.compose;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import android.app.Activity;
import android.app.LoaderManager;
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.net.Uri;
import android.os.Bundle;
import android.support.annotation.VisibleForTesting;
import android.text.TextUtils;
import timber.log.Timber;
import android.view.Menu;
import com.fsck.k9.Account;
import com.fsck.k9.Identity;
import com.fsck.k9.K9;
import com.fsck.k9.R;
import com.fsck.k9.activity.compose.ComposeCryptoStatus.AttachErrorState;
import com.fsck.k9.activity.compose.ComposeCryptoStatus.ComposeCryptoStatusBuilder;
import com.fsck.k9.activity.compose.ComposeCryptoStatus.SendErrorState;
import com.fsck.k9.helper.Contacts;
import com.fsck.k9.helper.MailTo;
import com.fsck.k9.helper.ReplyToParser;
import com.fsck.k9.helper.ReplyToParser.ReplyToAddresses;
import com.fsck.k9.mail.Address;
import com.fsck.k9.mail.Flag;
import com.fsck.k9.mail.Message;
import com.fsck.k9.mail.Message.RecipientType;
import com.fsck.k9.message.ComposePgpInlineDecider;
import com.fsck.k9.message.PgpMessageBuilder;
import com.fsck.k9.view.RecipientSelectView.Recipient;
import org.openintents.openpgp.IOpenPgpService2;
import org.openintents.openpgp.util.OpenPgpApi;
import org.openintents.openpgp.util.OpenPgpApi.PermissionPingCallback;
import org.openintents.openpgp.util.OpenPgpServiceConnection;
import org.openintents.openpgp.util.OpenPgpServiceConnection.OnBound;
public class RecipientPresenter implements PermissionPingCallback {
private static final String STATE_KEY_CC_SHOWN = "state:ccShown";
private static final String STATE_KEY_BCC_SHOWN = "state:bccShown";
private static final String STATE_KEY_LAST_FOCUSED_TYPE = "state:lastFocusedType";
private static final String STATE_KEY_CURRENT_CRYPTO_MODE = "state:currentCryptoMode";
private static final String STATE_KEY_CRYPTO_ENABLE_PGP_INLINE = "state:cryptoEnablePgpInline";
private static final int CONTACT_PICKER_TO = 1;
private static final int CONTACT_PICKER_CC = 2;
private static final int CONTACT_PICKER_BCC = 3;
private static final int OPENPGP_USER_INTERACTION = 4;
private static final int PGP_DIALOG_DISPLAY_THRESHOLD = 2;
// transient state, which is either obtained during construction and initialization, or cached
private final Context context;
private final RecipientMvpView recipientMvpView;
private final ComposePgpInlineDecider composePgpInlineDecider;
private final RecipientsChangedListener listener;
private ReplyToParser replyToParser;
private Account account;
private String openPgpProvider;
private Boolean hasContactPicker;
private ComposeCryptoStatus cachedCryptoStatus;
private PendingIntent pendingUserInteractionIntent;
private CryptoProviderState cryptoProviderState = CryptoProviderState.UNCONFIGURED;
private OpenPgpServiceConnection openPgpServiceConnection;
// persistent state, saved during onSaveInstanceState
private RecipientType lastFocusedType = RecipientType.TO;
// TODO initialize cryptoMode to other values under some circumstances, e.g. if we reply to an encrypted e-mail
private CryptoMode currentCryptoMode = CryptoMode.OPPORTUNISTIC;
private boolean cryptoEnablePgpInline = false;
public RecipientPresenter(Context context, LoaderManager loaderManager, RecipientMvpView recipientMvpView,
Account account, ComposePgpInlineDecider composePgpInlineDecider, ReplyToParser replyToParser,
RecipientsChangedListener recipientsChangedListener) {
this.recipientMvpView = recipientMvpView;
this.context = context;
this.composePgpInlineDecider = composePgpInlineDecider;
this.replyToParser = replyToParser;
this.listener = recipientsChangedListener;
recipientMvpView.setPresenter(this);
recipientMvpView.setLoaderManager(loaderManager);
onSwitchAccount(account);
}
public List<Address> getToAddresses() {
return recipientMvpView.getToAddresses();
}
public List<Address> getCcAddresses() {
return recipientMvpView.getCcAddresses();
}
public List<Address> getBccAddresses() {
return recipientMvpView.getBccAddresses();
}
private List<Recipient> getAllRecipients() {
ArrayList<Recipient> result = new ArrayList<>();
result.addAll(recipientMvpView.getToRecipients());
result.addAll(recipientMvpView.getCcRecipients());
result.addAll(recipientMvpView.getBccRecipients());
return result;
}
public boolean checkRecipientsOkForSending() {
recipientMvpView.recipientToTryPerformCompletion();
recipientMvpView.recipientCcTryPerformCompletion();
recipientMvpView.recipientBccTryPerformCompletion();
if (recipientMvpView.recipientToHasUncompletedText()) {
recipientMvpView.showToUncompletedError();
return true;
}
if (recipientMvpView.recipientCcHasUncompletedText()) {
recipientMvpView.showCcUncompletedError();
return true;
}
if (recipientMvpView.recipientBccHasUncompletedText()) {
recipientMvpView.showBccUncompletedError();
return true;
}
if (getToAddresses().isEmpty() && getCcAddresses().isEmpty() && getBccAddresses().isEmpty()) {
recipientMvpView.showNoRecipientsError();
return true;
}
return false;
}
public void initFromReplyToMessage(Message message, boolean isReplyAll) {
ReplyToAddresses replyToAddresses = isReplyAll ?
replyToParser.getRecipientsToReplyAllTo(message, account) :
replyToParser.getRecipientsToReplyTo(message, account);
addToAddresses(replyToAddresses.to);
addCcAddresses(replyToAddresses.cc);
boolean shouldSendAsPgpInline = composePgpInlineDecider.shouldReplyInline(message);
if (shouldSendAsPgpInline) {
cryptoEnablePgpInline = true;
}
}
public void initFromMailto(MailTo mailTo) {
addToAddresses(mailTo.getTo());
addCcAddresses(mailTo.getCc());
addBccAddresses(mailTo.getBcc());
}
public void initFromSendOrViewIntent(Intent intent) {
String[] extraEmail = intent.getStringArrayExtra(Intent.EXTRA_EMAIL);
String[] extraCc = intent.getStringArrayExtra(Intent.EXTRA_CC);
String[] extraBcc = intent.getStringArrayExtra(Intent.EXTRA_BCC);
if (extraEmail != null) {
addToAddresses(addressFromStringArray(extraEmail));
}
if (extraCc != null) {
addCcAddresses(addressFromStringArray(extraCc));
}
if (extraBcc != null) {
addBccAddresses(addressFromStringArray(extraBcc));
}
}
public void onRestoreInstanceState(Bundle savedInstanceState) {
recipientMvpView.setCcVisibility(savedInstanceState.getBoolean(STATE_KEY_CC_SHOWN));
recipientMvpView.setBccVisibility(savedInstanceState.getBoolean(STATE_KEY_BCC_SHOWN));
lastFocusedType = RecipientType.valueOf(savedInstanceState.getString(STATE_KEY_LAST_FOCUSED_TYPE));
currentCryptoMode = CryptoMode.valueOf(savedInstanceState.getString(STATE_KEY_CURRENT_CRYPTO_MODE));
cryptoEnablePgpInline = savedInstanceState.getBoolean(STATE_KEY_CRYPTO_ENABLE_PGP_INLINE);
updateRecipientExpanderVisibility();
}
public void onSaveInstanceState(Bundle outState) {
outState.putBoolean(STATE_KEY_CC_SHOWN, recipientMvpView.isCcVisible());
outState.putBoolean(STATE_KEY_BCC_SHOWN, recipientMvpView.isBccVisible());
outState.putString(STATE_KEY_LAST_FOCUSED_TYPE, lastFocusedType.toString());
outState.putString(STATE_KEY_CURRENT_CRYPTO_MODE, currentCryptoMode.toString());
outState.putBoolean(STATE_KEY_CRYPTO_ENABLE_PGP_INLINE, cryptoEnablePgpInline);
}
public void initFromDraftMessage(Message message) {
initRecipientsFromDraftMessage(message);
initPgpInlineFromDraftMessage(message);
}
private void initRecipientsFromDraftMessage(Message message) {
addToAddresses(message.getRecipients(RecipientType.TO));
Address[] ccRecipients = message.getRecipients(RecipientType.CC);
addCcAddresses(ccRecipients);
Address[] bccRecipients = message.getRecipients(RecipientType.BCC);
addBccAddresses(bccRecipients);
}
private void initPgpInlineFromDraftMessage(Message message) {
cryptoEnablePgpInline = message.isSet(Flag.X_DRAFT_OPENPGP_INLINE);
}
private void addToAddresses(Address... toAddresses) {
addRecipientsFromAddresses(RecipientType.TO, toAddresses);
}
private void addCcAddresses(Address... ccAddresses) {
if (ccAddresses.length > 0) {
addRecipientsFromAddresses(RecipientType.CC, ccAddresses);
recipientMvpView.setCcVisibility(true);
updateRecipientExpanderVisibility();
}
}
public void addBccAddresses(Address... bccRecipients) {
if (bccRecipients.length > 0) {
addRecipientsFromAddresses(RecipientType.BCC, bccRecipients);
String bccAddress = account.getAlwaysBcc();
// If the auto-bcc is the only entry in the BCC list, don't show the Bcc fields.
boolean alreadyVisible = recipientMvpView.isBccVisible();
boolean singleBccRecipientFromAccount =
bccRecipients.length == 1 && bccRecipients[0].toString().equals(bccAddress);
recipientMvpView.setBccVisibility(alreadyVisible || singleBccRecipientFromAccount);
updateRecipientExpanderVisibility();
}
}
public void onPrepareOptionsMenu(Menu menu) {
boolean isCryptoConfigured = cryptoProviderState != CryptoProviderState.UNCONFIGURED;
menu.findItem(R.id.openpgp_inline_enable).setVisible(isCryptoConfigured && !cryptoEnablePgpInline);
menu.findItem(R.id.openpgp_inline_disable).setVisible(isCryptoConfigured && cryptoEnablePgpInline);
boolean showSignOnly = isCryptoConfigured && K9.getOpenPgpSupportSignOnly();
boolean isSignOnly = cachedCryptoStatus.isSignOnly();
menu.findItem(R.id.openpgp_sign_only).setVisible(showSignOnly && !isSignOnly);
menu.findItem(R.id.openpgp_sign_only_disable).setVisible(showSignOnly && isSignOnly);
boolean noContactPickerAvailable = !hasContactPicker();
if (noContactPickerAvailable) {
menu.findItem(R.id.add_from_contacts).setVisible(false);
}
}
public void onSwitchAccount(Account account) {
this.account = account;
if (account.isAlwaysShowCcBcc()) {
recipientMvpView.setCcVisibility(true);
recipientMvpView.setBccVisibility(true);
updateRecipientExpanderVisibility();
}
// This does not strictly depend on the account, but this is as good a point to set this as any
setupCryptoProvider();
}
@SuppressWarnings("UnusedParameters")
public void onSwitchIdentity(Identity identity) {
// TODO decide what actually to do on identity switch?
/*
if (mIdentityChanged) {
mBccWrapper.setVisibility(View.VISIBLE);
}
mBccView.setText("");
mBccView.addAddress(new Address(mAccount.getAlwaysBcc(), ""));
*/
}
private static Address[] addressFromStringArray(String[] addresses) {
return addressFromStringArray(Arrays.asList(addresses));
}
private static Address[] addressFromStringArray(List<String> addresses) {
ArrayList<Address> result = new ArrayList<>(addresses.size());
for (String addressStr : addresses) {
Collections.addAll(result, Address.parseUnencoded(addressStr));
}
return result.toArray(new Address[result.size()]);
}
void onClickToLabel() {
recipientMvpView.requestFocusOnToField();
}
void onClickCcLabel() {
recipientMvpView.requestFocusOnCcField();
}
void onClickBccLabel() {
recipientMvpView.requestFocusOnBccField();
}
void onClickRecipientExpander() {
recipientMvpView.setCcVisibility(true);
recipientMvpView.setBccVisibility(true);
updateRecipientExpanderVisibility();
}
private void hideEmptyExtendedRecipientFields() {
if (recipientMvpView.getCcAddresses().isEmpty()) {
recipientMvpView.setCcVisibility(false);
if (lastFocusedType == RecipientType.CC) {
lastFocusedType = RecipientType.TO;
}
}
if (recipientMvpView.getBccAddresses().isEmpty()) {
recipientMvpView.setBccVisibility(false);
if (lastFocusedType == RecipientType.BCC) {
lastFocusedType = RecipientType.TO;
}
}
updateRecipientExpanderVisibility();
}
private void updateRecipientExpanderVisibility() {
boolean notBothAreVisible = !(recipientMvpView.isCcVisible() && recipientMvpView.isBccVisible());
recipientMvpView.setRecipientExpanderVisibility(notBothAreVisible);
}
public void updateCryptoStatus() {
cachedCryptoStatus = null;
boolean isOkStateButLostConnection = cryptoProviderState == CryptoProviderState.OK &&
(openPgpServiceConnection == null || !openPgpServiceConnection.isBound());
if (isOkStateButLostConnection) {
cryptoProviderState = CryptoProviderState.LOST_CONNECTION;
pendingUserInteractionIntent = null;
}
recipientMvpView.showCryptoStatus(getCurrentCryptoStatus().getCryptoStatusDisplayType());
recipientMvpView.showCryptoSpecialMode(getCurrentCryptoStatus().getCryptoSpecialModeDisplayType());
}
public ComposeCryptoStatus getCurrentCryptoStatus() {
if (cachedCryptoStatus == null) {
ComposeCryptoStatusBuilder builder = new ComposeCryptoStatusBuilder()
.setCryptoProviderState(cryptoProviderState)
.setCryptoMode(currentCryptoMode)
.setEnablePgpInline(cryptoEnablePgpInline)
.setRecipients(getAllRecipients());
long accountCryptoKey = account.getCryptoKey();
if (accountCryptoKey != Account.NO_OPENPGP_KEY) {
// TODO split these into individual settings? maybe after key is bound to identity
builder.setSigningKeyId(accountCryptoKey);
builder.setSelfEncryptId(accountCryptoKey);
}
cachedCryptoStatus = builder.build();
}
return cachedCryptoStatus;
}
public boolean isForceTextMessageFormat() {
if (cryptoEnablePgpInline) {
ComposeCryptoStatus cryptoStatus = getCurrentCryptoStatus();
return cryptoStatus.isEncryptionEnabled() || cryptoStatus.isSigningEnabled();
} else {
return false;
}
}
void onToTokenAdded() {
updateCryptoStatus();
listener.onRecipientsChanged();
}
void onToTokenRemoved() {
updateCryptoStatus();
listener.onRecipientsChanged();
}
void onToTokenChanged() {
updateCryptoStatus();
listener.onRecipientsChanged();
}
void onCcTokenAdded() {
updateCryptoStatus();
listener.onRecipientsChanged();
}
void onCcTokenRemoved() {
updateCryptoStatus();
listener.onRecipientsChanged();
}
void onCcTokenChanged() {
updateCryptoStatus();
listener.onRecipientsChanged();
}
void onBccTokenAdded() {
updateCryptoStatus();
listener.onRecipientsChanged();
}
void onBccTokenRemoved() {
updateCryptoStatus();
listener.onRecipientsChanged();
}
void onBccTokenChanged() {
updateCryptoStatus();
listener.onRecipientsChanged();
}
public void onCryptoModeChanged(CryptoMode cryptoMode) {
currentCryptoMode = cryptoMode;
updateCryptoStatus();
}
public void onCryptoPgpInlineChanged(boolean enablePgpInline) {
cryptoEnablePgpInline = enablePgpInline;
updateCryptoStatus();
}
private void addRecipientsFromAddresses(final RecipientType recipientType, final Address... addresses) {
new RecipientLoader(context, openPgpProvider, addresses) {
@Override
public void deliverResult(List<Recipient> result) {
Recipient[] recipientArray = result.toArray(new Recipient[result.size()]);
recipientMvpView.addRecipients(recipientType, recipientArray);
stopLoading();
abandon();
}
}.startLoading();
}
private void addRecipientFromContactUri(final RecipientType recipientType, final Uri uri) {
new RecipientLoader(context, openPgpProvider, uri, false) {
@Override
public void deliverResult(List<Recipient> result) {
// TODO handle multiple available mail addresses for a contact?
if (result.isEmpty()) {
recipientMvpView.showErrorContactNoAddress();
return;
}
Recipient recipient = result.get(0);
recipientMvpView.addRecipients(recipientType, recipient);
stopLoading();
abandon();
}
}.startLoading();
}
void onToFocused() {
lastFocusedType = RecipientType.TO;
}
void onCcFocused() {
lastFocusedType = RecipientType.CC;
}
void onBccFocused() {
lastFocusedType = RecipientType.BCC;
}
public void onMenuAddFromContacts() {
int requestCode = recipientTypeToRequestCode(lastFocusedType);
recipientMvpView.showContactPicker(requestCode);
}
public void onActivityResult(int requestCode, int resultCode, Intent data) {
switch (requestCode) {
case CONTACT_PICKER_TO:
case CONTACT_PICKER_CC:
case CONTACT_PICKER_BCC:
if (resultCode != Activity.RESULT_OK || data == null) {
return;
}
RecipientType recipientType = recipientTypeFromRequestCode(requestCode);
addRecipientFromContactUri(recipientType, data.getData());
break;
case OPENPGP_USER_INTERACTION:
cryptoProviderBindOrCheckPermission();
break;
}
}
private static int recipientTypeToRequestCode(RecipientType type) {
switch (type) {
case TO: {
return CONTACT_PICKER_TO;
}
case CC: {
return CONTACT_PICKER_CC;
}
case BCC: {
return CONTACT_PICKER_BCC;
}
}
throw new AssertionError("Unhandled case: " + type);
}
private static RecipientType recipientTypeFromRequestCode(int type) {
switch (type) {
case CONTACT_PICKER_TO: {
return RecipientType.TO;
}
case CONTACT_PICKER_CC: {
return RecipientType.CC;
}
case CONTACT_PICKER_BCC: {
return RecipientType.BCC;
}
}
throw new AssertionError("Unhandled case: " + type);
}
public void onNonRecipientFieldFocused() {
if (!account.isAlwaysShowCcBcc()) {
hideEmptyExtendedRecipientFields();
}
}
void onClickCryptoStatus() {
switch (cryptoProviderState) {
case UNCONFIGURED:
Timber.e("click on crypto status while unconfigured - this should not really happen?!");
return;
case OK:
if (cachedCryptoStatus.isSignOnly()) {
recipientMvpView.showErrorIsSignOnly();
} else {
recipientMvpView.showCryptoDialog(currentCryptoMode);
}
return;
case LOST_CONNECTION:
case UNINITIALIZED:
case ERROR:
cryptoProviderBindOrCheckPermission();
}
}
/**
* Does the device actually have a Contacts application suitable for
* picking a contact. As hard as it is to believe, some vendors ship
* without it.
*
* @return True, if the device supports picking contacts. False, otherwise.
*/
private boolean hasContactPicker() {
if (hasContactPicker == null) {
Contacts contacts = Contacts.getInstance(context);
PackageManager packageManager = context.getPackageManager();
List<ResolveInfo> resolveInfoList = packageManager.queryIntentActivities(contacts.contactPickerIntent(), 0);
hasContactPicker = !resolveInfoList.isEmpty();
}
return hasContactPicker;
}
public void showPgpSendError(SendErrorState sendErrorState) {
switch (sendErrorState) {
case PROVIDER_ERROR:
recipientMvpView.showErrorOpenPgpConnection();
break;
case SIGN_KEY_NOT_CONFIGURED:
recipientMvpView.showErrorMissingSignKey();
break;
case PRIVATE_BUT_MISSING_KEYS:
recipientMvpView.showErrorPrivateButMissingKeys();
break;
default:
throw new AssertionError("not all error states handled, this is a bug!");
}
}
void showPgpAttachError(AttachErrorState attachErrorState) {
switch (attachErrorState) {
case IS_INLINE:
recipientMvpView.showErrorInlineAttach();
break;
default:
throw new AssertionError("not all error states handled, this is a bug!");
}
}
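/** Sets up the connection to the OpenPGP provider configured in settings, unbinding any previous connection if the provider changed. */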
private void setupCryptoProvider() {
String openPgpProvider = K9.getOpenPgpProvider();
if (TextUtils.isEmpty(openPgpProvider)) {
openPgpProvider = null;
}
boolean providerIsBound = openPgpServiceConnection != null && openPgpServiceConnection.isBound();
boolean isSameProvider = openPgpProvider != null && openPgpProvider.equals(this.openPgpProvider);
if (isSameProvider && providerIsBound) {
cryptoProviderBindOrCheckPermission();
return;
}
if (providerIsBound) {
openPgpServiceConnection.unbindFromService();
openPgpServiceConnection = null;
}
this.openPgpProvider = openPgpProvider;
if (openPgpProvider == null) {
cryptoProviderState = CryptoProviderState.UNCONFIGURED;
return;
}
cryptoProviderState = CryptoProviderState.UNINITIALIZED;
openPgpServiceConnection = new OpenPgpServiceConnection(context, openPgpProvider, new OnBound() {
@Override
public void onBound(IOpenPgpService2 service) {
cryptoProviderBindOrCheckPermission();
}
@Override
public void onError(Exception e) {
onCryptoProviderError(e);
}
});
cryptoProviderBindOrCheckPermission();
recipientMvpView.setCryptoProvider(openPgpProvider);
}
private void cryptoProviderBindOrCheckPermission() {
if (openPgpServiceConnection == null) {
cryptoProviderState = CryptoProviderState.UNCONFIGURED;
return;
}
if (!openPgpServiceConnection.isBound()) {
pendingUserInteractionIntent = null;
openPgpServiceConnection.bindToService();
return;
}
if (pendingUserInteractionIntent != null) {
recipientMvpView
.launchUserInteractionPendingIntent(pendingUserInteractionIntent, OPENPGP_USER_INTERACTION);
pendingUserInteractionIntent = null;
return;
}
getOpenPgpApi().checkPermissionPing(this);
}
private void onCryptoProviderError(Exception e) {
// TODO handle error case better
recipientMvpView.showErrorOpenPgpConnection();
cryptoProviderState = CryptoProviderState.ERROR;
Timber.e(e, "error connecting to crypto provider!");
updateCryptoStatus();
}
@Override
public void onPgpPermissionCheckResult(Intent result) {
int resultCode = result.getIntExtra(OpenPgpApi.RESULT_CODE, OpenPgpApi.RESULT_CODE_ERROR);
switch (resultCode) {
case OpenPgpApi.RESULT_CODE_SUCCESS:
cryptoProviderState = CryptoProviderState.OK;
break;
case OpenPgpApi.RESULT_CODE_USER_INTERACTION_REQUIRED:
recipientMvpView.showErrorOpenPgpUserInteractionRequired();
pendingUserInteractionIntent = result.getParcelableExtra(OpenPgpApi.RESULT_INTENT);
cryptoProviderState = CryptoProviderState.ERROR;
break;
case OpenPgpApi.RESULT_CODE_ERROR:
default:
recipientMvpView.showErrorOpenPgpConnection();
cryptoProviderState = CryptoProviderState.ERROR;
break;
}
updateCryptoStatus();
}
public void onActivityDestroy() {
if (openPgpServiceConnection != null && openPgpServiceConnection.isBound()) {
openPgpServiceConnection.unbindFromService();
}
openPgpServiceConnection = null;
}
private OpenPgpApi getOpenPgpApi() {
if (openPgpServiceConnection == null || !openPgpServiceConnection.isBound()) {
Timber.e("obtained openpgpapi object, but service is not bound! inconsistent state?");
}
return new OpenPgpApi(context, openPgpServiceConnection.getService());
}
public void builderSetProperties(PgpMessageBuilder pgpBuilder) {
pgpBuilder.setOpenPgpApi(getOpenPgpApi());
pgpBuilder.setCryptoStatus(getCurrentCryptoStatus());
}
public void onMenuSetPgpInline(boolean enablePgpInline) {
onCryptoPgpInlineChanged(enablePgpInline);
if (enablePgpInline) {
boolean shouldShowPgpInlineDialog = checkAndIncrementPgpInlineDialogCounter();
if (shouldShowPgpInlineDialog) {
recipientMvpView.showOpenPgpInlineDialog(true);
}
}
}
public void onMenuSetSignOnly(boolean enableSignOnly) {
if (enableSignOnly) {
onCryptoModeChanged(CryptoMode.SIGN_ONLY);
boolean shouldShowPgpSignOnlyDialog = checkAndIncrementPgpSignOnlyDialogCounter();
if (shouldShowPgpSignOnlyDialog) {
recipientMvpView.showOpenPgpSignOnlyDialog(true);
}
} else {
onCryptoModeChanged(CryptoMode.OPPORTUNISTIC);
}
}
public void onCryptoPgpSignOnlyDisabled() {
onCryptoPgpInlineChanged(false);
onCryptoModeChanged(CryptoMode.OPPORTUNISTIC);
}
private boolean checkAndIncrementPgpInlineDialogCounter() {
int pgpInlineDialogCounter = K9.getPgpInlineDialogCounter();
if (pgpInlineDialogCounter < PGP_DIALOG_DISPLAY_THRESHOLD) {
K9.setPgpInlineDialogCounter(pgpInlineDialogCounter + 1);
return true;
}
return false;
}
private boolean checkAndIncrementPgpSignOnlyDialogCounter() {
int pgpSignOnlyDialogCounter = K9.getPgpSignOnlyDialogCounter();
if (pgpSignOnlyDialogCounter < PGP_DIALOG_DISPLAY_THRESHOLD) {
K9.setPgpSignOnlyDialogCounter(pgpSignOnlyDialogCounter + 1);
return true;
}
return false;
}
void onClickCryptoSpecialModeIndicator() {
ComposeCryptoStatus currentCryptoStatus = getCurrentCryptoStatus();
if (currentCryptoStatus.isSignOnly()) {
recipientMvpView.showOpenPgpSignOnlyDialog(false);
} else if (currentCryptoStatus.isPgpInlineModeEnabled()) {
recipientMvpView.showOpenPgpInlineDialog(false);
} else {
throw new IllegalStateException("This icon should not be clickable while no special mode is active!");
}
}
@VisibleForTesting
void setOpenPgpServiceConnection(OpenPgpServiceConnection openPgpServiceConnection, String cryptoProvider) {
this.openPgpServiceConnection = openPgpServiceConnection;
this.openPgpProvider = cryptoProvider;
}
public enum CryptoProviderState {
UNCONFIGURED,
UNINITIALIZED,
LOST_CONNECTION,
ERROR,
OK
}
public enum CryptoMode {
DISABLE,
SIGN_ONLY,
OPPORTUNISTIC,
PRIVATE,
}
public static interface RecipientsChangedListener {
public void onRecipientsChanged();
}
}
| 1 | 15,331 | Flipped the logic here, was this actually wrong before? | k9mail-k-9 | java |
@@ -336,7 +336,7 @@ static CALI_BPF_INLINE int calico_tc(struct __sk_buff *skb)
.reason = CALI_REASON_UNKNOWN,
};
struct calico_nat_dest *nat_dest = NULL;
- bool nat_lvl1_drop = 0;
+ int res = 0;
/* we assume we do FIB and from this point on, we only set it to false
* if we decide not to do it. | 1 | // Project Calico BPF dataplane programs.
// Copyright (c) 2020 Tigera, Inc. All rights reserved.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#include <asm/types.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/icmp.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/if_ether.h>
#include <iproute2/bpf_elf.h>
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include "bpf.h"
#include "log.h"
#include "skb.h"
#include "policy.h"
#include "conntrack.h"
#include "nat.h"
#include "routes.h"
#include "jump.h"
#include "reasons.h"
#include "icmp.h"
#ifndef CALI_FIB_LOOKUP_ENABLED
#define CALI_FIB_LOOKUP_ENABLED true
#endif
#ifndef CALI_DROP_WORKLOAD_TO_HOST
#define CALI_DROP_WORKLOAD_TO_HOST false
#endif
#ifdef CALI_DEBUG_ALLOW_ALL
/* If we want to just compile the code without defining any policies and to
* avoid compiling out code paths that are not reachable if traffic is denied,
* we can compile it with allow all
*/
static CALI_BPF_INLINE enum calico_policy_result execute_policy_norm(struct __sk_buff *skb,
__u8 ip_proto, __u32 saddr, __u32 daddr, __u16 sport, __u16 dport)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-label"
RULE_START(0);
RULE_END(0, allow);
return CALI_POL_NO_MATCH;
deny:
return CALI_POL_DENY;
allow:
return CALI_POL_ALLOW;
#pragma clang diagnostic pop
}
#else
static CALI_BPF_INLINE enum calico_policy_result execute_policy_norm(struct __sk_buff *skb,
__u8 ip_proto, __u32 saddr, __u32 daddr, __u16 sport, __u16 dport)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-label"
RULE_START(0);
RULE_END(0, deny);
return CALI_POL_NO_MATCH;
deny:
return CALI_POL_DENY;
allow:
return CALI_POL_ALLOW;
#pragma clang diagnostic pop
}
#endif /* CALI_DEBUG_ALLOW_ALL */
__attribute__((section("1/0")))
int calico_tc_norm_pol_tail(struct __sk_buff *skb)
{
CALI_DEBUG("Entering normal policy tail call\n");
__u32 key = 0;
struct cali_tc_state *state = cali_v4_state_lookup_elem(&key);
if (!state) {
CALI_DEBUG("State map lookup failed: DROP\n");
goto deny;
}
state->pol_rc = execute_policy_norm(skb, state->ip_proto, state->ip_src,
state->ip_dst, state->sport, state->dport);
bpf_tail_call(skb, &cali_jump, 1);
CALI_DEBUG("Tail call to post-policy program failed: DROP\n");
deny:
return TC_ACT_SHOT;
}
struct fwd {
int res;
uint32_t mark;
enum calico_reason reason;
#if FIB_ENABLED
uint32_t fib_flags;
bool fib;
#endif
};
#if FIB_ENABLED
#define fwd_fib(fwd) ((fwd)->fib)
#define fwd_fib_set(fwd, v) ((fwd)->fib = v)
#define fwd_fib_set_flags(fwd, flags) ((fwd)->fib_flags = flags)
#else
#define fwd_fib(fwd) false
#define fwd_fib_set(fwd, v)
#define fwd_fib_set_flags(fwd, flags)
#endif
static CALI_BPF_INLINE struct fwd calico_tc_skb_accepted(struct __sk_buff *skb,
struct iphdr *ip_header,
struct cali_tc_state *state,
struct calico_nat_dest *nat_dest);
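/* Incrementally fix up the L4 checksum after NAT has rewritten the IP address and/or the port. */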
static CALI_BPF_INLINE int skb_nat_l4_csum_ipv4(struct __sk_buff *skb, size_t off,
__be32 ip_from, __be32 ip_to,
__u16 port_from, __u16 port_to,
uint64_t flags)
{
int ret = 0;
if (ip_from != ip_to) {
CALI_DEBUG("L4 checksum update (csum is at %d) IP from %x to %x\n", off,
be32_to_host(ip_from), be32_to_host(ip_to));
ret = bpf_l4_csum_replace(skb, off, ip_from, ip_to, flags | BPF_F_PSEUDO_HDR | 4);
CALI_DEBUG("bpf_l4_csum_replace(IP): %d\n", ret);
}
if (port_from != port_to) {
CALI_DEBUG("L4 checksum update (csum is at %d) port from %d to %d\n",
off, be16_to_host(port_from), be16_to_host(port_to));
int rc = bpf_l4_csum_replace(skb, off, port_from, port_to, flags | 2);
CALI_DEBUG("bpf_l4_csum_replace(port): %d\n", rc);
ret |= rc;
}
return ret;
}
static CALI_BPF_INLINE int forward_or_drop(struct __sk_buff *skb,
struct cali_tc_state *state,
struct fwd *fwd)
{
int rc = fwd->res;
enum calico_reason reason = fwd->reason;
if (rc == TC_ACT_SHOT) {
goto deny;
}
if (rc == CALI_RES_REDIR_IFINDEX) {
int redir_flags = 0;
if (CALI_F_FROM_HOST) {
redir_flags = BPF_F_INGRESS;
}
/* Revalidate the access to the packet */
if ((void *)(long)skb->data + sizeof(struct ethhdr) > (void *)(long)skb->data_end) {
reason = CALI_REASON_SHORT;
goto deny;
}
/* Swap the MACs as we are turning it back */
struct ethhdr *eth_hdr = (void *)(long)skb->data;
unsigned char mac[ETH_ALEN];
__builtin_memcpy(mac, &eth_hdr->h_dest, ETH_ALEN);
__builtin_memcpy(&eth_hdr->h_dest, &eth_hdr->h_source, ETH_ALEN);
__builtin_memcpy(&eth_hdr->h_source, mac, ETH_ALEN);
rc = bpf_redirect(skb->ifindex, redir_flags);
if (rc == TC_ACT_REDIRECT) {
CALI_DEBUG("Redirect to the same interface (%d) succeeded\n", skb->ifindex);
goto skip_fib;
}
CALI_DEBUG("Redirect to the same interface (%d) failed\n", skb->ifindex);
goto deny;
}
#if FIB_ENABLED
// Try a short-circuit FIB lookup.
if (fwd_fib(fwd)) {
/* XXX we might include the tot_len in the fwd, set it once when
* we get the ip_header the first time and only adjust the value
* when we modify the packet - to avoid getting the header here
* again - it is simpler though.
*/
if (skb_too_short(skb)) {
reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
struct iphdr *ip_header = skb_iphdr(skb);
struct bpf_fib_lookup fib_params = {
.family = 2, /* AF_INET */
.tot_len = be16_to_host(ip_header->tot_len),
.ifindex = skb->ingress_ifindex,
.l4_protocol = state->ip_proto,
.sport = host_to_be16(state->sport),
.dport = host_to_be16(state->dport),
};
/* set the ipv4 here, otherwise the ipv4/6 unions do not get
* zeroed properly
*/
fib_params.ipv4_src = state->ip_src;
fib_params.ipv4_dst = state->ip_dst;
CALI_DEBUG("FIB family=%d\n", fib_params.family);
CALI_DEBUG("FIB tot_len=%d\n", fib_params.tot_len);
CALI_DEBUG("FIB ifindex=%d\n", fib_params.ifindex);
CALI_DEBUG("FIB l4_protocol=%d\n", fib_params.l4_protocol);
CALI_DEBUG("FIB sport=%d\n", be16_to_host(fib_params.sport));
CALI_DEBUG("FIB dport=%d\n", be16_to_host(fib_params.dport));
CALI_DEBUG("FIB ipv4_src=%x\n", be32_to_host(fib_params.ipv4_src));
CALI_DEBUG("FIB ipv4_dst=%x\n", be32_to_host(fib_params.ipv4_dst));
CALI_DEBUG("Traffic is towards the host namespace, doing Linux FIB lookup\n");
rc = bpf_fib_lookup(skb, &fib_params, sizeof(fib_params), fwd->fib_flags);
if (rc == 0) {
CALI_DEBUG("FIB lookup succeeded\n");
/* Since we are going to short circuit the IP stack on
* forward, check if TTL is still alive. If not, let the
* IP stack handle it. It was approved by policy, so it
* is safe.
*/
if (ip_ttl_exceeded(ip_header)) {
rc = TC_ACT_UNSPEC;
goto cancel_fib;
}
// Update the MACs. NAT may have invalidated pointer into the packet so need to
// revalidate.
if ((void *)(long)skb->data + sizeof(struct ethhdr) > (void *)(long)skb->data_end) {
reason = CALI_REASON_SHORT;
goto deny;
}
struct ethhdr *eth_hdr = (void *)(long)skb->data;
__builtin_memcpy(&eth_hdr->h_source, fib_params.smac, sizeof(eth_hdr->h_source));
__builtin_memcpy(&eth_hdr->h_dest, fib_params.dmac, sizeof(eth_hdr->h_dest));
// Redirect the packet.
CALI_DEBUG("Got Linux FIB hit, redirecting to iface %d.\n", fib_params.ifindex);
rc = bpf_redirect(fib_params.ifindex, 0);
/* now we know we will bypass IP stack and ip->ttl > 1, decrement it! */
if (rc == TC_ACT_REDIRECT) {
ip_dec_ttl(ip_header);
}
} else if (rc < 0) {
CALI_DEBUG("FIB lookup failed (bad input): %d.\n", rc);
rc = TC_ACT_UNSPEC;
} else {
CALI_DEBUG("FIB lookup failed (FIB problem): %d.\n", rc);
rc = TC_ACT_UNSPEC;
}
}
cancel_fib:
#endif /* FIB_ENABLED */
skip_fib:
if (CALI_F_TO_HOST) {
/* If we received the packet from the tunnel and we forward it to a
* workload we need to skip RPF check since there might be a better path
* for the packet if the host has multiple ifaces and the packet might get dropped.
*
* XXX We should check ourselves that we got our tunnel packets only from
* XXX those devices where we expect them before we even decap.
*/
if (CALI_F_FROM_HEP && state->tun_ip != 0) {
fwd->mark = CALI_SKB_MARK_SKIP_RPF;
}
/* Packet is towards host namespace, mark it so that downstream
* programs know that they're not the first to see the packet.
*/
CALI_DEBUG("Traffic is towards host namespace, marking with %x.\n", fwd->mark);
/* FIXME: this ignores the mask that we should be using.
* However, if we mask off the bits, then clang spots that it
* can do a 16-bit store instead of a 32-bit load/modify/store,
* which trips up the validator.
*/
skb->mark = fwd->mark | CALI_SKB_MARK_SEEN; /* make sure that each pkt has SEEN mark */
}
if (CALI_LOG_LEVEL >= CALI_LOG_LEVEL_INFO) {
uint64_t prog_end_time = bpf_ktime_get_ns();
CALI_INFO("Final result=ALLOW (%d). Program execution time: %lluns\n",
rc, prog_end_time-state->prog_start_time);
}
return rc;
deny:
if (CALI_LOG_LEVEL >= CALI_LOG_LEVEL_INFO) {
uint64_t prog_end_time = bpf_ktime_get_ns();
CALI_INFO("Final result=DENY (%x). Program execution time: %lluns\n",
reason, prog_end_time-state->prog_start_time);
}
return TC_ACT_SHOT;
}
static CALI_BPF_INLINE int calico_tc(struct __sk_buff *skb)
{
struct cali_tc_state state = {};
struct fwd fwd = {
.res = TC_ACT_UNSPEC,
.reason = CALI_REASON_UNKNOWN,
};
struct calico_nat_dest *nat_dest = NULL;
bool nat_lvl1_drop = 0;
/* we assume we do FIB and from this point on, we only set it to false
* if we decide not to do it.
*/
fwd_fib_set(&fwd, true);
if (CALI_LOG_LEVEL >= CALI_LOG_LEVEL_INFO) {
state.prog_start_time = bpf_ktime_get_ns();
}
state.tun_ip = 0;
#ifdef CALI_SET_SKB_MARK
/* workaround for test since bpftool run cannot set it in context, won't
* be necessary if fixed in kernel
*/
skb->mark = CALI_SET_SKB_MARK;
#endif
if (!CALI_F_TO_HOST && skb->mark == CALI_SKB_MARK_BYPASS) {
CALI_DEBUG("Packet pre-approved by another hook, allow.\n");
fwd.reason = CALI_REASON_BYPASS;
goto allow;
}
struct iphdr *ip_header;
if (CALI_F_TO_HEP || CALI_F_TO_WEP) {
switch (skb->mark) {
case CALI_SKB_MARK_BYPASS_FWD:
CALI_DEBUG("Packet approved for forward.\n");
fwd.reason = CALI_REASON_BYPASS;
goto allow;
case CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP:
CALI_DEBUG("Packet approved for forward - src ip fixup\n");
fwd.reason = CALI_REASON_BYPASS;
/* we need to fix up the right src host IP */
if (skb_too_short(skb)) {
fwd.reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
__be32 ip_src = ip_header->saddr;
if (ip_src == HOST_IP) {
CALI_DEBUG("src ip fixup not needed %x\n", be32_to_host(ip_src));
goto allow;
}
/* XXX do a proper CT lookup to find this */
ip_header->saddr = HOST_IP;
int l3_csum_off = skb_iphdr_offset(skb) + offsetof(struct iphdr, check);
int res = bpf_l3_csum_replace(skb, l3_csum_off, ip_src, HOST_IP, 4);
if (res) {
fwd.reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
goto allow;
}
}
// Parse the packet.
// TODO Do we need to handle any odd-ball frames here (e.g. with a 0 VLAN header)?
switch (host_to_be16(skb->protocol)) {
case ETH_P_IP:
break;
case ETH_P_ARP:
CALI_DEBUG("ARP: allowing packet\n");
fwd_fib_set(&fwd, false);
goto allow;
case ETH_P_IPV6:
if (CALI_F_WEP) {
CALI_DEBUG("IPv6 from workload: drop\n");
return TC_ACT_SHOT;
} else {
// FIXME: support IPv6.
CALI_DEBUG("IPv6 on host interface: allow\n");
return TC_ACT_UNSPEC;
}
default:
if (CALI_F_WEP) {
CALI_DEBUG("Unknown ethertype (%x), drop\n", be16_to_host(skb->protocol));
goto deny;
} else {
CALI_DEBUG("Unknown ethertype on host interface (%x), allow\n",
be16_to_host(skb->protocol));
return TC_ACT_UNSPEC;
}
}
if (skb_too_short(skb)) {
fwd.reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
if (dnat_should_decap() && is_vxlan_tunnel(ip_header)) {
struct udphdr *udp_header = (void*)(ip_header+1);
/* decap on host ep only if directly for the node */
CALI_DEBUG("VXLAN tunnel packet to %x (host IP=%x)\n", ip_header->daddr, HOST_IP);
if (rt_addr_is_local_host(ip_header->daddr) &&
vxlan_udp_csum_ok(udp_header) &&
vxlan_size_ok(skb, udp_header) &&
vxlan_vni_is_valid(skb, udp_header) &&
vxlan_vni(skb, udp_header) == CALI_VXLAN_VNI) {
state.tun_ip = ip_header->saddr;
CALI_DEBUG("vxlan decap\n");
if (vxlan_v4_decap(skb)) {
fwd.reason = CALI_REASON_DECAP_FAIL;
goto deny;
}
if (skb_too_short(skb)) {
fwd.reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short after VXLAN decap\n");
goto deny;
}
ip_header = skb_iphdr(skb);
CALI_DEBUG("vxlan decap origin %x\n", be32_to_host(state.tun_ip));
}
}
// Drop malformed IP packets
if (ip_header->ihl < 5) {
fwd.reason = CALI_REASON_IP_MALFORMED;
CALI_DEBUG("Drop malformed IP packets\n");
goto deny;
} else if (ip_header->ihl > 5) {
/* Drop packets with IP options from/to WEP.
* Also drop packets with IP options if the dest IP is not host IP
*/
if (CALI_F_WEP || (CALI_F_FROM_HEP && !rt_addr_is_local_host(ip_header->daddr))) {
fwd.reason = CALI_REASON_IP_OPTIONS;
CALI_DEBUG("Drop packets with IP options\n");
goto deny;
}
CALI_DEBUG("Allow packets with IP options and dst IP = hostIP\n");
goto allow;
}
// Setting all of these up-front to keep the verifier happy.
struct tcphdr *tcp_header = (void*)(ip_header+1);
struct udphdr *udp_header = (void*)(ip_header+1);
struct icmphdr *icmp_header = (void*)(ip_header+1);
tc_state_fill_from_iphdr(&state, ip_header);
switch (state.ip_proto) {
case IPPROTO_TCP:
// Re-check buffer space for TCP (has larger headers than UDP).
if (!skb_has_data_after(skb, ip_header, sizeof(struct tcphdr))) {
CALI_DEBUG("Too short for TCP: DROP\n");
goto deny;
}
state.sport = be16_to_host(tcp_header->source);
state.dport = be16_to_host(tcp_header->dest);
CALI_DEBUG("TCP; ports: s=%d d=%d\n", state.sport, state.dport);
break;
case IPPROTO_UDP:
state.sport = be16_to_host(udp_header->source);
state.dport = be16_to_host(udp_header->dest);
CALI_DEBUG("UDP; ports: s=%d d=%d\n", state.sport, state.dport);
break;
case IPPROTO_ICMP:
icmp_header = (void*)(ip_header+1);
CALI_DEBUG("ICMP; type=%d code=%d\n",
icmp_header->type, icmp_header->code);
break;
case 4:
// IPIP
if (CALI_F_HEP) {
// TODO IPIP whitelist.
CALI_DEBUG("IPIP: allow\n");
fwd_fib_set(&fwd, false);
goto allow;
}
default:
CALI_DEBUG("Unknown protocol (%d), unable to extract ports\n", (int)state.ip_proto);
}
state.pol_rc = CALI_POL_NO_MATCH;
switch (state.ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_ICMP:
break;
default:
if (CALI_F_HEP) {
// FIXME: allow unknown protocols through on host endpoints.
goto allow;
}
// FIXME non-port based conntrack.
goto deny;
}
struct ct_ctx ct_lookup_ctx = {
.skb = skb,
.proto = state.ip_proto,
.src = state.ip_src,
.sport = state.sport,
.dst = state.ip_dst,
.dport = state.dport,
.tun_ip = state.tun_ip,
};
if (state.ip_proto == IPPROTO_TCP) {
if (!skb_has_data_after(skb, ip_header, sizeof(struct tcphdr))) {
CALI_DEBUG("Too short for TCP: DROP\n");
goto deny;
}
tcp_header = (void*)(ip_header+1);
ct_lookup_ctx.tcp = tcp_header;
}
/* Do conntrack lookup before anything else */
state.ct_result = calico_ct_v4_lookup(&ct_lookup_ctx);
/* check if someone is trying to spoof a tunnel packet */
if (CALI_F_FROM_HEP && ct_result_tun_src_changed(state.ct_result.rc)) {
CALI_DEBUG("dropping tunnel pkt with changed source node\n");
goto deny;
}
if (state.ct_result.flags & CALI_CT_FLAG_NAT_OUT) {
state.flags |= CALI_ST_NAT_OUTGOING;
}
/* We are possibly past (D)NAT, but that is ok, we need to let the IP
* stack do the RPF check on the source, dest is not important.
*/
if (CALI_F_TO_HOST && ct_result_rpf_failed(state.ct_result.rc)) {
fwd_fib_set(&fwd, false);
}
/* skip policy if we get conntrack hit */
if (ct_result_rc(state.ct_result.rc) != CALI_CT_NEW) {
goto skip_policy;
}
/* Unlike from WEP where we can do RPF by comparing to calico routing
* info, we must rely on Linux to do it for us when receiving packets
* from outside of the host. We enforce RPF failed on every new flow.
* This will make it skip fib in calico_tc_skb_accepted()
*/
if (CALI_F_FROM_HEP) {
ct_result_set_flag(state.ct_result.rc, CALI_CT_RPF_FAILED);
}
/* No conntrack entry, check if we should do NAT */
nat_dest = calico_v4_nat_lookup2(state.ip_src, state.ip_dst,
state.ip_proto, state.dport,
state.tun_ip != 0, &nat_lvl1_drop);
if (nat_lvl1_drop) {
CALI_DEBUG("Packet is from an unauthorised source: DROP\n");
fwd.reason = CALI_REASON_UNAUTH_SOURCE;
goto deny;
}
if (nat_dest != NULL) {
state.post_nat_ip_dst = nat_dest->addr;
state.post_nat_dport = nat_dest->port;
} else {
state.post_nat_ip_dst = state.ip_dst;
state.post_nat_dport = state.dport;
}
if (CALI_F_TO_WEP &&
skb->mark != CALI_SKB_MARK_SEEN &&
cali_rt_flags_local_host(cali_rt_lookup_flags(state.ip_src))) {
/* Host to workload traffic always allowed. We discount traffic that was
* seen by another program since it must have come in via another interface.
*/
CALI_DEBUG("Packet is from the host: ACCEPT\n");
state.pol_rc = CALI_POL_ALLOW;
goto skip_policy;
}
if (CALI_F_FROM_WEP) {
/* Do RPF check since it's our responsibility to police that. */
CALI_DEBUG("Workload RPF check src=%x skb iface=%d.\n",
be32_to_host(state.ip_src), skb->ifindex);
struct cali_rt *r = cali_rt_lookup(state.ip_src);
if (!r) {
CALI_INFO("Workload RPF fail: missing route.\n");
goto deny;
}
if (!cali_rt_flags_local_workload(r->flags)) {
CALI_INFO("Workload RPF fail: not a local workload.\n");
goto deny;
}
if (r->if_index != skb->ifindex) {
CALI_INFO("Workload RPF fail skb iface (%d) != route iface (%d)\n",
skb->ifindex, r->if_index);
goto deny;
}
// Check whether the workload needs outgoing NAT to this address.
if (r->flags & CALI_RT_NAT_OUT) {
if (!(cali_rt_lookup_flags(state.post_nat_ip_dst) & CALI_RT_IN_POOL)) {
CALI_DEBUG("Source is in NAT-outgoing pool "
"but dest is not, need to SNAT.\n");
state.flags |= CALI_ST_NAT_OUTGOING;
}
}
}
/* icmp_type and icmp_code share storage with the ports; now we've used
* the ports set to 0 to do the conntrack lookup, we can set the ICMP fields
* for policy.
*/
if (state.ip_proto == IPPROTO_ICMP) {
state.icmp_type = icmp_header->type;
state.icmp_code = icmp_header->code;
}
// Set up an entry in the state map and then jump to the normal policy program.
int key = 0;
struct cali_tc_state *map_state = cali_v4_state_lookup_elem(&key);
if (!map_state) {
// Shouldn't be possible; the map is pre-allocated.
CALI_INFO("State map lookup failed: DROP\n");
goto deny;
}
state.pol_rc = CALI_POL_NO_MATCH;
if (nat_dest) {
state.nat_dest.addr = nat_dest->addr;
state.nat_dest.port = nat_dest->port;
} else {
state.nat_dest.addr = 0;
state.nat_dest.port = 0;
}
*map_state = state;
if (CALI_F_HEP) {
/* We don't support host-endpoint policy yet, skip straight to
* the epilogue program.
* FIXME we really want to just call calico_tc_skb_accepted()
* here but that runs out of stack space.
*/
map_state->pol_rc = CALI_POL_ALLOW;
bpf_tail_call(skb, &cali_jump, 1);
CALI_DEBUG("Tail call to epilogue program failed: ALLOW\n");
return TC_ACT_UNSPEC;
}
CALI_DEBUG("About to jump to policy program; lack of further "
"logs means policy dropped the packet...\n");
bpf_tail_call(skb, &cali_jump, 0);
CALI_DEBUG("Tail call to policy program failed: DROP\n");
return TC_ACT_SHOT;
skip_policy:
fwd = calico_tc_skb_accepted(skb, ip_header, &state, nat_dest);
allow:
finalize:
return forward_or_drop(skb, &state, &fwd);
deny:
fwd.res = TC_ACT_SHOT;
goto finalize;
}
__attribute__((section("1/1")))
int calico_tc_skb_accepted_entrypoint(struct __sk_buff *skb)
{
CALI_DEBUG("Entering calico_tc_skb_accepted_entrypoint\n");
struct iphdr *ip_header = NULL;
if (skb_too_short(skb)) {
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
__u32 key = 0;
struct cali_tc_state *state = bpf_map_lookup_elem(&cali_v4_state, &key);
if (!state) {
CALI_DEBUG("State map lookup failed: DROP\n");
goto deny;
}
struct calico_nat_dest *nat_dest = NULL;
struct calico_nat_dest nat_dest_2 = {
.addr=state->nat_dest.addr,
.port=state->nat_dest.port,
};
if (state->nat_dest.addr != 0) {
nat_dest = &nat_dest_2;
}
struct fwd fwd = calico_tc_skb_accepted(skb, ip_header, state, nat_dest);
return forward_or_drop(skb, state, &fwd);
deny:
return TC_ACT_SHOT;
}
static CALI_BPF_INLINE struct fwd calico_tc_skb_accepted(struct __sk_buff *skb,
struct iphdr *ip_header,
struct cali_tc_state *state,
struct calico_nat_dest *nat_dest)
{
CALI_DEBUG("Entering calico_tc_skb_accepted\n");
enum calico_reason reason = CALI_REASON_UNKNOWN;
int rc = TC_ACT_UNSPEC;
bool fib = false;
struct ct_ctx ct_nat_ctx = {};
int ct_rc = ct_result_rc(state->ct_result.rc);
bool ct_related = ct_result_is_related(state->ct_result.rc);
uint32_t seen_mark;
size_t l4_csum_off = 0, l3_csum_off;
uint32_t fib_flags = 0;
CALI_DEBUG("src=%x dst=%x\n", be32_to_host(state->ip_src), be32_to_host(state->ip_dst));
CALI_DEBUG("post_nat=%x:%d\n", be32_to_host(state->post_nat_ip_dst), state->post_nat_dport);
CALI_DEBUG("tun_ip=%x\n", state->tun_ip);
CALI_DEBUG("pol_rc=%d\n", state->pol_rc);
CALI_DEBUG("sport=%d\n", state->sport);
CALI_DEBUG("flags=%x\n", state->flags);
CALI_DEBUG("ct_rc=%d\n", ct_rc);
CALI_DEBUG("ct_related=%d\n", ct_related);
// Set the dport to 0, to make sure conntrack entries for icmp is proper as we use
// dport to hold icmp type and code
if (state->ip_proto == IPPROTO_ICMP) {
state->dport = 0;
}
if (CALI_F_FROM_WEP && (state->flags & CALI_ST_NAT_OUTGOING)) {
seen_mark = CALI_SKB_MARK_NAT_OUT;
} else {
/* XXX we do it here again because doing it in one place only
* XXX in calico_tc() irritates the verifier :'(
*/
if (!CALI_F_TO_HOST || !ct_result_rpf_failed(state->ct_result.rc)) {
fib = true;
}
seen_mark = CALI_SKB_MARK_SEEN;
}
/* We check the ttl here to avoid needing complicated handling of
* related traffic back from the host if we let the host handle it.
*/
CALI_DEBUG("ip->ttl %d\n", ip_header->ttl);
if (ip_ttl_exceeded(ip_header)) {
switch (ct_rc){
case CALI_CT_NEW:
if (nat_dest) {
goto icmp_ttl_exceeded;
}
break;
case CALI_CT_ESTABLISHED_DNAT:
case CALI_CT_ESTABLISHED_SNAT:
goto icmp_ttl_exceeded;
}
}
l3_csum_off = skb_iphdr_offset(skb) + offsetof(struct iphdr, check);
if (ct_related) {
if (ip_header->protocol == IPPROTO_ICMP) {
struct icmphdr *icmp;
bool outer_ip_snat;
/* if we do SNAT ... */
outer_ip_snat = ct_rc == CALI_CT_ESTABLISHED_SNAT;
/* ... there is a return path to the tunnel ... */
outer_ip_snat = outer_ip_snat && state->ct_result.tun_ip;
/* ... and should do encap and it is not DSR or it is leaving host
* and either DSR from WEP or originated at host ... */
outer_ip_snat = outer_ip_snat &&
((dnat_return_should_encap() && !CALI_F_DSR) ||
(CALI_F_TO_HEP &&
((CALI_F_DSR && skb_seen(skb)) || !skb_seen(skb))));
/* ... then fix the outer header IP first */
if (outer_ip_snat) {
ip_header->saddr = state->ct_result.nat_ip;
int res = bpf_l3_csum_replace(skb, l3_csum_off,
state->ip_src, state->ct_result.nat_ip, 4);
if (res) {
reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
CALI_DEBUG("ICMP related: outer IP SNAT to %x\n",
be32_to_host(state->ct_result.nat_ip));
}
if (!icmp_skb_get_hdr(skb, &icmp)) {
CALI_DEBUG("Ooops, we already passed one such a check!!!\n");
goto deny;
}
l3_csum_off += sizeof(*ip_header) + sizeof(*icmp);
ip_header = (struct iphdr *)(icmp + 1); /* skip to inner ip */
/* flip the direction, we need to reverse the original packet */
switch (ct_rc) {
case CALI_CT_ESTABLISHED_SNAT:
/* handle the DSR case, see CALI_CT_ESTABLISHED_SNAT where nat is done */
if (dnat_return_should_encap() && state->ct_result.tun_ip) {
if (CALI_F_DSR) {
/* SNAT will be done after routing, when leaving HEP */
CALI_DEBUG("DSR enabled, skipping SNAT + encap\n");
goto allow;
}
}
ct_rc = CALI_CT_ESTABLISHED_DNAT;
break;
case CALI_CT_ESTABLISHED_DNAT:
if (CALI_F_FROM_HEP && state->tun_ip && ct_result_np_node(state->ct_result)) {
/* Packet is returning from a NAT tunnel, just forward it. */
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
CALI_DEBUG("ICMP related returned from NAT tunnel\n");
goto allow;
}
ct_rc = CALI_CT_ESTABLISHED_SNAT;
break;
}
}
}
struct tcphdr *tcp_header = (void*)(ip_header+1);
struct udphdr *udp_header = (void*)(ip_header+1);
__u8 ihl = ip_header->ihl * 4;
int res = 0;
bool encap_needed = false;
if (state->ip_proto == IPPROTO_ICMP && ct_related) {
/* do not fix up embedded L4 checksum for related ICMP */
} else {
switch (ip_header->protocol) {
case IPPROTO_TCP:
l4_csum_off = skb_l4hdr_offset(skb, ihl) + offsetof(struct tcphdr, check);
break;
case IPPROTO_UDP:
l4_csum_off = skb_l4hdr_offset(skb, ihl) + offsetof(struct udphdr, check);
break;
}
}
switch (ct_rc){
case CALI_CT_NEW:
switch (state->pol_rc) {
case CALI_POL_NO_MATCH:
CALI_DEBUG("Implicitly denied by normal policy: DROP\n");
goto deny;
case CALI_POL_DENY:
CALI_DEBUG("Denied by normal policy: DROP\n");
goto deny;
case CALI_POL_ALLOW:
CALI_DEBUG("Allowed by normal policy: ACCEPT\n");
}
if (CALI_F_FROM_WEP &&
CALI_DROP_WORKLOAD_TO_HOST &&
cali_rt_flags_local_host(
cali_rt_lookup_flags(state->post_nat_ip_dst))) {
CALI_DEBUG("Workload to host traffic blocked by "
"DefaultEndpointToHostAction: DROP\n");
goto deny;
}
ct_nat_ctx.skb = skb;
ct_nat_ctx.proto = state->ip_proto;
ct_nat_ctx.src = state->ip_src;
ct_nat_ctx.sport = state->sport;
ct_nat_ctx.dst = state->post_nat_ip_dst;
ct_nat_ctx.dport = state->post_nat_dport;
ct_nat_ctx.tun_ip = state->tun_ip;
if (state->flags & CALI_ST_NAT_OUTGOING) {
ct_nat_ctx.flags |= CALI_CT_FLAG_NAT_OUT;
}
if (state->ip_proto == IPPROTO_TCP) {
if (!skb_has_data_after(skb, ip_header, sizeof(struct tcphdr))) {
CALI_DEBUG("Too short for TCP: DROP\n");
goto deny;
}
tcp_header = (void*)(ip_header+1);
ct_nat_ctx.tcp = tcp_header;
}
// If we get here, we've passed policy.
if (nat_dest == NULL) {
if (conntrack_create(&ct_nat_ctx, CT_CREATE_NORMAL)) {
CALI_DEBUG("Creating normal conntrack failed\n");
goto deny;
}
goto allow;
}
ct_nat_ctx.orig_dst = state->ip_dst;
ct_nat_ctx.orig_dport = state->dport;
/* fall through as DNAT is now established */
case CALI_CT_ESTABLISHED_DNAT:
/* align with CALI_CT_NEW */
if (ct_rc == CALI_CT_ESTABLISHED_DNAT) {
if (CALI_F_FROM_HEP && state->tun_ip && ct_result_np_node(state->ct_result)) {
/* Packet is returning from a NAT tunnel,
* already SNATed, just forward it.
*/
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
CALI_DEBUG("returned from NAT tunnel\n");
goto allow;
}
state->post_nat_ip_dst = state->ct_result.nat_ip;
state->post_nat_dport = state->ct_result.nat_port;
}
CALI_DEBUG("CT: DNAT to %x:%d\n",
be32_to_host(state->post_nat_ip_dst), state->post_nat_dport);
encap_needed = dnat_should_encap();
/* We have not created the conntrack yet since we did not know
* if we need encap or not. Must do before MTU check and before
* we jump to do the encap.
*/
if (ct_rc == CALI_CT_NEW) {
struct cali_rt * rt;
int nat_type = CT_CREATE_NAT;
if (encap_needed) {
/* When we need to encap, we need to find out if the backend is
* local or not. If local, we actually do not need the encap.
*/
rt = cali_rt_lookup(state->post_nat_ip_dst);
if (!rt) {
reason = CALI_REASON_RT_UNKNOWN;
goto deny;
}
CALI_DEBUG("rt found for 0x%x local %d\n",
be32_to_host(state->post_nat_ip_dst), !!cali_rt_is_local(rt));
encap_needed = !cali_rt_is_local(rt);
if (encap_needed) {
if (CALI_F_FROM_HEP && state->tun_ip == 0) {
if (CALI_F_DSR) {
ct_nat_ctx.flags |= CALI_CT_FLAG_DSR_FWD;
}
ct_nat_ctx.flags |= CALI_CT_FLAG_NP_FWD;
}
nat_type = CT_CREATE_NAT_FWD;
ct_nat_ctx.tun_ip = rt->next_hop;
state->ip_dst = rt->next_hop;
}
}
if (conntrack_create(&ct_nat_ctx, nat_type)) {
CALI_DEBUG("Creating NAT conntrack failed\n");
goto deny;
}
} else {
if (encap_needed && ct_result_np_node(state->ct_result)) {
CALI_DEBUG("CT says encap to node %x\n", be32_to_host(state->ct_result.tun_ip));
state->ip_dst = state->ct_result.tun_ip;
} else {
encap_needed = false;
}
}
if (encap_needed) {
if (!(state->ip_proto == IPPROTO_TCP && skb_is_gso(skb)) &&
ip_is_dnf(ip_header) && vxlan_v4_encap_too_big(skb)) {
CALI_DEBUG("Request packet with DNF set is too big\n");
goto icmp_too_big;
}
state->ip_src = HOST_IP;
seen_mark = CALI_SKB_MARK_SKIP_RPF;
/* We cannot enforce RPF check on encapped traffic, do FIB if you can */
fib = true;
goto nat_encap;
}
ip_header->daddr = state->post_nat_ip_dst;
switch (ip_header->protocol) {
case IPPROTO_TCP:
tcp_header->dest = host_to_be16(state->post_nat_dport);
break;
case IPPROTO_UDP:
udp_header->dest = host_to_be16(state->post_nat_dport);
break;
}
CALI_VERB("L3 csum at %d L4 csum at %d\n", l3_csum_off, l4_csum_off);
if (l4_csum_off) {
res = skb_nat_l4_csum_ipv4(skb, l4_csum_off, state->ip_dst,
state->post_nat_ip_dst, host_to_be16(state->dport),
host_to_be16(state->post_nat_dport),
ip_header->protocol == IPPROTO_UDP ? BPF_F_MARK_MANGLED_0 : 0);
}
res |= bpf_l3_csum_replace(skb, l3_csum_off, state->ip_dst, state->post_nat_ip_dst, 4);
if (res) {
reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
/* Handle returning ICMP related to tunnel
*
* N.B. we assume that we can fit in the MTU. Since it is ICMP
* and even though Linux sends up to min ipv4 MTU, it is
* unlikely that we are anywhere close to the MTU limit. If we
* are, we need to fail anyway.
*/
if (ct_related && state->ip_proto == IPPROTO_ICMP
&& state->ct_result.tun_ip
&& !CALI_F_DSR) {
if (dnat_return_should_encap()) {
CALI_DEBUG("Returning related ICMP from workload to tunnel\n");
state->ip_dst = state->ct_result.tun_ip;
seen_mark = CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP;
goto nat_encap;
} else if (CALI_F_TO_HEP) {
/* Special case for ICMP error being returned by the host with the
* backing workload into the tunnel back to the original host. It is
* ICMP related and there is a return tunnel path. We need to change
* both the source and destination at once.
*
* XXX the packet was routed to the original client as if it was
* XXX DSR and we might not be on the right iface!!! Should we
* XXX try to reinject it to fix the routing?
*/
CALI_DEBUG("Returning related ICMP from host to tunnel\n");
state->ip_src = HOST_IP;
state->ip_dst = state->ct_result.tun_ip;
goto nat_encap;
}
}
state->dport = state->post_nat_dport;
state->ip_dst = state->post_nat_ip_dst;
goto allow;
case CALI_CT_ESTABLISHED_SNAT:
CALI_DEBUG("CT: SNAT from %x:%d\n",
be32_to_host(state->ct_result.nat_ip), state->ct_result.nat_port);
if (dnat_return_should_encap() && state->ct_result.tun_ip) {
if (CALI_F_DSR) {
/* SNAT will be done after routing, when leaving HEP */
CALI_DEBUG("DSR enabled, skipping SNAT + encap\n");
goto allow;
}
if (!(state->ip_proto == IPPROTO_TCP && skb_is_gso(skb)) &&
ip_is_dnf(ip_header) && vxlan_v4_encap_too_big(skb)) {
CALI_DEBUG("Return ICMP mtu is too big\n");
goto icmp_too_big;
}
}
// Actually do the NAT.
ip_header->saddr = state->ct_result.nat_ip;
switch (ip_header->protocol) {
case IPPROTO_TCP:
tcp_header->source = host_to_be16(state->ct_result.nat_port);
break;
case IPPROTO_UDP:
udp_header->source = host_to_be16(state->ct_result.nat_port);
break;
}
CALI_VERB("L3 csum at %d L4 csum at %d\n", l3_csum_off, l4_csum_off);
if (l4_csum_off) {
res = skb_nat_l4_csum_ipv4(skb, l4_csum_off, state->ip_src,
state->ct_result.nat_ip, host_to_be16(state->sport),
host_to_be16(state->ct_result.nat_port),
ip_header->protocol == IPPROTO_UDP ? BPF_F_MARK_MANGLED_0 : 0);
}
CALI_VERB("L3 checksum update (csum is at %d) port from %x to %x\n",
l3_csum_off, state->ip_src, state->ct_result.nat_ip);
int csum_rc = bpf_l3_csum_replace(skb, l3_csum_off,
state->ip_src, state->ct_result.nat_ip, 4);
CALI_VERB("bpf_l3_csum_replace(IP): %d\n", csum_rc);
res |= csum_rc;
if (res) {
reason = CALI_REASON_CSUM_FAIL;
goto deny;
}
if (dnat_return_should_encap() && state->ct_result.tun_ip) {
state->ip_dst = state->ct_result.tun_ip;
seen_mark = CALI_SKB_MARK_BYPASS_FWD_SRC_FIXUP;
goto nat_encap;
}
state->sport = state->ct_result.nat_port;
state->ip_src = state->ct_result.nat_ip;
goto allow;
case CALI_CT_ESTABLISHED_BYPASS:
seen_mark = CALI_SKB_MARK_BYPASS;
// fall through
case CALI_CT_ESTABLISHED:
goto allow;
default:
if (CALI_F_FROM_HEP) {
/* Since we're using the host endpoint program for TC-redirect
* acceleration for workloads (but we haven't fully implemented
* host endpoint support yet), we can get an incorrect conntrack
* invalid for host traffic.
*
* FIXME: Properly handle host endpoint conntrack failures
*/
CALI_DEBUG("Traffic is towards host namespace but not conntracked, "
"falling through to iptables\n");
fib = false;
goto allow;
}
goto deny;
}
CALI_INFO("We should never fall through here\n");
goto deny;
icmp_ttl_exceeded:
if (skb_too_short(skb)) {
reason = CALI_REASON_SHORT;
CALI_DEBUG("Too short\n");
goto deny;
}
ip_header = skb_iphdr(skb);
/* we silently drop the packet if things go wrong */
/* XXX we should check if it is broadcast or multicast and not respond */
/* do not respond to IP fragments except the first */
if (ip_frag_no(ip_header)) {
goto deny;
}
if (icmp_v4_ttl_exceeded(skb)) {
goto deny;
}
/* we need to allow the response for the IP stack to route it back.
* XXX we might want to send it back the same iface
*/
goto icmp_allow;
icmp_too_big:
if (icmp_v4_too_big(skb)) {
reason = CALI_REASON_ICMP_DF;
goto deny;
}
/* XXX we might use skb->ifindex to redirect it straight back
* to where it came from if it is guaranteed to be the path
*/
fib_flags |= BPF_FIB_LOOKUP_OUTPUT;
if (CALI_F_FROM_WEP) {
/* we know it came from workload, just send it back the same way */
rc = CALI_RES_REDIR_IFINDEX;
}
goto icmp_allow;
icmp_allow:
/* recheck the size of the packet after it was turned into icmp and set
* state so that it can be processed further.
*/
if (skb_shorter(skb, ETH_IPV4_UDP_SIZE)) {
reason = CALI_REASON_SHORT;
goto deny;
}
ip_header = skb_iphdr(skb);
tc_state_fill_from_iphdr(state, ip_header);
state->sport = state->dport = 0;
/* packet was created because of approved traffic, treat it as related */
seen_mark = CALI_SKB_MARK_BYPASS_FWD;
goto allow;
nat_encap:
if (vxlan_v4_encap(skb, state->ip_src, state->ip_dst)) {
reason = CALI_REASON_ENCAP_FAIL;
goto deny;
}
state->sport = state->dport = CALI_VXLAN_PORT;
state->ip_proto = IPPROTO_UDP;
allow:
{
struct fwd fwd = {
.res = rc,
.mark = seen_mark,
};
fwd_fib_set(&fwd, fib);
fwd_fib_set_flags(&fwd, fib_flags);
return fwd;
}
deny:
{
struct fwd fwd = {
.res = TC_ACT_SHOT,
.reason = reason,
};
return fwd;
}
}
#ifndef CALI_ENTRYPOINT_NAME
#define CALI_ENTRYPOINT_NAME calico_entrypoint
#endif
// Entrypoint with definable name. It's useful to redefine the name for each entrypoint
// because the name is exposed by bpftool et al.
__attribute__((section(XSTR(CALI_ENTRYPOINT_NAME))))
int tc_calico_entry(struct __sk_buff *skb)
{
return calico_tc(skb);
}
char ____license[] __attribute__((section("license"), used)) = "GPL";
| 1 | 17,998 | I think (a) should be initialized to `NAT_FE_LOOKUP_ALLOW` and (b) it should be probably the first thing in `calico_v4_nat_lookup2` | projectcalico-felix | go |
@@ -36,7 +36,7 @@ public class TestManyPointsInOldIndex extends LuceneTestCase {
//
// Compile:
// 1) temporarily remove 'extends LuceneTestCase' above (else java doesn't see our static void main)
-// 2) ant compile-test
+// 2) gradlew testClasses
//
// Run:
// 1) java -cp ../build/backward-codecs/classes/test:../build/core/classes/java org.apache.lucene.index.TestManyPointsInOldIndex | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
// LUCENE-7501
public class TestManyPointsInOldIndex extends LuceneTestCase {
// To regenerate the back index zip:
//
// Compile:
// 1) temporarily remove 'extends LuceneTestCase' above (else java doesn't see our static void main)
// 2) ant compile-test
//
// Run:
// 1) java -cp ../build/backward-codecs/classes/test:../build/core/classes/java org.apache.lucene.index.TestManyPointsInOldIndex
//
// cd manypointsindex
// zip manypointsindex.zip *
public static void main(String[] args) throws IOException {
Directory dir = FSDirectory.open(Paths.get("manypointsindex"));
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig());
for(int i=0;i<1025;i++) {
Document doc = new Document();
doc.add(new IntPoint("intpoint", 1025-i));
w.addDocument(doc);
}
w.close();
dir.close();
}
public void testCheckOldIndex() throws IOException {
assumeTrue("Reenable when 7.0 is released", false);
Path path = createTempDir("manypointsindex");
InputStream resource = getClass().getResourceAsStream("manypointsindex.zip");
assertNotNull("manypointsindex not found", resource);
TestUtil.unzip(resource, path);
BaseDirectoryWrapper dir = newFSDirectory(path);
// disable default checking...
dir.setCheckIndexOnClose(false);
// ... because we check ourselves here:
TestUtil.checkIndex(dir, false, true, null);
dir.close();
}
}
| 1 | 36,632 | I'd look as to what this class actually does... seems weird. Classpath below (under "run") is wrong for gradle. | apache-lucene-solr | java |
@@ -17,6 +17,7 @@
package algod
import (
+ "github.com/algorand/go-algorand/data/basics"
"path/filepath"
"testing"
| 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package algod
import (
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/test/framework/fixtures"
)
func BenchmarkSendPayment(b *testing.B) {
var fixture fixtures.LibGoalFixture
fixture.SetupNoStart(b, filepath.Join("nettemplates", "TwoNodes50Each.json"))
fixture.Start()
defer fixture.Shutdown()
binDir := fixture.GetBinDir()
c, err := libgoal.MakeClientWithBinDir(binDir, fixture.PrimaryDataDir(), fixture.PrimaryDataDir(), libgoal.FullClient)
require.NoError(b, err)
wallet, err := c.GetUnencryptedWalletHandle()
require.NoError(b, err)
addrs, err := c.ListAddresses(wallet)
require.NoError(b, err)
require.True(b, len(addrs) > 0)
addr := addrs[0]
b.Run("getwallet", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err = c.GetUnencryptedWalletHandle()
require.NoError(b, err)
}
})
var tx transactions.Transaction
b.Run("construct", func(b *testing.B) {
for i := 0; i < b.N; i++ {
var nonce [8]byte
crypto.RandBytes(nonce[:])
tx, err = c.ConstructPayment(addr, addr, 1, 1, nonce[:], "")
require.NoError(b, err)
}
})
b.Run("signtxn", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err = c.SignTransactionWithWallet(wallet, nil, tx)
require.NoError(b, err)
}
})
b.Run("sendpayment", func(b *testing.B) {
for i := 0; i < b.N; i++ {
var nonce [8]byte
crypto.RandBytes(nonce[:])
_, err := c.SendPaymentFromWallet(wallet, nil, addr, addr, 1, 1, nonce[:], "")
require.NoError(b, err)
}
})
}
| 1 | 35,323 | Group w/ algorand imports | algorand-go-algorand | go |
@@ -28,12 +28,14 @@ package com.salesforce.androidsdk.store;
import java.util.ArrayList;
import java.util.List;
+import java.util.Locale;
import net.sqlcipher.database.SQLiteDatabase;
import org.json.JSONException;
import org.json.JSONObject;
+import android.annotation.SuppressLint;
import android.content.Context;
import android.test.InstrumentationTestCase;
import android.util.Log; | 1 | /*
* Copyright (c) 2011, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.store;
import java.util.ArrayList;
import java.util.List;
import net.sqlcipher.database.SQLiteDatabase;
import org.json.JSONException;
import org.json.JSONObject;
import android.content.Context;
import android.test.InstrumentationTestCase;
import android.util.Log;
import com.salesforce.androidsdk.smartstore.store.DBHelper;
import com.salesforce.androidsdk.smartstore.store.DBOpenHelper;
import com.salesforce.androidsdk.smartstore.store.IndexSpec;
import com.salesforce.androidsdk.smartstore.store.QuerySpec;
import com.salesforce.androidsdk.smartstore.store.QuerySpec.Order;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartstore.store.SmartStore.Type;
/**
* Set of tests for the smart store loading numerous and/or large entries and querying them back
*/
public class SmartStoreLoadTest extends InstrumentationTestCase {
private static final int MAX_NUMBER_ENTRIES = 2048;
private static final int MAX_NUMBER_FIELDS = 2048;
private static final int MAX_FIELD_LENGTH = 65536;
private static final int NUMBER_FIELDS_PER_ENTRY = 128;
private static final int NUMBER_ENTRIES_PER_BATCH = 64;
private static final int NUMBER_BATCHES = 128;
private static final int QUERY_PAGE_SIZE = 64;
private static final String TEST_SOUP = "test_soup";
protected Context targetContext;
private SQLiteDatabase db;
private SmartStore store;
@Override
public void setUp() throws Exception {
super.setUp();
targetContext = getInstrumentation().getTargetContext();
DBHelper.INSTANCE.reset(targetContext, null); // start clean
db = getWritableDatabase();
store = new SmartStore(db);
assertFalse("Soup test_soup should not exist", store.hasSoup(TEST_SOUP));
store.registerSoup(TEST_SOUP, new IndexSpec[] {new IndexSpec("key", Type.string)});
assertTrue("Soup test_soup should now exist", store.hasSoup(TEST_SOUP));
}
protected SQLiteDatabase getWritableDatabase() {
return DBOpenHelper.getOpenHelper(targetContext, null).getWritableDatabase("");
}
@Override
protected void tearDown() throws Exception {
db.close();
// Not cleaning up after the test to make diagnosing issues easier
super.tearDown();
}
/**
* TEST: Upsert 1,2,...,MAX_NUMBER_ENTRIES entries (with just a couple of fields) into a soup
* @throws JSONException
*/
public void testUpsertManyEntries() throws JSONException {
Log.i("SmartStoreLoadTest", "In testUpsertManyEntries");
upsertNextManyEntries(1);
}
private void upsertNextManyEntries(int k) throws JSONException {
List<Long> times = new ArrayList<Long>();
store.beginTransaction();
for (int i=0; i<k; i++) {
JSONObject entry = new JSONObject();
entry.put("key", "k_" + k + "_" + i);
entry.put("value", "x");
long start = System.currentTimeMillis();
store.upsert(TEST_SOUP, entry);
long end = System.currentTimeMillis();
times.add(end-start);
}
store.setTransactionSuccessful();
store.endTransaction();
// Compute average time taken
long avg = 0;
for (int i=0; i<times.size(); i++) {
avg += times.get(i);
}
avg /= times.size();
// Log avg time taken
Log.i("SmartStoreLoadTest", "upserting " + k + " entries avg time taken: " + avg + " ms");
// Next
if (k < MAX_NUMBER_ENTRIES) {
upsertNextManyEntries(k*2);
}
}
/**
* TEST: Upsert entries with 1,2,...,MAX_NUMBER_FIELDS into a soup
* @throws JSONException
*/
public void testNumerousFields() throws JSONException {
Log.i("SmartStoreLoadTest", "In testNumerousFields");
upsertNextManyFieldsEntry(1);
}
private void upsertNextManyFieldsEntry(int k) throws JSONException {
JSONObject entry = new JSONObject();
for (int i=0; i<k; i++) {
entry.put("v"+i, "value_" + i);
}
// Upsert
upsertEntry("upserting entry with " + k + "+ fields", entry);
// Next
if (k < MAX_NUMBER_FIELDS) {
upsertNextManyFieldsEntry(k*2);
}
}
private void upsertEntry(String msg, JSONObject entry) throws JSONException {
long start = System.currentTimeMillis();
store.beginTransaction();
store.upsert(TEST_SOUP, entry);
store.setTransactionSuccessful();
store.endTransaction();
long end = System.currentTimeMillis();
// Log time taken
Log.i("SmartStoreLoadTest", msg + " time taken: " + (end-start) + " ms");
}
/**
* TEST: Upsert entry with a value field that is 1,2, ... , MAX_FIELD_LENGTH long into a soup
* @throws JSONException
*/
public void testIncreasingFieldLength() throws JSONException {
Log.i("SmartStoreLoadTest", "In testIncreasingFieldLength");
upsertNextLargerFieldEntry(1);
}
private void upsertNextLargerFieldEntry(int k) throws JSONException {
Log.i("SmartStoreLoadTest", "upsertNextLargerFieldEntry " + k);
StringBuilder sb = new StringBuilder();
for (int i=0; i< k; i++) {
sb.append("x");
}
JSONObject entry = new JSONObject();
entry.put("key", "k" + k);
entry.put("value", sb.toString());
// Upsert
upsertEntry("upserting entry with field with " + k + " characters", entry);
// Next
if (k < MAX_FIELD_LENGTH) {
upsertNextLargerFieldEntry(k*2);
}
}
/**
* TEST: Upsert MAX_NUMBER_ENTRIES entries into a soup and retrieve them back
* @throws JSONException
*/
public void testAddAndRetrieveManyEntries() throws JSONException {
Log.i("SmartStoreLoadTest", "In testAddAndRetrieveManyEntries");
List<Long> soupEntryIds = new ArrayList<Long>();
List<Long> times = new ArrayList<Long>();
store.beginTransaction();
for (int i=0; i < MAX_NUMBER_ENTRIES; i++) {
String paddedIndex = String.format("%05d", i);
JSONObject entry = new JSONObject();
entry.put("Name", "Todd Stellanova" + paddedIndex);
entry.put("Id", "003" + paddedIndex);
JSONObject attributes = new JSONObject();
attributes.put("type", "Contact");
attributes.put("url", "/foo/Contact" + paddedIndex);
entry.put("attributes", attributes);
long start = System.currentTimeMillis();
JSONObject upsertedEntry = store.upsert(TEST_SOUP, entry);
Long soupEntryId = upsertedEntry.getLong(SmartStore.SOUP_ENTRY_ID);
soupEntryIds.add(soupEntryId);
long end = System.currentTimeMillis();
times.add(end-start);
}
store.setTransactionSuccessful();
store.endTransaction();
// Compute average time taken
long avg = 0;
for (int i=0; i<times.size(); i++) {
avg += times.get(i);
}
avg /= times.size();
// Log avg time taken
Log.i("SmartStoreLoadTest", "upserting " + MAX_NUMBER_ENTRIES + " entries avg time taken: " + avg + " ms");
// Retrieve
long start = System.currentTimeMillis();
store.retrieve(TEST_SOUP, soupEntryIds.toArray(new Long[]{}));
long end = System.currentTimeMillis();
// Log retrieve time taken
Log.i("SmartStoreLoadTest", "retrieve " + MAX_NUMBER_ENTRIES + " entries time taken: " + (end-start) + " ms");
}
/**
* TEST: Upsert NUMBER_BATCHES batches of NUMBER_ENTRIES_PER_BATCH entries with NUMBER_FIELDS_PER_ENTRY fields into a soup and query all (fetching only a page of QUERY_PAGE_SIZE entries)
* @throws JSONException
*/
public void testUpsertAndQueryEntries() throws JSONException {
Log.i("SmartStoreLoadTest", "In testUpsertAndQueryEntries");
upsertQueryEntries(0);
}
private void upsertQueryEntries(int batch) throws JSONException {
int startKey = batch * NUMBER_ENTRIES_PER_BATCH;
int endKey = (batch+1) * NUMBER_ENTRIES_PER_BATCH;
List<Long> times = new ArrayList<Long>();
store.beginTransaction();
for (int i=startKey; i<endKey; i++) {
JSONObject entry = new JSONObject();
entry.put("key", "k_" + i);
entry.put("value", "x");
for (int j=0; j<NUMBER_FIELDS_PER_ENTRY; j++) {
entry.put("v" + j, "value_" + j);
}
long start = System.currentTimeMillis();
store.upsert(TEST_SOUP, entry);
long end = System.currentTimeMillis();
times.add(end-start);
}
store.setTransactionSuccessful();
store.endTransaction();
// Compute average time taken
long avg = 0;
for (int i=0; i<times.size(); i++) {
avg += times.get(i);
}
avg /= times.size();
// Log avg time taken
Log.i("SmartStoreLoadTest", "upserting " + NUMBER_ENTRIES_PER_BATCH + " entries avg time taken: " + avg + " ms");
// Query all
QuerySpec qs = QuerySpec.buildAllQuerySpec(TEST_SOUP, "key", Order.ascending, QUERY_PAGE_SIZE);
long start = System.currentTimeMillis();
store.query(qs, 0);
long end = System.currentTimeMillis();
// Log query time
Log.i("SmartStoreLoadTest", "querying out of soup with " + (batch+1)*NUMBER_ENTRIES_PER_BATCH + " entries time taken: " + (end-start) + " ms");
// Next
if (batch < NUMBER_BATCHES - 1) {
upsertQueryEntries(batch + 1);
}
}
}
| 1 | 14,015 | I don't think this import is being used. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -5,7 +5,7 @@ const AnyKind = "*** any procedure kind ***"
// Pattern represents an operator tree pattern
// It can match itself against a query plan
type Pattern interface {
- Root() ProcedureKind
+ Roots() []ProcedureKind
Match(Node) bool
}
| 1 | package plan
const AnyKind = "*** any procedure kind ***"
// Pattern represents an operator tree pattern
// It can match itself against a query plan
type Pattern interface {
Root() ProcedureKind
Match(Node) bool
}
// Pat returns a pattern that can match a plan node with the given ProcedureKind
// and whose predecessors match the given predecessor patterns.
//
// For example, to construct a pattern that matches a join followed by a sum:
//
// sum
// |
// |X| <=> join(A, B) |> sum() <=> Pat(SumKind, Pat(JoinKind, Any(), Any()))
// / \
// A B
func Pat(kind ProcedureKind, predecessors ...Pattern) Pattern {
return &OneKindPattern{
kind: kind,
predecessors: predecessors,
}
}
// PhysPat returns a pattern that matches a physical plan node with the given
// ProcedureKind and whose predecessors match the given predecessor patterns.
func PhysPat(kind ProcedureKind, predecessors ...Pattern) Pattern {
return PhysicalOneKindPattern{
pattern: Pat(kind, predecessors...),
}
}
// PhysicalOneKindPattern matches a physical operator pattern
type PhysicalOneKindPattern struct {
pattern Pattern
}
func (p PhysicalOneKindPattern) Root() ProcedureKind {
return p.pattern.Root()
}
func (p PhysicalOneKindPattern) Match(node Node) bool {
_, ok := node.(*PhysicalPlanNode)
return ok && p.pattern.Match(node)
}
// Any returns a pattern that matches anything.
func Any() Pattern {
return &AnyPattern{}
}
// OneKindPattern matches a specified procedure with a predecessor pattern
//
// ProcedureKind
// / | ... \
// pattern1 pattern2 ... patternK
type OneKindPattern struct {
kind ProcedureKind
predecessors []Pattern
}
func (okp OneKindPattern) Root() ProcedureKind {
return okp.kind
}
func (okp OneKindPattern) Match(node Node) bool {
if node.Kind() != okp.kind {
return false
}
if len(okp.predecessors) != len(node.Predecessors()) {
return false
}
// Check that each predecessor does not have other successors
for _, pred := range node.Predecessors() {
if len(pred.Successors()) != 1 {
return false
}
}
// Recursively match each predecessor
for i, pattern := range okp.predecessors {
if !pattern.Match(node.Predecessors()[i]) {
return false
}
}
return true
}
// AnyPattern describes (and matches) any plan node
type AnyPattern struct{}
func (AnyPattern) Root() ProcedureKind {
return AnyKind
}
func (AnyPattern) Match(node Node) bool {
return true
}
| 1 | 14,344 | This is an interesting way to do this. I like it. An alternative way to implement a multiple kind pattern was to have this return `AnyKind` and then have match perform an actual check to see if it matches. | influxdata-flux | go |
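The alternative the reviewer describes could look roughly like the sketch below: keep the single-kind `Root() ProcedureKind` signature, report `AnyKind`, and do the real membership test inside `Match`. This is an illustrative guess that reuses the `Pattern`, `Node`, `AnyKind`, and `ProcedureKind` names from the file above; the `MultiKindPattern` type and its fields are assumed, not taken from the flux codebase.

// MultiKindPattern (hypothetical) matches a node whose kind is any one of `kinds`.
// Root() advertises AnyKind, so the planner has to call Match to find out whether
// the node really qualifies -- the trade-off the reviewer points out.
type MultiKindPattern struct {
	kinds        []ProcedureKind
	predecessors []Pattern
}

func (m MultiKindPattern) Root() ProcedureKind {
	return AnyKind
}

func (m MultiKindPattern) Match(node Node) bool {
	found := false
	for _, k := range m.kinds {
		if node.Kind() == k {
			found = true
			break
		}
	}
	if !found {
		return false
	}
	// Same predecessor handling as OneKindPattern.Match above.
	if len(m.predecessors) != len(node.Predecessors()) {
		return false
	}
	for _, pred := range node.Predecessors() {
		if len(pred.Successors()) != 1 {
			return false
		}
	}
	for i, pattern := range m.predecessors {
		if !pattern.Match(node.Predecessors()[i]) {
			return false
		}
	}
	return true
}

Whether this would be preferable to the `Roots()` change in the patch likely depends on whether the planner indexes rules by root kind; with `AnyKind` the rule would be consulted for every node.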
@@ -373,8 +373,9 @@ class SonataMediaExtension extends Extension implements PrependExtensionInterfac
}
if ($container->hasDefinition('sonata.media.cdn.fallback') && isset($config['cdn']['fallback'])) {
+ // NEXT_MAJOR: Do not fallback to master
$container->getDefinition('sonata.media.cdn.fallback')
- ->replaceArgument(0, new Reference($config['cdn']['fallback']['master']))
+ ->replaceArgument(0, new Reference($config['cdn']['fallback']['primary'] ?? $config['cdn']['fallback']['master']))
->replaceArgument(1, new Reference($config['cdn']['fallback']['fallback']));
} else {
$container->removeDefinition('sonata.media.cdn.fallback');
| 1 | <?php
declare(strict_types=1);
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\DependencyInjection;
use Nelmio\ApiDocBundle\Annotation\Operation;
use Sonata\ClassificationBundle\Model\CategoryInterface;
use Sonata\Doctrine\Mapper\Builder\OptionsBuilder;
use Sonata\Doctrine\Mapper\DoctrineCollector;
use Sonata\EasyExtendsBundle\Mapper\DoctrineCollector as DeprecatedDoctrineCollector;
use Sonata\MediaBundle\CDN\CloudFrontVersion3;
use Symfony\Component\Config\Definition\Processor;
use Symfony\Component\Config\FileLocator;
use Symfony\Component\DependencyInjection\ContainerBuilder;
use Symfony\Component\DependencyInjection\Definition;
use Symfony\Component\DependencyInjection\Extension\PrependExtensionInterface;
use Symfony\Component\DependencyInjection\Loader\XmlFileLoader;
use Symfony\Component\DependencyInjection\Reference;
use Symfony\Component\HttpKernel\DependencyInjection\Extension;
use Symfony\Component\Messenger\MessageBusInterface;
/**
* @final since sonata-project/media-bundle 3.21.0
*
* @author Thomas Rabaix <[email protected]>
*/
class SonataMediaExtension extends Extension implements PrependExtensionInterface
{
/**
* @var array
*/
private $bundleConfigs;
public function load(array $configs, ContainerBuilder $container)
{
$processor = new Processor();
$configuration = new Configuration();
$config = $processor->processConfiguration($configuration, $configs);
$loader = new XmlFileLoader($container, new FileLocator(__DIR__.'/../Resources/config'));
$loader->load('actions.xml');
$loader->load('provider.xml');
$loader->load('media.xml');
$loader->load('twig.xml');
$loader->load('security.xml');
$loader->load('extra.xml');
$loader->load('form.xml');
$loader->load('gaufrette.xml');
$loader->load('validators.xml');
// NEXT_MAJOR: Remove next line and the file.
$loader->load('serializer.xml');
$loader->load('command.xml');
$bundles = $container->getParameter('kernel.bundles');
// NEXT_MAJOR: Remove this condition and remove all configuration files related to this.
if (isset($bundles['FOSRestBundle'], $bundles['NelmioApiDocBundle'])) {
$loader->load(sprintf('api_form_%s.xml', $config['db_driver']));
if ('doctrine_orm' === $config['db_driver']) {
// NEXT_MAJOR: remove legacy part
if (class_exists(Operation::class)) {
$loader->load('api_controllers.xml');
} else {
$loader->load('api_controllers_legacy.xml');
}
}
}
// NEXT_MAJOR: Remove this condition and remove all configuration files related to this.
if (isset($bundles['SonataNotificationBundle'])) {
$loader->load('consumer.xml');
}
if (isset($bundles['SonataBlockBundle'])) {
$loader->load('block.xml');
}
if (isset($bundles['SonataSeoBundle'])) {
$loader->load('seo_block.xml');
}
if (!isset($bundles['LiipImagineBundle'])) {
$container->removeDefinition('sonata.media.thumbnail.liip_imagine');
}
if ($this->isClassificationEnabled($config)) {
$loader->load('category.xml');
$categoryManagerAlias = 'sonata.media.manager.category';
$container->setAlias($categoryManagerAlias, $config['category_manager']);
$categoryManager = $container->getAlias($categoryManagerAlias);
$categoryManager->setPublic(true);
}
if (!\array_key_exists($config['default_context'], $config['contexts'])) {
throw new \InvalidArgumentException(sprintf('SonataMediaBundle - Invalid default context : %s, available : %s', $config['default_context'], json_encode(array_keys($config['contexts']))));
}
$loader->load(sprintf('%s.xml', $config['db_driver']));
if (isset($bundles['SonataAdminBundle'])) {
$loader->load(sprintf('%s_admin.xml', $config['db_driver']));
$sonataAdminConfig = $this->bundleConfigs['SonataAdminBundle'];
$sonataRoles = [];
if (isset($sonataAdminConfig['security']['role_admin'])) {
$sonataRoles[] = $sonataAdminConfig['security']['role_admin'];
} else {
$sonataRoles[] = 'ROLE_ADMIN';
}
if (isset($sonataAdminConfig['security']['role_super_admin'])) {
$sonataRoles[] = $sonataAdminConfig['security']['role_super_admin'];
} else {
$sonataRoles[] = 'ROLE_SUPER_ADMIN';
}
$container->getDefinition('sonata.media.security.superadmin_strategy')
->replaceArgument(2, $sonataRoles);
}
$this->configureFilesystemAdapter($container, $config);
$this->configureCdnAdapter($container, $config);
$pool = $container->getDefinition('sonata.media.pool');
$pool->replaceArgument(0, $config['default_context']);
$strategies = [];
foreach ($config['contexts'] as $name => $settings) {
$formats = [];
foreach ($settings['formats'] as $format => $value) {
$formats[$name.'_'.$format] = $value;
}
$strategies[] = $settings['download']['strategy'];
$pool->addMethodCall('addContext', [$name, $settings['providers'], $formats, $settings['download']]);
}
$container->setParameter('sonata.media.admin_format', $config['admin_format']);
$strategies = array_unique($strategies);
foreach ($strategies as $strategyId) {
$pool->addMethodCall('addDownloadStrategy', [$strategyId, new Reference($strategyId)]);
}
if ('doctrine_orm' === $config['db_driver']) {
if (isset($bundles['SonataDoctrineBundle'])) {
$this->registerSonataDoctrineMapping($config);
} else {
// NEXT MAJOR: Remove next line and throw error when not registering SonataDoctrineBundle
$this->registerDoctrineMapping($config);
}
}
$container->setParameter('sonata.media.resizer.simple.adapter.mode', $config['resizer']['simple']['mode']);
$container->setParameter('sonata.media.resizer.square.adapter.mode', $config['resizer']['square']['mode']);
$this->configureParameterClass($container, $config);
$this->configureExtra($container, $config);
$this->configureBuzz($container, $config);
$this->configureHttpClient($container, $config);
$this->configureProviders($container, $config);
$this->configureAdapters($container, $config);
$this->configureResizers($container, $config);
if ($this->isConfigEnabled($container, $config['messenger'])) {
$this->registerMessengerConfiguration($container, $config['messenger'], $loader);
}
}
public function configureProviders(ContainerBuilder $container, array $config)
{
$container->getDefinition('sonata.media.provider.image')
->replaceArgument(5, array_map('strtolower', $config['providers']['image']['allowed_extensions']))
->replaceArgument(6, $config['providers']['image']['allowed_mime_types'])
->replaceArgument(7, new Reference($config['providers']['image']['adapter']));
$container->getDefinition('sonata.media.provider.file')
->replaceArgument(5, $config['providers']['file']['allowed_extensions'])
->replaceArgument(6, $config['providers']['file']['allowed_mime_types']);
$container->getDefinition('sonata.media.provider.youtube')->replaceArgument(7, $config['providers']['youtube']['html5']);
}
public function configureBuzz(ContainerBuilder $container, array $config)
{
$container->getDefinition('sonata.media.buzz.browser')
->replaceArgument(0, new Reference($config['buzz']['connector']));
foreach ([
'sonata.media.buzz.connector.curl',
'sonata.media.buzz.connector.file_get_contents',
] as $connector) {
$container->getDefinition($connector)
->addMethodCall('setIgnoreErrors', [$config['buzz']['client']['ignore_errors']])
->addMethodCall('setMaxRedirects', [$config['buzz']['client']['max_redirects']])
->addMethodCall('setTimeout', [$config['buzz']['client']['timeout']])
->addMethodCall('setVerifyPeer', [$config['buzz']['client']['verify_peer']])
->addMethodCall('setProxy', [$config['buzz']['client']['proxy']]);
}
}
public function configureParameterClass(ContainerBuilder $container, array $config)
{
$container->setParameter('sonata.media.admin.media.entity', $config['class']['media']);
$container->setParameter('sonata.media.admin.gallery.entity', $config['class']['gallery']);
$container->setParameter('sonata.media.admin.gallery_has_media.entity', $config['class']['gallery_has_media']);
$container->setParameter('sonata.media.media.class', $config['class']['media']);
$container->setParameter('sonata.media.gallery.class', $config['class']['gallery']);
$container->getDefinition('sonata.media.form.type.media')->replaceArgument(1, $config['class']['media']);
}
/**
* NEXT_MAJOR: Remove this method.
*/
public function registerDoctrineMapping(array $config)
{
@trigger_error(
'Using SonataEasyExtendsBundle is deprecated since sonata-project/media-bundle 3.26. Please register SonataDoctrineBundle as a bundle instead.',
\E_USER_DEPRECATED
);
$collector = DeprecatedDoctrineCollector::getInstance();
$collector->addAssociation($config['class']['media'], 'mapOneToMany', [
'fieldName' => 'galleryHasMedias',
'targetEntity' => $config['class']['gallery_has_media'],
'cascade' => [
'persist',
],
'mappedBy' => 'media',
'orphanRemoval' => false,
]);
$collector->addAssociation($config['class']['gallery_has_media'], 'mapManyToOne', [
'fieldName' => 'gallery',
'targetEntity' => $config['class']['gallery'],
'cascade' => [
'persist',
],
'mappedBy' => null,
'inversedBy' => 'galleryHasMedias',
'joinColumns' => [
[
'name' => 'gallery_id',
'referencedColumnName' => 'id',
'onDelete' => 'CASCADE',
],
],
'orphanRemoval' => false,
]);
$collector->addAssociation($config['class']['gallery_has_media'], 'mapManyToOne', [
'fieldName' => 'media',
'targetEntity' => $config['class']['media'],
'cascade' => [
'persist',
],
'mappedBy' => null,
'inversedBy' => 'galleryHasMedias',
'joinColumns' => [
[
'name' => 'media_id',
'referencedColumnName' => 'id',
'onDelete' => 'CASCADE',
],
],
'orphanRemoval' => false,
]);
$collector->addAssociation($config['class']['gallery'], 'mapOneToMany', [
'fieldName' => 'galleryHasMedias',
'targetEntity' => $config['class']['gallery_has_media'],
'cascade' => [
'persist',
],
'mappedBy' => 'gallery',
'orphanRemoval' => true,
'orderBy' => [
'position' => 'ASC',
],
]);
if ($this->isClassificationEnabled($config)) {
$collector->addAssociation($config['class']['media'], 'mapManyToOne', [
'fieldName' => 'category',
'targetEntity' => $config['class']['category'],
'cascade' => [
'persist',
],
'mappedBy' => null,
'inversedBy' => null,
'joinColumns' => [
[
'name' => 'category_id',
'referencedColumnName' => 'id',
'onDelete' => 'SET NULL',
],
],
'orphanRemoval' => false,
]);
}
}
/**
* Inject CDN dependency to default provider.
*/
public function configureCdnAdapter(ContainerBuilder $container, array $config)
{
// add the default configuration for the server cdn
if ($container->hasDefinition('sonata.media.cdn.server') && isset($config['cdn']['server'])) {
$container->getDefinition('sonata.media.cdn.server')
->replaceArgument(0, $config['cdn']['server']['path']);
} else {
$container->removeDefinition('sonata.media.cdn.server');
}
if ($container->hasDefinition('sonata.media.cdn.panther') && isset($config['cdn']['panther'])) {
$container->getDefinition('sonata.media.cdn.panther')
->replaceArgument(0, $config['cdn']['panther']['path'])
->replaceArgument(1, $config['cdn']['panther']['username'])
->replaceArgument(2, $config['cdn']['panther']['password'])
->replaceArgument(3, $config['cdn']['panther']['site_id']);
} else {
$container->removeDefinition('sonata.media.cdn.panther');
}
if ($container->hasDefinition('sonata.media.cdn.cloudfront') && isset($config['cdn']['cloudfront'])) {
$cloudFrontConfig = [];
if (isset($config['cdn']['cloudfront']['region'])) {
$cloudFrontConfig['region'] = $config['cdn']['cloudfront']['region'];
}
if (isset($config['cdn']['cloudfront']['version'])) {
$cloudFrontConfig['version'] = $config['cdn']['cloudfront']['version'];
}
$cloudFrontConfig['credentials'] = [
'key' => $config['cdn']['cloudfront']['key'],
'secret' => $config['cdn']['cloudfront']['secret'],
];
$cloudFrontClass = CloudFrontVersion3::class;
$container->getDefinition('sonata.media.cdn.cloudfront.client')
->replaceArgument(0, $cloudFrontConfig);
$container->getDefinition('sonata.media.cdn.cloudfront')
->setClass($cloudFrontClass)
->replaceArgument(0, new Reference('sonata.media.cdn.cloudfront.client'))
->replaceArgument(1, $config['cdn']['cloudfront']['distribution_id'])
->replaceArgument(2, $config['cdn']['cloudfront']['path']);
} else {
$container->removeDefinition('sonata.media.cdn.cloudfront.client');
$container->removeDefinition('sonata.media.cdn.cloudfront');
}
if ($container->hasDefinition('sonata.media.cdn.fallback') && isset($config['cdn']['fallback'])) {
$container->getDefinition('sonata.media.cdn.fallback')
->replaceArgument(0, new Reference($config['cdn']['fallback']['master']))
->replaceArgument(1, new Reference($config['cdn']['fallback']['fallback']));
} else {
$container->removeDefinition('sonata.media.cdn.fallback');
}
}
/**
* Inject filesystem dependency to default provider.
*/
public function configureFilesystemAdapter(ContainerBuilder $container, array $config)
{
// add the default configuration for the local filesystem
if ($container->hasDefinition('sonata.media.adapter.filesystem.local') && isset($config['filesystem']['local'])) {
$container->getDefinition('sonata.media.adapter.filesystem.local')
->addArgument($config['filesystem']['local']['directory'])
->addArgument($config['filesystem']['local']['create']);
} else {
$container->removeDefinition('sonata.media.adapter.filesystem.local');
}
// add the default configuration for the FTP filesystem
if ($container->hasDefinition('sonata.media.adapter.filesystem.ftp') && isset($config['filesystem']['ftp'])) {
$container->getDefinition('sonata.media.adapter.filesystem.ftp')
->addArgument($config['filesystem']['ftp']['directory'])
->addArgument($config['filesystem']['ftp']['host'])
->addArgument([
'port' => $config['filesystem']['ftp']['port'],
'username' => $config['filesystem']['ftp']['username'],
'password' => $config['filesystem']['ftp']['password'],
'passive' => $config['filesystem']['ftp']['passive'],
'create' => $config['filesystem']['ftp']['create'],
'mode' => $config['filesystem']['ftp']['mode'],
]);
} else {
$container->removeDefinition('sonata.media.adapter.filesystem.ftp');
$container->removeDefinition('sonata.media.filesystem.ftp');
}
// add the default configuration for the S3 filesystem
if ($container->hasDefinition('sonata.media.adapter.filesystem.s3') && isset($config['filesystem']['s3'])) {
$container->getDefinition('sonata.media.adapter.filesystem.s3')
->replaceArgument(0, new Reference('sonata.media.adapter.service.s3'))
->replaceArgument(1, $config['filesystem']['s3']['bucket'])
->replaceArgument(2, ['create' => $config['filesystem']['s3']['create'], 'region' => $config['filesystem']['s3']['region'], 'directory' => $config['filesystem']['s3']['directory'], 'ACL' => $config['filesystem']['s3']['acl']]);
$container->getDefinition('sonata.media.metadata.amazon')
->replaceArgument(0, [
'acl' => $config['filesystem']['s3']['acl'],
'storage' => $config['filesystem']['s3']['storage'],
'encryption' => $config['filesystem']['s3']['encryption'],
'meta' => $config['filesystem']['s3']['meta'],
'cache_control' => $config['filesystem']['s3']['cache_control'],
]);
$arguments = [
'region' => $config['filesystem']['s3']['region'],
'version' => $config['filesystem']['s3']['version'],
];
if (isset($config['filesystem']['s3']['endpoint'])) {
$arguments['endpoint'] = $config['filesystem']['s3']['endpoint'];
}
if (isset($config['filesystem']['s3']['secretKey'], $config['filesystem']['s3']['accessKey'])) {
$arguments['credentials'] = [
'secret' => $config['filesystem']['s3']['secretKey'],
'key' => $config['filesystem']['s3']['accessKey'],
];
}
$container->getDefinition('sonata.media.adapter.service.s3')
->replaceArgument(0, $arguments);
} else {
$container->removeDefinition('sonata.media.adapter.filesystem.s3');
$container->removeDefinition('sonata.media.filesystem.s3');
}
if ($container->hasDefinition('sonata.media.adapter.filesystem.replicate') && isset($config['filesystem']['replicate'])) {
$container->getDefinition('sonata.media.adapter.filesystem.replicate')
->replaceArgument(0, new Reference($config['filesystem']['replicate']['master']))
->replaceArgument(1, new Reference($config['filesystem']['replicate']['slave']));
} else {
$container->removeDefinition('sonata.media.adapter.filesystem.replicate');
$container->removeDefinition('sonata.media.filesystem.replicate');
}
if ($container->hasDefinition('sonata.media.adapter.filesystem.mogilefs') && isset($config['filesystem']['mogilefs'])) {
$container->getDefinition('sonata.media.adapter.filesystem.mogilefs')
->replaceArgument(0, $config['filesystem']['mogilefs']['domain'])
->replaceArgument(1, $config['filesystem']['mogilefs']['hosts']);
} else {
$container->removeDefinition('sonata.media.adapter.filesystem.mogilefs');
$container->removeDefinition('sonata.media.filesystem.mogilefs');
}
// NEXT_MAJOR: Remove this section and everything related to openstack/rackspace
if (
$container->hasDefinition('sonata.media.adapter.filesystem.opencloud') &&
(isset($config['filesystem']['openstack']) || isset($config['filesystem']['rackspace']))
) {
@trigger_error(
'Integration with OpenStack / Rackspace is deprecated without replacement since sonata-project/media-bundle 3.33.',
\E_USER_DEPRECATED
);
if (isset($config['filesystem']['openstack'])) {
$container->setParameter('sonata.media.adapter.filesystem.opencloud.class', 'OpenCloud\OpenStack');
$settings = 'openstack';
} else {
$container->setParameter('sonata.media.adapter.filesystem.opencloud.class', 'OpenCloud\Rackspace');
$settings = 'rackspace';
}
$container->getDefinition('sonata.media.adapter.filesystem.opencloud.connection')
->replaceArgument(0, $config['filesystem'][$settings]['url'])
->replaceArgument(1, $config['filesystem'][$settings]['secret']);
$container->getDefinition('sonata.media.adapter.filesystem.opencloud')
->replaceArgument(1, $config['filesystem'][$settings]['containerName'])
->replaceArgument(2, $config['filesystem'][$settings]['create_container']);
$container->getDefinition('sonata.media.adapter.filesystem.opencloud.objectstore')
->replaceArgument(1, $config['filesystem'][$settings]['region']);
} else {
$container->removeDefinition('sonata.media.adapter.filesystem.opencloud');
$container->removeDefinition('sonata.media.adapter.filesystem.opencloud.connection');
$container->removeDefinition('sonata.media.adapter.filesystem.opencloud.objectstore');
$container->removeDefinition('sonata.media.filesystem.opencloud');
}
}
public function configureExtra(ContainerBuilder $container, array $config)
{
if ($config['pixlr']['enabled']) {
$container->getDefinition('sonata.media.extra.pixlr')
->replaceArgument(0, $config['pixlr']['referrer'])
->replaceArgument(1, $config['pixlr']['secret']);
} else {
$container->removeDefinition('sonata.media.extra.pixlr');
}
}
/**
* NEXT_MAJOR: Remove this method.
*
* @deprecated since sonata-project/media-bundle 3.22, to be removed in version 4.0.
*/
public function configureClassesToCompile()
{
}
/**
* Allow an extension to prepend the extension configurations.
*/
public function prepend(ContainerBuilder $container)
{
$bundles = $container->getParameter('kernel.bundles');
// Store SonataAdminBundle configuration for later use
if (isset($bundles['SonataAdminBundle'])) {
$this->bundleConfigs['SonataAdminBundle'] = current($container->getExtensionConfig('sonata_admin'));
}
}
/**
* Checks if the classification of media is enabled.
*/
private function isClassificationEnabled(array $config): bool
{
return interface_exists(CategoryInterface::class)
&& !$config['force_disable_category'];
}
private function configureAdapters(ContainerBuilder $container, array $config): void
{
foreach (['gd', 'imagick', 'gmagick'] as $adapter) {
if ($container->hasParameter('sonata.media.adapter.image.'.$adapter.'.class')) {
$container->register(
'sonata.media.adapter.image.'.$adapter,
$container->getParameter('sonata.media.adapter.image.'.$adapter.'.class')
);
}
}
$container->setAlias('sonata.media.adapter.image.default', $config['adapters']['default']);
}
private function configureResizers(ContainerBuilder $container, array $config): void
{
if ($container->hasParameter('sonata.media.resizer.crop.class')) {
$class = $container->getParameter('sonata.media.resizer.crop.class');
$definition = new Definition($class, [
new Reference('sonata.media.adapter.image.default'),
new Reference('sonata.media.metadata.proxy'),
]);
$definition->addTag('sonata.media.resizer');
$container->setDefinition('sonata.media.resizer.crop', $definition);
}
if ($container->hasParameter('sonata.media.resizer.simple.class')) {
$class = $container->getParameter('sonata.media.resizer.simple.class');
$definition = new Definition($class, [
new Reference('sonata.media.adapter.image.default'),
'%sonata.media.resizer.simple.adapter.mode%',
new Reference('sonata.media.metadata.proxy'),
]);
$definition->addTag('sonata.media.resizer');
$container->setDefinition('sonata.media.resizer.simple', $definition);
}
if ($container->hasParameter('sonata.media.resizer.square.class')) {
$class = $container->getParameter('sonata.media.resizer.square.class');
$definition = new Definition($class, [
new Reference('sonata.media.adapter.image.default'),
'%sonata.media.resizer.square.adapter.mode%',
new Reference('sonata.media.metadata.proxy'),
]);
$definition->addTag('sonata.media.resizer');
$container->setDefinition('sonata.media.resizer.square', $definition);
}
$container->setAlias('sonata.media.resizer.default', $config['resizers']['default']);
}
private function registerSonataDoctrineMapping(array $config): void
{
$collector = DoctrineCollector::getInstance();
$collector->addAssociation(
$config['class']['media'],
'mapOneToMany',
OptionsBuilder::createOneToMany('galleryHasMedias', $config['class']['gallery_has_media'])
->cascade(['persist'])
->mappedBy('media')
);
$collector->addAssociation(
$config['class']['gallery_has_media'],
'mapManyToOne',
OptionsBuilder::createManyToOne('gallery', $config['class']['gallery'])
->cascade(['persist'])
->inversedBy('galleryHasMedias')
->addJoin([
'name' => 'gallery_id',
'referencedColumnName' => 'id',
'onDelete' => 'CASCADE',
])
);
$collector->addAssociation(
$config['class']['gallery_has_media'],
'mapManyToOne',
OptionsBuilder::createManyToOne('media', $config['class']['media'])
->cascade(['persist'])
->inversedBy('galleryHasMedias')
->addJoin([
'name' => 'media_id',
'referencedColumnName' => 'id',
'onDelete' => 'CASCADE',
])
);
$collector->addAssociation(
$config['class']['gallery'],
'mapOneToMany',
OptionsBuilder::createOneToMany('galleryHasMedias', $config['class']['gallery_has_media'])
->cascade(['persist'])
->mappedBy('gallery')
->orphanRemoval()
->addOrder('position', 'ASC')
);
if ($this->isClassificationEnabled($config)) {
$collector->addAssociation(
$config['class']['media'],
'mapManyToOne',
OptionsBuilder::createManyToOne('category', $config['class']['category'])
->cascade(['persist'])
->addJoin([
'name' => 'category_id',
'referencedColumnName' => 'id',
'onDelete' => 'SET NULL',
])
);
}
}
private function configureHttpClient(ContainerBuilder $container, array $config): void
{
if (null === $config['http']['client'] || null === $config['http']['message_factory']) {
// NEXT_MAJOR: Remove this fallback service
$container->setAlias('sonata.media.http.client', 'sonata.media.buzz.browser');
return;
}
$container->setAlias('sonata.media.http.client', $config['http']['client']);
$container->setAlias('sonata.media.http.message_factory', $config['http']['message_factory']);
}
/**
* @param array<string, string> $config
*
* @phpstan-param array{generate_thumbnails_bus: string} $config
*/
private function registerMessengerConfiguration(ContainerBuilder $container, array $config, XmlFileLoader $loader): void
{
if (!interface_exists(MessageBusInterface::class)) {
throw new \LogicException('Messenger support cannot be enabled as the Messenger component is not installed. Try running "composer require symfony/messenger".');
}
$loader->load('messenger.xml');
$container->setAlias('sonata.media.messenger.generate_thumbnails_bus', $config['generate_thumbnails_bus']);
}
}
| 1 | 12,718 | You did not check this code | sonata-project-SonataMediaBundle | php |
@@ -31,6 +31,10 @@ func EncodeValues(vals []*Value) ([]byte, error) {
// DecodeValues decodes an array of abi values from the given buffer, using the
// provided type information.
func DecodeValues(data []byte, types []Type) ([]*Value, error) {
+ if len(types) > 0 && len(data) == 0 {
+ return nil, fmt.Errorf("expected %d parameters, but got 0", len(types))
+ }
+
if len(data) == 0 {
return nil, nil
}
| 1 | package abi
import (
"fmt"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/pkg/errors"
)
// EncodeValues encodes a set of abi values to raw bytes. Zero length arrays of
// values are normalized to nil
func EncodeValues(vals []*Value) ([]byte, error) {
if len(vals) == 0 {
return nil, nil
}
var arr [][]byte
for _, val := range vals {
data, err := val.Serialize()
if err != nil {
return nil, err
}
arr = append(arr, data)
}
return cbor.DumpObject(arr)
}
// DecodeValues decodes an array of abi values from the given buffer, using the
// provided type information.
func DecodeValues(data []byte, types []Type) ([]*Value, error) {
if len(data) == 0 {
return nil, nil
}
var arr [][]byte
if err := cbor.DecodeInto(data, &arr); err != nil {
return nil, err
}
if len(arr) != len(types) {
return nil, fmt.Errorf("expected %d parameters, but got %d", len(types), len(arr))
}
out := make([]*Value, 0, len(types))
for i, t := range types {
v, err := Deserialize(arr[i], t)
if err != nil {
return nil, err
}
out = append(out, v)
}
return out, nil
}
// ToEncodedValues converts from a list of go abi-compatible values to abi values and then encodes to raw bytes.
func ToEncodedValues(params ...interface{}) ([]byte, error) {
vals, err := ToValues(params)
if err != nil {
return nil, errors.Wrap(err, "unable to convert params to values")
}
bytes, err := EncodeValues(vals)
if err != nil {
return nil, errors.Wrap(err, "unable to encode values")
}
return bytes, nil
}
| 1 | 18,902 | This was necessary due to a bug uncovered in testing. If an actor method which requires params is called without any params at all, not returning an error here causes a panic. | filecoin-project-venus | go |
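To make the failure mode concrete: before the added guard, empty input bytes with a non-empty `types` slice returned `(nil, nil)`, so any caller that indexed into the result panicked instead of getting an error. The sketch below shows that pattern with a made-up caller name (`firstParam`); only `DecodeValues`, `Type`, and `Value` come from the file above.

// firstParam is a hypothetical caller illustrating the panic described above.
func firstParam(data []byte, types []Type) (*Value, error) {
	vals, err := DecodeValues(data, types)
	if err != nil {
		return nil, err // with the patch, empty data now lands here
	}
	// Without the guard, empty data with len(types) > 0 reached this line
	// with vals == nil, and vals[0] panicked with an index-out-of-range error.
	return vals[0], nil
}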
@@ -738,6 +738,7 @@ const htmlElms = {
'dialog',
'document',
'feed',
+ 'group',
'log',
'main',
'marquee',
| 1 | // Source: https://www.w3.org/TR/html-aria/#allowed-aria-roles-states-and-properties
// Source: https://www.w3.org/TR/html-aam-1.0/#html-element-role-mappings
// Source https://html.spec.whatwg.org/multipage/dom.html#content-models
// Source https://dom.spec.whatwg.org/#dom-element-attachshadow
const htmlElms = {
a: {
// Note: variants work by matching the node against the
// `matches` attribute. if the variant matches AND has the
// desired property (contentTypes, etc.) then we use it,
// otherwise we move on to the next matching variant
variant: {
href: {
matches: '[href]',
contentTypes: ['interactive', 'phrasing', 'flow'],
allowedRoles: [
'button',
'checkbox',
'menuitem',
'menuitemcheckbox',
'menuitemradio',
'option',
'radio',
'switch',
'tab',
'treeitem',
'doc-backlink',
'doc-biblioref',
'doc-glossref',
'doc-noteref'
],
namingMethods: ['subtreeText']
},
// Note: the default variant is a special variant and is
// used as the last match if none of the other variants
// match or have the desired attribute
default: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
}
}
},
abbr: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
address: {
contentTypes: ['flow'],
allowedRoles: true
},
area: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: false,
namingMethods: ['altText']
},
article: {
contentTypes: ['sectioning', 'flow'],
allowedRoles: [
'feed',
'presentation',
'none',
'document',
'application',
'main',
'region'
],
shadowRoot: true
},
aside: {
contentTypes: ['sectioning', 'flow'],
allowedRoles: [
'feed',
'note',
'presentation',
'none',
'region',
'search',
'doc-dedication',
'doc-example',
'doc-footnote',
'doc-pullquote',
'doc-tip'
]
},
audio: {
variant: {
controls: {
matches: '[controls]',
contentTypes: ['interactive', 'embedded', 'phrasing', 'flow']
},
default: {
contentTypes: ['embedded', 'phrasing', 'flow']
}
},
// Note: if the property applies regardless of variants it is
// placed at the top level instead of the default variant
allowedRoles: ['application'],
chromiumRole: 'Audio'
},
b: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
base: {
allowedRoles: false,
noAriaAttrs: true
},
bdi: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
bdo: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
blockquote: {
contentTypes: ['flow'],
allowedRoles: true,
shadowRoot: true
},
body: {
allowedRoles: false,
shadowRoot: true
},
br: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: ['presentation', 'none'],
namingMethods: ['titleText', 'singleSpace']
},
button: {
contentTypes: ['interactive', 'phrasing', 'flow'],
allowedRoles: [
'checkbox',
'link',
'menuitem',
'menuitemcheckbox',
'menuitemradio',
'option',
'radio',
'switch',
'tab'
],
// 5.4 button Element
namingMethods: ['subtreeText']
},
canvas: {
allowedRoles: true,
contentTypes: ['embedded', 'phrasing', 'flow'],
chromiumRole: 'Canvas'
},
caption: {
allowedRoles: false
},
cite: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
code: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
col: {
allowedRoles: false,
noAriaAttrs: true
},
colgroup: {
allowedRoles: false,
noAriaAttrs: true
},
data: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
datalist: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: false,
implicitAttrs: {
// Note: even though the value of aria-multiselectable is based
// on the attributes, we don't currently need to know the
// precise value. however, this allows us to make the attribute
// future proof in case we ever do need to know it
'aria-multiselectable': 'false'
}
},
dd: {
allowedRoles: false
},
del: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
dfn: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
details: {
contentTypes: ['interactive', 'flow'],
allowedRoles: false
},
dialog: {
contentTypes: ['flow'],
allowedRoles: ['alertdialog']
},
div: {
contentTypes: ['flow'],
allowedRoles: true,
shadowRoot: true
},
dl: {
contentTypes: ['flow'],
allowedRoles: ['group', 'list', 'presentation', 'none'],
chromiumRole: 'DescriptionList'
},
dt: {
allowedRoles: ['listitem']
},
em: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
embed: {
contentTypes: ['interactive', 'embedded', 'phrasing', 'flow'],
allowedRoles: ['application', 'document', 'img', 'presentation', 'none'],
chromiumRole: 'EmbeddedObject'
},
fieldset: {
contentTypes: ['flow'],
allowedRoles: ['none', 'presentation', 'radiogroup'],
// 5.5 fieldset and legend Elements
namingMethods: ['fieldsetLegendText']
},
figcaption: {
allowedRoles: ['group', 'none', 'presentation']
},
figure: {
contentTypes: ['flow'],
// Note: technically you're allowed no role when a figcaption
// descendant, but we can't match that so we'll go with any role
allowedRoles: true,
// 5.9 figure and figcaption Elements
namingMethods: ['figureText', 'titleText']
},
footer: {
contentTypes: ['flow'],
allowedRoles: ['group', 'none', 'presentation', 'doc-footnote'],
shadowRoot: true
},
form: {
contentTypes: ['flow'],
allowedRoles: ['search', 'none', 'presentation']
},
h1: {
contentTypes: ['heading', 'flow'],
allowedRoles: ['none', 'presentation', 'tab', 'doc-subtitle'],
shadowRoot: true,
implicitAttrs: {
'aria-level': '1'
}
},
h2: {
contentTypes: ['heading', 'flow'],
allowedRoles: ['none', 'presentation', 'tab', 'doc-subtitle'],
shadowRoot: true,
implicitAttrs: {
'aria-level': '2'
}
},
h3: {
contentTypes: ['heading', 'flow'],
allowedRoles: ['none', 'presentation', 'tab', 'doc-subtitle'],
shadowRoot: true,
implicitAttrs: {
'aria-level': '3'
}
},
h4: {
contentTypes: ['heading', 'flow'],
allowedRoles: ['none', 'presentation', 'tab', 'doc-subtitle'],
shadowRoot: true,
implicitAttrs: {
'aria-level': '4'
}
},
h5: {
contentTypes: ['heading', 'flow'],
allowedRoles: ['none', 'presentation', 'tab', 'doc-subtitle'],
shadowRoot: true,
implicitAttrs: {
'aria-level': '5'
}
},
h6: {
contentTypes: ['heading', 'flow'],
allowedRoles: ['none', 'presentation', 'tab', 'doc-subtitle'],
shadowRoot: true,
implicitAttrs: {
'aria-level': '6'
}
},
head: {
allowedRoles: false,
noAriaAttrs: true
},
header: {
contentTypes: ['flow'],
allowedRoles: ['group', 'none', 'presentation', 'doc-footnote'],
shadowRoot: true
},
hgroup: {
contentTypes: ['heading', 'flow'],
allowedRoles: true
},
hr: {
contentTypes: ['flow'],
allowedRoles: ['none', 'presentation', 'doc-pagebreak'],
namingMethods: ['titleText', 'singleSpace']
},
html: {
allowedRoles: false,
noAriaAttrs: true
},
i: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
iframe: {
contentTypes: ['interactive', 'embedded', 'phrasing', 'flow'],
allowedRoles: ['application', 'document', 'img', 'none', 'presentation'],
chromiumRole: 'Iframe'
},
img: {
variant: {
nonEmptyAlt: {
matches: {
attributes: {
alt: '/.+/'
}
},
allowedRoles: [
'button',
'checkbox',
'link',
'menuitem',
'menuitemcheckbox',
'menuitemradio',
'option',
'progressbar',
'scrollbar',
'separator',
'slider',
'switch',
'tab',
'treeitem',
'doc-cover'
]
},
usemap: {
matches: '[usemap]',
contentTypes: ['interactive', 'embedded', 'flow']
},
default: {
// Note: allow role presentation and none on image with no
// alt as a way to prevent axe from flagging the image as
// needing an alt
allowedRoles: ['presentation', 'none'],
// Note: spec change (do not count as phrasing), because browsers
// insert a space between an img's accessible name and other
// elements' accessible names
contentTypes: ['embedded', 'flow']
}
},
// 5.10 img Element
namingMethods: ['altText']
},
input: {
variant: {
button: {
matches: {
properties: {
type: 'button'
}
},
allowedRoles: [
'link',
'menuitem',
'menuitemcheckbox',
'menuitemradio',
'option',
'radio',
'switch',
'tab'
]
},
// 5.2 input type="button", input type="submit" and input type="reset"
buttonType: {
matches: {
properties: {
type: ['button', 'submit', 'reset']
}
},
namingMethods: ['valueText', 'titleText', 'buttonDefaultText']
},
checkboxPressed: {
matches: {
properties: {
type: 'checkbox'
},
attributes: {
'aria-pressed': '/.*/'
}
},
allowedRoles: ['button', 'menuitemcheckbox', 'option', 'switch'],
implicitAttrs: {
'aria-checked': 'false'
}
},
checkbox: {
matches: {
properties: {
type: 'checkbox'
},
attributes: {
'aria-pressed': null
}
},
allowedRoles: ['menuitemcheckbox', 'option', 'switch'],
implicitAttrs: {
'aria-checked': 'false'
}
},
noRoles: {
matches: {
properties: {
// Note: types of url, search, tel, and email are listed
// as not allowed roles however since they are text
// types they should be allowed to have role=combobox
type: [
'color',
'date',
'datetime-local',
'file',
'month',
'number',
'password',
'range',
'reset',
'submit',
'time',
'week'
]
}
},
allowedRoles: false
},
hidden: {
matches: {
properties: {
type: 'hidden'
}
},
// Note: spec change (do not count as phrasing)
contentTypes: ['flow'],
allowedRoles: false,
noAriaAttrs: true
},
image: {
matches: {
properties: {
type: 'image'
}
},
allowedRoles: [
'link',
'menuitem',
'menuitemcheckbox',
'menuitemradio',
'radio',
'switch'
],
// 5.3 input type="image"
namingMethods: [
'altText',
'valueText',
'labelText',
'titleText',
'buttonDefaultText'
]
},
radio: {
matches: {
properties: {
type: 'radio'
}
},
allowedRoles: ['menuitemradio'],
implicitAttrs: {
'aria-checked': 'false'
}
},
textWithList: {
matches: {
properties: {
type: 'text'
},
attributes: {
list: '/.*/'
}
},
allowedRoles: false
},
default: {
// Note: spec change (do not count as phrasing)
contentTypes: ['interactive', 'flow'],
allowedRoles: ['combobox', 'searchbox', 'spinbutton'],
implicitAttrs: {
'aria-valuenow': ''
},
// 5.1 input type="text", input type="password", input type="search", input type="tel", input type="url"
// 5.7 Other Form Elements
namingMethods: ['labelText', 'placeholderText']
}
}
},
ins: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
kbd: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
label: {
contentTypes: ['interactive', 'phrasing', 'flow'],
allowedRoles: false,
chromiumRole: 'Label'
},
legend: {
allowedRoles: false
},
li: {
allowedRoles: [
'menuitem',
'menuitemcheckbox',
'menuitemradio',
'option',
'none',
'presentation',
'radio',
'separator',
'tab',
'treeitem',
'doc-biblioentry',
'doc-endnote'
],
implicitAttrs: {
'aria-setsize': '1',
'aria-posinset': '1'
}
},
link: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: false,
noAriaAttrs: true
},
main: {
contentTypes: ['flow'],
allowedRoles: false,
shadowRoot: true
},
map: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: false,
noAriaAttrs: true
},
math: {
contentTypes: ['embedded', 'phrasing', 'flow'],
allowedRoles: false
},
mark: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
menu: {
contentTypes: ['flow'],
allowedRoles: [
'directory',
'group',
'listbox',
'menu',
'menubar',
'none',
'presentation',
'radiogroup',
'tablist',
'toolbar',
'tree'
]
},
meta: {
variant: {
itemprop: {
matches: '[itemprop]',
contentTypes: ['phrasing', 'flow']
}
},
allowedRoles: false,
noAriaAttrs: true
},
meter: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: false,
chromiumRole: 'progressbar'
},
nav: {
contentTypes: ['sectioning', 'flow'],
allowedRoles: [
'doc-index',
'doc-pagelist',
'doc-toc',
'menu',
'menubar',
'tablist'
],
shadowRoot: true
},
noscript: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: false,
noAriaAttrs: true
},
object: {
variant: {
usemap: {
matches: '[usemap]',
contentTypes: ['interactive', 'embedded', 'phrasing', 'flow']
},
default: {
contentTypes: ['embedded', 'phrasing', 'flow']
}
},
allowedRoles: ['application', 'document', 'img'],
chromiumRole: 'PluginObject'
},
ol: {
contentTypes: ['flow'],
allowedRoles: [
'directory',
'group',
'listbox',
'menu',
'menubar',
'none',
'presentation',
'radiogroup',
'tablist',
'toolbar',
'tree'
]
},
optgroup: {
allowedRoles: false
},
option: {
allowedRoles: false,
implicitAttrs: {
'aria-selected': 'false'
}
},
output: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true,
// 5.6 output Element
namingMethods: ['subtreeText']
},
p: {
contentTypes: ['flow'],
allowedRoles: true,
shadowRoot: true
},
param: {
allowedRoles: false,
noAriaAttrs: true
},
picture: {
// Note: spec change (do not count as embedded), because browsers do not hide text inside the picture element
contentTypes: ['phrasing', 'flow'],
allowedRoles: false,
noAriaAttrs: true
},
pre: {
contentTypes: ['flow'],
allowedRoles: true
},
progress: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: false,
implicitAttrs: {
'aria-valuemax': '100',
'aria-valuemin': '0',
'aria-valuenow': '0'
}
},
q: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
rp: {
allowedRoles: true
},
rt: {
allowedRoles: true
},
ruby: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
s: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
samp: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
script: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: false,
noAriaAttrs: true
},
section: {
contentTypes: ['sectioning', 'flow'],
allowedRoles: [
'alert',
'alertdialog',
'application',
'banner',
'complementary',
'contentinfo',
'dialog',
'document',
'feed',
'log',
'main',
'marquee',
'navigation',
'none',
'note',
'presentation',
'search',
'status',
'tabpanel',
'doc-abstract',
'doc-acknowledgments',
'doc-afterword',
'doc-appendix',
'doc-bibliography',
'doc-chapter',
'doc-colophon',
'doc-conclusion',
'doc-credit',
'doc-credits',
'doc-dedication',
'doc-endnotes',
'doc-epigraph',
'doc-epilogue',
'doc-errata',
'doc-example',
'doc-foreword',
'doc-glossary',
'doc-index',
'doc-introduction',
'doc-notice',
'doc-pagelist',
'doc-part',
'doc-preface',
'doc-prologue',
'doc-pullquote',
'doc-qna',
'doc-toc'
],
shadowRoot: true
},
select: {
variant: {
combobox: {
matches: {
attributes: {
multiple: null,
size: [null, '1']
}
},
allowedRoles: ['menu']
},
default: {
allowedRoles: false
}
},
contentTypes: ['interactive', 'phrasing', 'flow'],
implicitAttrs: {
'aria-valuenow': ''
},
// 5.7 Other form elements
namingMethods: ['labelText']
},
slot: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: false,
noAriaAttrs: true
},
small: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
source: {
allowedRoles: false,
noAriaAttrs: true
},
span: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true,
shadowRoot: true
},
strong: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
style: {
allowedRoles: false,
noAriaAttrs: true
},
svg: {
contentTypes: ['embedded', 'phrasing', 'flow'],
allowedRoles: true,
chromiumRole: 'SVGRoot',
namingMethods: ['svgTitleText']
},
sub: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
summary: {
allowedRoles: false,
// 5.8 summary Element
namingMethods: ['subtreeText']
},
sup: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
table: {
contentTypes: ['flow'],
allowedRoles: true,
// 5.11 table Element
namingMethods: ['tableCaptionText', 'tableSummaryText']
},
tbody: {
allowedRoles: true
},
template: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: false,
noAriaAttrs: true
},
textarea: {
contentTypes: ['interactive', 'phrasing', 'flow'],
allowedRoles: false,
implicitAttrs: {
'aria-valuenow': '',
'aria-multiline': 'true'
},
// 5.1 textarea
namingMethods: ['labelText', 'placeholderText']
},
tfoot: {
allowedRoles: true
},
thead: {
allowedRoles: true
},
time: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
title: {
allowedRoles: false,
noAriaAttrs: true
},
td: {
allowedRoles: true
},
th: {
allowedRoles: true
},
tr: {
allowedRoles: true
},
track: {
allowedRoles: false,
noAriaAttrs: true
},
u: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
ul: {
contentTypes: ['flow'],
allowedRoles: [
'directory',
'group',
'listbox',
'menu',
'menubar',
'none',
'presentation',
'radiogroup',
'tablist',
'toolbar',
'tree'
]
},
var: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: true
},
video: {
variant: {
controls: {
matches: '[controls]',
contentTypes: ['interactive', 'embedded', 'phrasing', 'flow']
},
default: {
contentTypes: ['embedded', 'phrasing', 'flow']
}
},
allowedRoles: ['application'],
chromiumRole: 'video'
},
wbr: {
contentTypes: ['phrasing', 'flow'],
allowedRoles: ['presentation', 'none']
}
};
export default htmlElms;
| 1 | 17,260 | Thanks for the pr. Would you be able to edit the file to use spaces rather than tabs? Normally we have prettier run on commit, but I guess file patches don't do that. | dequelabs-axe-core | js |
@@ -111,7 +111,6 @@ public class TestHiveClientPool {
"Another meta exception", () -> clients.run(client -> client.getTables("default", "t")));
}
- @Test
public void testConnectionFailureRestoreForMetaException() throws Exception {
HiveMetaStoreClient hmsClient = newClient();
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hive;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Function;
import org.apache.hadoop.hive.metastore.api.FunctionType;
import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.iceberg.AssertHelpers;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.thrift.transport.TTransportException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
public class TestHiveClientPool {
private static final String HIVE_SITE_CONTENT = "<?xml version=\"1.0\"?>\n" +
"<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n" +
"<configuration>\n" +
" <property>\n" +
" <name>hive.metastore.sasl.enabled</name>\n" +
" <value>true</value>\n" +
" </property>\n" +
"</configuration>\n";
HiveClientPool clients;
@Before
public void before() {
HiveClientPool clientPool = new HiveClientPool(2, new Configuration());
clients = Mockito.spy(clientPool);
}
@After
public void after() {
clients.close();
clients = null;
}
@Test
public void testConf() {
HiveConf conf = createHiveConf();
conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, "file:/mywarehouse/");
HiveClientPool clientPool = new HiveClientPool(10, conf);
HiveConf clientConf = clientPool.hiveConf();
Assert.assertEquals(conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname),
clientConf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname));
Assert.assertEquals(10, clientPool.poolSize());
// 'hive.metastore.sasl.enabled' should be 'true' as defined in xml
Assert.assertEquals(conf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname),
clientConf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname));
Assert.assertTrue(clientConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL));
}
private HiveConf createHiveConf() {
HiveConf hiveConf = new HiveConf();
try (InputStream inputStream = new ByteArrayInputStream(HIVE_SITE_CONTENT.getBytes(StandardCharsets.UTF_8))) {
hiveConf.addResource(inputStream, "for_test");
} catch (IOException e) {
throw new RuntimeException(e);
}
return hiveConf;
}
@Test
public void testNewClientFailure() {
Mockito.doThrow(new RuntimeException("Connection exception")).when(clients).newClient();
AssertHelpers.assertThrows("Should throw exception", RuntimeException.class,
"Connection exception", () -> clients.run(Object::toString));
}
@Test
public void testGetTablesFailsForNonReconnectableException() throws Exception {
HiveMetaStoreClient hmsClient = Mockito.mock(HiveMetaStoreClient.class);
Mockito.doReturn(hmsClient).when(clients).newClient();
Mockito.doThrow(new MetaException("Another meta exception"))
.when(hmsClient).getTables(Mockito.anyString(), Mockito.anyString());
AssertHelpers.assertThrows("Should throw exception", MetaException.class,
"Another meta exception", () -> clients.run(client -> client.getTables("default", "t")));
}
@Test
public void testConnectionFailureRestoreForMetaException() throws Exception {
HiveMetaStoreClient hmsClient = newClient();
// Throwing an exception may trigger the client to reconnect.
String metaMessage = "Got exception: org.apache.thrift.transport.TTransportException";
Mockito.doThrow(new MetaException(metaMessage)).when(hmsClient).getAllDatabases();
// Create a new client when the reconnect method is called.
HiveMetaStoreClient newClient = reconnect(hmsClient);
List<String> databases = Lists.newArrayList("db1", "db2");
Mockito.doReturn(databases).when(newClient).getAllDatabases();
// The return is OK when the reconnect method is called.
Assert.assertEquals(databases, clients.run(client -> client.getAllDatabases()));
// Verify that the method is called.
Mockito.verify(clients).reconnect(hmsClient);
Mockito.verify(clients, Mockito.never()).reconnect(newClient);
}
@Test
public void testConnectionFailureRestoreForTTransportException() throws Exception {
HiveMetaStoreClient hmsClient = newClient();
Mockito.doThrow(new TTransportException()).when(hmsClient).getAllFunctions();
// Create a new client when getAllFunctions() failed.
HiveMetaStoreClient newClient = reconnect(hmsClient);
GetAllFunctionsResponse response = new GetAllFunctionsResponse();
response.addToFunctions(
new Function("concat", "db1", "classname", "root", PrincipalType.USER, 100, FunctionType.JAVA, null));
Mockito.doReturn(response).when(newClient).getAllFunctions();
Assert.assertEquals(response, clients.run(client -> client.getAllFunctions()));
Mockito.verify(clients).reconnect(hmsClient);
Mockito.verify(clients, Mockito.never()).reconnect(newClient);
}
private HiveMetaStoreClient newClient() {
HiveMetaStoreClient hmsClient = Mockito.mock(HiveMetaStoreClient.class);
Mockito.doReturn(hmsClient).when(clients).newClient();
return hmsClient;
}
private HiveMetaStoreClient reconnect(HiveMetaStoreClient obsoleteClient) {
HiveMetaStoreClient newClient = Mockito.mock(HiveMetaStoreClient.class);
Mockito.doReturn(newClient).when(clients).reconnect(obsoleteClient);
return newClient;
}
}
| 1 | 41,852 | Is it intentional that we removed this annotation? | apache-iceberg | java |
@@ -42,6 +42,18 @@ namespace AutoRest.Swagger.Validation
/// </summary>
public override Category Severity => Category.Warning;
+ /// <summary>
+ /// What kind of open api document type this rule should be applied to
+ /// </summary>
+ public override ServiceDefinitionDocumentType ServiceDefinitionDocumentType => ServiceDefinitionDocumentType.ARM;
+
+ /// <summary>
+ /// When to apply the validation rule, before or after it has been merged as a part of
+ /// its merged document as specified in the corresponding '.md' file
+ /// By default consider all rules to be applied for After only
+ /// </summary>
+ public override ServiceDefinitionDocumentState ValidationRuleMergeState => ServiceDefinitionDocumentState.Individual;
+
public override IEnumerable<ValidationMessage> GetValidationMessages(Dictionary<string, Schema> definitions, RuleContext context)
{
var violatingModels = definitions.Where(defPair=>defPair.Value.Properties?.Values.Any(schema => schema.Enum != null)??false);
| 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using AutoRest.Core.Logging;
using AutoRest.Core.Properties;
using AutoRest.Core.Utilities;
using System.Collections.Generic;
using AutoRest.Swagger.Model;
using System.Text.RegularExpressions;
using System.Linq;
using AutoRest.Swagger.Model.Utilities;
using AutoRest.Swagger.Validation.Core;
namespace AutoRest.Swagger.Validation
{
/// <summary>
/// Validates if the x-ms-enum extension has been set for enum types
/// </summary>
public class XmsEnumValidation : TypedRule<Dictionary<string, Schema>>
{
/// <summary>
/// The template message for this Rule.
/// </summary>
/// <remarks>
/// This may contain placeholders '{0}' for parameterized messages.
/// </remarks>
public override string MessageTemplate => "The enum types should have x-ms-enum type extension set with appropriate options. Property name: {0}";
/// <summary>
/// Id of the Rule.
/// </summary>
public override string Id => "R2018";
/// <summary>
/// Violation category of the Rule.
/// </summary>
public override ValidationCategory ValidationCategory => ValidationCategory.SDKViolation;
/// <summary>
/// The severity of this message (ie, debug/info/warning/error/fatal, etc)
/// </summary>
public override Category Severity => Category.Warning;
public override IEnumerable<ValidationMessage> GetValidationMessages(Dictionary<string, Schema> definitions, RuleContext context)
{
var violatingModels = definitions.Where(defPair=>defPair.Value.Properties?.Values.Any(schema => schema.Enum != null)??false);
foreach (var modelPair in violatingModels)
{
var violatingProps = modelPair.Value.Properties.Where(prop => prop.Value.Enum != null && (!prop.Value.Extensions?.ContainsKey("x-ms-enum")??false));
foreach (var prop in violatingProps)
{
yield return new ValidationMessage(new FileObjectPath(context.File, context.Path.AppendProperty(modelPair.Key).AppendProperty("properties").AppendProperty(prop.Key)), this, prop.Key);
}
}
}
}
}
| 1 | 25,119 | should be general? | Azure-autorest | java |
@@ -153,7 +153,7 @@ namespace Nethermind.Blockchain
private void LoadBestKnown()
{
long headNumber = Head?.Number ?? _syncConfig.PivotNumberParsed;
- long left = Math.Max(_syncConfig.PivotNumberParsed, headNumber);
+ long left = Math.Max(LowestInsertedHeader?.Number ?? 0, headNumber);
long right = headNumber + BestKnownSearchLimit;
bool LevelExists(long blockNumber)
| 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using Nethermind.Blockchain.Find;
using Nethermind.Blockchain.Synchronization;
using Nethermind.Core;
using Nethermind.Core.Attributes;
using Nethermind.Core.Caching;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
using Nethermind.Core.Specs;
using Nethermind.Db;
using Nethermind.Dirichlet.Numerics;
using Nethermind.Logging;
using Nethermind.Serialization.Json;
using Nethermind.Serialization.Rlp;
using Nethermind.State.Repositories;
using Nethermind.Db.Blooms;
using Nethermind.TxPool;
namespace Nethermind.Blockchain
{
[Todo(Improve.Refactor, "After the fast sync work there are some duplicated code parts for the 'by header' and 'by block' approaches.")]
public partial class BlockTree : IBlockTree
{
private const int CacheSize = 64;
private readonly ICache<Keccak, Block> _blockCache = new LruCacheWithRecycling<Keccak, Block>(CacheSize, CacheSize, "blocks");
private readonly ICache<Keccak, BlockHeader> _headerCache = new LruCacheWithRecycling<Keccak, BlockHeader>(CacheSize, CacheSize, "headers");
private const int BestKnownSearchLimit = 256_000_000;
private readonly object _batchInsertLock = new object();
private readonly IDb _blockDb;
private readonly IDb _headerDb;
private readonly IDb _blockInfoDb;
private ICache<long, HashSet<Keccak>> _invalidBlocks = new LruCacheWithRecycling<long, HashSet<Keccak>>(128, 128, "invalid blocks");
private readonly BlockDecoder _blockDecoder = new BlockDecoder();
private readonly HeaderDecoder _headerDecoder = new HeaderDecoder();
private readonly ILogger _logger;
private readonly ISpecProvider _specProvider;
private readonly ITxPool _txPool;
private readonly IBloomStorage _bloomStorage;
private readonly ISyncConfig _syncConfig;
private readonly IChainLevelInfoRepository _chainLevelInfoRepository;
internal static Keccak DeletePointerAddressInDb = new Keccak(new BitArray(32 * 8, true).ToBytes());
internal static Keccak HeadAddressInDb = Keccak.Zero;
public BlockHeader Genesis { get; private set; }
public Block Head { get; private set; }
public BlockHeader BestSuggestedHeader { get; private set; }
public Block BestSuggestedBody { get; private set; }
public BlockHeader LowestInsertedHeader { get; private set; }
public Block LowestInsertedBody { get; private set; }
public long BestKnownNumber { get; private set; }
public int ChainId => _specProvider.ChainId;
public bool CanAcceptNewBlocks { get; private set; } = true; // no need to sync it at the moment
public BlockTree(
IDb blockDb,
IDb headerDb,
IDb blockInfoDb,
IChainLevelInfoRepository chainLevelInfoRepository,
ISpecProvider specProvider,
ITxPool txPool,
IBloomStorage bloomStorage,
ILogManager logManager)
: this(blockDb, headerDb, blockInfoDb, chainLevelInfoRepository, specProvider, txPool, bloomStorage, new SyncConfig(), logManager)
{
}
public BlockTree(
IDb blockDb,
IDb headerDb,
IDb blockInfoDb,
IChainLevelInfoRepository chainLevelInfoRepository,
ISpecProvider specProvider,
ITxPool txPool,
IBloomStorage bloomStorage,
ISyncConfig syncConfig,
ILogManager logManager)
{
_logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));
_blockDb = blockDb ?? throw new ArgumentNullException(nameof(blockDb));
_headerDb = headerDb ?? throw new ArgumentNullException(nameof(headerDb));
_blockInfoDb = blockInfoDb ?? throw new ArgumentNullException(nameof(blockInfoDb));
_specProvider = specProvider;
_txPool = txPool ?? throw new ArgumentNullException(nameof(txPool));
_bloomStorage = bloomStorage ?? throw new ArgumentNullException(nameof(txPool));
_syncConfig = syncConfig ?? throw new ArgumentNullException(nameof(syncConfig));
_chainLevelInfoRepository = chainLevelInfoRepository ?? throw new ArgumentNullException(nameof(chainLevelInfoRepository));
var deletePointer = _blockInfoDb.Get(DeletePointerAddressInDb);
if (deletePointer != null)
{
DeleteBlocks(new Keccak(deletePointer));
}
ChainLevelInfo genesisLevel = LoadLevel(0, true);
if (genesisLevel != null)
{
if (genesisLevel.BlockInfos.Length != 1)
{
// just for corrupted test bases
genesisLevel.BlockInfos = new[] {genesisLevel.BlockInfos[0]};
_chainLevelInfoRepository.PersistLevel(0, genesisLevel);
//throw new InvalidOperationException($"Genesis level in DB has {genesisLevel.BlockInfos.Length} blocks");
}
if (genesisLevel.BlockInfos[0].WasProcessed)
{
BlockHeader genesisHeader = FindHeader(genesisLevel.BlockInfos[0].BlockHash, BlockTreeLookupOptions.None);
Genesis = genesisHeader;
LoadHeadBlockAtStart();
}
RecalculateTreeLevels();
}
if (_logger.IsInfo) _logger.Info($"Block tree initialized, last processed is {Head?.Header?.ToString(BlockHeader.Format.Short) ?? "0"}, best queued is {BestSuggestedHeader?.Number.ToString() ?? "0"}, best known is {BestKnownNumber}, lowest inserted header {LowestInsertedHeader?.Number}, body {LowestInsertedBody?.Number}");
ThisNodeInfo.AddInfo("Chain ID :", $"{Nethermind.Core.ChainId.GetChainName(ChainId)}");
ThisNodeInfo.AddInfo("Chain head :", $"{Head?.Header?.ToString(BlockHeader.Format.Short) ?? "0"}");
}
private void RecalculateTreeLevels()
{
LoadLowestInsertedHeader();
LoadLowestInsertedBody();
LoadBestKnown();
}
private void LoadBestKnown()
{
long headNumber = Head?.Number ?? _syncConfig.PivotNumberParsed;
long left = Math.Max(_syncConfig.PivotNumberParsed, headNumber);
long right = headNumber + BestKnownSearchLimit;
bool LevelExists(long blockNumber)
{
return LoadLevel(blockNumber) != null;
}
bool HeaderExists(long blockNumber)
{
ChainLevelInfo level = LoadLevel(blockNumber);
if (level == null)
{
return false;
}
foreach (BlockInfo blockInfo in level.BlockInfos)
{
if (FindHeader(blockInfo.BlockHash, BlockTreeLookupOptions.None) != null)
{
return true;
}
}
return false;
}
bool BodyExists(long blockNumber)
{
ChainLevelInfo level = LoadLevel(blockNumber);
if (level == null)
{
return false;
}
foreach (BlockInfo blockInfo in level.BlockInfos)
{
if (FindBlock(blockInfo.BlockHash, BlockTreeLookupOptions.None) != null)
{
return true;
}
}
return false;
}
BestKnownNumber = BinarySearchBlockNumber(left, right, LevelExists) ?? 0;
long bestSuggestedHeaderNumber = BinarySearchBlockNumber(left, right, HeaderExists) ?? 0;
long bestSuggestedBodyNumber = BinarySearchBlockNumber(left, right, BodyExists) ?? 0;
if (BestKnownNumber < 0 ||
bestSuggestedHeaderNumber < 0 ||
bestSuggestedBodyNumber < 0 ||
bestSuggestedHeaderNumber < bestSuggestedBodyNumber)
{
throw new InvalidDataException($"Invalid initial block tree state loaded - best known: {BestKnownNumber}|best header: {bestSuggestedHeaderNumber}|best body: {bestSuggestedBodyNumber}|");
}
BestSuggestedHeader = FindHeader(bestSuggestedHeaderNumber, BlockTreeLookupOptions.None);
var bestSuggestedBodyHeader = FindHeader(bestSuggestedBodyNumber, BlockTreeLookupOptions.None);
BestSuggestedBody = bestSuggestedBodyHeader == null ? null : FindBlock(bestSuggestedBodyHeader.Hash, BlockTreeLookupOptions.None);
}
private void LoadLowestInsertedHeader()
{
long left = 1L;
long right = _syncConfig.PivotNumberParsed;
bool HasLevel(long blockNumber)
{
ChainLevelInfo level = LoadLevel(blockNumber);
return level != null;
}
long? lowestInsertedHeader = BinarySearchBlockNumber(left, right, HasLevel, BinarySearchDirection.Down);
if (lowestInsertedHeader != null)
{
ChainLevelInfo level = LoadLevel(lowestInsertedHeader.Value);
BlockInfo blockInfo = level.BlockInfos[0];
LowestInsertedHeader = FindHeader(blockInfo.BlockHash, BlockTreeLookupOptions.None);
}
}
private void LoadLowestInsertedBody()
{
long left = 1L;
long right = _syncConfig.PivotNumberParsed;
Block LoadBody(long blockNumber)
{
ChainLevelInfo level = LoadLevel(blockNumber, true);
return level == null ? null : FindBlock(level.BlockInfos[0].BlockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
}
bool HasBody(long blockNumber)
{
Block block = LoadBody(blockNumber);
return block != null;
}
long? lowestInsertedBody = BinarySearchBlockNumber(left, right, HasBody, BinarySearchDirection.Down);
if (lowestInsertedBody != null) LowestInsertedBody = LoadBody(lowestInsertedBody.Value);
}
private enum BinarySearchDirection
{
Up,
Down
}
private static long? BinarySearchBlockNumber(long left, long right, Func<long, bool> isBlockFound, BinarySearchDirection direction = BinarySearchDirection.Up)
{
if (left > right)
{
return null;
}
long? result = null;
while (left != right)
{
long index = direction == BinarySearchDirection.Up ? left + (right - left) / 2 : right - (right - left) / 2;
if (isBlockFound(index))
{
result = index;
if (direction == BinarySearchDirection.Up)
{
left = index + 1;
}
else
{
right = index - 1;
}
}
else
{
if (direction == BinarySearchDirection.Up)
{
right = index;
}
else
{
left = index;
}
}
}
if (isBlockFound(left))
{
result = direction == BinarySearchDirection.Up ? left : right;
}
return result;
}
public AddBlockResult Insert(BlockHeader header)
{
if (!CanAcceptNewBlocks)
{
return AddBlockResult.CannotAccept;
}
if (header.Number == 0)
{
throw new InvalidOperationException("Genesis block should not be inserted.");
}
if (header.TotalDifficulty == null)
{
SetTotalDifficulty(header);
}
// validate hash here
Rlp newRlp = _headerDecoder.Encode(header);
_headerDb.Set(header.Hash, newRlp.Bytes);
BlockInfo blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty ?? 0);
ChainLevelInfo chainLevel = new ChainLevelInfo(true, blockInfo);
_chainLevelInfoRepository.PersistLevel(header.Number, chainLevel);
_bloomStorage.Store(header.Number, header.Bloom);
if (header.Number < (LowestInsertedHeader?.Number ?? long.MaxValue))
{
LowestInsertedHeader = header;
}
if (header.Number > BestKnownNumber)
{
BestKnownNumber = header.Number;
}
if (header.Number > BestSuggestedHeader.Number)
{
BestSuggestedHeader = header;
}
return AddBlockResult.Added;
}
public AddBlockResult Insert(Block block)
{
if (!CanAcceptNewBlocks)
{
return AddBlockResult.CannotAccept;
}
if (block.Number == 0)
{
throw new InvalidOperationException("Genesis block should not be inserted.");
}
Rlp newRlp = _blockDecoder.Encode(block);
_blockDb.Set(block.Hash, newRlp.Bytes);
long expectedNumber = (LowestInsertedBody?.Number - 1 ?? LongConverter.FromString(_syncConfig.PivotNumber ?? "0"));
if (block.Number != expectedNumber)
{
throw new InvalidOperationException($"Trying to insert out of order block {block.Number} when expected number was {expectedNumber}");
}
if (block.Number < (LowestInsertedBody?.Number ?? long.MaxValue))
{
LowestInsertedBody = block;
}
return AddBlockResult.Added;
}
public void Insert(IEnumerable<Block> blocks)
{
lock (_batchInsertLock)
{
try
{
// _blockDb.StartBatch();
foreach (Block block in blocks)
{
Insert(block);
}
}
finally
{
// _blockDb.CommitBatch();
}
}
}
private AddBlockResult Suggest(Block block, BlockHeader header, bool shouldProcess = true)
{
#if DEBUG
/* this is just to make sure that we do not fall into this trap when creating tests */
if (header.StateRoot == null && !header.IsGenesis)
{
throw new InvalidDataException($"State root is null in {header.ToString(BlockHeader.Format.Short)}");
}
#endif
if (!CanAcceptNewBlocks)
{
return AddBlockResult.CannotAccept;
}
HashSet<Keccak> invalidBlocksWithThisNumber = _invalidBlocks.Get(header.Number);
if (invalidBlocksWithThisNumber?.Contains(header.Hash) ?? false)
{
return AddBlockResult.InvalidBlock;
}
bool isKnown = IsKnownBlock(header.Number, header.Hash);
if (header.Number == 0)
{
if (BestSuggestedHeader != null)
{
throw new InvalidOperationException("Genesis block should be added only once");
}
}
else if (isKnown && (BestSuggestedHeader?.Number ?? 0) >= header.Number)
{
if (_logger.IsTrace)
{
_logger.Trace($"Block {header.Hash} already known.");
}
return AddBlockResult.AlreadyKnown;
}
else if (!IsKnownBlock(header.Number - 1, header.ParentHash))
{
if (_logger.IsTrace)
{
_logger.Trace($"Could not find parent ({header.ParentHash}) of block {header.Hash}");
}
return AddBlockResult.UnknownParent;
}
SetTotalDifficulty(header);
if (block != null && !isKnown)
{
Rlp newRlp = _blockDecoder.Encode(block);
_blockDb.Set(block.Hash, newRlp.Bytes);
}
if (!isKnown)
{
Rlp newRlp = _headerDecoder.Encode(header);
_headerDb.Set(header.Hash, newRlp.Bytes);
BlockInfo blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty ?? 0);
UpdateOrCreateLevel(header.Number, blockInfo, !shouldProcess);
}
if (header.IsGenesis || header.TotalDifficulty > (BestSuggestedHeader?.TotalDifficulty ?? 0))
{
if (header.IsGenesis)
{
Genesis = header;
}
BestSuggestedHeader = header;
if (block != null && shouldProcess)
{
BestSuggestedBody = block;
NewBestSuggestedBlock?.Invoke(this, new BlockEventArgs(block));
}
}
return AddBlockResult.Added;
}
public AddBlockResult SuggestHeader(BlockHeader header)
{
return Suggest(null, header);
}
public AddBlockResult SuggestBlock(Block block, bool shouldProcess = true)
{
if (Genesis == null && !block.IsGenesis)
{
throw new InvalidOperationException("Block tree should be initialized with genesis before suggesting other blocks.");
}
return Suggest(block, block.Header, shouldProcess);
}
public BlockHeader FindHeader(long number, BlockTreeLookupOptions options)
{
Keccak blockHash = GetBlockHashOnMainOrBestDifficultyHash(number);
return blockHash == null ? null : FindHeader(blockHash, options);
}
public Keccak FindBlockHash(long blockNumber) => GetBlockHashOnMainOrBestDifficultyHash(blockNumber);
public BlockHeader FindHeader(Keccak blockHash, BlockTreeLookupOptions options)
{
if (blockHash == null || blockHash == Keccak.Zero)
{
// TODO: would be great to check why this is still needed (maybe it is something archaic)
return null;
}
BlockHeader header = _headerDb.Get(blockHash, _headerDecoder, _headerCache, false);
if (header == null)
{
return null;
}
bool totalDifficultyNeeded = (options & BlockTreeLookupOptions.TotalDifficultyNotNeeded) == BlockTreeLookupOptions.None;
bool requiresCanonical = (options & BlockTreeLookupOptions.RequireCanonical) == BlockTreeLookupOptions.RequireCanonical;
if ((totalDifficultyNeeded && header.TotalDifficulty == null) || requiresCanonical)
{
(BlockInfo blockInfo, ChainLevelInfo level) = LoadInfo(header.Number, header.Hash);
if (level == null || blockInfo == null)
{
// TODO: this is here because storing block data is not transactional
                // TODO: would be great to remove it, eh?
SetTotalDifficulty(header);
blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty.Value);
UpdateOrCreateLevel(header.Number, blockInfo);
(_, level) = LoadInfo(header.Number, header.Hash);
}
else
{
header.TotalDifficulty = blockInfo.TotalDifficulty;
}
if (requiresCanonical)
{
bool isMain = level.MainChainBlock?.BlockHash.Equals(blockHash) == true;
header = isMain ? header : null;
}
}
if (header != null && ShouldCache(header.Number))
{
_headerCache.Set(blockHash, header);
}
return header;
}
public Keccak FindHash(long number)
{
return GetBlockHashOnMainOrBestDifficultyHash(number);
}
public BlockHeader[] FindHeaders(Keccak blockHash, int numberOfBlocks, int skip, bool reverse)
{
if (numberOfBlocks == 0)
{
return Array.Empty<BlockHeader>();
}
if (blockHash == null)
{
return new BlockHeader[numberOfBlocks];
}
BlockHeader startHeader = FindHeader(blockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
if (startHeader == null)
{
return new BlockHeader[numberOfBlocks];
}
if (numberOfBlocks == 1)
{
return new[] {startHeader};
}
if (skip == 0)
{
/* if we do not skip and we have the last block then we can assume that all the blocks are there
and we can use the fact that we can use parent hash and that searching by hash is much faster
as it does not require the step of resolving number -> hash */
BlockHeader endHeader = FindHeader(startHeader.Number + numberOfBlocks - 1, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
if (endHeader != null)
{
return FindHeadersReversedFull(endHeader, numberOfBlocks);
}
}
BlockHeader[] result = new BlockHeader[numberOfBlocks];
BlockHeader current = startHeader;
int directionMultiplier = reverse ? -1 : 1;
int responseIndex = 0;
do
{
result[responseIndex] = current;
responseIndex++;
long nextNumber = startHeader.Number + directionMultiplier * (responseIndex * skip + responseIndex);
if (nextNumber < 0)
{
break;
}
current = FindHeader(nextNumber, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
} while (current != null && responseIndex < numberOfBlocks);
return result;
}
private BlockHeader[] FindHeadersReversedFull(BlockHeader startHeader, int numberOfBlocks)
{
if (startHeader == null) throw new ArgumentNullException(nameof(startHeader));
if (numberOfBlocks == 1)
{
return new[] {startHeader};
}
BlockHeader[] result = new BlockHeader[numberOfBlocks];
BlockHeader current = startHeader;
int responseIndex = numberOfBlocks - 1;
do
{
result[responseIndex] = current;
responseIndex--;
if (responseIndex < 0)
{
break;
}
current = this.FindParentHeader(current, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
} while (current != null && responseIndex < numberOfBlocks);
return result;
}
private Keccak GetBlockHashOnMainOrBestDifficultyHash(long blockNumber)
{
if (blockNumber < 0)
{
throw new ArgumentException($"{nameof(blockNumber)} must be greater or equal zero and is {blockNumber}",
nameof(blockNumber));
}
ChainLevelInfo level = LoadLevel(blockNumber);
if (level == null)
{
return null;
}
if (level.HasBlockOnMainChain)
{
return level.BlockInfos[0].BlockHash;
}
UInt256 bestDifficultySoFar = UInt256.Zero;
Keccak bestHash = null;
for (int i = 0; i < level.BlockInfos.Length; i++)
{
BlockInfo current = level.BlockInfos[i];
if (level.BlockInfos[i].TotalDifficulty > bestDifficultySoFar)
{
bestDifficultySoFar = current.TotalDifficulty;
bestHash = current.BlockHash;
}
}
return bestHash;
}
public Block FindBlock(long blockNumber, BlockTreeLookupOptions options)
{
Keccak hash = GetBlockHashOnMainOrBestDifficultyHash(blockNumber);
return FindBlock(hash, options);
}
public void DeleteInvalidBlock(Block invalidBlock)
{
if (_logger.IsDebug) _logger.Debug($"Deleting invalid block {invalidBlock.ToString(Block.Format.FullHashAndNumber)}");
var invalidBlocksWithThisNumber = _invalidBlocks.Get(invalidBlock.Number) ?? new HashSet<Keccak>();
invalidBlocksWithThisNumber.Add(invalidBlock.Hash);
_invalidBlocks.Set(invalidBlock.Number, invalidBlocksWithThisNumber);
BestSuggestedHeader = Head?.Header;
BestSuggestedBody = Head;
try
{
CanAcceptNewBlocks = false;
}
finally
{
DeleteBlocks(invalidBlock.Hash);
CanAcceptNewBlocks = true;
}
}
private void DeleteBlocks(Keccak deletePointer)
{
BlockHeader deleteHeader = FindHeader(deletePointer, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
long currentNumber = deleteHeader.Number;
Keccak currentHash = deleteHeader.Hash;
Keccak nextHash = null;
ChainLevelInfo nextLevel = null;
using var batch = _chainLevelInfoRepository.StartBatch();
while (true)
{
ChainLevelInfo currentLevel = nextLevel ?? LoadLevel(currentNumber);
nextLevel = LoadLevel(currentNumber + 1);
bool shouldRemoveLevel = false;
if (currentLevel != null) // preparing update of the level (removal of the invalid branch block)
{
if (currentLevel.BlockInfos.Length == 1)
{
shouldRemoveLevel = true;
}
else
{
for (int i = 0; i < currentLevel.BlockInfos.Length; i++)
{
if (currentLevel.BlockInfos[0].BlockHash == currentHash)
{
currentLevel.BlockInfos = currentLevel.BlockInfos.Where(bi => bi.BlockHash != currentHash).ToArray();
break;
}
}
}
}
// just finding what the next descendant will be
if (nextLevel != null)
{
nextHash = FindChild(nextLevel, currentHash);
}
UpdateDeletePointer(nextHash);
if (shouldRemoveLevel)
{
BestKnownNumber = Math.Min(BestKnownNumber, currentNumber - 1);
_chainLevelInfoRepository.Delete(currentNumber, batch);
}
else
{
_chainLevelInfoRepository.PersistLevel(currentNumber, currentLevel, batch);
}
if (_logger.IsInfo) _logger.Info($"Deleting invalid block {currentHash} at level {currentNumber}");
_blockCache.Delete(currentHash);
_blockDb.Delete(currentHash);
_headerCache.Delete(currentHash);
_headerDb.Delete(currentHash);
if (nextHash == null)
{
break;
}
currentNumber++;
currentHash = nextHash;
nextHash = null;
}
}
private Keccak FindChild(ChainLevelInfo level, Keccak parentHash)
{
Keccak childHash = null;
for (int i = 0; i < level.BlockInfos.Length; i++)
{
BlockHeader potentialChild = FindHeader(level.BlockInfos[i].BlockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
if (potentialChild.ParentHash == parentHash)
{
childHash = potentialChild.Hash;
break;
}
}
return childHash;
}
public bool IsMainChain(BlockHeader blockHeader)
{
ChainLevelInfo chainLevelInfo = LoadLevel(blockHeader.Number);
bool isMain = chainLevelInfo.MainChainBlock?.BlockHash.Equals(blockHeader.Hash) == true;
return isMain;
}
public bool IsMainChain(Keccak blockHash)
{
BlockHeader header = FindHeader(blockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
if (header == null)
{
throw new InvalidOperationException($"Not able to retrieve block number for an unknown block {blockHash}");
}
return IsMainChain(header);
}
public bool WasProcessed(long number, Keccak blockHash)
{
ChainLevelInfo levelInfo = LoadLevel(number);
int? index = FindIndex(blockHash, levelInfo);
if (index == null)
{
throw new InvalidOperationException($"Not able to find block {blockHash} index on the chain level");
}
return levelInfo.BlockInfos[index.Value].WasProcessed;
}
public void UpdateMainChain(Block[] processedBlocks, bool wereProcessed)
{
if (processedBlocks.Length == 0)
{
return;
}
bool ascendingOrder = true;
if (processedBlocks.Length > 1)
{
if (processedBlocks[^1].Number < processedBlocks[0].Number)
{
ascendingOrder = false;
}
}
#if DEBUG
for (int i = 0; i < processedBlocks.Length; i++)
{
if (i != 0)
{
if (ascendingOrder && processedBlocks[i].Number != processedBlocks[i - 1].Number + 1)
{
throw new InvalidOperationException("Update main chain invoked with gaps");
}
if (!ascendingOrder && processedBlocks[i - 1].Number != processedBlocks[i].Number + 1)
{
throw new InvalidOperationException("Update main chain invoked with gaps");
}
}
}
#endif
long lastNumber = ascendingOrder ? processedBlocks[^1].Number : processedBlocks[0].Number;
long previousHeadNumber = Head?.Number ?? 0L;
using BatchWrite batch = _chainLevelInfoRepository.StartBatch();
if (previousHeadNumber > lastNumber)
{
for (long i = 0; i < previousHeadNumber - lastNumber; i++)
{
long levelNumber = previousHeadNumber - i;
ChainLevelInfo level = LoadLevel(levelNumber);
level.HasBlockOnMainChain = false;
_chainLevelInfoRepository.PersistLevel(levelNumber, level, batch);
}
}
for (int i = 0; i < processedBlocks.Length; i++)
{
Block block = processedBlocks[i];
if (ShouldCache(block.Number))
{
_blockCache.Set(block.Hash, processedBlocks[i]);
_headerCache.Set(block.Hash, block.Header);
}
MoveToMain(processedBlocks[i], batch, wereProcessed);
}
}
[Todo(Improve.MissingFunctionality, "Recalculate bloom storage on reorg.")]
private void MoveToMain(Block block, BatchWrite batch, bool wasProcessed)
{
ChainLevelInfo level = LoadLevel(block.Number);
int? index = FindIndex(block.Hash, level);
if (index == null)
{
throw new InvalidOperationException($"Cannot move unknown block {block.ToString(Block.Format.FullHashAndNumber)} to main");
}
Keccak hashOfThePreviousMainBlock = level.MainChainBlock?.BlockHash;
BlockInfo info = level.BlockInfos[index.Value];
info.WasProcessed = wasProcessed;
if (index.Value != 0)
{
(level.BlockInfos[index.Value], level.BlockInfos[0]) = (level.BlockInfos[0], level.BlockInfos[index.Value]);
}
level.HasBlockOnMainChain = true;
_chainLevelInfoRepository.PersistLevel(block.Number, level, batch);
_bloomStorage.Store(block.Number, block.Bloom);
BlockAddedToMain?.Invoke(this, new BlockEventArgs(block));
if (block.IsGenesis || block.TotalDifficulty > (Head?.TotalDifficulty ?? 0))
{
if (block.Number == 0)
{
Genesis = block.Header;
}
if (block.TotalDifficulty == null)
{
throw new InvalidOperationException("Head block with null total difficulty");
}
if (wasProcessed)
{
UpdateHeadBlock(block);
}
}
for (int i = 0; i < block.Transactions.Length; i++)
{
_txPool.RemoveTransaction(block.Transactions[i].Hash, block.Number);
}
// the hash will only be the same during perf test runs / modified DB states
if (hashOfThePreviousMainBlock != null && hashOfThePreviousMainBlock != block.Hash)
{
Block previous = FindBlock(hashOfThePreviousMainBlock, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
bool isEip155Enabled = _specProvider.GetSpec(previous.Number).IsEip155Enabled;
for (int i = 0; i < previous?.Transactions.Length; i++)
{
Transaction tx = previous.Transactions[i];
_txPool.AddTransaction(tx, isEip155Enabled ? TxHandlingOptions.None : TxHandlingOptions.PreEip155Signing);
}
}
if (_logger.IsTrace) _logger.Trace($"Block {block.ToString(Block.Format.Short)} added to main chain");
}
private void LoadHeadBlockAtStart()
{
byte[] data = _blockInfoDb.Get(HeadAddressInDb);
if (data != null)
{
Block headBlock = FindBlock(new Keccak(data), BlockTreeLookupOptions.None);
ChainLevelInfo level = LoadLevel(headBlock.Number);
int? index = FindIndex(headBlock.Hash, level);
if (!index.HasValue)
{
throw new InvalidDataException("Head block data missing from chain info");
}
headBlock.Header.TotalDifficulty = level.BlockInfos[index.Value].TotalDifficulty;
Head = headBlock;
}
}
public bool IsKnownBlock(long number, Keccak blockHash)
{
if (number > BestKnownNumber)
{
return false;
}
// IsKnownBlock will be mainly called when new blocks are incoming
// and these are very likely to be all at the head of the chain
if (blockHash == Head?.Hash)
{
return true;
}
if (_headerCache.Get(blockHash) != null)
{
return true;
}
ChainLevelInfo level = LoadLevel(number);
return level != null && FindIndex(blockHash, level).HasValue;
}
private void UpdateDeletePointer(Keccak hash)
{
if (hash == null)
{
_blockInfoDb.Delete(DeletePointerAddressInDb);
}
else
{
if (_logger.IsInfo) _logger.Info($"Deleting an invalid block or its descendant {hash}");
_blockInfoDb.Set(DeletePointerAddressInDb, hash.Bytes);
}
}
private void UpdateHeadBlock(Block block)
{
if (block.IsGenesis)
{
Genesis = block.Header;
}
Head = block;
_blockInfoDb.Set(HeadAddressInDb, Head.Hash.Bytes);
NewHeadBlock?.Invoke(this, new BlockEventArgs(block));
}
private void UpdateOrCreateLevel(long number, BlockInfo blockInfo, bool setAsMain = false)
{
using (var batch = _chainLevelInfoRepository.StartBatch())
{
ChainLevelInfo level = LoadLevel(number, false);
if (level != null)
{
BlockInfo[] blockInfos = level.BlockInfos;
Array.Resize(ref blockInfos, blockInfos.Length + 1);
if (setAsMain)
{
blockInfos[^1] = blockInfos[0];
blockInfos[0] = blockInfo;
}
else
{
blockInfos[^1] = blockInfo;
}
level.BlockInfos = blockInfos;
}
else
{
if (number > BestKnownNumber)
{
BestKnownNumber = number;
}
level = new ChainLevelInfo(false, new[] {blockInfo});
}
if (setAsMain)
{
level.HasBlockOnMainChain = true;
}
_chainLevelInfoRepository.PersistLevel(number, level, batch);
}
}
private (BlockInfo Info, ChainLevelInfo Level) LoadInfo(long number, Keccak blockHash)
{
ChainLevelInfo chainLevelInfo = LoadLevel(number);
if (chainLevelInfo == null)
{
return (null, null);
}
int? index = FindIndex(blockHash, chainLevelInfo);
return index.HasValue ? (chainLevelInfo.BlockInfos[index.Value], chainLevelInfo) : (null, chainLevelInfo);
}
private int? FindIndex(Keccak blockHash, ChainLevelInfo level)
{
for (int i = 0; i < level.BlockInfos.Length; i++)
{
if (level.BlockInfos[i].BlockHash.Equals(blockHash))
{
return i;
}
}
return null;
}
private ChainLevelInfo LoadLevel(long number, bool forceLoad = true)
{
if (number > BestKnownNumber && !forceLoad)
{
return null;
}
return _chainLevelInfoRepository.LoadLevel(number);
}
/// <summary>
/// To make cache useful even when we handle sync requests
/// </summary>
/// <param name="number"></param>
/// <returns></returns>
private bool ShouldCache(long number)
{
return number == 0L || Head == null || number > Head.Number - CacheSize && number <= Head.Number + 1;
}
public ChainLevelInfo FindLevel(long number)
{
return _chainLevelInfoRepository.LoadLevel(number);
}
public Keccak HeadHash => Head?.Hash;
public Keccak GenesisHash => Genesis?.Hash;
public Keccak PendingHash => Head?.Hash;
public Block FindBlock(Keccak blockHash, BlockTreeLookupOptions options)
{
if (blockHash == null || blockHash == Keccak.Zero)
{
return null;
}
Block block = _blockDb.Get(blockHash, _blockDecoder, _blockCache, false);
if (block == null)
{
return null;
}
bool totalDifficultyNeeded = (options & BlockTreeLookupOptions.TotalDifficultyNotNeeded) == BlockTreeLookupOptions.None;
bool requiresCanonical = (options & BlockTreeLookupOptions.RequireCanonical) == BlockTreeLookupOptions.RequireCanonical;
if ((totalDifficultyNeeded && block.TotalDifficulty == null) || requiresCanonical)
{
(BlockInfo blockInfo, ChainLevelInfo level) = LoadInfo(block.Number, block.Hash);
if (level == null || blockInfo == null)
{
// TODO: this is here because storing block data is not transactional
                // TODO: would be great to remove it, eh?
SetTotalDifficulty(block.Header);
blockInfo = new BlockInfo(block.Hash, block.TotalDifficulty.Value);
UpdateOrCreateLevel(block.Number, blockInfo);
(_, level) = LoadInfo(block.Number, block.Hash);
}
else
{
block.Header.TotalDifficulty = blockInfo.TotalDifficulty;
}
if (requiresCanonical)
{
bool isMain = level.MainChainBlock?.BlockHash.Equals(blockHash) == true;
block = isMain ? block : null;
}
}
if (block != null && ShouldCache(block.Number))
{
_blockCache.Set(blockHash, block);
_headerCache.Set(blockHash, block.Header);
}
return block;
}
private void SetTotalDifficulty(BlockHeader header)
{
if (header.TotalDifficulty != null)
{
return;
}
if (_logger.IsTrace)
{
_logger.Trace($"Calculating total difficulty for {header}");
}
if (header.Number == 0)
{
header.TotalDifficulty = header.Difficulty;
}
else
{
BlockHeader parentHeader = this.FindParentHeader(header, BlockTreeLookupOptions.None);
if (parentHeader == null)
{
throw new InvalidOperationException($"An orphaned block on the chain {header}");
}
if (parentHeader.TotalDifficulty == null)
{
throw new InvalidOperationException(
$"Parent's {nameof(parentHeader.TotalDifficulty)} unknown when calculating for {header}");
}
header.TotalDifficulty = parentHeader.TotalDifficulty + header.Difficulty;
}
if (_logger.IsTrace)
{
_logger.Trace($"Calculated total difficulty for {header} is {header.TotalDifficulty}");
}
}
public event EventHandler<BlockEventArgs> BlockAddedToMain;
public event EventHandler<BlockEventArgs> NewBestSuggestedBlock;
public event EventHandler<BlockEventArgs> NewHeadBlock;
/// <summary>
/// Can delete a slice of the chain (usually invoked when the chain is corrupted in the DB).
/// This will only allow to delete a slice starting somewhere before the head of the chain
/// and ending somewhere after the head (in case there are some hanging levels later).
/// </summary>
/// <param name="startNumber">Start level of the slice to delete</param>
/// <param name="endNumber">End level of the slice to delete</param>
/// <exception cref="ArgumentException">Thrown when <paramref name="startNumber"/> ot <paramref name="endNumber"/> do not satisfy the slice position rules</exception>
public int DeleteChainSlice(in long startNumber, long? endNumber)
{
int deleted = 0;
endNumber ??= BestKnownNumber;
if (endNumber - startNumber < 0)
{
throw new ArgumentException("Start number must be equal or greater end number.", nameof(startNumber));
}
if (endNumber - startNumber > 50000)
{
throw new ArgumentException($"Cannot delete that many blocks at once (start: {startNumber}, end {endNumber}).", nameof(startNumber));
}
if (startNumber < 1)
{
throw new ArgumentException("Start number must be strictly greater than 0", nameof(startNumber));
}
Block newHeadBlock = null;
// we are running these checks before all the deletes
if (Head.Number >= startNumber)
{
// greater than zero so will not fail
ChainLevelInfo chainLevelInfo = _chainLevelInfoRepository.LoadLevel(startNumber - 1);
// there may be no canonical block marked on this level - then we just hack to genesis
Keccak newHeadHash = chainLevelInfo.HasBlockOnMainChain ? chainLevelInfo.BlockInfos[0].BlockHash : Genesis.Hash;
newHeadBlock = FindBlock(newHeadHash, BlockTreeLookupOptions.None);
}
using (_chainLevelInfoRepository.StartBatch())
{
for (long i = endNumber.Value; i >= startNumber; i--)
{
ChainLevelInfo chainLevelInfo = _chainLevelInfoRepository.LoadLevel(i);
if (chainLevelInfo == null)
{
continue;
}
_chainLevelInfoRepository.Delete(i);
deleted++;
foreach (BlockInfo blockInfo in chainLevelInfo.BlockInfos)
{
Keccak blockHash = blockInfo.BlockHash;
_blockInfoDb.Delete(blockHash);
_blockDb.Delete(blockHash);
_headerDb.Delete(blockHash);
}
}
}
if (newHeadBlock != null)
{
UpdateHeadBlock(newHeadBlock);
}
return deleted;
}
}
} | 1 | 23,887 | If the lowest inserted header is non-null and non-zero, then the pivot should be higher and would give a better best-known bound on the left. Maybe Head?.Number ?? Math.Max(Pivot, LowestInsertedHeader)? | NethermindEth-nethermind | .cs |
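The review message on this row proposes a different lower bound for the binary search in `LoadBestKnown` than the one introduced by the diff. Below is a minimal, self-contained C# sketch of that suggestion with the class members turned into parameters; it only illustrates the reviewer's idea and is not code taken from the repository.

```csharp
using System;

public static class BestKnownSearchBounds
{
    // Reviewer's idea: when a head block exists, start the search at its number;
    // otherwise take the better (higher) of the sync pivot and the lowest inserted
    // header as the left bound, instead of using the lowest inserted header alone.
    public static long LeftBound(long? headNumber, long pivotNumber, long? lowestInsertedHeaderNumber)
    {
        return headNumber ?? Math.Max(pivotNumber, lowestInsertedHeaderNumber ?? 0);
    }
}
```

As the message notes, when the lowest inserted header is present the pivot should normally be the higher of the two, so taking the maximum keeps the pivot as the search start while still falling back to the lowest inserted header when that is all that is known.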
@@ -0,0 +1,14 @@
+package azkaban.execapp.fake;
+
+import azkaban.execapp.AzkabanExecutorServer;
+import azkaban.execapp.jmx.JmxJobMBeanManager;
+import azkaban.utils.Props;
+
+public class FakeApp extends AzkabanExecutorServer {
+
+ public FakeApp() throws Exception {
+ super(new Props(), null, null, new FakeServer(), null);
+ JmxJobMBeanManager.getInstance().initialize(new Props());
+ }
+
+} | 1 | 1 | 13,289 | Is a FakeApp class needed? Can test use a Mockito mock instance instead? | azkaban-azkaban | java |
|
@@ -1020,7 +1020,8 @@ VariableSP SymbolFilePDB::ParseVariableForPDBData(
var_sp = std::make_shared<Variable>(
var_uid, var_name.c_str(), mangled_cstr, type_sp, scope, context_scope,
- ranges, &decl, location, is_external, is_artificial, is_static_member);
+ ranges, &decl, location, is_external, is_artificial, is_static_member,
+ is_constant);
var_sp->SetLocationIsConstantValueData(is_constant);
m_variables.insert(std::make_pair(var_uid, var_sp)); | 1 | //===-- SymbolFilePDB.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "SymbolFilePDB.h"
#include "PDBASTParser.h"
#include "PDBLocationToDWARFExpression.h"
#include "clang/Lex/Lexer.h"
#include "lldb/Core/Module.h"
#include "lldb/Core/PluginManager.h"
#include "lldb/Symbol/ClangASTContext.h"
#include "lldb/Symbol/CompileUnit.h"
#include "lldb/Symbol/LineTable.h"
#include "lldb/Symbol/ObjectFile.h"
#include "lldb/Symbol/SymbolContext.h"
#include "lldb/Symbol/SymbolVendor.h"
#include "lldb/Symbol/TypeList.h"
#include "lldb/Symbol/TypeMap.h"
#include "lldb/Symbol/Variable.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/RegularExpression.h"
#include "llvm/DebugInfo/PDB/GenericError.h"
#include "llvm/DebugInfo/PDB/IPDBDataStream.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/IPDBLineNumber.h"
#include "llvm/DebugInfo/PDB/IPDBSectionContrib.h"
#include "llvm/DebugInfo/PDB/IPDBSourceFile.h"
#include "llvm/DebugInfo/PDB/IPDBTable.h"
#include "llvm/DebugInfo/PDB/PDBSymbol.h"
#include "llvm/DebugInfo/PDB/PDBSymbolBlock.h"
#include "llvm/DebugInfo/PDB/PDBSymbolCompiland.h"
#include "llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h"
#include "llvm/DebugInfo/PDB/PDBSymbolData.h"
#include "llvm/DebugInfo/PDB/PDBSymbolExe.h"
#include "llvm/DebugInfo/PDB/PDBSymbolFunc.h"
#include "llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h"
#include "llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h"
#include "llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h"
#include "llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h"
#include "llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h"
#include "llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h"
#include "Plugins/Language/CPlusPlus/CPlusPlusLanguage.h"
#include "Plugins/Language/CPlusPlus/MSVCUndecoratedNameParser.h"
#include "Plugins/SymbolFile/NativePDB/SymbolFileNativePDB.h"
#include <regex>
using namespace lldb;
using namespace lldb_private;
using namespace llvm::pdb;
namespace {
lldb::LanguageType TranslateLanguage(PDB_Lang lang) {
switch (lang) {
case PDB_Lang::Cpp:
return lldb::LanguageType::eLanguageTypeC_plus_plus;
case PDB_Lang::C:
return lldb::LanguageType::eLanguageTypeC;
case PDB_Lang::Swift:
return lldb::LanguageType::eLanguageTypeSwift;
default:
return lldb::LanguageType::eLanguageTypeUnknown;
}
}
bool ShouldAddLine(uint32_t requested_line, uint32_t actual_line,
uint32_t addr_length) {
return ((requested_line == 0 || actual_line == requested_line) &&
addr_length > 0);
}
} // namespace
static bool ShouldUseNativeReader() {
#if defined(_WIN32)
llvm::StringRef use_native = ::getenv("LLDB_USE_NATIVE_PDB_READER");
return use_native.equals_lower("on") || use_native.equals_lower("yes") ||
use_native.equals_lower("1") || use_native.equals_lower("true");
#else
return true;
#endif
}
void SymbolFilePDB::Initialize() {
if (ShouldUseNativeReader()) {
npdb::SymbolFileNativePDB::Initialize();
} else {
PluginManager::RegisterPlugin(GetPluginNameStatic(),
GetPluginDescriptionStatic(), CreateInstance,
DebuggerInitialize);
}
}
void SymbolFilePDB::Terminate() {
if (ShouldUseNativeReader()) {
npdb::SymbolFileNativePDB::Terminate();
} else {
PluginManager::UnregisterPlugin(CreateInstance);
}
}
void SymbolFilePDB::DebuggerInitialize(lldb_private::Debugger &debugger) {}
lldb_private::ConstString SymbolFilePDB::GetPluginNameStatic() {
static ConstString g_name("pdb");
return g_name;
}
const char *SymbolFilePDB::GetPluginDescriptionStatic() {
return "Microsoft PDB debug symbol file reader.";
}
lldb_private::SymbolFile *
SymbolFilePDB::CreateInstance(ObjectFileSP objfile_sp) {
return new SymbolFilePDB(std::move(objfile_sp));
}
SymbolFilePDB::SymbolFilePDB(lldb::ObjectFileSP objfile_sp)
: SymbolFile(std::move(objfile_sp)), m_session_up(), m_global_scope_up() {}
SymbolFilePDB::~SymbolFilePDB() {}
uint32_t SymbolFilePDB::CalculateAbilities() {
uint32_t abilities = 0;
if (!m_objfile_sp)
return 0;
if (!m_session_up) {
// Lazily load and match the PDB file, but only do this once.
std::string exePath = m_objfile_sp->GetFileSpec().GetPath();
auto error = loadDataForEXE(PDB_ReaderType::DIA, llvm::StringRef(exePath),
m_session_up);
if (error) {
llvm::consumeError(std::move(error));
auto module_sp = m_objfile_sp->GetModule();
if (!module_sp)
return 0;
// See if any symbol file is specified through `--symfile` option.
FileSpec symfile = module_sp->GetSymbolFileFileSpec();
if (!symfile)
return 0;
error = loadDataForPDB(PDB_ReaderType::DIA,
llvm::StringRef(symfile.GetPath()), m_session_up);
if (error) {
llvm::consumeError(std::move(error));
return 0;
}
}
}
if (!m_session_up)
return 0;
auto enum_tables_up = m_session_up->getEnumTables();
if (!enum_tables_up)
return 0;
while (auto table_up = enum_tables_up->getNext()) {
if (table_up->getItemCount() == 0)
continue;
auto type = table_up->getTableType();
switch (type) {
case PDB_TableType::Symbols:
// This table represents a store of symbols with types listed in
// PDBSym_Type
abilities |= (CompileUnits | Functions | Blocks | GlobalVariables |
LocalVariables | VariableTypes);
break;
case PDB_TableType::LineNumbers:
abilities |= LineTables;
break;
default:
break;
}
}
return abilities;
}
void SymbolFilePDB::InitializeObject() {
lldb::addr_t obj_load_address =
m_objfile_sp->GetBaseAddress().GetFileAddress();
lldbassert(obj_load_address && obj_load_address != LLDB_INVALID_ADDRESS);
m_session_up->setLoadAddress(obj_load_address);
if (!m_global_scope_up)
m_global_scope_up = m_session_up->getGlobalScope();
lldbassert(m_global_scope_up.get());
}
uint32_t SymbolFilePDB::CalculateNumCompileUnits() {
auto compilands = m_global_scope_up->findAllChildren<PDBSymbolCompiland>();
if (!compilands)
return 0;
// The linker could link *.dll (compiland language = LINK), or import
// *.dll. For example, a compiland with name `Import:KERNEL32.dll` could be
// found as a child of the global scope (PDB executable). Usually, such
// compilands contain `thunk` symbols in which we are not interested for
// now. However we still count them in the compiland list. If we perform
// any compiland related activity, like finding symbols through
// llvm::pdb::IPDBSession methods, such compilands will all be searched
// automatically no matter whether we include them or not.
uint32_t compile_unit_count = compilands->getChildCount();
// The linker can inject an additional "dummy" compilation unit into the
// PDB. Ignore this special compile unit for our purposes, if it is there.
// It is always the last one.
auto last_compiland_up = compilands->getChildAtIndex(compile_unit_count - 1);
lldbassert(last_compiland_up.get());
std::string name = last_compiland_up->getName();
if (name == "* Linker *")
--compile_unit_count;
return compile_unit_count;
}
void SymbolFilePDB::GetCompileUnitIndex(
const llvm::pdb::PDBSymbolCompiland &pdb_compiland, uint32_t &index) {
auto results_up = m_global_scope_up->findAllChildren<PDBSymbolCompiland>();
if (!results_up)
return;
auto uid = pdb_compiland.getSymIndexId();
for (uint32_t cu_idx = 0; cu_idx < GetNumCompileUnits(); ++cu_idx) {
auto compiland_up = results_up->getChildAtIndex(cu_idx);
if (!compiland_up)
continue;
if (compiland_up->getSymIndexId() == uid) {
index = cu_idx;
return;
}
}
index = UINT32_MAX;
return;
}
std::unique_ptr<llvm::pdb::PDBSymbolCompiland>
SymbolFilePDB::GetPDBCompilandByUID(uint32_t uid) {
return m_session_up->getConcreteSymbolById<PDBSymbolCompiland>(uid);
}
lldb::CompUnitSP SymbolFilePDB::ParseCompileUnitAtIndex(uint32_t index) {
if (index >= GetNumCompileUnits())
return CompUnitSP();
// Assuming we always retrieve same compilands listed in same order through
// `PDBSymbolExe::findAllChildren` method, otherwise using `index` to get a
// compile unit makes no sense.
auto results = m_global_scope_up->findAllChildren<PDBSymbolCompiland>();
if (!results)
return CompUnitSP();
auto compiland_up = results->getChildAtIndex(index);
if (!compiland_up)
return CompUnitSP();
return ParseCompileUnitForUID(compiland_up->getSymIndexId(), index);
}
lldb::LanguageType SymbolFilePDB::ParseLanguage(CompileUnit &comp_unit) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
auto compiland_up = GetPDBCompilandByUID(comp_unit.GetID());
if (!compiland_up)
return lldb::eLanguageTypeUnknown;
auto details = compiland_up->findOneChild<PDBSymbolCompilandDetails>();
if (!details)
return lldb::eLanguageTypeUnknown;
return TranslateLanguage(details->getLanguage());
}
lldb_private::Function *
SymbolFilePDB::ParseCompileUnitFunctionForPDBFunc(const PDBSymbolFunc &pdb_func,
CompileUnit &comp_unit) {
if (FunctionSP result = comp_unit.FindFunctionByUID(pdb_func.getSymIndexId()))
return result.get();
auto file_vm_addr = pdb_func.getVirtualAddress();
if (file_vm_addr == LLDB_INVALID_ADDRESS || file_vm_addr == 0)
return nullptr;
auto func_length = pdb_func.getLength();
AddressRange func_range =
AddressRange(file_vm_addr, func_length,
GetObjectFile()->GetModule()->GetSectionList());
if (!func_range.GetBaseAddress().IsValid())
return nullptr;
lldb_private::Type *func_type = ResolveTypeUID(pdb_func.getSymIndexId());
if (!func_type)
return nullptr;
user_id_t func_type_uid = pdb_func.getSignatureId();
Mangled mangled = GetMangledForPDBFunc(pdb_func);
FunctionSP func_sp =
std::make_shared<Function>(&comp_unit, pdb_func.getSymIndexId(),
func_type_uid, mangled, func_type, func_range);
comp_unit.AddFunction(func_sp);
LanguageType lang = ParseLanguage(comp_unit);
auto type_system_or_err = GetTypeSystemForLanguage(lang);
if (auto err = type_system_or_err.takeError()) {
LLDB_LOG_ERROR(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_SYMBOLS),
std::move(err), "Unable to parse PDBFunc");
return nullptr;
}
ClangASTContext *clang_type_system =
llvm::dyn_cast_or_null<ClangASTContext>(&type_system_or_err.get());
if (!clang_type_system)
return nullptr;
clang_type_system->GetPDBParser()->GetDeclForSymbol(pdb_func);
return func_sp.get();
}
size_t SymbolFilePDB::ParseFunctions(CompileUnit &comp_unit) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
size_t func_added = 0;
auto compiland_up = GetPDBCompilandByUID(comp_unit.GetID());
if (!compiland_up)
return 0;
auto results_up = compiland_up->findAllChildren<PDBSymbolFunc>();
if (!results_up)
return 0;
while (auto pdb_func_up = results_up->getNext()) {
auto func_sp = comp_unit.FindFunctionByUID(pdb_func_up->getSymIndexId());
if (!func_sp) {
if (ParseCompileUnitFunctionForPDBFunc(*pdb_func_up, comp_unit))
++func_added;
}
}
return func_added;
}
bool SymbolFilePDB::ParseLineTable(CompileUnit &comp_unit) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
if (comp_unit.GetLineTable())
return true;
return ParseCompileUnitLineTable(comp_unit, 0);
}
bool SymbolFilePDB::ParseDebugMacros(CompileUnit &comp_unit) {
// PDB doesn't contain information about macros
return false;
}
bool SymbolFilePDB::ParseSupportFiles(
CompileUnit &comp_unit, lldb_private::FileSpecList &support_files) {
// In theory this is unnecessary work for us, because all of this information
// is easily (and quickly) accessible from DebugInfoPDB, so caching it a
// second time seems like a waste. Unfortunately, there's no good way around
// this short of a moderate refactor since SymbolVendor depends on being able
// to cache this list.
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
auto compiland_up = GetPDBCompilandByUID(comp_unit.GetID());
if (!compiland_up)
return false;
auto files = m_session_up->getSourceFilesForCompiland(*compiland_up);
if (!files || files->getChildCount() == 0)
return false;
while (auto file = files->getNext()) {
FileSpec spec(file->getFileName(), FileSpec::Style::windows);
support_files.AppendIfUnique(spec);
}
// LLDB uses the DWARF-like file numeration (one based),
// the zeroth file is the compile unit itself
support_files.Insert(0, comp_unit);
return true;
}
bool SymbolFilePDB::ParseImportedModules(
const lldb_private::SymbolContext &sc,
std::vector<SourceModule> &imported_modules) {
// PDB does not yet support module debug info
return false;
}
static size_t ParseFunctionBlocksForPDBSymbol(
uint64_t func_file_vm_addr, const llvm::pdb::PDBSymbol *pdb_symbol,
lldb_private::Block *parent_block, bool is_top_parent) {
assert(pdb_symbol && parent_block);
size_t num_added = 0;
switch (pdb_symbol->getSymTag()) {
case PDB_SymType::Block:
case PDB_SymType::Function: {
Block *block = nullptr;
auto &raw_sym = pdb_symbol->getRawSymbol();
if (auto *pdb_func = llvm::dyn_cast<PDBSymbolFunc>(pdb_symbol)) {
if (pdb_func->hasNoInlineAttribute())
break;
if (is_top_parent)
block = parent_block;
else
break;
} else if (llvm::dyn_cast<PDBSymbolBlock>(pdb_symbol)) {
auto uid = pdb_symbol->getSymIndexId();
if (parent_block->FindBlockByID(uid))
break;
if (raw_sym.getVirtualAddress() < func_file_vm_addr)
break;
auto block_sp = std::make_shared<Block>(pdb_symbol->getSymIndexId());
parent_block->AddChild(block_sp);
block = block_sp.get();
} else
llvm_unreachable("Unexpected PDB symbol!");
block->AddRange(Block::Range(
raw_sym.getVirtualAddress() - func_file_vm_addr, raw_sym.getLength()));
block->FinalizeRanges();
++num_added;
auto results_up = pdb_symbol->findAllChildren();
if (!results_up)
break;
while (auto symbol_up = results_up->getNext()) {
num_added += ParseFunctionBlocksForPDBSymbol(
func_file_vm_addr, symbol_up.get(), block, false);
}
} break;
default:
break;
}
return num_added;
}
size_t SymbolFilePDB::ParseBlocksRecursive(Function &func) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
size_t num_added = 0;
auto uid = func.GetID();
auto pdb_func_up = m_session_up->getConcreteSymbolById<PDBSymbolFunc>(uid);
if (!pdb_func_up)
return 0;
Block &parent_block = func.GetBlock(false);
num_added = ParseFunctionBlocksForPDBSymbol(
pdb_func_up->getVirtualAddress(), pdb_func_up.get(), &parent_block, true);
return num_added;
}
size_t SymbolFilePDB::ParseTypes(CompileUnit &comp_unit) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
size_t num_added = 0;
auto compiland = GetPDBCompilandByUID(comp_unit.GetID());
if (!compiland)
return 0;
auto ParseTypesByTagFn = [&num_added, this](const PDBSymbol &raw_sym) {
std::unique_ptr<IPDBEnumSymbols> results;
PDB_SymType tags_to_search[] = {PDB_SymType::Enum, PDB_SymType::Typedef,
PDB_SymType::UDT};
for (auto tag : tags_to_search) {
results = raw_sym.findAllChildren(tag);
if (!results || results->getChildCount() == 0)
continue;
while (auto symbol = results->getNext()) {
switch (symbol->getSymTag()) {
case PDB_SymType::Enum:
case PDB_SymType::UDT:
case PDB_SymType::Typedef:
break;
default:
continue;
}
// This should cause the type to get cached and stored in the `m_types`
// lookup.
if (auto type = ResolveTypeUID(symbol->getSymIndexId())) {
// Resolve the type completely to avoid a completion
// (and so a list change, which causes an iterators invalidation)
// during a TypeList dumping
type->GetFullCompilerType();
++num_added;
}
}
}
};
ParseTypesByTagFn(*compiland);
// Also parse global types particularly coming from this compiland.
// Unfortunately, PDB has no compiland information for each global type. We
// have to parse them all. But ensure we only do this once.
static bool parse_all_global_types = false;
if (!parse_all_global_types) {
ParseTypesByTagFn(*m_global_scope_up);
parse_all_global_types = true;
}
return num_added;
}
size_t
SymbolFilePDB::ParseVariablesForContext(const lldb_private::SymbolContext &sc) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
if (!sc.comp_unit)
return 0;
size_t num_added = 0;
if (sc.function) {
auto pdb_func = m_session_up->getConcreteSymbolById<PDBSymbolFunc>(
sc.function->GetID());
if (!pdb_func)
return 0;
num_added += ParseVariables(sc, *pdb_func);
sc.function->GetBlock(false).SetDidParseVariables(true, true);
} else if (sc.comp_unit) {
auto compiland = GetPDBCompilandByUID(sc.comp_unit->GetID());
if (!compiland)
return 0;
if (sc.comp_unit->GetVariableList(false))
return 0;
auto results = m_global_scope_up->findAllChildren<PDBSymbolData>();
if (results && results->getChildCount()) {
while (auto result = results->getNext()) {
auto cu_id = GetCompilandId(*result);
// FIXME: We are not able to determine variable's compile unit.
if (cu_id == 0)
continue;
if (cu_id == sc.comp_unit->GetID())
num_added += ParseVariables(sc, *result);
}
}
// FIXME: A `file static` or `global constant` variable appears both in
// compiland's children and global scope's children with unexpectedly
// different symbol's Id making it ambiguous.
// FIXME: 'local constant', for example, const char var[] = "abc", declared
// in a function scope, can't be found in PDB.
// Parse variables in this compiland.
num_added += ParseVariables(sc, *compiland);
}
return num_added;
}
lldb_private::Type *SymbolFilePDB::ResolveTypeUID(lldb::user_id_t type_uid) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
auto find_result = m_types.find(type_uid);
if (find_result != m_types.end())
return find_result->second.get();
auto type_system_or_err =
GetTypeSystemForLanguage(lldb::eLanguageTypeC_plus_plus);
if (auto err = type_system_or_err.takeError()) {
LLDB_LOG_ERROR(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_SYMBOLS),
std::move(err), "Unable to ResolveTypeUID");
return nullptr;
}
ClangASTContext *clang_type_system =
llvm::dyn_cast_or_null<ClangASTContext>(&type_system_or_err.get());
if (!clang_type_system)
return nullptr;
PDBASTParser *pdb = clang_type_system->GetPDBParser();
if (!pdb)
return nullptr;
auto pdb_type = m_session_up->getSymbolById(type_uid);
if (pdb_type == nullptr)
return nullptr;
lldb::TypeSP result = pdb->CreateLLDBTypeFromPDBType(*pdb_type);
if (result) {
m_types.insert(std::make_pair(type_uid, result));
GetTypeList().Insert(result);
}
return result.get();
}
llvm::Optional<SymbolFile::ArrayInfo> SymbolFilePDB::GetDynamicArrayInfoForUID(
lldb::user_id_t type_uid, const lldb_private::ExecutionContext *exe_ctx) {
return llvm::None;
}
bool SymbolFilePDB::CompleteType(lldb_private::CompilerType &compiler_type) {
std::lock_guard<std::recursive_mutex> guard(
GetObjectFile()->GetModule()->GetMutex());
auto type_system_or_err =
GetTypeSystemForLanguage(lldb::eLanguageTypeC_plus_plus);
if (auto err = type_system_or_err.takeError()) {
LLDB_LOG_ERROR(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_SYMBOLS),
std::move(err), "Unable to get dynamic array info for UID");
return false;
}
ClangASTContext *clang_ast_ctx =
llvm::dyn_cast_or_null<ClangASTContext>(&type_system_or_err.get());
if (!clang_ast_ctx)
return false;
PDBASTParser *pdb = clang_ast_ctx->GetPDBParser();
if (!pdb)
return false;
return pdb->CompleteTypeFromPDB(compiler_type);
}
lldb_private::CompilerDecl SymbolFilePDB::GetDeclForUID(lldb::user_id_t uid) {
auto type_system_or_err =
GetTypeSystemForLanguage(lldb::eLanguageTypeC_plus_plus);
if (auto err = type_system_or_err.takeError()) {
LLDB_LOG_ERROR(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_SYMBOLS),
std::move(err), "Unable to get decl for UID");
return CompilerDecl();
}
ClangASTContext *clang_ast_ctx =
llvm::dyn_cast_or_null<ClangASTContext>(&type_system_or_err.get());
if (!clang_ast_ctx)
return CompilerDecl();
PDBASTParser *pdb = clang_ast_ctx->GetPDBParser();
if (!pdb)
return CompilerDecl();
auto symbol = m_session_up->getSymbolById(uid);
if (!symbol)
return CompilerDecl();
auto decl = pdb->GetDeclForSymbol(*symbol);
if (!decl)
return CompilerDecl();
return CompilerDecl(clang_ast_ctx, decl);
}
lldb_private::CompilerDeclContext
SymbolFilePDB::GetDeclContextForUID(lldb::user_id_t uid) {
auto type_system_or_err =
GetTypeSystemForLanguage(lldb::eLanguageTypeC_plus_plus);
if (auto err = type_system_or_err.takeError()) {
LLDB_LOG_ERROR(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_SYMBOLS),
std::move(err), "Unable to get DeclContext for UID");
return CompilerDeclContext();
}
ClangASTContext *clang_ast_ctx =
llvm::dyn_cast_or_null<ClangASTContext>(&type_system_or_err.get());
if (!clang_ast_ctx)
return CompilerDeclContext();
PDBASTParser *pdb = clang_ast_ctx->GetPDBParser();
if (!pdb)
return CompilerDeclContext();
auto symbol = m_session_up->getSymbolById(uid);
if (!symbol)
return CompilerDeclContext();
auto decl_context = pdb->GetDeclContextForSymbol(*symbol);
if (!decl_context)
return GetDeclContextContainingUID(uid);
return CompilerDeclContext(clang_ast_ctx, decl_context);
}
lldb_private::CompilerDeclContext
SymbolFilePDB::GetDeclContextContainingUID(lldb::user_id_t uid) {
auto type_system_or_err =
GetTypeSystemForLanguage(lldb::eLanguageTypeC_plus_plus);
if (auto err = type_system_or_err.takeError()) {
LLDB_LOG_ERROR(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_SYMBOLS),
std::move(err), "Unable to get DeclContext containing UID");
return CompilerDeclContext();
}
ClangASTContext *clang_ast_ctx =
llvm::dyn_cast_or_null<ClangASTContext>(&type_system_or_err.get());
if (!clang_ast_ctx)
return CompilerDeclContext();
PDBASTParser *pdb = clang_ast_ctx->GetPDBParser();
if (!pdb)
return CompilerDeclContext();
auto symbol = m_session_up->getSymbolById(uid);
if (!symbol)
return CompilerDeclContext();
auto decl_context = pdb->GetDeclContextContainingSymbol(*symbol);
assert(decl_context);
return CompilerDeclContext(clang_ast_ctx, decl_context);
}
void SymbolFilePDB::ParseDeclsForContext(
lldb_private::CompilerDeclContext decl_ctx) {
auto type_system_or_err =
GetTypeSystemForLanguage(lldb::eLanguageTypeC_plus_plus);
if (auto err = type_system_or_err.takeError()) {
LLDB_LOG_ERROR(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_SYMBOLS),
std::move(err), "Unable to parse decls for context");
return;
}
ClangASTContext *clang_ast_ctx =
llvm::dyn_cast_or_null<ClangASTContext>(&type_system_or_err.get());
if (!clang_ast_ctx)
return;
PDBASTParser *pdb = clang_ast_ctx->GetPDBParser();
if (!pdb)
return;
pdb->ParseDeclsForDeclContext(
static_cast<clang::DeclContext *>(decl_ctx.GetOpaqueDeclContext()));
}
uint32_t
SymbolFilePDB::ResolveSymbolContext(const lldb_private::Address &so_addr,
SymbolContextItem resolve_scope,
lldb_private::SymbolContext &sc) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
uint32_t resolved_flags = 0;
if (resolve_scope & eSymbolContextCompUnit ||
resolve_scope & eSymbolContextVariable ||
resolve_scope & eSymbolContextFunction ||
resolve_scope & eSymbolContextBlock ||
resolve_scope & eSymbolContextLineEntry) {
auto cu_sp = GetCompileUnitContainsAddress(so_addr);
if (!cu_sp) {
if (resolved_flags & eSymbolContextVariable) {
// TODO: Resolve variables
}
return 0;
}
sc.comp_unit = cu_sp.get();
resolved_flags |= eSymbolContextCompUnit;
lldbassert(sc.module_sp == cu_sp->GetModule());
}
if (resolve_scope & eSymbolContextFunction ||
resolve_scope & eSymbolContextBlock) {
addr_t file_vm_addr = so_addr.GetFileAddress();
auto symbol_up =
m_session_up->findSymbolByAddress(file_vm_addr, PDB_SymType::Function);
if (symbol_up) {
auto *pdb_func = llvm::dyn_cast<PDBSymbolFunc>(symbol_up.get());
assert(pdb_func);
auto func_uid = pdb_func->getSymIndexId();
sc.function = sc.comp_unit->FindFunctionByUID(func_uid).get();
if (sc.function == nullptr)
sc.function =
ParseCompileUnitFunctionForPDBFunc(*pdb_func, *sc.comp_unit);
if (sc.function) {
resolved_flags |= eSymbolContextFunction;
if (resolve_scope & eSymbolContextBlock) {
auto block_symbol = m_session_up->findSymbolByAddress(
file_vm_addr, PDB_SymType::Block);
auto block_id = block_symbol ? block_symbol->getSymIndexId()
: sc.function->GetID();
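            // If no PDB Block symbol exists at this address, fall back to the
            // function's own id, which identifies the function-level block.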
sc.block = sc.function->GetBlock(true).FindBlockByID(block_id);
if (sc.block)
resolved_flags |= eSymbolContextBlock;
}
}
}
}
if (resolve_scope & eSymbolContextLineEntry) {
if (auto *line_table = sc.comp_unit->GetLineTable()) {
Address addr(so_addr);
if (line_table->FindLineEntryByAddress(addr, sc.line_entry))
resolved_flags |= eSymbolContextLineEntry;
}
}
return resolved_flags;
}
uint32_t SymbolFilePDB::ResolveSymbolContext(
const lldb_private::FileSpec &file_spec, uint32_t line, bool check_inlines,
SymbolContextItem resolve_scope, lldb_private::SymbolContextList &sc_list) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
const size_t old_size = sc_list.GetSize();
if (resolve_scope & lldb::eSymbolContextCompUnit) {
// Locate all compilation units with line numbers referencing the specified
// file. For example, if `file_spec` is <vector>, then this should return
// all source files and header files that reference <vector>, either
// directly or indirectly.
auto compilands = m_session_up->findCompilandsForSourceFile(
file_spec.GetPath(), PDB_NameSearchFlags::NS_CaseInsensitive);
if (!compilands)
return 0;
// For each one, either find its previously parsed data or parse it afresh
// and add it to the symbol context list.
while (auto compiland = compilands->getNext()) {
// If we're not checking inlines, then don't add line information for
// this file unless the FileSpec matches. For inline functions, we don't
// have to match the FileSpec since they could be defined in headers
      // other than the file specified in FileSpec.
if (!check_inlines) {
std::string source_file = compiland->getSourceFileFullPath();
if (source_file.empty())
continue;
FileSpec this_spec(source_file, FileSpec::Style::windows);
bool need_full_match = !file_spec.GetDirectory().IsEmpty();
if (FileSpec::Compare(file_spec, this_spec, need_full_match) != 0)
continue;
}
SymbolContext sc;
auto cu = ParseCompileUnitForUID(compiland->getSymIndexId());
if (!cu)
continue;
sc.comp_unit = cu.get();
sc.module_sp = cu->GetModule();
// If we were asked to resolve line entries, add all entries to the line
// table that match the requested line (or all lines if `line` == 0).
if (resolve_scope & (eSymbolContextFunction | eSymbolContextBlock |
eSymbolContextLineEntry)) {
bool has_line_table = ParseCompileUnitLineTable(*sc.comp_unit, line);
if ((resolve_scope & eSymbolContextLineEntry) && !has_line_table) {
// The query asks for line entries, but we can't get them for the
          // compile unit. This should not happen when `line` == 0, so just
          // assert it.
assert(line && "Couldn't get all line entries!\n");
// Current compiland does not have the requested line. Search next.
continue;
}
if (resolve_scope & (eSymbolContextFunction | eSymbolContextBlock)) {
if (!has_line_table)
continue;
auto *line_table = sc.comp_unit->GetLineTable();
lldbassert(line_table);
uint32_t num_line_entries = line_table->GetSize();
// Skip the terminal line entry.
--num_line_entries;
          // If `line` != 0, see if we can resolve the function for each line entry
// in the line table.
for (uint32_t line_idx = 0; line && line_idx < num_line_entries;
++line_idx) {
if (!line_table->GetLineEntryAtIndex(line_idx, sc.line_entry))
continue;
auto file_vm_addr =
sc.line_entry.range.GetBaseAddress().GetFileAddress();
if (file_vm_addr == LLDB_INVALID_ADDRESS || file_vm_addr == 0)
continue;
auto symbol_up = m_session_up->findSymbolByAddress(
file_vm_addr, PDB_SymType::Function);
if (symbol_up) {
auto func_uid = symbol_up->getSymIndexId();
sc.function = sc.comp_unit->FindFunctionByUID(func_uid).get();
if (sc.function == nullptr) {
auto pdb_func = llvm::dyn_cast<PDBSymbolFunc>(symbol_up.get());
assert(pdb_func);
sc.function = ParseCompileUnitFunctionForPDBFunc(*pdb_func,
*sc.comp_unit);
}
if (sc.function && (resolve_scope & eSymbolContextBlock)) {
Block &block = sc.function->GetBlock(true);
sc.block = block.FindBlockByID(sc.function->GetID());
}
}
sc_list.Append(sc);
}
} else if (has_line_table) {
          // We can parse the line table for the compile unit, but there is no
          // query to resolve a function or block, so append `sc` to the list
          // anyway.
sc_list.Append(sc);
}
} else {
        // No query for a line entry, function or block, but we have a valid
        // compile unit, so append `sc` to the list.
sc_list.Append(sc);
}
}
}
return sc_list.GetSize() - old_size;
}
std::string SymbolFilePDB::GetMangledForPDBData(const PDBSymbolData &pdb_data) {
  // Cache public names first.
if (m_public_names.empty())
if (auto result_up =
m_global_scope_up->findAllChildren(PDB_SymType::PublicSymbol))
while (auto symbol_up = result_up->getNext())
if (auto addr = symbol_up->getRawSymbol().getVirtualAddress())
m_public_names[addr] = symbol_up->getRawSymbol().getName();
// Look up the name in the cache
return m_public_names.lookup(pdb_data.getVirtualAddress());
}
VariableSP SymbolFilePDB::ParseVariableForPDBData(
const lldb_private::SymbolContext &sc,
const llvm::pdb::PDBSymbolData &pdb_data) {
VariableSP var_sp;
uint32_t var_uid = pdb_data.getSymIndexId();
auto result = m_variables.find(var_uid);
if (result != m_variables.end())
return result->second;
ValueType scope = eValueTypeInvalid;
bool is_static_member = false;
bool is_external = false;
bool is_artificial = false;
switch (pdb_data.getDataKind()) {
case PDB_DataKind::Global:
scope = eValueTypeVariableGlobal;
is_external = true;
break;
case PDB_DataKind::Local:
scope = eValueTypeVariableLocal;
break;
case PDB_DataKind::FileStatic:
scope = eValueTypeVariableStatic;
break;
case PDB_DataKind::StaticMember:
is_static_member = true;
scope = eValueTypeVariableStatic;
break;
case PDB_DataKind::Member:
scope = eValueTypeVariableStatic;
break;
case PDB_DataKind::Param:
scope = eValueTypeVariableArgument;
break;
case PDB_DataKind::Constant:
scope = eValueTypeConstResult;
break;
default:
break;
}
switch (pdb_data.getLocationType()) {
case PDB_LocType::TLS:
scope = eValueTypeVariableThreadLocal;
break;
case PDB_LocType::RegRel: {
// It is a `this` pointer.
if (pdb_data.getDataKind() == PDB_DataKind::ObjectPtr) {
scope = eValueTypeVariableArgument;
is_artificial = true;
}
} break;
default:
break;
}
Declaration decl;
if (!is_artificial && !pdb_data.isCompilerGenerated()) {
if (auto lines = pdb_data.getLineNumbers()) {
if (auto first_line = lines->getNext()) {
uint32_t src_file_id = first_line->getSourceFileId();
auto src_file = m_session_up->getSourceFileById(src_file_id);
if (src_file) {
FileSpec spec(src_file->getFileName());
decl.SetFile(spec);
decl.SetColumn(first_line->getColumnNumber());
decl.SetLine(first_line->getLineNumber());
}
}
}
}
Variable::RangeList ranges;
SymbolContextScope *context_scope = sc.comp_unit;
if (scope == eValueTypeVariableLocal || scope == eValueTypeVariableArgument) {
if (sc.function) {
Block &function_block = sc.function->GetBlock(true);
Block *block =
function_block.FindBlockByID(pdb_data.getLexicalParentId());
if (!block)
block = &function_block;
context_scope = block;
for (size_t i = 0, num_ranges = block->GetNumRanges(); i < num_ranges;
++i) {
AddressRange range;
if (!block->GetRangeAtIndex(i, range))
continue;
ranges.Append(range.GetBaseAddress().GetFileAddress(),
range.GetByteSize());
}
}
}
SymbolFileTypeSP type_sp =
std::make_shared<SymbolFileType>(*this, pdb_data.getTypeId());
auto var_name = pdb_data.getName();
auto mangled = GetMangledForPDBData(pdb_data);
auto mangled_cstr = mangled.empty() ? nullptr : mangled.c_str();
bool is_constant;
DWARFExpression location = ConvertPDBLocationToDWARFExpression(
GetObjectFile()->GetModule(), pdb_data, ranges, is_constant);
var_sp = std::make_shared<Variable>(
var_uid, var_name.c_str(), mangled_cstr, type_sp, scope, context_scope,
ranges, &decl, location, is_external, is_artificial, is_static_member);
var_sp->SetLocationIsConstantValueData(is_constant);
m_variables.insert(std::make_pair(var_uid, var_sp));
return var_sp;
}
size_t
SymbolFilePDB::ParseVariables(const lldb_private::SymbolContext &sc,
const llvm::pdb::PDBSymbol &pdb_symbol,
lldb_private::VariableList *variable_list) {
size_t num_added = 0;
if (auto pdb_data = llvm::dyn_cast<PDBSymbolData>(&pdb_symbol)) {
VariableListSP local_variable_list_sp;
auto result = m_variables.find(pdb_data->getSymIndexId());
if (result != m_variables.end()) {
if (variable_list)
variable_list->AddVariableIfUnique(result->second);
} else {
      // Prepare the right VariableList for this variable.
if (auto lexical_parent = pdb_data->getLexicalParent()) {
switch (lexical_parent->getSymTag()) {
case PDB_SymType::Exe:
assert(sc.comp_unit);
LLVM_FALLTHROUGH;
case PDB_SymType::Compiland: {
if (sc.comp_unit) {
local_variable_list_sp = sc.comp_unit->GetVariableList(false);
if (!local_variable_list_sp) {
local_variable_list_sp = std::make_shared<VariableList>();
sc.comp_unit->SetVariableList(local_variable_list_sp);
}
}
} break;
case PDB_SymType::Block:
case PDB_SymType::Function: {
if (sc.function) {
Block *block = sc.function->GetBlock(true).FindBlockByID(
lexical_parent->getSymIndexId());
if (block) {
local_variable_list_sp = block->GetBlockVariableList(false);
if (!local_variable_list_sp) {
local_variable_list_sp = std::make_shared<VariableList>();
block->SetVariableList(local_variable_list_sp);
}
}
}
} break;
default:
break;
}
}
if (local_variable_list_sp) {
if (auto var_sp = ParseVariableForPDBData(sc, *pdb_data)) {
local_variable_list_sp->AddVariableIfUnique(var_sp);
if (variable_list)
variable_list->AddVariableIfUnique(var_sp);
++num_added;
PDBASTParser *ast = GetPDBAstParser();
if (ast)
ast->GetDeclForSymbol(*pdb_data);
}
}
}
}
if (auto results = pdb_symbol.findAllChildren()) {
while (auto result = results->getNext())
num_added += ParseVariables(sc, *result, variable_list);
}
return num_added;
}
uint32_t SymbolFilePDB::FindGlobalVariables(
lldb_private::ConstString name,
const lldb_private::CompilerDeclContext *parent_decl_ctx,
uint32_t max_matches, lldb_private::VariableList &variables) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
if (!DeclContextMatchesThisSymbolFile(parent_decl_ctx))
return 0;
if (name.IsEmpty())
return 0;
auto results = m_global_scope_up->findAllChildren<PDBSymbolData>();
if (!results)
return 0;
uint32_t matches = 0;
size_t old_size = variables.GetSize();
while (auto result = results->getNext()) {
auto pdb_data = llvm::dyn_cast<PDBSymbolData>(result.get());
if (max_matches > 0 && matches >= max_matches)
break;
SymbolContext sc;
sc.module_sp = m_objfile_sp->GetModule();
lldbassert(sc.module_sp.get());
if (!name.GetStringRef().equals(
MSVCUndecoratedNameParser::DropScope(pdb_data->getName())))
continue;
sc.comp_unit = ParseCompileUnitForUID(GetCompilandId(*pdb_data)).get();
// FIXME: We are not able to determine the compile unit.
if (sc.comp_unit == nullptr)
continue;
if (parent_decl_ctx && GetDeclContextContainingUID(
result->getSymIndexId()) != *parent_decl_ctx)
continue;
ParseVariables(sc, *pdb_data, &variables);
matches = variables.GetSize() - old_size;
}
return matches;
}
uint32_t
SymbolFilePDB::FindGlobalVariables(const lldb_private::RegularExpression ®ex,
uint32_t max_matches,
lldb_private::VariableList &variables) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
if (!regex.IsValid())
return 0;
auto results = m_global_scope_up->findAllChildren<PDBSymbolData>();
if (!results)
return 0;
uint32_t matches = 0;
size_t old_size = variables.GetSize();
while (auto pdb_data = results->getNext()) {
if (max_matches > 0 && matches >= max_matches)
break;
auto var_name = pdb_data->getName();
if (var_name.empty())
continue;
if (!regex.Execute(var_name))
continue;
SymbolContext sc;
sc.module_sp = m_objfile_sp->GetModule();
lldbassert(sc.module_sp.get());
sc.comp_unit = ParseCompileUnitForUID(GetCompilandId(*pdb_data)).get();
// FIXME: We are not able to determine the compile unit.
if (sc.comp_unit == nullptr)
continue;
ParseVariables(sc, *pdb_data, &variables);
matches = variables.GetSize() - old_size;
}
return matches;
}
bool SymbolFilePDB::ResolveFunction(const llvm::pdb::PDBSymbolFunc &pdb_func,
bool include_inlines,
lldb_private::SymbolContextList &sc_list) {
lldb_private::SymbolContext sc;
sc.comp_unit = ParseCompileUnitForUID(pdb_func.getCompilandId()).get();
if (!sc.comp_unit)
return false;
sc.module_sp = sc.comp_unit->GetModule();
sc.function = ParseCompileUnitFunctionForPDBFunc(pdb_func, *sc.comp_unit);
if (!sc.function)
return false;
sc_list.Append(sc);
return true;
}
bool SymbolFilePDB::ResolveFunction(uint32_t uid, bool include_inlines,
lldb_private::SymbolContextList &sc_list) {
auto pdb_func_up = m_session_up->getConcreteSymbolById<PDBSymbolFunc>(uid);
  if (!pdb_func_up || (!include_inlines && pdb_func_up->hasInlineAttribute()))
return false;
return ResolveFunction(*pdb_func_up, include_inlines, sc_list);
}
void SymbolFilePDB::CacheFunctionNames() {
if (!m_func_full_names.IsEmpty())
return;
std::map<uint64_t, uint32_t> addr_ids;
if (auto results_up = m_global_scope_up->findAllChildren<PDBSymbolFunc>()) {
while (auto pdb_func_up = results_up->getNext()) {
if (pdb_func_up->isCompilerGenerated())
continue;
auto name = pdb_func_up->getName();
auto demangled_name = pdb_func_up->getUndecoratedName();
if (name.empty() && demangled_name.empty())
continue;
auto uid = pdb_func_up->getSymIndexId();
if (!demangled_name.empty() && pdb_func_up->getVirtualAddress())
addr_ids.insert(std::make_pair(pdb_func_up->getVirtualAddress(), uid));
if (auto parent = pdb_func_up->getClassParent()) {
        // PDBs have symbols for class/struct methods and for static methods in
        // enum classes. We won't bother to check whether the parent is a UDT or
        // an enum here.
m_func_method_names.Append(ConstString(name), uid);
        // To search for a method name such as NS::Class::MemberFunc, LLDB
        // searches by its base name, i.e. MemberFunc, by default. Since
        // PDBSymbolFunc does not carry this information, we extract base names
        // and cache them ourselves.
llvm::StringRef basename = MSVCUndecoratedNameParser::DropScope(name);
if (!basename.empty())
m_func_base_names.Append(ConstString(basename), uid);
else {
m_func_base_names.Append(ConstString(name), uid);
}
if (!demangled_name.empty())
m_func_full_names.Append(ConstString(demangled_name), uid);
} else {
        // Handle non-method symbols.
        // The function name might contain a namespace or its lexical scope.
llvm::StringRef basename = MSVCUndecoratedNameParser::DropScope(name);
if (!basename.empty())
m_func_base_names.Append(ConstString(basename), uid);
else
m_func_base_names.Append(ConstString(name), uid);
if (name == "main") {
m_func_full_names.Append(ConstString(name), uid);
if (!demangled_name.empty() && name != demangled_name) {
m_func_full_names.Append(ConstString(demangled_name), uid);
m_func_base_names.Append(ConstString(demangled_name), uid);
}
} else if (!demangled_name.empty()) {
m_func_full_names.Append(ConstString(demangled_name), uid);
} else {
m_func_full_names.Append(ConstString(name), uid);
}
}
}
}
if (auto results_up =
m_global_scope_up->findAllChildren<PDBSymbolPublicSymbol>()) {
while (auto pub_sym_up = results_up->getNext()) {
if (!pub_sym_up->isFunction())
continue;
auto name = pub_sym_up->getName();
if (name.empty())
continue;
if (CPlusPlusLanguage::IsCPPMangledName(name.c_str())) {
auto vm_addr = pub_sym_up->getVirtualAddress();
        // A PDB public symbol holds the mangled name of its associated function.
if (vm_addr && addr_ids.find(vm_addr) != addr_ids.end()) {
// Cache mangled name.
m_func_full_names.Append(ConstString(name), addr_ids[vm_addr]);
}
}
}
}
  // Sort them so that value searching works properly.
m_func_full_names.Sort();
m_func_full_names.SizeToFit();
m_func_method_names.Sort();
m_func_method_names.SizeToFit();
m_func_base_names.Sort();
m_func_base_names.SizeToFit();
}
uint32_t SymbolFilePDB::FindFunctions(
lldb_private::ConstString name,
const lldb_private::CompilerDeclContext *parent_decl_ctx,
FunctionNameType name_type_mask, bool include_inlines, bool append,
lldb_private::SymbolContextList &sc_list) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
if (!append)
sc_list.Clear();
lldbassert((name_type_mask & eFunctionNameTypeAuto) == 0);
if (name_type_mask == eFunctionNameTypeNone)
return 0;
if (!DeclContextMatchesThisSymbolFile(parent_decl_ctx))
return 0;
if (name.IsEmpty())
return 0;
auto old_size = sc_list.GetSize();
if (name_type_mask & eFunctionNameTypeFull ||
name_type_mask & eFunctionNameTypeBase ||
name_type_mask & eFunctionNameTypeMethod) {
CacheFunctionNames();
std::set<uint32_t> resolved_ids;
auto ResolveFn = [this, &name, parent_decl_ctx, include_inlines, &sc_list,
&resolved_ids](UniqueCStringMap<uint32_t> &Names) {
std::vector<uint32_t> ids;
if (!Names.GetValues(name, ids))
return;
for (uint32_t id : ids) {
if (resolved_ids.find(id) != resolved_ids.end())
continue;
if (parent_decl_ctx &&
GetDeclContextContainingUID(id) != *parent_decl_ctx)
continue;
if (ResolveFunction(id, include_inlines, sc_list))
resolved_ids.insert(id);
}
};
if (name_type_mask & eFunctionNameTypeFull) {
ResolveFn(m_func_full_names);
ResolveFn(m_func_base_names);
ResolveFn(m_func_method_names);
}
if (name_type_mask & eFunctionNameTypeBase) {
ResolveFn(m_func_base_names);
}
if (name_type_mask & eFunctionNameTypeMethod) {
ResolveFn(m_func_method_names);
}
}
return sc_list.GetSize() - old_size;
}
uint32_t
SymbolFilePDB::FindFunctions(const lldb_private::RegularExpression ®ex,
bool include_inlines, bool append,
lldb_private::SymbolContextList &sc_list) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
if (!append)
sc_list.Clear();
if (!regex.IsValid())
return 0;
auto old_size = sc_list.GetSize();
CacheFunctionNames();
std::set<uint32_t> resolved_ids;
auto ResolveFn = [®ex, include_inlines, &sc_list, &resolved_ids,
this](UniqueCStringMap<uint32_t> &Names) {
std::vector<uint32_t> ids;
if (Names.GetValues(regex, ids)) {
for (auto id : ids) {
if (resolved_ids.find(id) == resolved_ids.end())
if (ResolveFunction(id, include_inlines, sc_list))
resolved_ids.insert(id);
}
}
};
ResolveFn(m_func_full_names);
ResolveFn(m_func_base_names);
return sc_list.GetSize() - old_size;
}
void SymbolFilePDB::GetMangledNamesForFunction(
const std::string &scope_qualified_name,
std::vector<lldb_private::ConstString> &mangled_names) {}
void SymbolFilePDB::AddSymbols(lldb_private::Symtab &symtab) {
std::set<lldb::addr_t> sym_addresses;
for (size_t i = 0; i < symtab.GetNumSymbols(); i++)
sym_addresses.insert(symtab.SymbolAtIndex(i)->GetFileAddress());
auto results = m_global_scope_up->findAllChildren<PDBSymbolPublicSymbol>();
if (!results)
return;
auto section_list = m_objfile_sp->GetSectionList();
if (!section_list)
return;
while (auto pub_symbol = results->getNext()) {
auto section_id = pub_symbol->getAddressSection();
auto section = section_list->FindSectionByID(section_id);
if (!section)
continue;
auto offset = pub_symbol->getAddressOffset();
auto file_addr = section->GetFileAddress() + offset;
if (sym_addresses.find(file_addr) != sym_addresses.end())
continue;
sym_addresses.insert(file_addr);
auto size = pub_symbol->getLength();
symtab.AddSymbol(
Symbol(pub_symbol->getSymIndexId(), // symID
pub_symbol->getName().c_str(), // name
true, // name_is_mangled
pub_symbol->isCode() ? eSymbolTypeCode : eSymbolTypeData, // type
true, // external
false, // is_debug
false, // is_trampoline
false, // is_artificial
section, // section_sp
offset, // value
size, // size
size != 0, // size_is_valid
false, // contains_linker_annotations
0 // flags
));
}
symtab.CalculateSymbolSizes();
symtab.Finalize();
}
uint32_t SymbolFilePDB::FindTypes(
lldb_private::ConstString name,
const lldb_private::CompilerDeclContext *parent_decl_ctx, bool append,
uint32_t max_matches,
llvm::DenseSet<lldb_private::SymbolFile *> &searched_symbol_files,
lldb_private::TypeMap &types) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
if (!append)
types.Clear();
if (!name)
return 0;
if (!DeclContextMatchesThisSymbolFile(parent_decl_ctx))
return 0;
searched_symbol_files.clear();
searched_symbol_files.insert(this);
  // There is an assumption that 'name' is not a regex.
FindTypesByName(name.GetStringRef(), parent_decl_ctx, max_matches, types);
return types.GetSize();
}
void SymbolFilePDB::DumpClangAST(Stream &s) {
auto type_system_or_err =
GetTypeSystemForLanguage(lldb::eLanguageTypeC_plus_plus);
if (auto err = type_system_or_err.takeError()) {
LLDB_LOG_ERROR(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_SYMBOLS),
std::move(err), "Unable to dump ClangAST");
return;
}
auto *clang_type_system =
llvm::dyn_cast_or_null<ClangASTContext>(&type_system_or_err.get());
if (!clang_type_system)
return;
clang_type_system->Dump(s);
}
void SymbolFilePDB::FindTypesByRegex(
const lldb_private::RegularExpression ®ex, uint32_t max_matches,
lldb_private::TypeMap &types) {
// When searching by regex, we need to go out of our way to limit the search
// space as much as possible since this searches EVERYTHING in the PDB,
  // manually doing regex comparisons. The PDB library isn't optimized for regex
// searches or searches across multiple symbol types at the same time, so the
// best we can do is to search enums, then typedefs, then classes one by one,
// and do a regex comparison against each of them.
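  // For example (pattern purely illustrative), a caller searching for
  // "std::.*" makes this routine walk every Enum, then every Typedef, then
  // every UDT in the global scope, regex-matching each name and stopping once
  // `max_matches` results have been collected.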
PDB_SymType tags_to_search[] = {PDB_SymType::Enum, PDB_SymType::Typedef,
PDB_SymType::UDT};
std::unique_ptr<IPDBEnumSymbols> results;
uint32_t matches = 0;
for (auto tag : tags_to_search) {
results = m_global_scope_up->findAllChildren(tag);
if (!results)
continue;
while (auto result = results->getNext()) {
if (max_matches > 0 && matches >= max_matches)
break;
std::string type_name;
if (auto enum_type = llvm::dyn_cast<PDBSymbolTypeEnum>(result.get()))
type_name = enum_type->getName();
else if (auto typedef_type =
llvm::dyn_cast<PDBSymbolTypeTypedef>(result.get()))
type_name = typedef_type->getName();
else if (auto class_type = llvm::dyn_cast<PDBSymbolTypeUDT>(result.get()))
type_name = class_type->getName();
else {
// We're looking only for types that have names. Skip symbols, as well
// as unnamed types such as arrays, pointers, etc.
continue;
}
if (!regex.Execute(type_name))
continue;
// This should cause the type to get cached and stored in the `m_types`
// lookup.
if (!ResolveTypeUID(result->getSymIndexId()))
continue;
auto iter = m_types.find(result->getSymIndexId());
if (iter == m_types.end())
continue;
types.Insert(iter->second);
++matches;
}
}
}
void SymbolFilePDB::FindTypesByName(
llvm::StringRef name,
const lldb_private::CompilerDeclContext *parent_decl_ctx,
uint32_t max_matches, lldb_private::TypeMap &types) {
std::unique_ptr<IPDBEnumSymbols> results;
if (name.empty())
return;
results = m_global_scope_up->findAllChildren(PDB_SymType::None);
if (!results)
return;
uint32_t matches = 0;
while (auto result = results->getNext()) {
if (max_matches > 0 && matches >= max_matches)
break;
if (MSVCUndecoratedNameParser::DropScope(
result->getRawSymbol().getName()) != name)
continue;
switch (result->getSymTag()) {
case PDB_SymType::Enum:
case PDB_SymType::UDT:
case PDB_SymType::Typedef:
break;
default:
// We're looking only for types that have names. Skip symbols, as well
// as unnamed types such as arrays, pointers, etc.
continue;
}
// This should cause the type to get cached and stored in the `m_types`
// lookup.
if (!ResolveTypeUID(result->getSymIndexId()))
continue;
if (parent_decl_ctx && GetDeclContextContainingUID(
result->getSymIndexId()) != *parent_decl_ctx)
continue;
auto iter = m_types.find(result->getSymIndexId());
if (iter == m_types.end())
continue;
types.Insert(iter->second);
++matches;
}
}
size_t SymbolFilePDB::FindTypes(llvm::ArrayRef<CompilerContext> pattern,
LanguageSet languages, bool append,
lldb_private::TypeMap &types) {
if (!append)
types.Clear();
return 0;
}
void SymbolFilePDB::GetTypesForPDBSymbol(const llvm::pdb::PDBSymbol &pdb_symbol,
uint32_t type_mask,
TypeCollection &type_collection) {
bool can_parse = false;
switch (pdb_symbol.getSymTag()) {
case PDB_SymType::ArrayType:
can_parse = ((type_mask & eTypeClassArray) != 0);
break;
case PDB_SymType::BuiltinType:
can_parse = ((type_mask & eTypeClassBuiltin) != 0);
break;
case PDB_SymType::Enum:
can_parse = ((type_mask & eTypeClassEnumeration) != 0);
break;
case PDB_SymType::Function:
case PDB_SymType::FunctionSig:
can_parse = ((type_mask & eTypeClassFunction) != 0);
break;
case PDB_SymType::PointerType:
can_parse = ((type_mask & (eTypeClassPointer | eTypeClassBlockPointer |
eTypeClassMemberPointer)) != 0);
break;
case PDB_SymType::Typedef:
can_parse = ((type_mask & eTypeClassTypedef) != 0);
break;
case PDB_SymType::UDT: {
auto *udt = llvm::dyn_cast<PDBSymbolTypeUDT>(&pdb_symbol);
assert(udt);
can_parse = (udt->getUdtKind() != PDB_UdtType::Interface &&
((type_mask & (eTypeClassClass | eTypeClassStruct |
eTypeClassUnion)) != 0));
} break;
default:
break;
}
if (can_parse) {
if (auto *type = ResolveTypeUID(pdb_symbol.getSymIndexId())) {
auto result =
std::find(type_collection.begin(), type_collection.end(), type);
if (result == type_collection.end())
type_collection.push_back(type);
}
}
auto results_up = pdb_symbol.findAllChildren();
while (auto symbol_up = results_up->getNext())
GetTypesForPDBSymbol(*symbol_up, type_mask, type_collection);
}
size_t SymbolFilePDB::GetTypes(lldb_private::SymbolContextScope *sc_scope,
TypeClass type_mask,
lldb_private::TypeList &type_list) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
TypeCollection type_collection;
uint32_t old_size = type_list.GetSize();
CompileUnit *cu =
sc_scope ? sc_scope->CalculateSymbolContextCompileUnit() : nullptr;
if (cu) {
auto compiland_up = GetPDBCompilandByUID(cu->GetID());
if (!compiland_up)
return 0;
GetTypesForPDBSymbol(*compiland_up, type_mask, type_collection);
} else {
for (uint32_t cu_idx = 0; cu_idx < GetNumCompileUnits(); ++cu_idx) {
auto cu_sp = ParseCompileUnitAtIndex(cu_idx);
if (cu_sp) {
if (auto compiland_up = GetPDBCompilandByUID(cu_sp->GetID()))
GetTypesForPDBSymbol(*compiland_up, type_mask, type_collection);
}
}
}
for (auto type : type_collection) {
type->GetForwardCompilerType();
type_list.Insert(type->shared_from_this());
}
return type_list.GetSize() - old_size;
}
llvm::Expected<lldb_private::TypeSystem &>
SymbolFilePDB::GetTypeSystemForLanguage(lldb::LanguageType language) {
auto type_system_or_err =
m_objfile_sp->GetModule()->GetTypeSystemForLanguage(language);
if (type_system_or_err) {
type_system_or_err->SetSymbolFile(this);
}
return type_system_or_err;
}
PDBASTParser *SymbolFilePDB::GetPDBAstParser() {
auto type_system_or_err =
GetTypeSystemForLanguage(lldb::eLanguageTypeC_plus_plus);
if (auto err = type_system_or_err.takeError()) {
LLDB_LOG_ERROR(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_SYMBOLS),
std::move(err), "Unable to get PDB AST parser");
return nullptr;
}
auto *clang_type_system =
llvm::dyn_cast_or_null<ClangASTContext>(&type_system_or_err.get());
if (!clang_type_system)
return nullptr;
return clang_type_system->GetPDBParser();
}
lldb_private::CompilerDeclContext SymbolFilePDB::FindNamespace(
lldb_private::ConstString name,
const lldb_private::CompilerDeclContext *parent_decl_ctx) {
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
auto type_system_or_err =
GetTypeSystemForLanguage(lldb::eLanguageTypeC_plus_plus);
if (auto err = type_system_or_err.takeError()) {
LLDB_LOG_ERROR(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_SYMBOLS),
std::move(err), "Unable to find namespace {}",
name.AsCString());
return CompilerDeclContext();
}
auto *clang_type_system =
llvm::dyn_cast_or_null<ClangASTContext>(&type_system_or_err.get());
if (!clang_type_system)
return CompilerDeclContext();
PDBASTParser *pdb = clang_type_system->GetPDBParser();
if (!pdb)
return CompilerDeclContext();
clang::DeclContext *decl_context = nullptr;
if (parent_decl_ctx)
decl_context = static_cast<clang::DeclContext *>(
parent_decl_ctx->GetOpaqueDeclContext());
auto namespace_decl =
pdb->FindNamespaceDecl(decl_context, name.GetStringRef());
if (!namespace_decl)
return CompilerDeclContext();
return CompilerDeclContext(clang_type_system,
static_cast<clang::DeclContext *>(namespace_decl));
}
lldb_private::ConstString SymbolFilePDB::GetPluginName() {
static ConstString g_name("pdb");
return g_name;
}
uint32_t SymbolFilePDB::GetPluginVersion() { return 1; }
IPDBSession &SymbolFilePDB::GetPDBSession() { return *m_session_up; }
const IPDBSession &SymbolFilePDB::GetPDBSession() const {
return *m_session_up;
}
lldb::CompUnitSP SymbolFilePDB::ParseCompileUnitForUID(uint32_t id,
uint32_t index) {
auto found_cu = m_comp_units.find(id);
if (found_cu != m_comp_units.end())
return found_cu->second;
auto compiland_up = GetPDBCompilandByUID(id);
if (!compiland_up)
return CompUnitSP();
lldb::LanguageType lang;
auto details = compiland_up->findOneChild<PDBSymbolCompilandDetails>();
if (!details)
lang = lldb::eLanguageTypeC_plus_plus;
else
lang = TranslateLanguage(details->getLanguage());
if (lang == lldb::LanguageType::eLanguageTypeUnknown)
return CompUnitSP();
std::string path = compiland_up->getSourceFileFullPath();
if (path.empty())
return CompUnitSP();
// Don't support optimized code for now, DebugInfoPDB does not return this
// information.
LazyBool optimized = eLazyBoolNo;
auto cu_sp = std::make_shared<CompileUnit>(m_objfile_sp->GetModule(), nullptr,
path.c_str(), id, lang, optimized);
if (!cu_sp)
return CompUnitSP();
m_comp_units.insert(std::make_pair(id, cu_sp));
if (index == UINT32_MAX)
GetCompileUnitIndex(*compiland_up, index);
lldbassert(index != UINT32_MAX);
SetCompileUnitAtIndex(index, cu_sp);
return cu_sp;
}
bool SymbolFilePDB::ParseCompileUnitLineTable(CompileUnit &comp_unit,
uint32_t match_line) {
auto compiland_up = GetPDBCompilandByUID(comp_unit.GetID());
if (!compiland_up)
return false;
// LineEntry needs the *index* of the file into the list of support files
// returned by ParseCompileUnitSupportFiles. But the underlying SDK gives us
  // a globally unique identifier in the namespace of the PDB. So, we have
// to do a mapping so that we can hand out indices.
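  // For instance (ids are illustrative), a header whose PDB-unique id is 0x51
  // may be handed out as support-file index 2; LineEntry only ever sees the
  // index.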
llvm::DenseMap<uint32_t, uint32_t> index_map;
BuildSupportFileIdToSupportFileIndexMap(*compiland_up, index_map);
auto line_table = std::make_unique<LineTable>(&comp_unit);
// Find contributions to `compiland` from all source and header files.
std::string path = comp_unit.GetPath();
auto files = m_session_up->getSourceFilesForCompiland(*compiland_up);
if (!files)
return false;
// For each source and header file, create a LineSequence for contributions
// to the compiland from that file, and add the sequence.
while (auto file = files->getNext()) {
std::unique_ptr<LineSequence> sequence(
line_table->CreateLineSequenceContainer());
auto lines = m_session_up->findLineNumbers(*compiland_up, *file);
if (!lines)
continue;
int entry_count = lines->getChildCount();
uint64_t prev_addr;
uint32_t prev_length;
uint32_t prev_line;
uint32_t prev_source_idx;
for (int i = 0; i < entry_count; ++i) {
auto line = lines->getChildAtIndex(i);
uint64_t lno = line->getLineNumber();
uint64_t addr = line->getVirtualAddress();
uint32_t length = line->getLength();
uint32_t source_id = line->getSourceFileId();
uint32_t col = line->getColumnNumber();
uint32_t source_idx = index_map[source_id];
// There was a gap between the current entry and the previous entry if
// the addresses don't perfectly line up.
bool is_gap = (i > 0) && (prev_addr + prev_length < addr);
// Before inserting the current entry, insert a terminal entry at the end
// of the previous entry's address range if the current entry resulted in
// a gap from the previous entry.
if (is_gap && ShouldAddLine(match_line, prev_line, prev_length)) {
line_table->AppendLineEntryToSequence(
sequence.get(), prev_addr + prev_length, prev_line, 0,
prev_source_idx, false, false, false, false, true);
line_table->InsertSequence(sequence.release());
sequence.reset(line_table->CreateLineSequenceContainer());
}
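      // Illustration (addresses made up): if the previous entry covered
      // [0x1000, 0x1010) and this one starts at 0x1020, the sequence is
      // terminated at 0x1010 above and a fresh sequence begins at 0x1020.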
if (ShouldAddLine(match_line, lno, length)) {
bool is_statement = line->isStatement();
bool is_prologue = false;
bool is_epilogue = false;
auto func =
m_session_up->findSymbolByAddress(addr, PDB_SymType::Function);
if (func) {
auto prologue = func->findOneChild<PDBSymbolFuncDebugStart>();
if (prologue)
is_prologue = (addr == prologue->getVirtualAddress());
auto epilogue = func->findOneChild<PDBSymbolFuncDebugEnd>();
if (epilogue)
is_epilogue = (addr == epilogue->getVirtualAddress());
}
line_table->AppendLineEntryToSequence(sequence.get(), addr, lno, col,
source_idx, is_statement, false,
is_prologue, is_epilogue, false);
}
prev_addr = addr;
prev_length = length;
prev_line = lno;
prev_source_idx = source_idx;
}
if (entry_count > 0 && ShouldAddLine(match_line, prev_line, prev_length)) {
// The end is always a terminal entry, so insert it regardless.
line_table->AppendLineEntryToSequence(
sequence.get(), prev_addr + prev_length, prev_line, 0,
prev_source_idx, false, false, false, false, true);
}
line_table->InsertSequence(sequence.release());
}
if (line_table->GetSize()) {
comp_unit.SetLineTable(line_table.release());
return true;
}
return false;
}
void SymbolFilePDB::BuildSupportFileIdToSupportFileIndexMap(
const PDBSymbolCompiland &compiland,
llvm::DenseMap<uint32_t, uint32_t> &index_map) const {
// This is a hack, but we need to convert the source id into an index into
// the support files array. We don't want to do path comparisons to avoid
// basename / full path issues that may or may not even be a problem, so we
// use the globally unique source file identifiers. Ideally we could use the
// global identifiers everywhere, but LineEntry currently assumes indices.
auto source_files = m_session_up->getSourceFilesForCompiland(compiland);
if (!source_files)
return;
  // LLDB uses DWARF-like file numbering (one-based).
int index = 1;
while (auto file = source_files->getNext()) {
uint32_t source_id = file->getUniqueId();
index_map[source_id] = index++;
}
}
lldb::CompUnitSP SymbolFilePDB::GetCompileUnitContainsAddress(
const lldb_private::Address &so_addr) {
lldb::addr_t file_vm_addr = so_addr.GetFileAddress();
if (file_vm_addr == LLDB_INVALID_ADDRESS || file_vm_addr == 0)
return nullptr;
  // If the address lies inside a PDB function, its line numbers are the surest bet.
if (auto lines =
m_session_up->findLineNumbersByAddress(file_vm_addr, /*Length=*/1)) {
if (auto first_line = lines->getNext())
return ParseCompileUnitForUID(first_line->getCompilandId());
}
// Otherwise we resort to section contributions.
if (auto sec_contribs = m_session_up->getSectionContribs()) {
while (auto section = sec_contribs->getNext()) {
auto va = section->getVirtualAddress();
if (file_vm_addr >= va && file_vm_addr < va + section->getLength())
return ParseCompileUnitForUID(section->getCompilandId());
}
}
return nullptr;
}
Mangled
SymbolFilePDB::GetMangledForPDBFunc(const llvm::pdb::PDBSymbolFunc &pdb_func) {
Mangled mangled;
auto func_name = pdb_func.getName();
auto func_undecorated_name = pdb_func.getUndecoratedName();
std::string func_decorated_name;
  // Seek the non-static function's decorated name, if any, from the public
  // symbols. Static functions don't have undecorated names and aren't exposed
  // in the public symbols either.
if (!func_undecorated_name.empty()) {
auto result_up = m_global_scope_up->findChildren(
PDB_SymType::PublicSymbol, func_undecorated_name,
PDB_NameSearchFlags::NS_UndecoratedName);
if (result_up) {
while (auto symbol_up = result_up->getNext()) {
// For a public symbol, it is unique.
lldbassert(result_up->getChildCount() == 1);
if (auto *pdb_public_sym =
llvm::dyn_cast_or_null<PDBSymbolPublicSymbol>(
symbol_up.get())) {
if (pdb_public_sym->isFunction()) {
func_decorated_name = pdb_public_sym->getName();
break;
}
}
}
}
}
if (!func_decorated_name.empty()) {
mangled.SetMangledName(ConstString(func_decorated_name));
    // For MSVC, the format of a C function's decorated name depends on its
    // calling convention. Unfortunately none of these formats is recognized by
    // current LLDB. For example, `_purecall` is a __cdecl C function. From the
    // PDB, `__purecall` is retrieved as both its decorated and undecorated name
    // (using the PDBSymbolFunc::getUndecoratedName method). However, the
    // `__purecall` string is not treated as mangled by LLDB (it has neither a
    // `?` nor a `_Z` prefix). The Mangled::GetDemangledName method will fail
    // internally and cache an empty string as its undecorated name. So we face
    // a contradiction here for the same symbol:
    //   non-empty undecorated name from PDB
    //   empty undecorated name from LLDB
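    // Concretely, for the `__purecall` example above, PDB reports a non-empty
    // undecorated name while Mangled caches an empty one, which is why the
    // demangled name is patched back in below.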
if (!func_undecorated_name.empty() &&
mangled.GetDemangledName(mangled.GuessLanguage()).IsEmpty())
mangled.SetDemangledName(ConstString(func_undecorated_name));
    // LLDB uses several flags to control how a C++ decorated name is
    // undecorated for MSVC. See `safeUndecorateName` in the Mangled class. So
    // the yielded name could differ from what we retrieve from the PDB source
    // unless we also apply the same flags when getting the undecorated name
    // through the PDBSymbolFunc::getUndecoratedNameEx method.
if (!func_undecorated_name.empty() &&
mangled.GetDemangledName(mangled.GuessLanguage()) !=
ConstString(func_undecorated_name))
mangled.SetDemangledName(ConstString(func_undecorated_name));
} else if (!func_undecorated_name.empty()) {
mangled.SetDemangledName(ConstString(func_undecorated_name));
} else if (!func_name.empty())
mangled.SetValue(ConstString(func_name), false);
return mangled;
}
bool SymbolFilePDB::DeclContextMatchesThisSymbolFile(
const lldb_private::CompilerDeclContext *decl_ctx) {
if (decl_ctx == nullptr || !decl_ctx->IsValid())
return true;
TypeSystem *decl_ctx_type_system = decl_ctx->GetTypeSystem();
if (!decl_ctx_type_system)
return false;
auto type_system_or_err = GetTypeSystemForLanguage(
decl_ctx_type_system->GetMinimumLanguage(nullptr));
if (auto err = type_system_or_err.takeError()) {
LLDB_LOG_ERROR(
lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_SYMBOLS),
std::move(err),
"Unable to determine if DeclContext matches this symbol file");
return false;
}
if (decl_ctx_type_system == &type_system_or_err.get())
return true; // The type systems match, return true
return false;
}
uint32_t SymbolFilePDB::GetCompilandId(const llvm::pdb::PDBSymbolData &data) {
static const auto pred_upper = [](uint32_t lhs, SecContribInfo rhs) {
return lhs < rhs.Offset;
};
// Cache section contributions
if (m_sec_contribs.empty()) {
if (auto SecContribs = m_session_up->getSectionContribs()) {
while (auto SectionContrib = SecContribs->getNext()) {
auto comp_id = SectionContrib->getCompilandId();
if (!comp_id)
continue;
auto sec = SectionContrib->getAddressSection();
auto &sec_cs = m_sec_contribs[sec];
auto offset = SectionContrib->getAddressOffset();
auto it =
std::upper_bound(sec_cs.begin(), sec_cs.end(), offset, pred_upper);
auto size = SectionContrib->getLength();
sec_cs.insert(it, {offset, size, comp_id});
}
}
}
// Check by line number
if (auto Lines = data.getLineNumbers()) {
if (auto FirstLine = Lines->getNext())
return FirstLine->getCompilandId();
}
// Retrieve section + offset
uint32_t DataSection = data.getAddressSection();
uint32_t DataOffset = data.getAddressOffset();
if (DataSection == 0) {
if (auto RVA = data.getRelativeVirtualAddress())
m_session_up->addressForRVA(RVA, DataSection, DataOffset);
}
if (DataSection) {
// Search by section contributions
auto &sec_cs = m_sec_contribs[DataSection];
auto it =
std::upper_bound(sec_cs.begin(), sec_cs.end(), DataOffset, pred_upper);
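    // upper_bound finds the first contribution starting beyond DataOffset;
    // stepping back one entry (below) gives the contribution that could
    // contain the offset.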
if (it != sec_cs.begin()) {
--it;
if (DataOffset < it->Offset + it->Size)
return it->CompilandId;
}
} else {
// Search in lexical tree
auto LexParentId = data.getLexicalParentId();
while (auto LexParent = m_session_up->getSymbolById(LexParentId)) {
if (LexParent->getSymTag() == PDB_SymType::Exe)
break;
if (LexParent->getSymTag() == PDB_SymType::Compiland)
return LexParentId;
LexParentId = LexParent->getRawSymbol().getLexicalParentId();
}
}
return 0;
}
| 1 | 20,201 | We should make this an enum. | apple-swift-lldb | cpp |
@@ -15,7 +15,6 @@ namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk
private const string TestRuleId = "TST0001";
private const string TestFormatSpecifier = "testFormatSpecifier";
private const string TestAnalysisTarget = @"C:\dir\file";
- private static readonly string DisplayedTarget = TestAnalysisTarget.Replace('\\', '/');
private static readonly RuleDescriptor TestRule = new RuleDescriptor(
TestRuleId, | 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System.Collections.Generic;
using FluentAssertions;
using Xunit;
namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk
{
// These tests test the extension method Result.FormatForVisualStudio.
// But by providing various Region objects and ResultKind values, they
// also exercise Region.FormatForVisualStudio and ResultKind.FormatForVisualStudio.
public class FormatForVisualStudioTests
{
private const string TestRuleId = "TST0001";
private const string TestFormatSpecifier = "testFormatSpecifier";
private const string TestAnalysisTarget = @"C:\dir\file";
private static readonly string DisplayedTarget = TestAnalysisTarget.Replace('\\', '/');
private static readonly RuleDescriptor TestRule = new RuleDescriptor(
TestRuleId,
"ThisIsATest",
"short description",
"full description",
null, // options
new Dictionary<string, string>
{
[TestFormatSpecifier] = "First: {0}, Second: {1}"
},
null, // helpUri
null, // properties
null); // tags
private static readonly Region MultiLineTestRegion = new Region
{
StartLine = 2,
StartColumn = 4,
EndLine = 3,
EndColumn = 5
};
private static readonly Region SingleLineMultiColumnTestRegion = new Region
{
StartLine = 2,
StartColumn = 4,
EndLine = 2,
EndColumn = 5
};
private static readonly Region SingleLineSingleColumnTestRegion = new Region
{
StartLine = 2,
StartColumn = 4
};
private static readonly Region SingleLineNoColumnTestRegion = new Region
{
StartLine = 2
};
private static readonly Region MultiLineNoColumnTestRegion = new Region
{
StartLine = 2,
EndLine = 3
};
public static IEnumerable<object[]> ResultFormatForVisualStudioTestCases => new[]
{
// Test each ResultKind value.
new object[]
{
ResultKind.Error,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): error {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.ConfigurationError,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): error {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.InternalError,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): error {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.Warning,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): warning {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.NotApplicable,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): info {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.Note,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): info {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.Pass,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): info {TestRuleId}: First: 42, Second: 54"
},
new object[]
{
ResultKind.Unknown,
MultiLineTestRegion,
$"{DisplayedTarget}(2,4,3,5): info {TestRuleId}: First: 42, Second: 54"
},
// Test formatting of a single-line multi-column region (previous tests used a multi-line region).
new object[]
{
ResultKind.Error,
SingleLineMultiColumnTestRegion,
$"{DisplayedTarget}(2,4-5): error {TestRuleId}: First: 42, Second: 54"
},
// Test formatting of a single-line single-column region.
new object[]
{
ResultKind.Error,
SingleLineSingleColumnTestRegion,
$"{DisplayedTarget}(2,4): error {TestRuleId}: First: 42, Second: 54"
},
// Test formatting of a single-line region with no column specified.
new object[]
{
ResultKind.Error,
SingleLineNoColumnTestRegion,
$"{DisplayedTarget}(2): error {TestRuleId}: First: 42, Second: 54"
},
// Test formatting of a multi-line region with no columns specified.
new object[]
{
ResultKind.Error,
MultiLineNoColumnTestRegion,
$"{DisplayedTarget}(2-3): error {TestRuleId}: First: 42, Second: 54"
},
};
[Theory]
[MemberData(nameof(ResultFormatForVisualStudioTestCases))]
public void Result_FormatForVisualStudioTests(ResultKind kind, Region region, string expected)
{
Result result = MakeResultFromTestCase(kind, region);
string actual = result.FormatForVisualStudio(TestRule);
actual.Should().Be(expected);
}
private Result MakeResultFromTestCase(ResultKind kind, Region region)
{
return new Result
{
RuleId = TestRuleId,
Kind = kind,
Locations = new List<Location>
{
new Location
{
AnalysisTarget = new List<PhysicalLocationComponent>
{
new PhysicalLocationComponent
{
Uri = TestAnalysisTarget.CreateUriForJsonSerialization(),
Region = region
},
}
}
},
FormattedMessage = new FormattedMessage
{
SpecifierId = TestFormatSpecifier,
Arguments = new List<string>
{
"42",
"54"
}
}
};
}
}
}
| 1 | 10,255 | @lgolding, note I fixed the forward slashes that appeared in the VS output (which was certainly not by design). | microsoft-sarif-sdk | .cs |
@@ -724,7 +724,7 @@ class ElementPlot(BokehPlot, GenericElementPlot):
else:
self.handles['xaxis'] = plot.xaxis[0]
self.handles['x_range'] = plot.x_range
- self.handles['y_axis'] = plot.yaxis[0]
+ self.handles['yaxis'] = plot.yaxis[0]
self.handles['y_range'] = plot.y_range
self.handles['plot'] = plot
| 1 | import warnings
import param
import numpy as np
import bokeh
import bokeh.plotting
from bokeh.core.properties import value
from bokeh.models import (HoverTool, Renderer, Range1d, DataRange1d, Title,
FactorRange, FuncTickFormatter, Tool, Legend)
from bokeh.models.tickers import Ticker, BasicTicker, FixedTicker, LogTicker
from bokeh.models.widgets import Panel, Tabs
from bokeh.models.mappers import LinearColorMapper
try:
from bokeh.models import ColorBar
from bokeh.models.mappers import LogColorMapper, CategoricalColorMapper
except ImportError:
LogColorMapper, ColorBar = None, None
from bokeh.plotting.helpers import _known_tools as known_tools
from ...core import DynamicMap, CompositeOverlay, Element, Dimension
from ...core.options import abbreviated_exception, SkipRendering
from ...core import util
from ...streams import Buffer
from ..plot import GenericElementPlot, GenericOverlayPlot
from ..util import dynamic_update, process_cmap, color_intervals
from .plot import BokehPlot, TOOLS
from .util import (mpl_to_bokeh, get_tab_title, py2js_tickformatter,
rgba_tuple, recursive_model_update, glyph_order,
decode_bytes, bokeh_version)
property_prefixes = ['selection', 'nonselection', 'muted', 'hover']
# Define shared style properties for bokeh plots
line_properties = ['line_color', 'line_alpha', 'color', 'alpha', 'line_width',
'line_join', 'line_cap', 'line_dash']
line_properties += ['_'.join([prefix, prop]) for prop in line_properties[:4]
for prefix in property_prefixes]
fill_properties = ['fill_color', 'fill_alpha']
fill_properties += ['_'.join([prefix, prop]) for prop in fill_properties
for prefix in property_prefixes]
text_properties = ['text_font', 'text_font_size', 'text_font_style', 'text_color',
'text_alpha', 'text_align', 'text_baseline']
legend_dimensions = ['label_standoff', 'label_width', 'label_height', 'glyph_width',
'glyph_height', 'legend_padding', 'legend_spacing', 'click_policy']
class ElementPlot(BokehPlot, GenericElementPlot):
border = param.Number(default=10, doc="""
Minimum border around plot.""")
finalize_hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing an axis.
The hook is passed the plot object and the displayed
object, other plotting handles can be accessed via plot.handles.""")
fontsize = param.Parameter(default={'title': '12pt'}, allow_None=True, doc="""
Specifies various fontsizes of the displayed text.
Finer control is available by supplying a dictionary where any
        unmentioned keys revert to the default sizes, e.g.:
{'ticks': '20pt', 'title': '15pt', 'ylabel': '5px', 'xlabel': '5px'}""")
gridstyle = param.Dict(default={}, doc="""
Allows customizing the grid style, e.g. grid_line_color defines
the line color for both grids while xgrid_line_color exclusively
customizes the x-axis grid lines.""")
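    # For instance (values purely illustrative), gridstyle={'grid_line_color':
    # 'gray', 'xgrid_line_color': 'red'} sets the grid color to gray and
    # overrides the x-axis grid lines to red.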
labelled = param.List(default=['x', 'y'], doc="""
Whether to plot the 'x' and 'y' labels.""")
lod = param.Dict(default={'factor': 10, 'interval': 300,
'threshold': 2000, 'timeout': 500}, doc="""
Bokeh plots offer "Level of Detail" (LOD) capability to
accommodate large (but not huge) amounts of data. The available
options are:
* factor - Decimation factor to use when applying
decimation.
* interval - Interval (in ms) downsampling will be enabled
after an interactive event.
* threshold - Number of samples before downsampling is enabled.
* timeout - Timeout (in ms) for checking whether interactive
tool events are still occurring.""")
show_frame = param.Boolean(default=True, doc="""
Whether or not to show a complete frame around the plot.""")
shared_axes = param.Boolean(default=True, doc="""
        Whether to share axes across plots
for linked panning and zooming.""")
default_tools = param.List(default=['save', 'pan', 'wheel_zoom',
'box_zoom', 'reset'],
doc="A list of plugin tools to use on the plot.")
tools = param.List(default=[], doc="""
A list of plugin tools to use on the plot.""")
toolbar = param.ObjectSelector(default='right',
objects=["above", "below",
"left", "right", None],
doc="""
The toolbar location, must be one of 'above', 'below',
'left', 'right', None.""")
_categorical = False
# Declares the default types for continuous x- and y-axes
_x_range_type = Range1d
_y_range_type = Range1d
# Whether the plot supports streaming data
_stream_data = True
def __init__(self, element, plot=None, **params):
self.current_ranges = None
super(ElementPlot, self).__init__(element, **params)
self.handles = {} if plot is None else self.handles['plot']
self.static = len(self.hmap) == 1 and len(self.keys) == len(self.hmap)
self.callbacks = self._construct_callbacks()
self.static_source = False
self.streaming = [s for s in self.streams if isinstance(s, Buffer)]
# Whether axes are shared between plots
self._shared = {'x': False, 'y': False}
def _hover_opts(self, element):
if self.batched:
dims = list(self.hmap.last.kdims)
else:
dims = list(self.overlay_dims.keys())
dims += element.dimensions()
return list(util.unique_iterator(dims)), {}
def _init_tools(self, element, callbacks=[]):
"""
Processes the list of tools to be supplied to the plot.
"""
tooltips, hover_opts = self._hover_opts(element)
tooltips = [(ttp.pprint_label, '@{%s}' % util.dimension_sanitizer(ttp.name))
if isinstance(ttp, Dimension) else ttp for ttp in tooltips]
if not tooltips: tooltips = None
callbacks = callbacks+self.callbacks
cb_tools, tool_names = [], []
hover = False
for cb in callbacks:
for handle in cb.models+cb.extra_models:
if handle and handle in known_tools:
tool_names.append(handle)
if handle == 'hover':
tool = HoverTool(tooltips=tooltips, **hover_opts)
hover = tool
else:
tool = known_tools[handle]()
cb_tools.append(tool)
self.handles[handle] = tool
tools = [t for t in cb_tools + self.default_tools + self.tools
if t not in tool_names]
copied_tools = []
for tool in tools:
if isinstance(tool, Tool):
properties = tool.properties_with_values(include_defaults=False)
tool = type(tool)(**properties)
copied_tools.append(tool)
hover_tools = [t for t in copied_tools if isinstance(t, HoverTool)]
if 'hover' in copied_tools:
hover = HoverTool(tooltips=tooltips, **hover_opts)
copied_tools[copied_tools.index('hover')] = hover
elif any(hover_tools):
hover = hover_tools[0]
if hover:
self.handles['hover'] = hover
return copied_tools
def _get_hover_data(self, data, element, dimensions=None):
"""
Initializes hover data based on Element dimension values.
        If empty, initializes with no data.
"""
if 'hover' not in self.handles or self.static_source:
return
for d in (dimensions or element.dimensions()):
dim = util.dimension_sanitizer(d.name)
if dim not in data:
data[dim] = element.dimension_values(d)
elif isinstance(data[dim], np.ndarray) and data[dim].dtype.kind == 'M':
data[dim+'_dt_strings'] = [d.pprint_value(v) for v in data[dim]]
for k, v in self.overlay_dims.items():
dim = util.dimension_sanitizer(k.name)
if dim not in data:
data[dim] = [v for _ in range(len(list(data.values())[0]))]
def _merge_ranges(self, plots, xlabel, ylabel):
"""
        Given a list of other plots, return axes that are shared
        with another plot by matching the axis labels
"""
plot_ranges = {}
for plot in plots:
if plot is None:
continue
if hasattr(plot, 'xaxis'):
if plot.xaxis[0].axis_label == xlabel:
plot_ranges['x_range'] = plot.x_range
if plot.xaxis[0].axis_label == ylabel:
plot_ranges['y_range'] = plot.x_range
if hasattr(plot, 'yaxis'):
if plot.yaxis[0].axis_label == ylabel:
plot_ranges['y_range'] = plot.y_range
if plot.yaxis[0].axis_label == xlabel:
plot_ranges['x_range'] = plot.y_range
return plot_ranges
def _axes_props(self, plots, subplots, element, ranges):
# Get the bottom layer and range element
el = element.traverse(lambda x: x, [Element])
el = el[0] if el else element
dims = el.dimensions()
xlabel, ylabel, zlabel = self._get_axis_labels(dims)
if self.invert_axes:
xlabel, ylabel = ylabel, xlabel
plot_ranges = {}
# Try finding shared ranges in other plots in the same Layout
norm_opts = self.lookup_options(el, 'norm').options
if plots and self.shared_axes and not norm_opts.get('axiswise', False):
plot_ranges = self._merge_ranges(plots, xlabel, ylabel)
# Get the Element that determines the range and get_extents
range_el = el if self.batched and not isinstance(self, OverlayPlot) else element
l, b, r, t = self.get_extents(range_el, ranges)
if self.invert_axes:
l, b, r, t = b, l, t, r
xtype = el.get_dimension_type(0)
if ((xtype is np.object_ and type(l) in util.datetime_types) or
xtype in util.datetime_types):
x_axis_type = 'datetime'
else:
x_axis_type = 'log' if self.logx else 'auto'
y_axis_type = 'log' if self.logy else 'auto'
if len(dims) > 1:
ytype = el.get_dimension_type(1)
if ((ytype is np.object_ and type(b) in util.datetime_types)
or ytype in util.datetime_types):
y_axis_type = 'datetime'
# Declare shared axes
if 'x_range' in plot_ranges:
self._shared['x'] = True
if 'y_range' in plot_ranges:
self._shared['y'] = True
categorical = any(self.traverse(lambda x: x._categorical))
categorical_x = any(isinstance(x, util.basestring) for x in (l, r))
categorical_y = any(isinstance(y, util.basestring) for y in (b, t))
range_types = (self._x_range_type, self._y_range_type)
if self.invert_axes: range_types = range_types[::-1]
x_range_type, y_range_type = range_types
if categorical or categorical_x:
x_axis_type = 'auto'
plot_ranges['x_range'] = FactorRange()
elif 'x_range' not in plot_ranges:
plot_ranges['x_range'] = x_range_type()
if categorical or categorical_y:
y_axis_type = 'auto'
plot_ranges['y_range'] = FactorRange()
elif 'y_range' not in plot_ranges:
plot_ranges['y_range'] = y_range_type()
return (x_axis_type, y_axis_type), (xlabel, ylabel, zlabel), plot_ranges
def _init_plot(self, key, element, plots, ranges=None):
"""
Initializes Bokeh figure to draw Element into and sets basic
figure and axis attributes including axes types, labels,
titles and plot height and width.
"""
subplots = list(self.subplots.values()) if self.subplots else []
axis_types, labels, plot_ranges = self._axes_props(plots, subplots, element, ranges)
xlabel, ylabel, _ = labels
x_axis_type, y_axis_type = axis_types
properties = dict(plot_ranges)
properties['x_axis_label'] = xlabel if 'x' in self.labelled else ' '
properties['y_axis_label'] = ylabel if 'y' in self.labelled else ' '
if not self.show_frame:
properties['outline_line_alpha'] = 0
if self.show_title:
title = self._format_title(key, separator=' ')
else:
title = ''
if self.toolbar:
tools = self._init_tools(element)
properties['tools'] = tools
properties['toolbar_location'] = self.toolbar
if self.renderer.webgl:
properties['output_backend'] = 'webgl'
with warnings.catch_warnings():
# Bokeh raises warnings about duplicate tools but these
# are not really an issue
warnings.simplefilter('ignore', UserWarning)
return bokeh.plotting.Figure(x_axis_type=x_axis_type,
y_axis_type=y_axis_type, title=title,
**properties)
def _plot_properties(self, key, plot, element):
"""
Returns a dictionary of plot properties.
"""
size_multiplier = self.renderer.size/100.
plot_props = dict(plot_height=int(self.height*size_multiplier),
plot_width=int(self.width*size_multiplier),
sizing_mode=self.sizing_mode)
if self.bgcolor:
plot_props['background_fill_color'] = self.bgcolor
if self.border is not None:
for p in ['left', 'right', 'top', 'bottom']:
plot_props['min_border_'+p] = self.border
lod = dict(self.defaults().get('lod', {}), **self.lod)
for lod_prop, v in lod.items():
plot_props['lod_'+lod_prop] = v
return plot_props
def _title_properties(self, key, plot, element):
if self.show_title:
title = self._format_title(key, separator=' ')
else:
title = ''
opts = dict(text=title, text_color='black')
title_font = self._fontsize('title').get('fontsize')
if title_font:
opts['text_font_size'] = value(title_font)
return opts
def _init_axes(self, plot):
if self.xaxis is None:
plot.xaxis.visible = False
elif 'top' in self.xaxis:
plot.above = plot.below
plot.below = []
plot.xaxis[:] = plot.above
self.handles['xaxis'] = plot.xaxis[0]
self.handles['x_range'] = plot.x_range
if self.yaxis is None:
plot.yaxis.visible = False
elif 'right' in self.yaxis:
plot.right = plot.left
plot.left = []
plot.yaxis[:] = plot.right
self.handles['yaxis'] = plot.yaxis[0]
self.handles['y_range'] = plot.y_range
def _axis_properties(self, axis, key, plot, dimension=None,
ax_mapping={'x': 0, 'y': 1}):
"""
Returns a dictionary of axis properties depending
on the specified axis.
"""
axis_props = {}
if ((axis == 'x' and self.xaxis in ['bottom-bare', 'top-bare']) or
(axis == 'y' and self.yaxis in ['left-bare', 'right-bare'])):
axis_props['axis_label_text_font_size'] = value('0pt')
axis_props['major_label_text_font_size'] = value('0pt')
axis_props['major_tick_line_color'] = None
axis_props['minor_tick_line_color'] = None
else:
labelsize = self._fontsize('%slabel' % axis).get('fontsize')
if labelsize:
axis_props['axis_label_text_font_size'] = labelsize
ticksize = self._fontsize('%sticks' % axis, common=False).get('fontsize')
if ticksize:
axis_props['major_label_text_font_size'] = value(ticksize)
rotation = self.xrotation if axis == 'x' else self.yrotation
if rotation:
axis_props['major_label_orientation'] = np.radians(rotation)
ticker = self.xticks if axis == 'x' else self.yticks
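# Ticks may be given as a Ticker instance, an integer tick count, a list of tick positions or a list of (tick, label) pairs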
if isinstance(ticker, Ticker):
axis_props['ticker'] = ticker
elif isinstance(ticker, int):
axis_props['ticker'] = BasicTicker(desired_num_ticks=ticker)
elif isinstance(ticker, (tuple, list)):
if all(isinstance(t, tuple) for t in ticker):
ticks, labels = zip(*ticker)
labels = [l if isinstance(l, util.basestring) else str(l)
for l in labels]
axis_props['ticker'] = FixedTicker(ticks=ticks)
axis_props['major_label_overrides'] = dict(zip(ticks, labels))
else:
axis_props['ticker'] = FixedTicker(ticks=ticker)
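# Attempt to translate a dimension value formatter into JS code for a bokeh FuncTickFormatter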
if FuncTickFormatter is not None and ax_mapping and dimension:
formatter = None
if dimension.value_format:
formatter = dimension.value_format
elif dimension.type in dimension.type_formatters:
formatter = dimension.type_formatters[dimension.type]
if formatter:
msg = ('%s dimension formatter could not be '
'converted to tick formatter. ' % dimension.name)
jsfunc = py2js_tickformatter(formatter, msg)
if jsfunc:
formatter = FuncTickFormatter(code=jsfunc)
axis_props['formatter'] = formatter
return axis_props
def _update_plot(self, key, plot, element=None):
"""
Updates plot parameters on every frame
"""
el = element.traverse(lambda x: x, [Element])
dimensions = el[0].dimensions() if el else element.dimensions()
if len(dimensions) < 2:
dimensions = dimensions+[None]
plot.update(**self._plot_properties(key, plot, element))
props = {axis: self._axis_properties(axis, key, plot, dim)
for axis, dim in zip(['x', 'y'], dimensions)}
xlabel, ylabel, zlabel = self._get_axis_labels(dimensions)
if self.invert_axes: xlabel, ylabel = ylabel, xlabel
props['x']['axis_label'] = xlabel if 'x' in self.labelled else ''
props['y']['axis_label'] = ylabel if 'y' in self.labelled else ''
recursive_model_update(plot.xaxis[0], props.get('x', {}))
recursive_model_update(plot.yaxis[0], props.get('y', {}))
if plot.title:
plot.title.update(**self._title_properties(key, plot, element))
else:
plot.title = Title(**self._title_properties(key, plot, element))
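# Hide the grid entirely or apply any user-supplied grid styling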
if not self.show_grid:
plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = None
else:
replace = ['bounds', 'bands']
style_items = list(self.gridstyle.items())
both = {k: v for k, v in style_items if k.startswith('grid_') or k.startswith('minor_grid')}
xgrid = {k.replace('xgrid', 'grid'): v for k, v in style_items if 'xgrid' in k}
ygrid = {k.replace('ygrid', 'grid'): v for k, v in style_items if 'ygrid' in k}
xopts = {k.replace('grid_', '') if any(r in k for r in replace) else k: v
for k, v in dict(both, **xgrid).items()}
yopts = {k.replace('grid_', '') if any(r in k for r in replace) else k: v
for k, v in dict(both, **ygrid).items()}
plot.xgrid[0].update(**xopts)
plot.ygrid[0].update(**yopts)
def _update_ranges(self, element, ranges):
x_range = self.handles['x_range']
y_range = self.handles['y_range']
l, b, r, t = None, None, None, None
if any(isinstance(r, (Range1d, DataRange1d)) for r in [x_range, y_range]):
l, b, r, t = self.get_extents(element, ranges)
if self.invert_axes:
l, b, r, t = b, l, t, r
xfactors, yfactors = None, None
if any(isinstance(ax_range, FactorRange) for ax_range in [x_range, y_range]):
xfactors, yfactors = self._get_factors(element)
framewise = self.framewise
streaming = (self.streaming and any(stream._triggering for stream in self.streaming))
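# Update ranges only when framewise normalization or streaming requires it and the change did not originate on the frontend; categorical factors are always refreshed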
xupdate = ((not self.model_changed(x_range) and (framewise or streaming))
or xfactors is not None)
yupdate = ((not self.model_changed(y_range) and (framewise or streaming))
or yfactors is not None)
if not self.drawn or xupdate:
self._update_range(x_range, l, r, xfactors, self.invert_xaxis,
self._shared['x'], self.logx, streaming)
if not self.drawn or yupdate:
self._update_range(y_range, b, t, yfactors, self.invert_yaxis,
self._shared['y'], self.logy, streaming)
def _update_range(self, axis_range, low, high, factors, invert, shared, log, streaming=False):
if isinstance(axis_range, (Range1d, DataRange1d)) and self.apply_ranges:
if (low == high and low is not None):
if isinstance(low, util.datetime_types):
offset = np.timedelta64(500, 'ms')
low -= offset
high += offset
else:
offset = abs(low*0.1 if low else 0.5)
low -= offset
high += offset
if invert: low, high = high, low
if shared:
shared = (axis_range.start, axis_range.end)
low, high = util.max_range([(low, high), shared])
if log and (low is None or low <= 0):
low = 0.01 if high < 0.01 else 10**(np.log10(high)-2)
self.warning("Logarithmic axis range encountered value less than or equal to zero, "
"please supply explicit lower-bound to override default of %.3f." % low)
updates = {}
reset_supported = bokeh_version > '0.12.16'
if util.isfinite(low):
updates['start'] = (axis_range.start, low)
if reset_supported:
updates['reset_start'] = updates['start']
if util.isfinite(high):
updates['end'] = (axis_range.end, high)
if reset_supported:
updates['reset_end'] = updates['end']
for k, (old, new) in updates.items():
axis_range.update(**{k:new})
if streaming and not k.startswith('reset_'):
axis_range.trigger(k, old, new)
elif isinstance(axis_range, FactorRange):
factors = list(decode_bytes(factors))
if invert: factors = factors[::-1]
axis_range.factors = factors
def _categorize_data(self, data, cols, dims):
"""
Transforms non-string or integer types in datasource if the
axis to be plotted on is categorical. Accepts the column data
source data, the columns corresponding to the axes and the
dimensions for each axis, changing the data inplace.
"""
if self.invert_axes:
cols = cols[::-1]
dims = dims[:2][::-1]
ranges = [self.handles['%s_range' % ax] for ax in 'xy']
for i, col in enumerate(cols):
column = data[col]
if (isinstance(ranges[i], FactorRange) and
(isinstance(column, list) or column.dtype.kind not in 'SU')):
data[col] = [dims[i].pprint_value(v) for v in column]
def _get_factors(self, element):
"""
Get factors for categorical axes.
"""
xdim, ydim = element.dimensions()[:2]
xvals, yvals = [element.dimension_values(i, False)
for i in range(2)]
coords = tuple([v if vals.dtype.kind in 'SU' else dim.pprint_value(v) for v in vals]
for dim, vals in [(xdim, xvals), (ydim, yvals)])
if self.invert_axes: coords = coords[::-1]
return coords
def _process_legend(self):
"""
Disables legends if show_legend is disabled.
"""
for l in self.handles['plot'].legend:
l.items[:] = []
l.border_line_alpha = 0
l.background_fill_alpha = 0
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
properties = mpl_to_bokeh(properties)
plot_method = self._plot_methods.get('batched' if self.batched else 'single')
if isinstance(plot_method, tuple):
# Handle alternative plot method for flipped axes
plot_method = plot_method[int(self.invert_axes)]
renderer = getattr(plot, plot_method)(**dict(properties, **mapping))
return renderer, renderer.glyph
def _glyph_properties(self, plot, element, source, ranges, style):
properties = dict(style, source=source)
if self.show_legend:
if self.overlay_dims:
legend = ', '.join([d.pprint_value(v) for d, v in
self.overlay_dims.items()])
else:
legend = element.label
if legend:
properties['legend'] = value(legend)
return properties
def _filter_properties(self, properties, glyph_type, allowed):
glyph_props = dict(properties)
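# Expand generic color/alpha options into line_ and fill_ properties for each glyph type prefix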
for gtype in ((glyph_type, '') if glyph_type else ('',)):
for prop in ('color', 'alpha'):
glyph_prop = properties.get(gtype+prop)
if glyph_prop and ('line_'+prop not in glyph_props or gtype):
glyph_props['line_'+prop] = glyph_prop
if glyph_prop and ('fill_'+prop not in glyph_props or gtype):
glyph_props['fill_'+prop] = glyph_prop
props = {k[len(gtype):]: v for k, v in glyph_props.items()
if k.startswith(gtype)}
if self.batched:
glyph_props = dict(props, **glyph_props)
else:
glyph_props.update(props)
return {k: v for k, v in glyph_props.items() if k in allowed}
def _update_glyph(self, renderer, properties, mapping, glyph):
allowed_properties = glyph.properties()
properties = mpl_to_bokeh(properties)
merged = dict(properties, **mapping)
legend = merged.pop('legend', None)
for glyph_type in ('', 'selection_', 'nonselection_', 'hover_', 'muted_'):
if renderer:
glyph = getattr(renderer, glyph_type+'glyph', None)
if not glyph or (not renderer and glyph_type):
continue
filtered = self._filter_properties(merged, glyph_type, allowed_properties)
glyph.update(**filtered)
if legend is not None:
for leg in self.state.legend:
for item in leg.items:
if renderer in item.renderers:
item.label = legend
def _postprocess_hover(self, renderer, source):
"""
Attaches renderer to hover tool and processes tooltips to
ensure datetime data is displayed correctly.
"""
hover = self.handles.get('hover')
if hover is None:
return
if hover.renderers == 'auto':
hover.renderers = []
hover.renderers.append(renderer)
# If datetime column is in the data replace hover formatter
for k, v in source.data.items():
if k+'_dt_strings' in source.data:
tooltips = []
for name, formatter in hover.tooltips:
if formatter == '@{%s}' % k:
formatter = '@{%s_dt_strings}' % k
tooltips.append((name, formatter))
hover.tooltips = tooltips
def _init_glyphs(self, plot, element, ranges, source):
style_element = element.last if self.batched else element
# Get data and initialize data source
if self.batched:
current_id = tuple(element.traverse(lambda x: x._plot_id, [Element]))
data, mapping, style = self.get_batched_data(element, ranges)
else:
style = self.style[self.cyclic_index]
data, mapping, style = self.get_data(element, ranges, style)
current_id = element._plot_id
if source is None:
source = self._init_datasource(data)
self.handles['previous_id'] = current_id
self.handles['source'] = source
properties = self._glyph_properties(plot, style_element, source, ranges, style)
with abbreviated_exception():
renderer, glyph = self._init_glyph(plot, mapping, properties)
self.handles['glyph'] = glyph
if isinstance(renderer, Renderer):
self.handles['glyph_renderer'] = renderer
self._postprocess_hover(renderer, source)
# Update plot, source and glyph
with abbreviated_exception():
self._update_glyph(renderer, properties, mapping, glyph)
def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
"""
Initializes a new plot object with the last available frame.
"""
# Get element key and ranges for frame
if self.batched:
element = [el for el in self.hmap.data.values() if el][-1]
else:
element = self.hmap.last
key = util.wrap_tuple(self.hmap.last_key)
ranges = self.compute_ranges(self.hmap, key, ranges)
self.current_ranges = ranges
self.current_frame = element
self.current_key = key
style_element = element.last if self.batched else element
ranges = util.match_spec(style_element, ranges)
# Initialize plot, source and glyph
if plot is None:
plot = self._init_plot(key, style_element, ranges=ranges, plots=plots)
self._init_axes(plot)
else:
self.handles['xaxis'] = plot.xaxis[0]
self.handles['x_range'] = plot.x_range
self.handles['yaxis'] = plot.yaxis[0]
self.handles['y_range'] = plot.y_range
self.handles['plot'] = plot
self._init_glyphs(plot, element, ranges, source)
if not self.overlaid:
self._update_plot(key, plot, style_element)
self._update_ranges(style_element, ranges)
for cb in self.callbacks:
cb.initialize()
if not self.overlaid:
self._process_legend()
self._execute_hooks(element)
self.drawn = True
return plot
def _update_glyphs(self, element, ranges):
plot = self.handles['plot']
glyph = self.handles.get('glyph')
source = self.handles['source']
mapping = {}
# Cache frame object id to skip updating data if unchanged
previous_id = self.handles.get('previous_id', None)
if self.batched:
current_id = tuple(element.traverse(lambda x: x._plot_id, [Element]))
else:
current_id = element._plot_id
self.handles['previous_id'] = current_id
self.static_source = (self.dynamic and (current_id == previous_id))
style = self.style[self.cyclic_index]
if self.batched:
data, mapping, style = self.get_batched_data(element, ranges)
else:
data, mapping, style = self.get_data(element, ranges, style)
if not self.static_source:
self._update_datasource(source, data)
if glyph:
properties = self._glyph_properties(plot, element, source, ranges, style)
renderer = self.handles.get('glyph_renderer')
with abbreviated_exception():
self._update_glyph(renderer, properties, mapping, glyph)
def update_frame(self, key, ranges=None, plot=None, element=None):
"""
Updates an existing plot with data corresponding
to the key.
"""
reused = isinstance(self.hmap, DynamicMap) and (self.overlaid or self.batched)
if not reused and element is None:
element = self._get_frame(key)
elif element is not None:
self.current_key = key
self.current_frame = element
renderer = self.handles.get('glyph_renderer', None)
glyph = self.handles.get('glyph', None)
visible = element is not None
if hasattr(renderer, 'visible'):
renderer.visible = visible
if hasattr(glyph, 'visible'):
glyph.visible = visible
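# Skip the update entirely for empty frames, static plots or streams that did not trigger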
if ((self.batched and not element) or element is None or (not self.dynamic and self.static) or
(self.streaming and self.streaming[0].data is self.current_frame.data and not self.streaming[0]._triggering)):
return
if self.batched:
style_element = element.last
max_cycles = None
else:
style_element = element
max_cycles = self.style._max_cycles
style = self.lookup_options(style_element, 'style')
self.style = style.max_cycles(max_cycles) if max_cycles else style
ranges = self.compute_ranges(self.hmap, key, ranges)
self.set_param(**self.lookup_options(style_element, 'plot').options)
ranges = util.match_spec(style_element, ranges)
self.current_ranges = ranges
plot = self.handles['plot']
if not self.overlaid:
self._update_ranges(style_element, ranges)
self._update_plot(key, plot, style_element)
self._update_glyphs(element, ranges)
self._execute_hooks(element)
def model_changed(self, model):
"""
Determines if the bokeh model was just changed on the frontend.
Useful to suppress boomeranging events: when the frontend has
just sent an update to the x_range, for example, that change
should not trigger another update on the backend.
"""
callbacks = [cb for cbs in self.traverse(lambda x: x.callbacks)
for cb in cbs]
stream_metadata = [stream._metadata for cb in callbacks
for stream in cb.streams if stream._metadata]
return any(md['id'] == model.ref['id'] for models in stream_metadata
for md in models.values())
@property
def framewise(self):
"""
Property to determine whether the current frame should have
framewise normalization enabled. Required for bokeh plotting
classes to determine whether to send updated ranges for each
frame.
"""
current_frames = [el for f in self.traverse(lambda x: x.current_frame)
for el in (f.traverse(lambda x: x, [Element])
if f else [])]
current_frames = util.unique_iterator(current_frames)
return any(self.lookup_options(frame, 'norm').options.get('framewise')
for frame in current_frames)
class CompositeElementPlot(ElementPlot):
"""
A CompositeElementPlot is an Element plot type that coordinates
drawing of multiple glyphs.
"""
# Mapping between glyph names and style groups
_style_groups = {}
# Defines the order in which glyphs are drawn, defined by glyph name
_draw_order = []
def _init_glyphs(self, plot, element, ranges, source, data=None, mapping=None, style=None):
# Get data and initialize data source
if None in (data, mapping):
style = self.style[self.cyclic_index]
data, mapping, style = self.get_data(element, ranges, style)
keys = glyph_order(dict(data, **mapping), self._draw_order)
source_cache = {}
current_id = element._plot_id
self.handles['previous_id'] = current_id
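# Create a data source and glyph for each key in draw order, reusing sources for identical data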
for key in keys:
ds_data = data.get(key, {})
if id(ds_data) in source_cache:
source = source_cache[id(ds_data)]
else:
source = self._init_datasource(ds_data)
source_cache[id(ds_data)] = source
self.handles[key+'_source'] = source
properties = self._glyph_properties(plot, element, source, ranges, style)
properties = self._process_properties(key, properties, mapping.get(key, {}))
with abbreviated_exception():
renderer, glyph = self._init_glyph(plot, mapping.get(key, {}), properties, key)
self.handles[key+'_glyph'] = glyph
if isinstance(renderer, Renderer):
self.handles[key+'_glyph_renderer'] = renderer
self._postprocess_hover(renderer, source)
# Update plot, source and glyph
with abbreviated_exception():
self._update_glyph(renderer, properties, mapping.get(key, {}), glyph)
def _process_properties(self, key, properties, mapping):
key = '_'.join(key.split('_')[:-1]) if '_' in key else key
style_group = self._style_groups[key]
group_props = {}
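# Keep only the style options belonging to this glyph's style group, stripping the group prefix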
for k, v in properties.items():
if k in self.style_opts:
group = k.split('_')[0]
if group == style_group:
if k in mapping:
v = mapping[k]
k = '_'.join(k.split('_')[1:])
else:
continue
group_props[k] = v
return group_props
def _update_glyphs(self, element, ranges):
plot = self.handles['plot']
# Cache frame object id to skip updating data if unchanged
previous_id = self.handles.get('previous_id', None)
if self.batched:
current_id = tuple(element.traverse(lambda x: x._plot_id, [Element]))
else:
current_id = element._plot_id
self.handles['previous_id'] = current_id
self.static_source = (self.dynamic and (current_id == previous_id))
style = self.style[self.cyclic_index]
data, mapping, style = self.get_data(element, ranges, style)
keys = glyph_order(dict(data, **mapping), self._draw_order)
for key in keys:
gdata = data.get(key)
source = self.handles[key+'_source']
glyph = self.handles.get(key+'_glyph')
if not self.static_source and gdata is not None:
self._update_datasource(source, gdata)
if glyph:
properties = self._glyph_properties(plot, element, source, ranges, style)
properties = self._process_properties(key, properties, mapping[key])
renderer = self.handles.get(key+'_glyph_renderer')
with abbreviated_exception():
self._update_glyph(renderer, properties, mapping[key], glyph)
def _init_glyph(self, plot, mapping, properties, key):
"""
Returns a Bokeh glyph object.
"""
properties = mpl_to_bokeh(properties)
plot_method = '_'.join(key.split('_')[:-1])
renderer = getattr(plot, plot_method)(**dict(properties, **mapping))
return renderer, renderer.glyph
class ColorbarPlot(ElementPlot):
"""
ColorbarPlot provides methods to create colormappers and colorbar
models which can be added to a glyph. Additionally it provides
parameters to control the position and other styling options of
the colorbar. The default colorbar_position options are defined
by the colorbar_specs, but may be overridden by the colorbar_opts.
"""
colorbar_specs = {'right': {'pos': 'right',
'opts': {'location': (0, 0)}},
'left': {'pos': 'left',
'opts':{'location':(0, 0)}},
'bottom': {'pos': 'below',
'opts': {'location': (0, 0),
'orientation':'horizontal'}},
'top': {'pos': 'above',
'opts': {'location':(0, 0),
'orientation':'horizontal'}},
'top_right': {'pos': 'center',
'opts': {'location': 'top_right'}},
'top_left': {'pos': 'center',
'opts': {'location': 'top_left'}},
'bottom_left': {'pos': 'center',
'opts': {'location': 'bottom_left',
'orientation': 'horizontal'}},
'bottom_right': {'pos': 'center',
'opts': {'location': 'bottom_right',
'orientation': 'horizontal'}}}
color_levels = param.ClassSelector(default=None, class_=(int, list), doc="""
Number of discrete colors to use when colormapping or a set of color
intervals defining the range of values to map each color to.""")
colorbar = param.Boolean(default=False, doc="""
Whether to display a colorbar.""")
colorbar_position = param.ObjectSelector(objects=list(colorbar_specs),
default="right", doc="""
Allows selecting between a number of predefined colorbar position
options. The predefined options may be customized in the
colorbar_specs class attribute.""")
colorbar_opts = param.Dict(default={}, doc="""
Allows setting specific styling options for the colorbar overriding
the options defined in the colorbar_specs class attribute. Includes
location, orientation, height, width, scale_alpha, title, title_props,
margin, padding, background_fill_color and more.""")
clipping_colors = param.Dict(default={}, doc="""
Dictionary to specify colors for clipped values, allows
setting color for NaN values and for values above and below
the min and max value. The min, max or NaN color may specify
an RGB(A) color as a color hex string of the form #FFFFFF or
#FFFFFFFF or a length 3 or length 4 tuple specifying values in
the range 0-1 or a named HTML color.""")
logz = param.Boolean(default=False, doc="""
Whether to apply log scaling to the z-axis.""")
symmetric = param.Boolean(default=False, doc="""
Whether to make the colormap symmetric around zero.""")
_colorbar_defaults = dict(bar_line_color='black', label_standoff=8,
major_tick_line_color='black')
_default_nan = '#8b8b8b'
def _draw_colorbar(self, plot, color_mapper):
if CategoricalColorMapper and isinstance(color_mapper, CategoricalColorMapper):
return
if LogColorMapper and isinstance(color_mapper, LogColorMapper):
ticker = LogTicker()
else:
ticker = BasicTicker()
cbar_opts = dict(self.colorbar_specs[self.colorbar_position])
# Check if there is a colorbar in the same position
pos = cbar_opts['pos']
if any(isinstance(model, ColorBar) for model in getattr(plot, pos, [])):
return
opts = dict(cbar_opts['opts'], **self._colorbar_defaults)
color_bar = ColorBar(color_mapper=color_mapper, ticker=ticker,
**dict(opts, **self.colorbar_opts))
plot.add_layout(color_bar, pos)
self.handles['colorbar'] = color_bar
def _get_colormapper(self, dim, element, ranges, style, factors=None, colors=None,
name='color_mapper'):
# The initial colormapper instance is cached the first time
# and then only updated
if dim is None and colors is None:
return None
if self.adjoined:
cmappers = self.adjoined.traverse(lambda x: (x.handles.get('color_dim'),
x.handles.get(name)))
cmappers = [cmap for cdim, cmap in cmappers if cdim == dim]
if cmappers:
cmapper = cmappers[0]
self.handles['color_mapper'] = cmapper
return cmapper
else:
return None
ncolors = None if factors is None else len(factors)
if dim:
if dim.name in ranges:
low, high = ranges.get(dim.name)
else:
low, high = element.range(dim.name)
if self.symmetric:
sym_max = max(abs(low), high)
low, high = -sym_max, sym_max
else:
low, high = None, None
cmap = colors or style.pop('cmap', 'viridis')
nan_colors = {k: rgba_tuple(v) for k, v in self.clipping_colors.items()}
if isinstance(cmap, dict) and factors:
palette = [cmap.get(f, nan_colors.get('NaN', self._default_nan)) for f in factors]
else:
categorical = ncolors is not None
if isinstance(self.color_levels, int):
ncolors = self.color_levels
elif isinstance(self.color_levels, list):
ncolors = len(self.color_levels) - 1
if isinstance(cmap, list) and len(cmap) != ncolors:
raise ValueError('The number of colors in the colormap '
'must match the intervals defined in the '
'color_levels, expected %d colors found %d.'
% (ncolors, len(cmap)))
palette = process_cmap(cmap, ncolors, categorical=categorical)
if isinstance(self.color_levels, list):
palette = color_intervals(palette, self.color_levels, clip=(low, high))
colormapper, opts = self._get_cmapper_opts(low, high, factors, nan_colors)
cmapper = self.handles.get(name)
if cmapper is not None:
if cmapper.palette != palette:
cmapper.palette = palette
opts = {k: opt for k, opt in opts.items()
if getattr(cmapper, k) != opt}
if opts:
cmapper.update(**opts)
else:
cmapper = colormapper(palette=palette, **opts)
self.handles[name] = cmapper
self.handles['color_dim'] = dim
return cmapper
def _get_color_data(self, element, ranges, style, name='color', factors=None, colors=None,
int_categories=False):
data, mapping = {}, {}
cdim = element.get_dimension(self.color_index)
if not cdim:
return data, mapping
cdata = element.dimension_values(cdim)
field = util.dimension_sanitizer(cdim.name)
dtypes = 'iOSU' if int_categories else 'OSU'
if factors is None and (isinstance(cdata, list) or cdata.dtype.kind in dtypes):
factors = list(util.unique_array(cdata))
if factors and int_categories and cdata.dtype.kind == 'i':
field += '_str'
cdata = [str(f) for f in cdata]
factors = [str(f) for f in factors]
mapper = self._get_colormapper(cdim, element, ranges, style,
factors, colors)
data[field] = cdata
if factors is not None and self.show_legend:
mapping['legend'] = {'field': field}
mapping[name] = {'field': field, 'transform': mapper}
return data, mapping
def _get_cmapper_opts(self, low, high, factors, colors):
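# Choose a continuous (linear or log) colormapper for numeric data or a categorical mapper when factors are supplied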
if factors is None:
colormapper = LogColorMapper if self.logz else LinearColorMapper
if isinstance(low, (bool, np.bool_)): low = int(low)
if isinstance(high, (bool, np.bool_)): high = int(high)
opts = {}
if util.isfinite(low):
opts['low'] = low
if util.isfinite(high):
opts['high'] = high
color_opts = [('NaN', 'nan_color'), ('max', 'high_color'), ('min', 'low_color')]
opts.update({opt: colors[name] for name, opt in color_opts if name in colors})
else:
colormapper = CategoricalColorMapper
factors = decode_bytes(factors)
opts = dict(factors=factors)
if 'NaN' in colors:
opts['nan_color'] = colors['NaN']
return colormapper, opts
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object and optionally creates a colorbar.
"""
ret = super(ColorbarPlot, self)._init_glyph(plot, mapping, properties)
if self.colorbar and 'color_mapper' in self.handles:
self._draw_colorbar(plot, self.handles['color_mapper'])
return ret
class LegendPlot(ElementPlot):
legend_position = param.ObjectSelector(objects=["top_right",
"top_left",
"bottom_left",
"bottom_right",
'right', 'left',
'top', 'bottom'],
default="top_right",
doc="""
Allows selecting between a number of predefined legend position
options. The predefined options may be customized in the
legend_specs class attribute.""")
legend_offset = param.NumericTuple(default=(0, 0), doc="""
If legend is placed outside the axis, this determines the
(width, height) offset in pixels from the original position.""")
legend_cols = param.Integer(default=False, doc="""
Whether to lay out the legend as columns.""")
legend_specs = {'right': 'right', 'left': 'left', 'top': 'above',
'bottom': 'below'}
def _process_legend(self, plot=None):
plot = plot or self.handles['plot']
if not plot.legend:
return
legend = plot.legend[0]
cmapper = self.handles.get('color_mapper')
if cmapper:
categorical = isinstance(cmapper, CategoricalColorMapper)
else:
categorical = False
if (not categorical and not self.overlaid and len(legend.items) == 1) or not self.show_legend:
legend.items[:] = []
else:
plot.legend.orientation = 'horizontal' if self.legend_cols else 'vertical'
pos = self.legend_position
if pos in self.legend_specs:
plot.legend[:] = []
legend.plot = None
legend.location = self.legend_offset
if pos in ['top', 'bottom']:
plot.legend.orientation = 'horizontal'
plot.add_layout(legend, self.legend_specs[pos])
else:
legend.location = pos
class OverlayPlot(GenericOverlayPlot, LegendPlot):
tabs = param.Boolean(default=False, doc="""
Whether to display overlaid plots in separate panes.""")
style_opts = (legend_dimensions + ['border_'+p for p in line_properties] +
text_properties + ['background_fill_color', 'background_fill_alpha'])
multiple_legends = param.Boolean(default=False, doc="""
Whether to split the legend for subplots into multiple legends.""")
_propagate_options = ['width', 'height', 'xaxis', 'yaxis', 'labelled',
'bgcolor', 'fontsize', 'invert_axes', 'show_frame',
'show_grid', 'logx', 'logy', 'xticks', 'toolbar',
'yticks', 'xrotation', 'yrotation', 'lod',
'border', 'invert_xaxis', 'invert_yaxis', 'sizing_mode',
'title_format', 'legend_position', 'legend_offset',
'legend_cols', 'gridstyle']
def _process_legend(self):
plot = self.handles['plot']
if not self.show_legend or len(plot.legend) == 0:
return super(OverlayPlot, self)._process_legend()
options = {}
properties = self.lookup_options(self.hmap.last, 'style')[self.cyclic_index]
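# Translate the element's style options into bokeh Legend model properties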
for k, v in properties.items():
if k in line_properties and 'line' not in k:
ksplit = k.split('_')
k = '_'.join(ksplit[:1]+['line']+ksplit[1:])
if k in text_properties:
k = 'label_' + k
if k.startswith('legend_'):
k = k[7:]
options[k] = v
if not plot.legend:
return
pos = self.legend_position
orientation = 'horizontal' if self.legend_cols else 'vertical'
if pos in ['top', 'bottom']:
orientation = 'horizontal'
legend_fontsize = self._fontsize('legend', 'size').get('size',False)
legend = plot.legend[0]
legend.update(**options)
if legend_fontsize:
legend.label_text_font_size = value(legend_fontsize)
if pos in self.legend_specs:
pos = self.legend_specs[pos]
else:
legend.location = pos
legend.orientation = orientation
legend_items = []
legend_labels = {}
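# Merge legend items that share a label so each label is only listed once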
for item in legend.items:
label = tuple(item.label.items()) if isinstance(item.label, dict) else item.label
if not label or (isinstance(item.label, dict) and not item.label.get('value', True)):
continue
if label in legend_labels:
prev_item = legend_labels[label]
prev_item.renderers += item.renderers
else:
legend_labels[label] = item
legend_items.append(item)
legend.items[:] = legend_items
if self.multiple_legends:
plot.legend.pop(plot.legend.index(legend))
legend.plot = None
properties = legend.properties_with_values(include_defaults=False)
legend_group = []
for item in legend.items:
if not isinstance(item.label, dict) or 'value' in item.label:
legend_group.append(item)
continue
new_legend = Legend(**dict(properties, items=[item]))
new_legend.location = self.legend_offset
plot.add_layout(new_legend, pos)
if legend_group:
new_legend = Legend(**dict(properties, items=legend_group))
new_legend.location = self.legend_offset
plot.add_layout(new_legend, pos)
legend.items[:] = []
elif pos in ['above', 'below', 'right', 'left']:
plot.legend.pop(plot.legend.index(legend))
legend.plot = None
legend.location = self.legend_offset
plot.add_layout(legend, pos)
def _init_tools(self, element, callbacks=[]):
"""
Processes the list of tools to be supplied to the plot.
"""
tools = []
hover_tools = {}
tool_types = []
for key, subplot in self.subplots.items():
el = element.get(key)
if el is not None:
el_tools = subplot._init_tools(el, self.callbacks)
for tool in el_tools:
if isinstance(tool, util.basestring):
tool_type = TOOLS.get(tool)
else:
tool_type = type(tool)
if isinstance(tool, HoverTool):
tooltips = tuple(tool.tooltips) if tool.tooltips else ()
if tooltips in hover_tools:
continue
else:
hover_tools[tooltips] = tool
elif tool_type in tool_types:
continue
else:
tool_types.append(tool_type)
tools.append(tool)
self.handles['hover_tools'] = hover_tools
return tools
def _merge_tools(self, subplot):
"""
Merges tools on the overlay with those on the subplots.
"""
if self.batched and 'hover' in subplot.handles:
self.handles['hover'] = subplot.handles['hover']
elif 'hover' in subplot.handles and 'hover_tools' in self.handles:
hover = subplot.handles['hover']
# Datetime formatter may have been applied, remove _dt_strings
# to match on the hover tooltips, then merge tool renderers
if hover.tooltips and not isinstance(hover.tooltips, util.basestring):
tooltips = tuple((name, spec.replace('_dt_strings', ''))
for name, spec in hover.tooltips)
else:
tooltips = ()
tool = self.handles['hover_tools'].get(tooltips)
if tool:
tool_renderers = [] if tool.renderers == 'auto' else tool.renderers
hover_renderers = [] if hover.renderers == 'auto' else hover.renderers
renderers = tool_renderers + hover_renderers
tool.renderers = list(util.unique_iterator(renderers))
if 'hover' not in self.handles:
self.handles['hover'] = tool
def _get_factors(self, overlay):
xfactors, yfactors = [], []
for k, sp in self.subplots.items():
el = overlay.data.get(k)
if el is not None:
xfs, yfs = sp._get_factors(el)
xfactors.append(xfs)
yfactors.append(yfs)
if xfactors:
xfactors = np.concatenate(xfactors)
if yfactors:
yfactors = np.concatenate(yfactors)
return util.unique_array(xfactors), util.unique_array(yfactors)
def initialize_plot(self, ranges=None, plot=None, plots=None):
key = util.wrap_tuple(self.hmap.last_key)
nonempty = [el for el in self.hmap.data.values() if el]
if not nonempty:
raise SkipRendering('All Overlays empty, cannot initialize plot.')
element = nonempty[-1]
ranges = self.compute_ranges(self.hmap, key, ranges)
if plot is None and not self.tabs and not self.batched:
plot = self._init_plot(key, element, ranges=ranges, plots=plots)
self._init_axes(plot)
self.handles['plot'] = plot
if plot and not self.overlaid:
self._update_plot(key, plot, element)
self._update_ranges(element, ranges)
panels = []
for key, subplot in self.subplots.items():
frame = None
if self.tabs:
subplot.overlaid = False
child = subplot.initialize_plot(ranges, plot, plots)
if isinstance(element, CompositeOverlay):
frame = element.get(key, None)
subplot.current_frame = frame
if self.batched:
self.handles['plot'] = child
if self.tabs:
title = subplot._format_title(key, dimensions=False)
if not title:
title = get_tab_title(key, frame, self.hmap.last)
panels.append(Panel(child=child, title=title))
self._merge_tools(subplot)
if self.tabs:
self.handles['plot'] = Tabs(tabs=panels)
elif not self.overlaid:
self._process_legend()
self.drawn = True
self.handles['plots'] = plots
self._update_callbacks(self.handles['plot'])
if 'plot' in self.handles and not self.tabs:
plot = self.handles['plot']
self.handles['xaxis'] = plot.xaxis[0]
self.handles['yaxis'] = plot.yaxis[0]
self.handles['x_range'] = plot.x_range
self.handles['y_range'] = plot.y_range
for cb in self.callbacks:
cb.initialize()
self._execute_hooks(element)
return self.handles['plot']
def update_frame(self, key, ranges=None, element=None):
"""
Update the internal state of the Plot to represent the given
key tuple (where integers represent frames). Returns this
state.
"""
reused = isinstance(self.hmap, DynamicMap) and self.overlaid
if not reused and element is None:
element = self._get_frame(key)
elif element is not None:
self.current_frame = element
self.current_key = key
items = [] if element is None else list(element.data.items())
if isinstance(self.hmap, DynamicMap):
range_obj = element
else:
range_obj = self.hmap
if element is not None:
ranges = self.compute_ranges(range_obj, key, ranges)
if element and not self.overlaid and not self.tabs and not self.batched:
self._update_ranges(element, ranges)
# Determine which stream (if any) triggered the update
triggering = [stream for stream in self.streams if stream._triggering]
for k, subplot in self.subplots.items():
el = None
# If in Dynamic mode propagate elements to subplots
if isinstance(self.hmap, DynamicMap) and element:
# In batched mode NdOverlay is passed to subplot directly
if self.batched:
el = element
# If not batched get the Element matching the subplot
elif element is not None:
idx, spec, exact = dynamic_update(self, subplot, k, element, items)
if idx is not None:
_, el = items.pop(idx)
if not exact:
self._update_subplot(subplot, spec)
# Skip updates to subplots when its streams is not one of
# the streams that initiated the update
if triggering and all(s not in triggering for s in subplot.streams):
continue
subplot.update_frame(key, ranges, element=el)
if not self.batched and isinstance(self.hmap, DynamicMap) and items:
init_kwargs = {'plots': self.handles['plots']}
if not self.tabs:
init_kwargs['plot'] = self.handles['plot']
self._create_dynamic_subplots(key, items, ranges, **init_kwargs)
if not self.overlaid and not self.tabs:
self._process_legend()
if element and not self.overlaid and not self.tabs and not self.batched:
self._update_plot(key, self.handles['plot'], element)
self._execute_hooks(element)
| 1 | 21,283 | Minor unrelated fix that snuck in. | holoviz-holoviews | py |